<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
  <channel>
    <title>M. Geva | TSG Lab – Technical Safety &amp; Governance Lab</title>
    <link>https://tsglab.github.io/author/m.-geva/</link>
    <atom:link href="https://tsglab.github.io/author/m.-geva/index.xml" rel="self" type="application/rss+xml"/>
    <description>M. Geva</description>
    <generator>Hugo Blox Builder (https://hugoblox.com)</generator>
    <language>en-us</language>
    <lastBuildDate>Sat, 01 Nov 2025 00:00:00 +0000</lastBuildDate>
    <image>
      <url>https://tsglab.github.io/media/logo.svg</url>
      <title>M. Geva</title>
      <link>https://tsglab.github.io/author/m.-geva/</link>
    </image>
    <item>
      <title>Precise In-Parameter Concept Erasure in Large Language Models</title>
      <link>https://tsglab.github.io/publication/precise-concept-erasure-llms/</link>
      <pubDate>Sat, 01 Nov 2025 00:00:00 +0000</pubDate>
      <guid>https://tsglab.github.io/publication/precise-concept-erasure-llms/</guid>
      <description/>
    </item>
    <item>
      <title>Towards Interpreting Visual Information Processing in Vision-Language Models</title>
      <link>https://tsglab.github.io/publication/visual-information-processing-vlms/</link>
      <pubDate>Tue, 01 Apr 2025 00:00:00 +0000</pubDate>
      <guid>https://tsglab.github.io/publication/visual-information-processing-vlms/</guid>
      <description/>
    </item>
    <item>
      <title>Open Problems in Machine Unlearning for AI Safety</title>
      <link>https://tsglab.github.io/publication/open-problems-machine-unlearning/</link>
      <pubDate>Wed, 01 Jan 2025 00:00:00 +0000</pubDate>
      <guid>https://tsglab.github.io/publication/open-problems-machine-unlearning/</guid>
      <description/>
    </item>
    <item>
      <title>Mechanistic Interpretability Workshop at ICML 2024</title>
      <link>https://tsglab.github.io/publication/mechanistic-interpretability-workshop-icml-2024/</link>
      <pubDate>Mon, 01 Jul 2024 00:00:00 +0000</pubDate>
      <guid>https://tsglab.github.io/publication/mechanistic-interpretability-workshop-icml-2024/</guid>
      <description/>
    </item>
  </channel>
</rss>