<?xml version="1.0" encoding="utf-8" standalone="yes"?><rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom"><channel><title>Societal Impact | TSG Lab – Technical Safety &amp; Governance Lab</title><link>https://tsglab.github.io/tag/societal-impact/</link><atom:link href="https://tsglab.github.io/tag/societal-impact/index.xml" rel="self" type="application/rss+xml"/><description>Societal Impact</description><generator>Hugo Blox Builder (https://hugoblox.com)</generator><language>en-us</language><lastBuildDate>Sun, 01 Mar 2026 00:00:00 +0000</lastBuildDate><image><url>https://tsglab.github.io/media/logo.svg</url><title>Societal Impact</title><link>https://tsglab.github.io/tag/societal-impact/</link></image><item><title>Token Taxes: Mitigating AGI's Economic Risks</title><link>https://tsglab.github.io/publication/token-taxes-agi-economic-risks/</link><pubDate>Sun, 01 Mar 2026 00:00:00 +0000</pubDate><guid>https://tsglab.github.io/publication/token-taxes-agi-economic-risks/</guid><description/></item><item><title>Emerging Risks from Embodied AI Require Urgent Policy Action</title><link>https://tsglab.github.io/publication/emerging-risks-embodied-ai/</link><pubDate>Mon, 01 Dec 2025 00:00:00 +0000</pubDate><guid>https://tsglab.github.io/publication/emerging-risks-embodied-ai/</guid><description/></item><item><title>Full-Stack Alignment: Co-Aligning AI and Institutions with Thicker Models of Value</title><link>https://tsglab.github.io/publication/full-stack-alignment-institutions/</link><pubDate>Mon, 01 Dec 2025 00:00:00 +0000</pubDate><guid>https://tsglab.github.io/publication/full-stack-alignment-institutions/</guid><description/></item><item><title>Rethinking AI Cultural Alignment</title><link>https://tsglab.github.io/publication/rethinking-ai-cultural-alignment/</link><pubDate>Tue, 01 Apr 2025 00:00:00 +0000</pubDate><guid>https://tsglab.github.io/publication/rethinking-ai-cultural-alignment/</guid><description/></item><item><title>Toward Resisting AI-Enabled Authoritarianism</title><link>https://tsglab.github.io/publication/resisting-ai-authoritarianism/</link><pubDate>Wed, 01 Jan 2025 00:00:00 +0000</pubDate><guid>https://tsglab.github.io/publication/resisting-ai-authoritarianism/</guid><description/></item><item><title>Fairness in AI and Its Long-Term Implications on Society</title><link>https://tsglab.github.io/publication/fairness-ai-long-term-implications/</link><pubDate>Sun, 01 Jan 2023 00:00:00 +0000</pubDate><guid>https://tsglab.github.io/publication/fairness-ai-long-term-implications/</guid><description/></item></channel></rss>