@article{MTMT:36889584, title = {Disappointment or hype? Framing artificial intelligence in the US elite news media}, url = {https://m2.mtmt.hu/api/publication/36889584}, author = {Vicsek, Lilla and Fehér, Katalin and Nguyen, Dennis}, doi = {10.1177/17427665251411490}, journal-iso = {GLOBAL MEDIA COMMUNICATION}, journal = {GLOBAL MEDIA AND COMMUNICATION}, volume = {2026}, unique-id = {36889584}, issn = {1742-7665}, abstract = {How do elite US news outlets frame AI within accounts of hype, disappointment and tempered expectation? This study finds that rather than fueling hype, coverage tends to adopt a cautious tone – emphasizing everyday uses and current impacts over speculative futures. A content analysis of three high-profile US dailies shows more positive tones when only developers or vendors discuss their own AI tools. The findings underscore journalists’ important role in contextualizing and balancing biased narratives and will be a valuable contribution for academics, journalists and policymakers seeking to understand hype, disappointment and expectation dynamics in technology discourse and their broader social implications.}, keywords = {hype; framing; Disappointment; sociology of expectations; News media; Artificial Intelligence}, year = {2026}, eissn = {1742-7673}, pages = {1--24}, orcid-numbers = {Vicsek, Lilla/0000-0002-6034-7503; Fehér, Katalin/0000-0003-3293-0862; Nguyen, Dennis/0000-0001-6982-775X} } @article{MTMT:35176735, title = {Entangled AI. 
artificial intelligence that serves the future}, url = {https://m2.mtmt.hu/api/publication/35176735}, author = {Köves, Alexandra and Fehér, Katalin and Vicsek, Lilla Mária and Fischer, Máté}, doi = {10.1007/s00146-024-02037-4}, journal-iso = {AI SOCIETY}, journal = {AI AND SOCIETY: THE JOURNAL OF HUMAN-CENTERED SYSTEMS AND MACHINE INTELLIGENCE}, volume = {40}, unique-id = {35176735}, issn = {0951-5666}, abstract = {While debate is heating up regarding the development of AI and its perceived impacts on human society, policymaking is struggling to catch up with the demand to exercise some regulatory control over its rapid advancement. This paper aims to introduce the concept of entangled AI that emerged from participatory backcasting research with an AI expert panel. The concept of entanglement has been adapted from quantum physics to effectively capture the envisioned form of artificial intelligence in which a strong interconnectedness between AI, humans, society, and nature is reflected. Entanglement assumes that AI should serve nature, social well-being, justice, and the resilience of this intertwined network simultaneously and promote a dynamic balance among these factors. This approach allows us to understand the pervasive role of this technology and the scope of human agency in its development. The study shows how such concepts seem to transcend the dominant discourses related to expectations, technological determinism, and humanism. An additional aim of this paper is to demonstrate how backcasting can contribute to generating useful understandings of the future of AI and fruitful insights for policymaking. 
© The Author(s) 2024.}, keywords = {expectation; Backcasting; interconnectedness; AI ethics; Artificial Intelligence; Entangled AI}, year = {2025}, eissn = {1435-5655}, pages = {2765--2776}, orcid-numbers = {Köves, Alexandra/0000-0002-4642-156X; Fehér, Katalin/0000-0003-3293-0862} } @book{MTMT:35661472, title = {Generative AI, Media and Society}, url = {https://m2.mtmt.hu/api/publication/35661472}, isbn = {9781032968735}, author = {Fehér, Katalin}, doi = {10.4324/9781003591023}, publisher = {Routledge Publishing; Taylor \& Francis Group}, unique-id = {35661472}, abstract = {This groundbreaking book demystifies generative AI’s transformative impact on media, socio-cultural dynamics, ethics, and policy. Defining generative AI as an evolutionary leap in the development of artificial intelligence, the author examines intricate human-machine interactions and socio-technical dynamics, advocating robust, proactive AI governance to address emerging uncertainties. The book is clearly structured into six key chapters, each exploring distinct aspects of the relationship between artificial intelligence, media, and society. The chapter on "Transformation" examines how machine behavior is reshaping our datafied society, questioning whether data is the new oil, or digital manure. "Generative AI" investigates the models and future impacts of generative AI as a co-intelligence, revisiting the Turing Test and analyzing societal-business impacts. "AI Media" explores the convergence of media and AI, highlighting robot journalism, synthetic content, and the disinformation era and discussing the trend toward high-risk optimism. "Uncertainties" addresses inherent unpredictability vs. strategic foresight, focusing on challenged business models, sustainability concerns, and emotional intelligence factors. "Ethics" analyzes generative morality and dual-use technology, covering trusted AI principles—from misuse to integrative solutions. 
Finally, "Policy" discusses governance, labor market impacts, and the importance of human rights and power dynamics in generative AI. Each chapter also provides summaries of impact projects, reflective art, scholarly questions, and strategic takeaways—extended with a comprehensive glossary. This is an essential resource for scholars, students, policymakers, technologists, ethicists, and AI industry leaders seeking to rapidly understand and address the challenges and opportunities of generative AI and AI media in a cohesive framework.}, year = {2025}, orcid-numbers = {Fehér, Katalin/0000-0003-3293-0862} } @article{MTMT:36170258, title = {Strategic organisational responses to generative AI-driven digital transformation in leading higher education institutions}, url = {https://m2.mtmt.hu/api/publication/36170258}, author = {Géring, Zsuzsanna and Fehér, Katalin and Harmat, Vanda and Tamássy, Réka}, doi = {10.1108/IJOA-09-2024-4850}, journal-iso = {INT J ORGAN ANAL}, journal = {INTERNATIONAL JOURNAL OF ORGANIZATIONAL ANALYSIS}, volume = {33}, unique-id = {36170258}, issn = {1934-8835}, abstract = {Purpose This study aims to explore generative artificial intelligence (AI) as a significant milestone and key driver of digital transformation in higher education, emphasising the urgent need for universities and policymakers to adapt strategies to remain effective, competitive and aligned with the rapidly evolving demands of education and research. Design/methodology/approach This study used qualitative content analysis to examine publicly available strategic documents and statements related to digital transformation from the top 30 ranked universities in the Times Higher Education 2024 Ranking, producing a data set of 98 strategies covering all key organisational domains. 
Findings The collected documents span eight areas, from teaching-learning strategies to information technology (IT) strategies and committees, with substantial variation among universities in scope, content and strategic combinations. A significant result is that teaching-learning offices and development centres serve as bridges between institutional strategies and grassroots innovation, absorbing top-down and bottom-up knowledge and fostering adaptive responses to generative AI-driven transformation. Practical implications By showcasing the best practices, this paper provides practical guidance for proactive institutional development, supporting university leadership in strategy-building and aiding national and international policymakers in shaping forward-looking frameworks. Originality/value Understanding and defining generative AI as a milestone in digital transformation is crucial for universities. Proactive adaptation to emerging trends and best practices enables institutions to navigate these challenges effectively.}, keywords = {Higher education; digital transformation; Futures of higher education; generative AI; Top universities; organisational strategy}, year = {2025}, eissn = {1758-8561}, pages = {132--152}, orcid-numbers = {Géring, Zsuzsanna/0000-0002-8242-950X; Fehér, Katalin/0000-0003-3293-0862; Harmat, Vanda/0000-0002-8573-2627} } @misc{MTMT:36340559, title = {Strategic foresight in AI governance. the Archer model for responsible AI futures}, url = {https://m2.mtmt.hu/api/publication/36340559}, author = {Fehér, Katalin and McKelvey, Fenwick and Rodgers, Lindsay and Marinov, Robert Nicholas}, unique-id = {36340559}, abstract = {This article presents a strategic foresight roadmap for responsible AI, connecting theory with practical governance in the real world. 
We introduce the Archer Model—a foresight tool co-developed with responsible AI experts and practitioners from diverse sectors—that combines survey-based horizon scanning and consensus mapping to support inclusive, practice-driven decision-making. Developed within a transatlantic knowledge hub linking Canadian and EU foresight approaches, the model enables participatory, cognitively grounded exploration of AI governance futures. The pilot test involved cross-sectoral actors and revealed key tensions such as the commodification of governance, cross-scale sociotechnical disruptions, and fragmented global AI norms. Accordingly, the model facilitates the co-creation of plural, situated futures that reflect diverse values and priorities. The Archer Model contributes to foresight studies by operationalizing anticipatory capacities at the intersection of AI, ethics, and policy, offering a structured yet flexible tool for informing responsible governance and AI policy in complex and dynamic contexts.}, keywords = {strategic foresight; Horizon scanning; AI policy; Responsible AI; AI Governance; Archer Model}, year = {2025}, pages = {1--18}, orcid-numbers = {Fehér, Katalin/0000-0003-3293-0862} } @misc{MTMT:36340566, title = {AI-driven media \& synthetic knowledge: rethinking society in generative futures. exploring generative AI futures through an experimental PhD seminar}, url = {https://m2.mtmt.hu/api/publication/36340566}, author = {Fehér, Katalin}, unique-id = {36340566}, abstract = {Generative AI is not just a technological leap -- it is a societal stress test, reshaping trust, identity, equity, and authorship. This exploratory PhD seminar examined emerging academic trends in AI-driven synthetic media and worlds, emphasizing ethical risks and societal implications. In Part One, students explored core concepts such as generative AI, fake media, and synthetic knowledge production. In Part Two, they critically engaged with these challenges, producing actionable insights. 
The two-part format enabled deep reflection on power, responsibility, and education in AI-augmented communication. Outcomes offer practical guidance for educators, researchers, and institutions committed to fostering more responsible, human-centered AI use in media and society.}, keywords = {AI Education; Responsible AI; generative AI; AI Society; AI futures; AI culture}, year = {2025}, pages = {1--5}, orcid-numbers = {Fehér, Katalin/0000-0003-3293-0862} } @misc{MTMT:36340572, title = {Generative knowledge production pipeline driven by academic influencers}, url = {https://m2.mtmt.hu/api/publication/36340572}, author = {Fehér, Katalin and Demeter, Marton}, unique-id = {36340572}, abstract = {Generative AI transforms knowledge production, validation, and dissemination, raising academic integrity and credibility concerns. This study examines 53 academic influencer videos that reached 5.3 million viewers to identify an emerging, structured, implementation-ready pipeline balancing originality, ethical compliance, and human-AI collaboration despite the disruptive impacts. Findings highlight generative AI’s potential to automate publication workflows and democratize participation in knowledge production while challenging traditional scientific norms. Academic influencers emerge as key intermediaries in this paradigm shift, connecting bottom-up practices with institutional policies to improve adaptability. Accordingly, the study proposes a generative publication production pipeline and a policy framework for co-intelligence adaptation and reinforcing credibility-centered standards in AI-powered research. These insights support scholars, educators, and policymakers in understanding AI’s transformative impact by advocating responsible and innovation-driven knowledge production. 
Additionally, they reveal pathways for automating best practices, optimizing scholarly workflows, and fostering creativity in academic research and publication.}, keywords = {Social media; knowledge production; Influencers; Academic integrity; Policy implications; ChatGPT; generative AI; Academic policy}, year = {2025}, pages = {1--15}, orcid-numbers = {Fehér, Katalin/0000-0003-3293-0862} } @misc{MTMT:36341679, title = {Are the many promises of AI to our society, e.g. of increased efficiency and freedom going to become true?. conference lecture}, url = {https://m2.mtmt.hu/api/publication/36341679}, author = {Fehér, Katalin}, unique-id = {36341679}, year = {2025}, orcid-numbers = {Fehér, Katalin/0000-0003-3293-0862} } @inproceedings{MTMT:36347625, title = {AI media \& Generative Futures}, url = {https://m2.mtmt.hu/api/publication/36347625}, author = {Fehér, Katalin}, booktitle = {Mediatization and society: truth, trust, technology}, unique-id = {36347625}, abstract = {The accelerating integration of Artificial Intelligence (AI) into media ecosystems is transforming the epistemic, cultural, and political infrastructures of information creation and dissemination. The conference presentation synthesizes foresight research findings on generative AI and generative mediatization, exploring how AI-driven content production, algorithmic curation, and synthetic personalization are transforming the public sphere. Drawing on foresight methodologies and trend analysis, the goal is to identify emerging trajectories where generative models function not merely as technological tools but as agencies in media ecologies—reshaping the dynamics of trust, identity, and democratic deliberation. 
The results highlight three interconnected futures: (1) Synthetic abundance, characterized by hyper-automated creativity and personalized cultural flows; (2) Generative platform governance, where AI-mediated regulation, transparency, and civic oversight redefine media accountability; and (3) Post-mediatized societies, in which human and non-human agents co-produce reality narratives across immersive, multimodal platforms. These scenarios underscore critical tensions between innovation and disinformation, openness and control, creativity and commodification—revealing how generative mediatization extends beyond content automation toward structural transformations in information circulation, content production and cultural authority. The goal is to present a proactive approach and research agenda integrating strategic foresight with critical media studies to anticipate unintended consequences and support democratic resilience. By bridging computational futures with social imaginaries, the study positions generative AI as both a catalyst and a disruptor of 21st-century mediatization processes, demanding interdisciplinary scrutiny. Ultimately, the talk invites academia, industry, and policymakers to co-create ethical, sustainable, and human-centered pathways in navigating the generative turn of AI societies.}, year = {2025}, pages = {21--22}, orcid-numbers = {Fehér, Katalin/0000-0003-3293-0862} } @article{MTMT:34476794, title = {Modeling AI Trust for 2050. 
perspectives from media and info-communication experts}, url = {https://m2.mtmt.hu/api/publication/34476794}, author = {Fehér, Katalin and Vicsek, Lilla Mária and Deuze, Mark}, doi = {10.1007/s00146-023-01827-6}, journal-iso = {AI SOCIETY}, journal = {AI AND SOCIETY: THE JOURNAL OF HUMAN-CENTERED SYSTEMS AND MACHINE INTELLIGENCE}, volume = {39}, unique-id = {34476794}, issn = {0951-5666}, abstract = {The study explores the future of AI-driven media and info-communication as envisioned by experts from all world regions, defining relevant terminology and expectations for 2050. Participants engaged in a 4-week series of surveys, questioning their definitions and projections about AI for the field of media and communication. Their expectations predict universal access to democratically available, automated, personalized and unbiased information determined by trusted narratives, recolonization of information technology and the demystification of the media process. These experts, as technology ambassadors, advocate AI-to-AI solutions to mitigate technology-driven misuse and misinformation. The optimistic scenarios shift responsibility to future generations, relying on AI-driven solutions and finding inspiration in nature. Their present-based forecasts could be construed as being indicative of professional near-sightedness and cognitive dissonance. Visualizing our findings into a Glasses Model of AI Trust, the study contributes to key debates regarding AI policy, developmental trajectories, and academic research in media and info-communication fields.}, year = {2024}, eissn = {1435-5655}, pages = {2933--2946}, orcid-numbers = {Fehér, Katalin/0000-0003-3293-0862; Vicsek, Lilla Mária/0000-0002-6034-7503; Deuze, Mark/0000-0002-1986-5050} }