@article{electronics14193872,
  author         = {Malloy, Tailia and Bernardy, Laura and El Bachyr, Omar and Philippy, Fred and Samhi, Jordan and Klein, Jacques and Bissyand{\'e}, Tegawend{\'e} F.},
  title          = {You Got Phished! Analyzing How to Provide Useful Feedback in Anti-Phishing Training with {LLM} Teacher Models},
  journal        = {Electronics},
  year           = {2025},
  volume         = {14},
  number         = {19},
  article-number = {3872},
  issn           = {2079-9292},
  doi            = {10.3390/electronics14193872},
  url            = {https://www.mdpi.com/2079-9292/14/19/3872},
  abstract       = {Training users to correctly identify potential security threats like social engineering attacks such as phishing emails is a crucial aspect of cybersecurity. One challenge in this training is providing useful educational feedback to maximize student learning outcomes. Large Language Models (LLMs) have recently been applied to wider and wider applications, including domain-specific education and training. These applications of LLMs have many benefits, such as cost and ease of access, but there are important potential biases and constraints within LLMs. These may make LLMs worse teachers for important and vulnerable subpopulations including the elderly and those with less technical knowledge. In this work we present a dataset of LLM embeddings of conversations between human students and LLM teachers in an anti-phishing setting. We apply these embeddings onto an analysis of human--LLM educational conversations to develop specific and actionable targets for LLM training, fine-tuning, and evaluation that can potentially improve the educational quality of LLM teachers and ameliorate potential biases that may disproportionally impact specific subpopulations. Specifically, we suggest that LLM teaching platforms either speak generally or mention specific quotations of emails depending on user demographics and behaviors, and to steer conversations away from an over focus on the current example.},
}