From b7f94a7679397314c504352c79f285830148ec96 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 27 Apr 2025 01:41:31 +0100 Subject: [PATCH] chore: update roadmap content json (#8564) Co-authored-by: kamranahmedse <4921183+kamranahmedse@users.noreply.github.com> --- public/roadmap-content/ai-red-teaming.json | 1382 ++++++++++++++++++-- public/roadmap-content/cyber-security.json | 88 +- 2 files changed, 1317 insertions(+), 153 deletions(-) diff --git a/public/roadmap-content/ai-red-teaming.json b/public/roadmap-content/ai-red-teaming.json index ad730b55c..bb15d8009 100644 --- a/public/roadmap-content/ai-red-teaming.json +++ b/public/roadmap-content/ai-red-teaming.json @@ -1,322 +1,1450 @@ { "R9DQNc0AyAQ2HLpP4HOk6": { "title": "AI Security Fundamentals", - "description": "This covers the foundational concepts essential for AI Red Teaming, bridging traditional cybersecurity with AI-specific threats. An AI Red Teamer must understand common vulnerabilities in ML models (like evasion or poisoning), security risks in the AI lifecycle (from data collection to deployment), and how AI capabilities can be misused. This knowledge forms the basis for designing effective tests against AI systems.\n\nLearn more from the following resources:\n\n* [@article@Building Trustworthy AI: Contending with Data Poisoning - Nisos](https://nisos.com/research/building-trustworthy-ai/) - Explores data poisoning threats in AI/ML.\n* [@article@What Is Adversarial AI in Machine Learning? - Palo Alto Networks](https://www.paloaltonetworks.co.uk/cyberpedia/what-are-adversarial-attacks-on-AI-Machine-Learning) - Overview of adversarial attacks targeting AI/ML systems.\n* [@course@AI Security | Coursera](https://www.coursera.org/learn/ai-security) - Foundational course covering AI risks, governance, security, and privacy.", - "links": [] + "description": "This covers the foundational concepts essential for AI Red Teaming, bridging traditional cybersecurity with AI-specific threats. An AI Red Teamer must understand common vulnerabilities in ML models (like evasion or poisoning), security risks in the AI lifecycle (from data collection to deployment), and how AI capabilities can be misused. This knowledge forms the basis for designing effective tests against AI systems.\n\nLearn more from the following resources:", + "links": [ + { + "title": "AI Security | Coursera", + "url": "https://www.coursera.org/learn/ai-security", + "type": "course" + }, + { + "title": "Building Trustworthy AI: Contending with Data Poisoning", + "url": "https://nisos.com/research/building-trustworthy-ai/", + "type": "article" + }, + { + "title": "What Is Adversarial AI in Machine Learning?", + "url": "https://www.paloaltonetworks.co.uk/cyberpedia/what-are-adversarial-attacks-on-AI-Machine-Learning", + "type": "article" + } + ] }, "fNTb9y3zs1HPYclAmu_Wv": { "title": "Why Red Team AI Systems?", - "description": "AI systems introduce novel risks beyond traditional software, such as emergent unintended capabilities, complex failure modes, susceptibility to subtle data manipulations, and potential for large-scale misuse (e.g., generating disinformation). AI Red Teaming is necessary because standard testing methods often fail to uncover these unique AI vulnerabilities. 
It provides critical, adversary-focused insights needed to build genuinely safe, reliable, and secure AI before deployment.\n\nLearn more from the following resources:\n\n@article@What's the Difference Between Traditional Red-Teaming and AI Red-Teaming? - Cranium AI - Compares objectives, techniques, expertise, and attack vectors to highlight why AI needs specialized red teaming. @article@What is AI Red Teaming? The Complete Guide - Mindgard - Details specific use cases like identifying bias, ensuring resilience against AI-specific attacks, testing data privacy, and aligning with regulations. @article@The Expanding Role of Red Teaming in Defending AI Systems - Protect AI - Explains why the dynamic, adaptive, and often opaque nature of AI necessitates red teaming beyond traditional approaches. @article@How red teaming helps safeguard the infrastructure behind AI models - IBM - Focuses on unique AI risks like model IP theft, open-source vulnerabilities, and excessive agency that red teaming addresses.", + "description": "AI systems introduce novel risks beyond traditional software, such as emergent unintended capabilities, complex failure modes, susceptibility to subtle data manipulations, and potential for large-scale misuse (e.g., generating disinformation). AI Red Teaming is necessary because standard testing methods often fail to uncover these unique AI vulnerabilities. It provides critical, adversary-focused insights needed to build genuinely safe, reliable, and secure AI before deployment.", "links": [] }, "HFJIYcI16OMyM77fAw9af": { "title": "Introduction", - "description": "AI Red Teaming is the practice of simulating adversarial attacks against AI systems to proactively identify vulnerabilities, potential misuse scenarios, and failure modes before malicious actors do. Distinct from traditional cybersecurity red teaming, it focuses on the unique attack surfaces of AI models, such as prompt manipulation, data poisoning, model extraction, and evasion techniques. The primary goal for an AI Red Teamer is to test the robustness, safety, alignment, and fairness of AI systems, particularly complex ones like LLMs, by adopting an attacker's mindset to uncover hidden flaws and provide actionable feedback for improvement.\n\nLearn more from the following resources:\n\n* [@article@A Guide to AI Red Teaming - HiddenLayer](https://hiddenlayer.com/innovation-hub/a-guide-to-ai-red-teaming/) - Discusses AI red teaming concepts and contrasts with traditional methods.\n* [@article@What is AI Red Teaming? (Learn Prompting)](https://learnprompting.org/blog/what-is-ai-red-teaming) - Overview of AI red teaming, its history, and key challenges.\n* [@article@What is AI Red Teaming? The Complete Guide - Mindgard](https://mindgard.ai/blog/what-is-ai-red-teaming) - Guide covering AI red teaming processes, use cases, and benefits.\n* [@podcast@Red Team Podcast | AI Red Teaming Insights & Defense Strategies - Mindgard](https://mindgard.ai/podcast/red-team) - Podcast series covering AI red teaming trends and strategies.", - "links": [] + "description": "AI Red Teaming is the practice of simulating adversarial attacks against AI systems to proactively identify vulnerabilities, potential misuse scenarios, and failure modes before malicious actors do. Distinct from traditional cybersecurity red teaming, it focuses on the unique attack surfaces of AI models, such as prompt manipulation, data poisoning, model extraction, and evasion techniques. 
The primary goal for an AI Red Teamer is to test the robustness, safety, alignment, and fairness of AI systems, particularly complex ones like LLMs, by adopting an attacker's mindset to uncover hidden flaws and provide actionable feedback for improvement.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Red Team Podcast - AI Red Teaming Insights & Defense Strategies", + "url": "https://mindgard.ai/podcast/red-team", + "type": "podcast" + }, + { + "title": "A Guide to AI Red Teaming", + "url": "https://hiddenlayer.com/innovation-hub/a-guide-to-ai-red-teaming/", + "type": "article" + }, + { + "title": "What is AI Red Teaming? (Learn Prompting)", + "url": "https://learnprompting.org/blog/what-is-ai-red-teaming", + "type": "article" + }, + { + "title": "What is AI Red Teaming? The Complete Guide", + "url": "https://mindgard.ai/blog/what-is-ai-red-teaming", + "type": "article" + } + ] }, "1gyuEV519LjN-KpROoVwv": { "title": "Ethical Considerations", - "description": "Ethical conduct is crucial for AI Red Teamers. While simulating attacks, they must operate within strict legal and ethical boundaries defined by rules of engagement, focusing on improving safety without causing real harm or enabling misuse. This includes respecting data privacy, obtaining consent where necessary, responsibly disclosing vulnerabilities, and carefully considering the potential negative impacts of both the testing process and the AI capabilities being tested. The goal is discovery for defense, not exploitation.\n\nLearn more from the following resources:\n\n* [@article@Red-Teaming in AI Testing: Stress Testing - Labelvisor](https://www.labelvisor.com/red-teaming-abstract-competitive-testing-data-selection/) - Mentions balancing attack simulation with ethical constraints.\n* [@article@Responsible AI assessment - Responsible AI | Coursera](https://www.coursera.org/learn/ai-security) (Module within AI Security course)\n* [@guide@Responsible AI Principles (Microsoft)](https://www.microsoft.com/en-us/ai/responsible-ai) - Example of corporate responsible AI guidelines influencing ethical testing.\n* [@video@Questions to Guide AI Red-Teaming (CMU SEI)](https://resources.sei.cmu.edu/library/asset-view.cfm?assetid=928382) - Key questions and ethical guidelines for AI red teaming activities (video talk).", - "links": [] + "description": "Ethical conduct is crucial for AI Red Teamers. While simulating attacks, they must operate within strict legal and ethical boundaries defined by rules of engagement, focusing on improving safety without causing real harm or enabling misuse. This includes respecting data privacy, obtaining consent where necessary, responsibly disclosing vulnerabilities, and carefully considering the potential negative impacts of both the testing process and the AI capabilities being tested. 
The goal is discovery for defense, not exploitation.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Red-Teaming in AI Testing: Stress Testing", + "url": "https://www.labelvisor.com/red-teaming-abstract-competitive-testing-data-selection/", + "type": "article" + }, + { + "title": "Responsible AI assessment - Responsible AI | Coursera", + "url": "https://www.coursera.org/learn/ai-security", + "type": "article" + }, + { + "title": "Responsible AI Principles (Microsoft)", + "url": "https://www.microsoft.com/en-us/ai/responsible-ai", + "type": "article" + }, + { + "title": "Questions to Guide AI Red-Teaming (CMU SEI)", + "url": "https://resources.sei.cmu.edu/library/asset-view.cfm?assetid=928382", + "type": "video" + } + ] }, "Irkc9DgBfqSn72WaJqXEt": { "title": "Role of Red Teams", - "description": "The role of an AI Red Team is to rigorously challenge AI systems from an adversarial perspective. They design and execute tests to uncover vulnerabilities related to the model's logic, data dependencies, prompt interfaces, safety alignments, and interactions with surrounding infrastructure. They provide detailed reports on findings, potential impacts, and remediation advice, acting as a critical feedback loop for AI developers and stakeholders to improve system security and trustworthiness before and after deployment.\n\nLearn more from the following resources:\n\n* [@article@The Complete Guide to Red Teaming: Process, Benefits & More - Mindgard AI](https://mindgard.ai/blog/red-teaming) - Discusses the purpose and process of red teaming.\n* [@article@The Complete Red Teaming Checklist \\[PDF\\]: 5 Key Steps - Mindgard AI](https://mindgard.ai/blog/red-teaming-checklist) - Outlines typical red team roles and responsibilities.\n* [@article@What is AI Red Teaming? - Learn Prompting](https://learnprompting.org/docs/category/ai-red-teaming) - Defines the role and activities.", - "links": [] + "description": "The role of an AI Red Team is to rigorously challenge AI systems from an adversarial perspective. They design and execute tests to uncover vulnerabilities related to the model's logic, data dependencies, prompt interfaces, safety alignments, and interactions with surrounding infrastructure. They provide detailed reports on findings, potential impacts, and remediation advice, acting as a critical feedback loop for AI developers and stakeholders to improve system security and trustworthiness before and after deployment.\n\nLearn more from the following resources:", + "links": [ + { + "title": "The Complete Guide to Red Teaming: Process, Benefits & More", + "url": "https://mindgard.ai/blog/red-teaming", + "type": "article" + }, + { + "title": "The Complete Red Teaming Checklist [PDF]: 5 Key Steps - Mindgard AI", + "url": "https://mindgard.ai/blog/red-teaming-checklist", + "type": "article" + }, + { + "title": "What is AI Red Teaming? - Learn Prompting", + "url": "https://learnprompting.org/docs/category/ai-red-teaming", + "type": "article" + } + ] }, "NvOJIv36Utpm7_kOZyr79": { "title": "Supervised Learning", - "description": "AI Red Teamers analyze systems built using supervised learning to probe for vulnerabilities like susceptibility to adversarial examples designed to cause misclassification, sensitivity to data distribution shifts, or potential for data leakage related to the labeled training data. 
Understanding how these models learn input-output mappings is key to devising tests that challenge their learned boundaries.\n\nLearn more from the following resources:\n\n* [@article@AI and cybersecurity: a love-hate revolution - Alter Solutions](https://www.alter-solutions.com/en-us/articles/ai-cybersecurity-love-hate-revolution) - Discusses supervised learning use in vulnerability scanning and potential exploits.\n* [@article@What Is Supervised Learning? | IBM](https://www.ibm.com/think/topics/supervised-learning) - Foundational explanation.\n* [@article@What is Supervised Learning? | Google Cloud](https://cloud.google.com/discover/what-is-supervised-learning) - Foundational explanation.", - "links": [] + "description": "AI Red Teamers analyze systems built using supervised learning to probe for vulnerabilities like susceptibility to adversarial examples designed to cause misclassification, sensitivity to data distribution shifts, or potential for data leakage related to the labeled training data. Understanding how these models learn input-output mappings is key to devising tests that challenge their learned boundaries.\n\nLearn more from the following resources:", + "links": [ + { + "title": "AI and cybersecurity: a love-hate revolution", + "url": "https://www.alter-solutions.com/en-us/articles/ai-cybersecurity-love-hate-revolution", + "type": "article" + }, + { + "title": "What Is Supervised Learning?", + "url": "https://www.ibm.com/think/topics/supervised-learning", + "type": "article" + }, + { + "title": "What is Supervised Learning?", + "url": "https://cloud.google.com/discover/what-is-supervised-learning", + "type": "article" + } + ] }, "ZC0yKsu-CJC-LZKKo2pLD": { "title": "Unsupervised Learning", - "description": "When red teaming AI systems using unsupervised learning (e.g., clustering algorithms), focus areas include assessing whether the discovered patterns reveal sensitive information, if the model can be manipulated to group data incorrectly, or if dimensionality reduction techniques obscure security-relevant features. Understanding these models helps identify risks associated with pattern discovery on unlabeled data.\n\nLearn more from the following resources:\n\n* [@article@How Unsupervised Learning Works with Examples - Coursera](https://www.coursera.org/articles/unsupervised-learning) - Foundational explanation with examples.\n* [@article@Supervised vs. Unsupervised Learning: Which Approach is Best? - DigitalOcean](https://www.digitalocean.com/resources/articles/supervised-vs-unsupervised-learning) - Contrasts learning types, relevant for understanding different attack surfaces.", - "links": [] + "description": "When red teaming AI systems using unsupervised learning (e.g., clustering algorithms), focus areas include assessing whether the discovered patterns reveal sensitive information, if the model can be manipulated to group data incorrectly, or if dimensionality reduction techniques obscure security-relevant features. Understanding these models helps identify risks associated with pattern discovery on unlabeled data.\n\nLearn more from the following resources:", + "links": [ + { + "title": "How Unsupervised Learning Works with Examples", + "url": "https://www.coursera.org/articles/unsupervised-learning", + "type": "article" + }, + { + "title": "Supervised vs. 
Unsupervised Learning: Which Approach is Best?", + "url": "https://www.digitalocean.com/resources/articles/supervised-vs-unsupervised-learning", + "type": "article" + } + ] }, "Xqzc4mOKsVzwaUxLGjHya": { "title": "Reinforcement Learning", - "description": "Red teaming RL-based AI systems involves testing for vulnerabilities such as reward hacking (exploiting the reward function to induce unintended behavior), unsafe exploration (agent takes harmful actions during learning), or susceptibility to adversarial perturbations in the environment's state. Understanding the agent's policy and value functions is crucial for designing effective tests against RL agents.\n\nLearn more from the following resources:\n\n* [@article@Best Resources to Learn Reinforcement Learning - Towards Data Science](https://towardsdatascience.com/best-free-courses-and-resources-to-learn-reinforcement-learning-ed6633608cb2/) - Curated list of RL learning resources.\n* [@article@What is reinforcement learning? - Blog - York Online Masters degrees](https://online.york.ac.uk/resources/what-is-reinforcement-learning/) - Foundational explanation.\n* [@course@Deep Reinforcement Learning Course by HuggingFace](https://huggingface.co/learn/deep-rl-course/unit0/introduction) - Comprehensive free course on Deep RL.\n* [@paper@Diverse and Effective Red Teaming with Auto-generated Rewards and Multi-step Reinforcement Learning - arXiv](https://arxiv.org/html/2412.18693v1) - Research on using RL for red teaming and generating attacks.", - "links": [] + "description": "Red teaming RL-based AI systems involves testing for vulnerabilities such as reward hacking (exploiting the reward function to induce unintended behavior), unsafe exploration (agent takes harmful actions during learning), or susceptibility to adversarial perturbations in the environment's state. Understanding the agent's policy and value functions is crucial for designing effective tests against RL agents.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Deep Reinforcement Learning Course by HuggingFace", + "url": "https://huggingface.co/learn/deep-rl-course/unit0/introduction", + "type": "course" + }, + { + "title": "Resources to Learn Reinforcement Learning", + "url": "https://towardsdatascience.com/best-free-courses-and-resources-to-learn-reinforcement-learning-ed6633608cb2/", + "type": "article" + }, + { + "title": "What is reinforcement learning?", + "url": "https://online.york.ac.uk/resources/what-is-reinforcement-learning/", + "type": "article" + }, + { + "title": "Diverse and Effective Red Teaming with Auto-generated Rewards and Multi-step Reinforcement Learning", + "url": "https://arxiv.org/html/2412.18693v1", + "type": "article" + } + ] }, "RuKzVhd1nZphCrlW1wZGL": { "title": "Neural Networks", - "description": "Understanding neural network architectures (layers, nodes, activation functions) is vital for AI Red Teamers. This knowledge allows for targeted testing, such as crafting adversarial examples that exploit specific activation functions or identifying potential vulnerabilities related to network depth or connectivity. 
It provides insight into the 'black box' for more effective white/grey-box testing.\n\nLearn more from the following resources:\n\n* [@guide@Neural Networks Explained: A Beginner's Guide - SkillCamper](https://www.skillcamper.com/blog/neural-networks-explained-a-beginners-guide) - Foundational guide.\n* [@guide@Neural networks | Machine Learning - Google for Developers](https://developers.google.com/machine-learning/crash-course/neural-networks) - Google's explanation within their ML crash course.\n* [@paper@Red Teaming with Artificial Intelligence-Driven Cyberattacks: A Scoping Review - arXiv](https://arxiv.org/html/2503.19626) - Review discussing AI methods like neural networks used in red teaming simulations.", - "links": [] + "description": "Understanding neural network architectures (layers, nodes, activation functions) is vital for AI Red Teamers. This knowledge allows for targeted testing, such as crafting adversarial examples that exploit specific activation functions or identifying potential vulnerabilities related to network depth or connectivity. It provides insight into the 'black box' for more effective white/grey-box testing.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Neural Networks Explained: A Beginner's Guide", + "url": "https://www.skillcamper.com/blog/neural-networks-explained-a-beginners-guide", + "type": "article" + }, + { + "title": "Neural networks | Machine Learning", + "url": "https://developers.google.com/machine-learning/crash-course/neural-networks", + "type": "article" + }, + { + "title": "Red Teaming with Artificial Intelligence-Driven Cyberattacks: A Scoping Review", + "url": "https://arxiv.org/html/2503.19626", + "type": "article" + } + ] }, "3XJ-g0KvHP75U18mxCqgw": { "title": "Generative Models", - "description": "AI Red Teamers focus heavily on generative models (like GANs and LLMs) due to their widespread use and unique risks. Understanding how they generate content is key to testing for issues like generating harmful/biased outputs, deepfakes, prompt injection vulnerabilities, or leaking sensitive information from their vast training data.\n\nLearn more from the following resources:\n\n* [@article@An Introduction to Generative Models | MongoDB](https://www.mongodb.com/resources/basics/artificial-intelligence/generative-models) - Explains basics and contrasts with discriminative models.\n* [@course@Generative AI for Beginners - Microsoft Open Source](https://microsoft.github.io/generative-ai-for-beginners/) - Free course covering fundamentals.\n* [@guide@Generative AI beginner's guide | Generative AI on Vertex AI - Google Cloud](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/overview) - Overview covering generative AI concepts and Google's platform context.", - "links": [] + "description": "AI Red Teamers focus heavily on generative models (like GANs and LLMs) due to their widespread use and unique risks. 
Understanding how they generate content is key to testing for issues like generating harmful/biased outputs, deepfakes, prompt injection vulnerabilities, or leaking sensitive information from their vast training data.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Generative AI for Beginners", + "url": "https://microsoft.github.io/generative-ai-for-beginners/", + "type": "course" + }, + { + "title": "An Introduction to Generative Models", + "url": "https://www.mongodb.com/resources/basics/artificial-intelligence/generative-models", + "type": "article" + }, + { + "title": "Generative AI beginner's guide", + "url": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/overview", + "type": "article" + } + ] }, "8K-wCn2cLc7Vs_V4sC3sE": { "title": "Large Language Models", - "description": "LLMs are a primary target for AI Red Teaming. Understanding their architecture (often Transformer-based), training processes (pre-training, fine-tuning), and capabilities (text generation, summarization, Q&A) is essential for identifying vulnerabilities like prompt injection, jailbreaking, data regurgitation, and emergent harmful behaviors specific to these large-scale models.\n\nLearn more from the following resources:\n\n* [@article@What is an LLM (large language model)? - Cloudflare](https://www.cloudflare.com/learning/ai/what-is-large-language-model/) - Concise explanation from Cloudflare.\n* [@guide@Introduction to Large Language Models - Learn Prompting](https://learnprompting.org/docs/intro_to_llms) - Learn Prompting's introduction.\n* [@guide@What Are Large Language Models? A Beginner's Guide for 2025 - KDnuggets](https://www.kdnuggets.com/large-language-models-beginners-guide-2025) - Overview of LLMs, how they work, strengths, and limitations.", - "links": [] + "description": "LLMs are a primary target for AI Red Teaming. Understanding their architecture (often Transformer-based), training processes (pre-training, fine-tuning), and capabilities (text generation, summarization, Q&A) is essential for identifying vulnerabilities like prompt injection, jailbreaking, data regurgitation, and emergent harmful behaviors specific to these large-scale models.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is an LLM (large language model)?", + "url": "https://www.cloudflare.com/learning/ai/what-is-large-language-model/", + "type": "article" + }, + { + "title": "Introduction to LLMs - Learn Prompting", + "url": "https://learnprompting.org/docs/intro_to_llms", + "type": "article" + }, + { + "title": "What Are Large Language Models? A Beginner's Guide for 2025", + "url": "https://www.kdnuggets.com/large-language-models-beginners-guide-2025", + "type": "article" + } + ] }, "gx4KaFqKgJX9n9_ZGMqlZ": { "title": "Prompt Engineering", - "description": "For AI Red Teamers, prompt engineering is both a tool and a target. It's a tool for crafting inputs to test model boundaries and vulnerabilities (e.g., creating jailbreak prompts). It's a target because understanding how prompts influence LLMs is key to identifying prompt injection vulnerabilities and designing defenses. 
Mastering prompt design is fundamental to effective LLM red teaming.\n\nLearn more from the following resources:\n\n* [@article@Introduction to Prompt Engineering - Datacamp](https://www.datacamp.com/tutorial/introduction-prompt-engineering) - Tutorial covering basics.\n* [@article@System Prompts - InjectPrompt](https://www.injectprompt.com/t/system-prompts) - Look at the system prompts of flagship LLMs.\n* [@course@Introduction to Prompt Engineering - Learn Prompting](https://learnprompting.org/courses/intro-to-prompt-engineering) - Foundational course from Learn Prompting.\n* [@guide@Prompt Engineering Guide - Learn Prompting](https://learnprompting.org/docs/prompt-engineering) - Comprehensive guide from Learn Prompting.\n* [@guide@The Ultimate Guide to Red Teaming LLMs and Adversarial Prompts (Kili Technology)](https://kili-technology.com/large-language-models-llms/red-teaming-llms-and-adversarial-prompts) - Connects prompt engineering directly to LLM red teaming concepts.", - "links": [] + "description": "For AI Red Teamers, prompt engineering is both a tool and a target. It's a tool for crafting inputs to test model boundaries and vulnerabilities (e.g., creating jailbreak prompts). It's a target because understanding how prompts influence LLMs is key to identifying prompt injection vulnerabilities and designing defenses. Mastering prompt design is fundamental to effective LLM red teaming.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Introduction to Prompt Engineering", + "url": "https://learnprompting.org/courses/intro-to-prompt-engineering", + "type": "course" + }, + { + "title": "Introduction to Prompt Engineering", + "url": "https://www.datacamp.com/tutorial/introduction-prompt-engineering", + "type": "article" + }, + { + "title": "System Prompts - InjectPrompt", + "url": "https://www.injectprompt.com/t/system-prompts", + "type": "article" + }, + { + "title": "Prompt Engineering Guide", + "url": "https://learnprompting.org/docs/prompt-engineering", + "type": "article" + }, + { + "title": "The Ultimate Guide to Red Teaming LLMs and Adversarial Prompts (Kili Technology)", + "url": "https://kili-technology.com/large-language-models-llms/red-teaming-llms-and-adversarial-prompts", + "type": "article" + } + ] }, "WZkIHZkV2qDYbYF9KBBRi": { "title": "Confidentiality, Integrity, Availability", - "description": "The CIA Triad is directly applicable in AI Red Teaming. Confidentiality tests focus on preventing leakage of training data or proprietary model details. Integrity tests probe for susceptibility to data poisoning or model manipulation. Availability tests assess resilience against denial-of-service attacks targeting the AI model or its supporting infrastructure.\n\nLearn more from the following resources:\n\n* [@article@Confidentiality, Integrity, Availability: Key Examples - DataSunrise](https://www.datasunrise.com/knowledge-center/confidentiality-integrity-availability-examples/) - Explains CIA triad with examples, mentioning AI/ML relevance.\n* [@article@The CIA Triad: Confidentiality, Integrity, Availability - Veeam](https://www.veeam.com/blog/cybersecurity-cia-triad-explained.html) - Breakdown of the three principles and how they apply.\n* [@article@What's The CIA Triad? 
Confidentiality, Integrity, & Availability, Explained | Splunk](https://www.splunk.com/en_us/blog/learn/cia-triad-confidentiality-integrity-availability.html) - Detailed explanation of the triad, mentioning modern updates and AI context.", - "links": [] + "description": "The CIA Triad is directly applicable in AI Red Teaming. Confidentiality tests focus on preventing leakage of training data or proprietary model details. Integrity tests probe for susceptibility to data poisoning or model manipulation. Availability tests assess resilience against denial-of-service attacks targeting the AI model or its supporting infrastructure.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Confidentiality, Integrity, Availability: Key Examples", + "url": "https://www.datasunrise.com/knowledge-center/confidentiality-integrity-availability-examples/", + "type": "article" + }, + { + "title": "The CIA Triad: Confidentiality, Integrity, Availability", + "url": "https://www.veeam.com/blog/cybersecurity-cia-triad-explained.html", + "type": "article" + }, + { + "title": "What's The CIA Triad? Confidentiality, Integrity, & Availability, Explained", + "url": "https://www.splunk.com/en_us/blog/learn/cia-triad-confidentiality-integrity-availability.html", + "type": "article" + } + ] }, "RDOaTBWP3aIJPUp_kcafm": { "title": "Threat Modeling", - "description": "AI Red Teams apply threat modeling to identify unique attack surfaces in AI systems, such as manipulating training data, exploiting prompt interfaces, attacking the model inference process, or compromising connected tools/APIs. Before attacking an AI system, red teamers perform threat modeling to map out possible adversaries (from curious users to state actors) and attack vectors, prioritizing tests based on likely impact and adversary capability.\n\nLearn more from the following resources:\n\n* [@article@Core Components of AI Red Team Exercises (Learn Prompting)](https://learnprompting.org/blog/what-is-ai-red-teaming) - Describes threat modeling as the first phase of an AI red team engagement.\n* [@guide@Threat Modeling Process | OWASP Foundation](https://owasp.org/www-community/Threat_Modeling_Process) - More detailed process steps.\n* [@guide@Threat Modeling | OWASP Foundation](https://owasp.org/www-community/Threat_Modeling) - General threat modeling process applicable to AI context.\n* [@video@How Microsoft Approaches AI Red Teaming (MS Build)](https://learn.microsoft.com/en-us/events/build-may-2023/breakout-responsible-ai-red-teaming/) - Video on Microsoft’s AI red team process, including threat modeling specific to AI.", - "links": [] + "description": "AI Red Teams apply threat modeling to identify unique attack surfaces in AI systems, such as manipulating training data, exploiting prompt interfaces, attacking the model inference process, or compromising connected tools/APIs. 
Before attacking an AI system, red teamers perform threat modeling to map out possible adversaries (from curious users to state actors) and attack vectors, prioritizing tests based on likely impact and adversary capability.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Core Components of AI Red Team Exercises (Learn Prompting)", + "url": "https://learnprompting.org/blog/what-is-ai-red-teaming", + "type": "article" + }, + { + "title": "Threat Modeling Process", + "url": "https://owasp.org/www-community/Threat_Modeling_Process", + "type": "article" + }, + { + "title": "Threat Modeling", + "url": "https://owasp.org/www-community/Threat_Modeling", + "type": "article" + }, + { + "title": "How Microsoft Approaches AI Red Teaming (MS Build)", + "url": "https://learn.microsoft.com/en-us/events/build-may-2023/breakout-responsible-ai-red-teaming/", + "type": "video" + } + ] }, "MupRvk_8Io2Hn7yEvU663": { "title": "Risk Management", - "description": "AI Red Teamers contribute to the AI risk management process by identifying and demonstrating concrete vulnerabilities. Findings from red team exercises inform risk assessments, helping organizations understand the likelihood and potential impact of specific AI threats and prioritize resources for mitigation based on demonstrated exploitability.\n\nLearn more from the following resources:\n\n* [@framework@NIST AI Risk Management Framework](https://www.nist.gov/itl/ai-risk-management-framework) - Key framework for managing AI-specific risks.\n* [@guide@A Beginner's Guide to Cybersecurity Risks and Vulnerabilities - Champlain College Online](https://online.champlain.edu/blog/beginners-guide-cybersecurity-risk-management) - Foundational understanding of risk.\n* [@guide@Cybersecurity Risk Management: Frameworks, Plans, and Best Practices - Hyperproof](https://hyperproof.io/resource/cybersecurity-risk-management-process/) - General guide applicable to AI system context.", - "links": [] + "description": "AI Red Teamers contribute to the AI risk management process by identifying and demonstrating concrete vulnerabilities. Findings from red team exercises inform risk assessments, helping organizations understand the likelihood and potential impact of specific AI threats and prioritize resources for mitigation based on demonstrated exploitability.\n\nLearn more from the following resources:", + "links": [ + { + "title": "NIST AI Risk Management Framework", + "url": "https://www.nist.gov/itl/ai-risk-management-framework", + "type": "article" + }, + { + "title": "A Beginner's Guide to Cybersecurity Risks and Vulnerabilities", + "url": "https://online.champlain.edu/blog/beginners-guide-cybersecurity-risk-management", + "type": "article" + }, + { + "title": "Cybersecurity Risk Management: Frameworks, Plans, and Best Practices", + "url": "https://hyperproof.io/resource/cybersecurity-risk-management-process/", + "type": "article" + } + ] }, "887lc3tWCRH-sOHSxWgWJ": { "title": "Vulnerability Assessment", - "description": "While general vulnerability assessment scans infrastructure, AI Red Teaming extends this to assess vulnerabilities specific to the AI model and its unique interactions. 
This includes probing for prompt injection flaws, testing for adversarial example robustness, checking for data privacy leaks, and evaluating safety alignment failures – weaknesses not typically found by standard IT vulnerability scanners.\n\nLearn more from the following resources:\n\n* [@article@AI red-teaming in critical infrastructure: Boosting security and trust in AI systems - DNV](https://www.dnv.com/article/ai-red-teaming-for-critical-infrastructure-industries/) - Discusses vulnerability assessment within AI red teaming for critical systems.\n* [@guide@The Ultimate Guide to Vulnerability Assessment - Strobes Security](https://strobes.co/blog/guide-vulnerability-assessment/) - Comprehensive guide on VA process (apply concepts to AI).\n* [@guide@Vulnerability Scanning Tools | OWASP Foundation](https://owasp.org/www-community/Vulnerability_Scanning_Tools) - List of tools useful in broader system assessment around AI.", - "links": [] + "description": "While general vulnerability assessment scans infrastructure, AI Red Teaming extends this to assess vulnerabilities specific to the AI model and its unique interactions. This includes probing for prompt injection flaws, testing for adversarial example robustness, checking for data privacy leaks, and evaluating safety alignment failures – weaknesses not typically found by standard IT vulnerability scanners.\n\nLearn more from the following resources:", + "links": [ + { + "title": "AI red-teaming in critical infrastructure: Boosting security and trust in AI systems", + "url": "https://www.dnv.com/article/ai-red-teaming-for-critical-infrastructure-industries/", + "type": "article" + }, + { + "title": "The Ultimate Guide to Vulnerability Assessment", + "url": "https://strobes.co/blog/guide-vulnerability-assessment/", + "type": "article" + }, + { + "title": "Vulnerability Scanning Tools", + "url": "https://owasp.org/www-community/Vulnerability_Scanning_Tools", + "type": "article" + } + ] }, "Ds8pqn4y9Npo7z6ubunvc": { "title": "Jailbreak Techniques", - "description": "Jailbreaking is a specific category of prompt hacking where the AI Red Teamer aims to bypass the LLM's safety and alignment training. They use techniques like creating fictional scenarios, asking the model to simulate an unrestricted AI, or using complex instructions to trick the model into generating content that violates its own policies (e.g., generating harmful code, hate speech, or illegal instructions).\n\nLearn more from the following resources:\n\n* [@article@InjectPrompt (David Willis-Owen)](https://injectprompt.com) - Discusses jailbreaks for several LLMs\n* [@guide@Prompt Hacking Guide - Learn Prompting](https://learnprompting.org/docs/category/prompt-hacking) - Covers jailbreaking strategies.\n* [@paper@Jailbroken: How Does LLM Safety Training Fail? (arXiv)](https://arxiv.org/abs/2307.02483) - Research analyzing jailbreak failures.", - "links": [] + "description": "Jailbreaking is a specific category of prompt hacking where the AI Red Teamer aims to bypass the LLM's safety and alignment training. 
They use techniques like creating fictional scenarios, asking the model to simulate an unrestricted AI, or using complex instructions to trick the model into generating content that violates its own policies (e.g., generating harmful code, hate speech, or illegal instructions).\n\nLearn more from the following resources:", + "links": [ + { + "title": "InjectPrompt (David Willis-Owen)", + "url": "https://injectprompt.com", + "type": "article" + }, + { + "title": "Prompt Hacking Guide - Learn Prompting", + "url": "https://learnprompting.org/docs/category/prompt-hacking", + "type": "article" + }, + { + "title": "Jailbroken: How Does LLM Safety Training Fail? (arXiv)", + "url": "https://arxiv.org/abs/2307.02483", + "type": "article" + } + ] }, "j7uLLpt8MkZ1rqM7UBPW4": { "title": "Safety Filter Bypasses", - "description": "AI Red Teamers specifically target the safety mechanisms (filters, guardrails) implemented within or around an AI model. They test techniques like using synonyms for blocked words, employing different languages, embedding harmful requests within harmless text, or using character-level obfuscation to evade detection and induce the model to generate prohibited content, thereby assessing the robustness of the safety controls.\n\nLearn more from the following resources:\n\n* [@article@Bypassing AI Content Filters | Restackio](https://www.restack.io/p/ai-driven-content-moderation-answer-bypass-filters-cat-ai) - Discusses techniques for evasion.\n* [@article@How to Bypass Azure AI Content Safety Guardrails - Mindgard](https://mindgard.ai/blog/bypassing-azure-ai-content-safety-guardrails) - Case study on bypassing specific safety mechanisms.\n* [@article@The Best Methods to Bypass AI Detection: Tips and Techniques - PopAi](https://www.popai.pro/resources/the-best-methods-to-bypass-ai-detection-tips-and-techniques/) - Focuses on evasion, relevant for filter bypass testing.", - "links": [] + "description": "AI Red Teamers specifically target the safety mechanisms (filters, guardrails) implemented within or around an AI model. They test techniques like using synonyms for blocked words, employing different languages, embedding harmful requests within harmless text, or using character-level obfuscation to evade detection and induce the model to generate prohibited content, thereby assessing the robustness of the safety controls.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Bypassing AI Content Filters", + "url": "https://www.restack.io/p/ai-driven-content-moderation-answer-bypass-filters-cat-ai", + "type": "article" + }, + { + "title": "How to Bypass Azure AI Content Safety Guardrails", + "url": "https://mindgard.ai/blog/bypassing-azure-ai-content-safety-guardrails", + "type": "article" + }, + { + "title": "The Best Methods to Bypass AI Detection: Tips and Techniques", + "url": "https://www.popai.pro/resources/the-best-methods-to-bypass-ai-detection-tips-and-techniques/", + "type": "article" + } + ] }, "XOrAPDRhBvde9R-znEipH": { "title": "Prompt Injection", - "description": "Prompt injection is a critical vulnerability tested by AI Red Teamers. They attempt to insert instructions into the LLM's input that override its intended system prompt or task, causing it to perform unauthorized actions, leak data, or generate malicious output. 
This tests the model's ability to distinguish trusted instructions from potentially harmful user/external input.\n\nLearn more from the following resources:\n\n* [@article@Prompt Injection & the Rise of Prompt Attacks: All You Need to Know | Lakera](https://www.lakera.ai/blog/guide-to-prompt-injection) - Guide covering different types of prompt attacks.\n* [@article@Prompt Injection (Learn Prompting)](https://learnprompting.org/docs/prompt_hacking/injection) - Learn Prompting article describing prompt injection with examples and mitigation strategies.\n* [@article@Prompt Injection Attack Explanation (IBM)](https://research.ibm.com/blog/prompt-injection-attacks-against-llms) - Explains what prompt injections are and how they work.\n* [@article@Prompt Injection: Impact, How It Works & 4 Defense Measures - Tigera](https://www.tigera.io/learn/guides/llm-security/prompt-injection/) - Overview of impact and defenses.\n* [@course@Advanced Prompt Hacking - Learn Prompting](https://learnprompting.org/courses/advanced-prompt-hacking) - Covers advanced injection techniques.", - "links": [] + "description": "Prompt injection is a critical vulnerability tested by AI Red Teamers. They attempt to insert instructions into the LLM's input that override its intended system prompt or task, causing it to perform unauthorized actions, leak data, or generate malicious output. This tests the model's ability to distinguish trusted instructions from potentially harmful user/external input.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Advanced Prompt Hacking - Learn Prompting", + "url": "https://learnprompting.org/courses/advanced-prompt-hacking", + "type": "course" + }, + { + "title": "Prompt Injection & the Rise of Prompt Attacks", + "url": "https://www.lakera.ai/blog/guide-to-prompt-injection", + "type": "article" + }, + { + "title": "Prompt Injection (Learn Prompting)", + "url": "https://learnprompting.org/docs/prompt_hacking/injection", + "type": "article" + }, + { + "title": "Prompt Injection Attack Explanation (IBM)", + "url": "https://research.ibm.com/blog/prompt-injection-attacks-against-llms", + "type": "article" + }, + { + "title": "Prompt Injection: Impact, How It Works & 4 Defense Measures", + "url": "https://www.tigera.io/learn/guides/llm-security/prompt-injection/", + "type": "article" + } + ] }, "1Xr7mxVekeAHzTL7G4eAZ": { "title": "Prompt Hacking", - "description": "Prompt hacking is a core technique for AI Red Teamers targeting LLMs. It involves crafting inputs (prompts) to manipulate the model into bypassing safety controls, revealing hidden information, or performing unintended actions. Red teamers systematically test various prompt hacking methods (like jailbreaking, role-playing, or instruction manipulation) to assess the LLM's resilience against adversarial user input.\n\nLearn more from the following resources:\n\n* [@course@Introduction to Prompt Hacking - Learn Prompting](https://learnprompting.org/courses/intro-to-prompt-hacking) - Free introductory course.\n* [@guide@Prompt Hacking Guide - Learn Prompting](https://learnprompting.org/docs/category/prompt-hacking) - Detailed guide covering techniques.\n* [@paper@SoK: Prompt Hacking of LLMs (arXiv 2023)](https://arxiv.org/abs/2311.05544) - Comprehensive research overview of prompt hacking types and techniques.", - "links": [] + "description": "Prompt hacking is a core technique for AI Red Teamers targeting LLMs. 
It involves crafting inputs (prompts) to manipulate the model into bypassing safety controls, revealing hidden information, or performing unintended actions. Red teamers systematically test various prompt hacking methods (like jailbreaking, role-playing, or instruction manipulation) to assess the LLM's resilience against adversarial user input.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Introduction to Prompt Hacking", + "url": "https://learnprompting.org/courses/intro-to-prompt-hacking", + "type": "course" + }, + { + "title": "Prompt Hacking Guide", + "url": "https://learnprompting.org/docs/category/prompt-hacking", + "type": "article" + }, + { + "title": "SoK: Prompt Hacking of LLMs (arXiv 2023)", + "url": "https://arxiv.org/abs/2311.05544", + "type": "article" + } + ] }, "5zHow4KZVpfhch5Aabeft": { "title": "Direct", - "description": "Direct injection attacks occur when malicious instructions are inserted directly into the prompt input field by the user interacting with the LLM. AI Red Teamers use this technique to assess if basic instructions like \"Ignore previous prompt\" can immediately compromise the model's safety or intended function, testing the robustness of the system prompt's influence.\n\nLearn more from the following resources:\n\n* [@article@Prompt Injection & the Rise of Prompt Attacks: All You Need to Know | Lakera](https://www.lakera.ai/blog/guide-to-prompt-injection) - Differentiates attack types.\n* [@article@Prompt Injection Cheat Sheet (FlowGPT)](https://flowgpt.com/p/prompt-injection-cheat-sheet) - Collection of prompt injection examples often used in direct attacks.\n* [@report@OpenAI GPT-4 System Card](https://openai.com/research/gpt-4-system-card) - Sections discuss how direct prompt attacks were tested during GPT-4 development.", - "links": [] + "description": "Direct injection attacks occur when malicious instructions are inserted directly into the prompt input field by the user interacting with the LLM. AI Red Teamers use this technique to assess if basic instructions like \"Ignore previous prompt\" can immediately compromise the model's safety or intended function, testing the robustness of the system prompt's influence.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Prompt Injection & the Rise of Prompt Attacks", + "url": "https://www.lakera.ai/blog/guide-to-prompt-injection", + "type": "article" + }, + { + "title": "Prompt Injection Cheat Sheet (FlowGPT)", + "url": "https://flowgpt.com/p/prompt-injection-cheat-sheet", + "type": "article" + }, + { + "title": "OpenAI GPT-4 System Card", + "url": "https://openai.com/research/gpt-4-system-card", + "type": "article" + } + ] }, "3_gJRtJSdm2iAfkwmcv0e": { "title": "Indirect", - "description": "Indirect injection involves embedding malicious prompts within external data sources that the LLM processes, such as websites, documents, or emails. 
AI Red Teamers test this by poisoning data sources the AI might interact with (e.g., adding hidden instructions to a webpage summarized by the AI) to see if the AI executes unintended commands or leaks data when processing that source.\n\nLearn more from the following resources:\n\n* [@paper@The Practical Application of Indirect Prompt Injection Attacks - David Willis-Owen](https://www.researchgate.net/publication/382692833_The_Practical_Application_of_Indirect_Prompt_Injection_Attacks_From_Academia_to_Industry) - Discusses a standard methodology to test for indirect injection attacks.\n* [@article@How to Prevent Indirect Prompt Injection Attacks - Cobalt](https://www.cobalt.io/blog/how-to-prevent-indirect-prompt-injection-attacks) - Explains indirect injection via external sources and mitigation.\n* [@article@Jailbreaks via Indirect Injection (Practical AI Safety Newsletter)](https://newsletter.practicalai.safety/p/jailbreaks-via-indirect-injection) - Examples of indirect prompt injection impacting LLM agents.", - "links": [] + "description": "Indirect injection involves embedding malicious prompts within external data sources that the LLM processes, such as websites, documents, or emails. AI Red Teamers test this by poisoning data sources the AI might interact with (e.g., adding hidden instructions to a webpage summarized by the AI) to see if the AI executes unintended commands or leaks data when processing that source.\n\nLearn more from the following resources:", + "links": [ + { + "title": "The Practical Application of Indirect Prompt Injection Attacks", + "url": "https://www.researchgate.net/publication/382692833_The_Practical_Application_of_Indirect_Prompt_Injection_Attacks_From_Academia_to_Industry", + "type": "article" + }, + { + "title": "How to Prevent Indirect Prompt Injection Attacks", + "url": "https://www.cobalt.io/blog/how-to-prevent-indirect-prompt-injection-attacks", + "type": "article" + }, + { + "title": "Jailbreaks via Indirect Injection (Practical AI Safety Newsletter)", + "url": "https://newsletter.practicalai.safety/p/jailbreaks-via-indirect-injection", + "type": "article" + } + ] }, "G1u_Kq4NeUsGX2qnUTuJU": { "title": "Countermeasures", - "description": "AI Red Teamers must also understand and test defenses against prompt hacking. This includes evaluating the effectiveness of input sanitization, output filtering, instruction demarcation (e.g., XML tagging), contextual awareness checks, model fine-tuning for resistance, and applying the principle of least privilege to LLM capabilities and tool access.\n\nLearn more from the following resources:\n\n* [@article@Mitigating Prompt Injection Attacks (NCC Group Research)](https://research.nccgroup.com/2023/12/01/mitigating-prompt-injection-attacks/) - Discusses various mitigation strategies and their effectiveness.\n* [@article@Prompt Injection & the Rise of Prompt Attacks: All You Need to Know | Lakera](https://www.lakera.ai/blog/guide-to-prompt-injection) - Includes discussion on best practices for prevention.\n* [@article@Prompt Injection: Impact, How It Works & 4 Defense Measures - Tigera](https://www.tigera.io/learn/guides/llm-security/prompt-injection/) - Covers defensive measures.\n* [@guide@OpenAI Best Practices for Prompt Security](https://platform.openai.com/docs/guides/prompt-engineering/strategy-write-clear-instructions) - OpenAI’s recommendations to prevent prompt manipulation.", - "links": [] + "description": "AI Red Teamers must also understand and test defenses against prompt hacking. 
This includes evaluating the effectiveness of input sanitization, output filtering, instruction demarcation (e.g., XML tagging), contextual awareness checks, model fine-tuning for resistance, and applying the principle of least privilege to LLM capabilities and tool access.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Mitigating Prompt Injection Attacks (NCC Group Research)", + "url": "https://research.nccgroup.com/2023/12/01/mitigating-prompt-injection-attacks/", + "type": "article" + }, + { + "title": "Prompt Injection & the Rise of Prompt Attacks", + "url": "https://www.lakera.ai/blog/guide-to-prompt-injection", + "type": "article" + }, + { + "title": "Prompt Injection: Impact, How It Works & 4 Defense Measures", + "url": "https://www.tigera.io/learn/guides/llm-security/prompt-injection/", + "type": "article" + }, + { + "title": "OpenAI Best Practices for Prompt Security", + "url": "https://platform.openai.com/docs/guides/prompt-engineering/strategy-write-clear-instructions", + "type": "article" + } + ] }, "vhBu5x8INTtqvx6vcYAhE": { "title": "Code Injection", - "description": "AI Red Teamers test for code injection vulnerabilities specifically in the context of AI applications. This involves probing whether user input, potentially manipulated via prompts, can lead to the execution of unintended code (e.g., SQL, OS commands, or script execution via generated code) within the application layer or connected systems, using the AI as a potential vector.\n\nLearn more from the following resources:\n\n* [@article@Code Injection in LLM Applications - NeuralTrust](https://neuraltrust.ai/blog/code-injection-in-llms) - Specifically discusses code injection risks involving LLMs.\n* [@docs@Secure Plugin Sandboxing (OpenAI Plugins)](https://platform.openai.com/docs/plugins/production/security-requirements) - Context on preventing code injection via AI plugins.\n* [@guide@Code Injection - OWASP Foundation](https://owasp.org/www-community/attacks/Code_Injection) - Foundational knowledge on code injection attacks.", - "links": [] + "description": "AI Red Teamers test for code injection vulnerabilities specifically in the context of AI applications. This involves probing whether user input, potentially manipulated via prompts, can lead to the execution of unintended code (e.g., SQL, OS commands, or script execution via generated code) within the application layer or connected systems, using the AI as a potential vector.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Code Injection in LLM Applications", + "url": "https://neuraltrust.ai/blog/code-injection-in-llms", + "type": "article" + }, + { + "title": "Secure Plugin Sandboxing (OpenAI Plugins)", + "url": "https://platform.openai.com/docs/plugins/production/security-requirements", + "type": "article" + }, + { + "title": "Code Injection", + "url": "https://owasp.org/www-community/attacks/Code_Injection", + "type": "article" + } + ] }, "uBXrri2bXVsNiM8fIHHOv": { "title": "Model Vulnerabilities", - "description": "This category covers attacks and tests targeting the AI model itself, beyond the prompt interface. 
AI Red Teamers investigate inherent weaknesses in the model's architecture, training data artifacts, or prediction mechanisms, such as susceptibility to data extraction, poisoning, or adversarial manipulation.\n\nLearn more from the following resources:\n\n* [@article@AI Security Risks Uncovered: What You Must Know in 2025 - TTMS](https://ttms.com/uk/ai-security-risks-explained-what-you-need-to-know-in-2025/) - Discusses adversarial attacks, data poisoning, and prototype theft.\n* [@article@Attacking AI Models (Trail of Bits Blog Series)](https://blog.trailofbits.com/category/ai-security/) - Series discussing model-focused attacks.\n* [@report@AI and ML Vulnerabilities (CNAS Report)](https://www.cnas.org/publications/reports/understanding-and-mitigating-ai-vulnerabilities) - Overview of known machine learning vulnerabilities.", - "links": [] + "description": "This category covers attacks and tests targeting the AI model itself, beyond the prompt interface. AI Red Teamers investigate inherent weaknesses in the model's architecture, training data artifacts, or prediction mechanisms, such as susceptibility to data extraction, poisoning, or adversarial manipulation.\n\nLearn more from the following resources:", + "links": [ + { + "title": "AI Security Risks Uncovered: What You Must Know in 2025", + "url": "https://ttms.com/uk/ai-security-risks-explained-what-you-need-to-know-in-2025/", + "type": "article" + }, + { + "title": "Attacking AI Models (Trail of Bits Blog Series)", + "url": "https://blog.trailofbits.com/category/ai-security/", + "type": "article" + }, + { + "title": "AI and ML Vulnerabilities (CNAS Report)", + "url": "https://www.cnas.org/publications/reports/understanding-and-mitigating-ai-vulnerabilities", + "type": "article" + } + ] }, "QFzLx5nc4rCCD8WVc20mo": { "title": "Model Weight Stealing", - "description": "AI Red Teamers assess the risk of attackers reconstructing or stealing the proprietary weights of a trained model, often through API query-based attacks. Testing involves simulating such attacks to understand how easily the model's functionality can be replicated, which informs defenses like query rate limiting, watermarking, or differential privacy.\n\nLearn more from the following resources:\n\n* [@article@A Playbook for Securing AI Model Weights - RAND](https://www.rand.org/pubs/research_briefs/RBA2849-1.html) - Discusses attack vectors and security levels for protecting model weights.\n* [@article@How to Steal a Machine Learning Model (SkyCryptor)](https://skycryptor.com/blog/how-to-steal-a-machine-learning-model) - Explains model weight extraction via query attacks.\n* [@paper@Defense Against Model Stealing (Microsoft Research)](https://www.microsoft.com/en-us/research/publication/defense-against-model-stealing-attacks/) - Research on detecting and defending against model stealing.\n* [@paper@On the Limitations of Model Stealing with Uncertainty Quantification Models - OpenReview](https://openreview.net/pdf?id=ONRFHoUzNk) - Research exploring model stealing techniques.", - "links": [] + "description": "AI Red Teamers assess the risk of attackers reconstructing or stealing the proprietary weights of a trained model, often through API query-based attacks. 
Testing involves simulating such attacks to understand how easily the model's functionality can be replicated, which informs defenses like query rate limiting, watermarking, or differential privacy.\n\nLearn more from the following resources:", + "links": [ + { + "title": "A Playbook for Securing AI Model Weights", + "url": "https://www.rand.org/pubs/research_briefs/RBA2849-1.html", + "type": "article" + }, + { + "title": "How to Steal a Machine Learning Model (SkyCryptor)", + "url": "https://skycryptor.com/blog/how-to-steal-a-machine-learning-model", + "type": "article" + }, + { + "title": "Defense Against Model Stealing (Microsoft Research)", + "url": "https://www.microsoft.com/en-us/research/publication/defense-against-model-stealing-attacks/", + "type": "article" + }, + { + "title": "On the Limitations of Model Stealing with Uncertainty Quantification Models", + "url": "https://openreview.net/pdf?id=ONRFHoUzNk", + "type": "article" + } + ] }, "DQeOavZCoXpF3k_qRDABs": { "title": "Unauthorized Access", - "description": "AI Red Teamers test if vulnerabilities in the AI system or its interfaces allow attackers to gain unauthorized access to data, functionalities, or underlying infrastructure. This includes attempting privilege escalation via prompts, exploiting insecure API endpoints connected to the AI, or manipulating the AI to access restricted system resources.\n\nLearn more from the following resources:\n\n* [@article@Unauthorized Data Access via LLMs (Security Boulevard)](https://securityboulevard.com/2023/11/unauthorized-data-access-via-llms/) - Discusses risks of LLMs accessing unauthorized data.\n* [@guide@OWASP API Security Project](https://owasp.org/www-project-api-security/) - Covers API risks like broken access control relevant to AI systems.\n* [@paper@AI System Abuse Cases (Harvard Belfer Center)](https://www.belfercenter.org/publication/ai-system-abuse-cases) - Covers various ways AI systems can be abused, including access violations.", - "links": [] + "description": "AI Red Teamers test if vulnerabilities in the AI system or its interfaces allow attackers to gain unauthorized access to data, functionalities, or underlying infrastructure. This includes attempting privilege escalation via prompts, exploiting insecure API endpoints connected to the AI, or manipulating the AI to access restricted system resources.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Unauthorized Data Access via LLMs (Security Boulevard)", + "url": "https://securityboulevard.com/2023/11/unauthorized-data-access-via-llms/", + "type": "article" + }, + { + "title": "OWASP API Security Project", + "url": "https://owasp.org/www-project-api-security/", + "type": "article" + }, + { + "title": "AI System Abuse Cases (Harvard Belfer Center)", + "url": "https://www.belfercenter.org/publication/ai-system-abuse-cases", + "type": "article" + } + ] }, "nD0_64ELEeJSN-0aZiR7i": { "title": "Data Poisoning", - "description": "AI Red Teamers simulate data poisoning attacks by evaluating how introducing manipulated or mislabeled data into potential training or fine-tuning datasets could compromise the model. They assess the impact on model accuracy, fairness, or the potential creation of exploitable backdoors, informing defenses around data validation and provenance.\n\nLearn more from the following resources:\n\n* [@article@AI Poisoning - Is It Really A Threat? 
- AIBlade](https://www.aiblade.net/p/ai-poisoning-is-it-really-a-threat) - Detailed exploration of data poisoning attacks and impacts.\n* [@article@Data Poisoning Attacks in ML (Towards Data Science)](https://towardsdatascience.com/data-poisoning-attacks-in-machine-learning-542169587b7f) - Overview of techniques.\n* [@paper@Detecting and Preventing Data Poisoning Attacks on AI Models - arXiv](https://arxiv.org/abs/2503.09302) - Research on detection and prevention techniques.\n* [@paper@Poisoning Web-Scale Training Data (arXiv)](https://arxiv.org/abs/2310.12818) - Analysis of poisoning risks in large datasets used for LLMs.", - "links": [] + "description": "AI Red Teamers simulate data poisoning attacks by evaluating how introducing manipulated or mislabeled data into potential training or fine-tuning datasets could compromise the model. They assess the impact on model accuracy, fairness, or the potential creation of exploitable backdoors, informing defenses around data validation and provenance.\n\nLearn more from the following resources:", + "links": [ + { + "title": "AI Poisoning", + "url": "https://www.aiblade.net/p/ai-poisoning-is-it-really-a-threat", + "type": "article" + }, + { + "title": "Data Poisoning Attacks in ML (Towards Data Science)", + "url": "https://towardsdatascience.com/data-poisoning-attacks-in-machine-learning-542169587b7f", + "type": "article" + }, + { + "title": "Detecting and Preventing Data Poisoning Attacks on AI Models", + "url": "https://arxiv.org/abs/2503.09302", + "type": "article" + }, + { + "title": "Poisoning Web-Scale Training Data (arXiv)", + "url": "https://arxiv.org/abs/2310.12818", + "type": "article" + } + ] }, "xjlttOti-_laPRn8a2fVy": { "title": "Adversarial Examples", - "description": "A core AI Red Teaming activity involves generating adversarial examples – inputs slightly perturbed to cause misclassification or bypass safety filters – to test model robustness. Red teamers use various techniques (gradient-based, optimization-based, or black-box methods) to find inputs that exploit model weaknesses, informing developers on how to harden the model.\n\nLearn more from the following resources:\n\n* [@article@Adversarial Examples Explained (OpenAI Blog)](https://openai.com/research/adversarial-examples) - Introduction by OpenAI.\n* [@guide@Adversarial Examples – Interpretable Machine Learning Book](https://christophm.github.io/interpretable-ml-book/adversarial.html) - In-depth explanation and examples.\n* [@guide@Adversarial Testing for Generative AI | Machine Learning - Google for Developers](https://developers.google.com/machine-learning/guides/adv-testing) - Google's guide on adversarial testing workflows.\n* [@video@How AI Can Be Tricked With Adversarial Attacks - Two Minute Papers](https://www.youtube.com/watch?v=J3X_JWQkvo8?v=MPcfoQBDY0w) - Short video demonstrating adversarial examples.", - "links": [] + "description": "A core AI Red Teaming activity involves generating adversarial examples – inputs slightly perturbed to cause misclassification or bypass safety filters – to test model robustness. 
Red teamers use various techniques (gradient-based, optimization-based, or black-box methods) to find inputs that exploit model weaknesses, informing developers on how to harden the model.\n\nLearn more from the following resources:",
    "links": [
      {
        "title": "Adversarial Examples Explained (OpenAI Blog)",
        "url": "https://openai.com/research/adversarial-examples",
        "type": "article"
      },
      {
        "title": "Adversarial Examples – Interpretable Machine Learning Book",
        "url": "https://christophm.github.io/interpretable-ml-book/adversarial.html",
        "type": "article"
      },
      {
        "title": "Adversarial Testing for Generative AI",
        "url": "https://developers.google.com/machine-learning/guides/adv-testing",
        "type": "article"
      },
      {
        "title": "How AI Can Be Tricked With Adversarial Attacks",
        "url": "https://www.youtube.com/watch?v=J3X_JWQkvo8",
        "type": "video"
      }
    ]
  },
  "iE5PcswBHnu_EBFIacib0": {
    "title": "Model Inversion",
    "description": "AI Red Teamers perform model inversion tests to assess if an attacker can reconstruct sensitive training data (like images, text snippets, or personal attributes) by repeatedly querying the model and analyzing its outputs. 
Success indicates privacy risks due to data memorization, requiring mitigation techniques like differential privacy or output filtering.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Model Inversion Attacks for ML (Medium)", + "url": "https://medium.com/@ODSC/model-inversion-attacks-for-machine-learning-ff407a1b10d1", + "type": "article" + }, + { + "title": "Model inversion and membership inference: Understanding new AI security risks", + "url": "https://www.hoganlovells.com/en/publications/model-inversion-and-membership-inference-understanding-new-ai-security-risks-and-mitigating-vulnerabilities", + "type": "article" + }, + { + "title": "Extracting Training Data from LLMs (arXiv)", + "url": "https://arxiv.org/abs/2012.07805", + "type": "article" + }, + { + "title": "Model Inversion Attacks: A Survey of Approaches and Countermeasures", + "url": "https://arxiv.org/html/2411.10023v1", + "type": "article" + } + ] }, "2Y0ZO-etpv3XIvunDLu-W": { "title": "Adversarial Training", - "description": "AI Red Teamers evaluate the effectiveness of adversarial training as a defense. They test if models trained on adversarial examples are truly robust or if new, unseen adversarial attacks can still bypass the hardened defenses. This helps refine the adversarial training process itself.\n\nLearn more from the following resources:\n\n* [@article@Model Robustness: Building Reliable AI Models - Encord](https://encord.com/blog/model-robustness-machine-learning-strategies/) (Discusses adversarial robustness)\n* [@guide@Adversarial Testing for Generative AI | Google for Developers](https://developers.google.com/machine-learning/guides/adv-testing) - Covers the concept as part of testing.\n* [@paper@Detecting and Preventing Data Poisoning Attacks on AI Models - arXiv](https://arxiv.org/abs/2503.09302) (Mentions adversarial training as defense)", - "links": [] + "description": "AI Red Teamers evaluate the effectiveness of adversarial training as a defense. They test if models trained on adversarial examples are truly robust or if new, unseen adversarial attacks can still bypass the hardened defenses. This helps refine the adversarial training process itself.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Model Robustness: Building Reliable AI Models", + "url": "https://encord.com/blog/model-robustness-machine-learning-strategies/", + "type": "article" + }, + { + "title": "Adversarial Testing for Generative AI", + "url": "https://developers.google.com/machine-learning/guides/adv-testing", + "type": "article" + }, + { + "title": "Detecting and Preventing Data Poisoning Attacks on AI Models", + "url": "https://arxiv.org/abs/2503.09302", + "type": "article" + } + ] }, "6gEHMhh6BGJI-ZYN27YPW": { "title": "Robust Model Design", - "description": "AI Red Teamers assess whether choices made during model design (architecture selection, regularization techniques, ensemble methods) effectively contribute to robustness against anticipated attacks. 
They test if these design choices actually prevent common failure modes identified during threat modeling.\n\nLearn more from the following resources:",
    "links": [
      {
        "title": "Model Robustness: Building Reliable AI Models",
        "url": "https://encord.com/blog/model-robustness-machine-learning-strategies/",
        "type": "article"
      },
      {
        "title": "Understanding Robustness in Machine Learning",
        "url": "https://www.alooba.com/skills/concepts/machine-learning/robustness/",
        "type": "article"
      },
      {
        "title": "Towards Evaluating the Robustness of Neural Networks (Carlini & Wagner, arXiv)",
        "url": "https://arxiv.org/abs/1608.04644",
        "type": "article"
      }
    ]
  },
  "7Km0mFpHguHYPs5UhHTsM": {
    "title": "Continuous Monitoring",
    "description": "AI Red Teamers assess the effectiveness of continuous monitoring systems by attempting attacks and observing if detection mechanisms trigger appropriate alerts and responses. 
They test if monitoring covers AI-specific anomalies (like sudden shifts in output toxicity or unexpected resource consumption by the model) in addition to standard infrastructure monitoring.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Cyber Security Monitoring: 5 Key Components", + "url": "https://www.bitsight.com/blog/5-things-to-consider-building-continuous-security-monitoring-strategy", + "type": "article" + }, + { + "title": "Cyber Security Monitoring: Definition and Best Practices", + "url": "https://www.sentinelone.com/cybersecurity-101/cybersecurity/cyber-security-monitoring/", + "type": "article" + }, + { + "title": "Cybersecurity Monitoring: Definition, Tools & Best Practices", + "url": "https://nordlayer.com/blog/cybersecurity-monitoring/", + "type": "article" + } + ] }, "aKzai0A8J55-OBXTnQih1": { "title": "Insecure Deserialization", - "description": "AI Red Teamers investigate if serialized objects used by the AI system (e.g., for saving model states, configurations, or transmitting data) can be manipulated by an attacker. They test if crafting malicious serialized objects could lead to remote code execution or other exploits when the application deserializes the untrusted data.\n\nLearn more from the following resources:\n\n* [@article@Lightboard Lessons: OWASP Top 10 - Insecure Deserialization - DevCentral](https://community.f5.com/kb/technicalarticles/lightboard-lessons-owasp-top-10---insecure-deserialization/281509) - Video explanation.\n* [@article@How Hugging Face Was Ethically Hacked](https://www.aiblade.net/p/how-hugging-face-was-ethically-hacked) - Hugging Face deserialization case study.\n* [@article@OWASP TOP 10: Insecure Deserialization - Detectify Blog](https://blog.detectify.com/best-practices/owasp-top-10-insecure-deserialization/) - Overview within OWASP Top 10 context.\n* [@guide@Insecure Deserialization - OWASP Foundation](https://owasp.org/www-community/vulnerabilities/Insecure_Deserialization) - Core explanation of the vulnerability.", - "links": [] + "description": "AI Red Teamers investigate if serialized objects used by the AI system (e.g., for saving model states, configurations, or transmitting data) can be manipulated by an attacker. They test if crafting malicious serialized objects could lead to remote code execution or other exploits when the application deserializes the untrusted data.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Lightboard Lessons: OWASP Top 10 - Insecure Deserialization", + "url": "https://community.f5.com/kb/technicalarticles/lightboard-lessons-owasp-top-10---insecure-deserialization/281509", + "type": "article" + }, + { + "title": "How Hugging Face Was Ethically Hacked", + "url": "https://www.aiblade.net/p/how-hugging-face-was-ethically-hacked", + "type": "article" + }, + { + "title": "OWASP TOP 10: Insecure Deserialization", + "url": "https://blog.detectify.com/best-practices/owasp-top-10-insecure-deserialization/", + "type": "article" + }, + { + "title": "Insecure Deserialization", + "url": "https://owasp.org/www-community/vulnerabilities/Insecure_Deserialization", + "type": "article" + } + ] }, "kgDsDlBk8W2aM6LyWpFY8": { "title": "Remote Code Execution", - "description": "AI Red Teamers attempt to achieve RCE on systems hosting or interacting with AI models. 
This could involve exploiting vulnerabilities in the AI framework itself, the web server, connected APIs, or tricking an AI agent with code execution capabilities into running malicious commands provided via prompts. RCE is often the ultimate goal of exploiting other vulnerabilities like code injection or insecure deserialization.\n\nLearn more from the following resources:",
    "links": [
      {
        "title": "Exploiting LLMs with Code Execution (GitHub Gist)",
        "url": "https://gist.github.com/coolaj86/6f4f7b30129b0251f61fa7baaa881516",
        "type": "article"
      },
      {
        "title": "What is remote code execution?",
        "url": "https://www.cloudflare.com/learning/security/what-is-remote-code-execution/",
        "type": "article"
      },
      {
        "title": "DEFCON 31 - AI Village - Hacking an LLM embedded system (agent) - Johann Rehberger",
        "url": "https://www.youtube.com/watch?v=6u04C1N69ks",
        "type": "video"
      }
    ]
  },
  "nhUKKWyBH80nyKfGT8ErC": {
    "title": "Infrastructure Security",
    "description": "AI Red Teamers assess the security posture of the infrastructure hosting AI models (cloud environments, servers, containers). 
They look for misconfigurations, unpatched systems, insecure network setups, or inadequate access controls that could allow compromise of the AI system or leakage of sensitive data/models.\n\nLearn more from the following resources:", + "links": [ + { + "title": "AI Infrastructure Attacks (VentureBeat)", + "url": "https://venturebeat.com/ai/understanding-ai-infrastructure-attacks/", + "type": "article" + }, + { + "title": "Network Infrastructure Security - Best Practices and Strategies", + "url": "https://www.dataguard.com/blog/network-infrastructure-security-best-practices-and-strategies/", + "type": "article" + }, + { + "title": "Secure Deployment of ML Systems (NIST)", + "url": "https://csrc.nist.gov/publications/detail/sp/800-218/final", + "type": "article" + } + ] }, "Tszl26iNBnQBdBEWOueDA": { "title": "API Protection", - "description": "AI Red Teamers rigorously test the security of APIs providing access to AI models. They probe for OWASP API Top 10 vulnerabilities like broken authentication/authorization, injection flaws, security misconfigurations, and lack of rate limiting, specifically evaluating how these could lead to misuse or compromise of the AI model itself.\n\nLearn more from the following resources:\n\n* [@article@API Protection for AI Factories: The First Step to AI Security - F5](https://www.f5.com/company/blog/api-security-for-ai-factories) - Discusses the criticality of API security for AI applications.\n* [@article@Securing APIs with AI for Advanced Threat Protection | Adeva](https://adevait.com/artificial-intelligence/securing-apis-with-ai) - Discusses using AI for API security, implies testing these is needed.\n* [@article@Securing Machine Learning APIs (IBM)](https://developer.ibm.com/articles/se-securing-machine-learning-apis/) - Best practices for protecting ML APIs.\n* [@guide@OWASP API Security Project (Top 10 2023)](https://owasp.org/www-project-api-security/) - Essential checklist for API vulnerabilities.", - "links": [] + "description": "AI Red Teamers rigorously test the security of APIs providing access to AI models. They probe for OWASP API Top 10 vulnerabilities like broken authentication/authorization, injection flaws, security misconfigurations, and lack of rate limiting, specifically evaluating how these could lead to misuse or compromise of the AI model itself.\n\nLearn more from the following resources:", + "links": [ + { + "title": "API Protection for AI Factories: The First Step to AI Security", + "url": "https://www.f5.com/company/blog/api-security-for-ai-factories", + "type": "article" + }, + { + "title": "Securing APIs with AI for Advanced Threat Protection", + "url": "https://adevait.com/artificial-intelligence/securing-apis-with-ai", + "type": "article" + }, + { + "title": "Securing Machine Learning APIs (IBM)", + "url": "https://developer.ibm.com/articles/se-securing-machine-learning-apis/", + "type": "article" + }, + { + "title": "OWASP API Security Project (Top 10 2023)", + "url": "https://owasp.org/www-project-api-security/", + "type": "article" + } + ] }, "J7gjlt2MBx7lOkOnfGvPF": { "title": "Authentication", - "description": "AI Red Teamers test the authentication mechanisms controlling access to AI systems and APIs. 
They attempt to bypass logins, steal or replay API keys/tokens, exploit weak password policies, or find flaws in MFA implementations to gain unauthorized access to the AI model or its management interfaces.\n\nLearn more from the following resources:",
    "links": [
      {
        "title": "Red-Teaming in AI Testing: Stress Testing",
        "url": "https://www.labelvisor.com/red-teaming-abstract-competitive-testing-data-selection/",
        "type": "article"
      },
      {
        "title": "What is Authentication vs Authorization?",
        "url": "https://auth0.com/intro-to-iam/authentication-vs-authorization",
        "type": "article"
      },
      {
        "title": "How JWTs are used for Authentication (and how to bypass it)",
        "url": "https://www.youtube.com/watch?v=3OpQi65s_ME",
        "type": "video"
      }
    ]
  },
  "JQ3bR8odXJfd-1RCEf3-Q": {
    "title": "Authorization",
    "description": "AI Red Teamers test authorization controls to ensure that authenticated users can only access the AI features and data permitted by their roles/permissions. 
They attempt privilege escalation, try to access other users' data via the AI, or manipulate the AI to perform actions beyond its authorized scope.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is Authentication vs Authorization?", + "url": "https://auth0.com/intro-to-iam/authentication-vs-authorization", + "type": "article" + }, + { + "title": "Identity and access management (IAM) fundamental concepts", + "url": "https://learn.microsoft.com/en-us/entra/fundamentals/identity-fundamental-concepts", + "type": "article" + }, + { + "title": "OWASP API Security Project", + "url": "https://owasp.org/www-project-api-security/", + "type": "article" + } + ] }, "0bApnJTt-Z2IUf0X3OCYf": { "title": "Black Box Testing", - "description": "In AI Red Teaming, black-box testing involves probing the AI system with inputs and observing outputs without any knowledge of the model's architecture, training data, or internal logic. This simulates an external attacker and is crucial for finding vulnerabilities exploitable through publicly accessible interfaces, such as prompt injection or safety bypasses discoverable via API interaction.\n\nLearn more from the following resources:\n\n* [@article@Black-Box, Gray Box, and White-Box Penetration Testing - EC-Council](https://www.eccouncil.org/cybersecurity-exchange/penetration-testing/black-box-gray-box-and-white-box-penetration-testing-importance-and-uses/) - Comparison of testing types.\n* [@article@What is Black Box Testing | Techniques & Examples - Imperva](https://www.imperva.com/learn/application-security/black-box-testing/) - General explanation.\n* [@guide@LLM red teaming guide (open source) - Promptfoo](https://www.promptfoo.dev/docs/red-team/) - Contrasts black-box and white-box approaches for LLM red teaming.", - "links": [] + "description": "In AI Red Teaming, black-box testing involves probing the AI system with inputs and observing outputs without any knowledge of the model's architecture, training data, or internal logic. This simulates an external attacker and is crucial for finding vulnerabilities exploitable through publicly accessible interfaces, such as prompt injection or safety bypasses discoverable via API interaction.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Black-Box, Gray Box, and White-Box Penetration Testing", + "url": "https://www.eccouncil.org/cybersecurity-exchange/penetration-testing/black-box-gray-box-and-white-box-penetration-testing-importance-and-uses/", + "type": "article" + }, + { + "title": "What is Black Box Testing", + "url": "https://www.imperva.com/learn/application-security/black-box-testing/", + "type": "article" + }, + { + "title": "LLM red teaming guide (open source)", + "url": "https://www.promptfoo.dev/docs/red-team/", + "type": "article" + } + ] }, "Mrk_js5UVn4dRDw-Yco3Y": { "title": "White Box Testing", - "description": "White-box testing in AI Red Teaming grants the tester full access to the model's internals (architecture, weights, training data, source code). This allows for highly targeted attacks, such as crafting precise adversarial examples using gradients, analyzing code for vulnerabilities, or directly examining training data for biases or PII leakage. 
It simulates insider threats or deep analysis scenarios.\n\nLearn more from the following resources:\n\n* [@article@Black-Box, Gray Box, and White-Box Penetration Testing - EC-Council](https://www.eccouncil.org/cybersecurity-exchange/penetration-testing/black-box-gray-box-and-white-box-penetration-testing-importance-and-uses/) - Comparison of testing types.\n* [@article@White-Box Adversarial Examples (OpenAI Blog)](https://openai.com/research/adversarial-robustness-toolbox) - Discusses generating attacks with full model knowledge.\n* [@guide@LLM red teaming guide (open source) - Promptfoo](https://www.promptfoo.dev/docs/red-team/) - Mentions white-box testing benefits for LLMs.", - "links": [] + "description": "White-box testing in AI Red Teaming grants the tester full access to the model's internals (architecture, weights, training data, source code). This allows for highly targeted attacks, such as crafting precise adversarial examples using gradients, analyzing code for vulnerabilities, or directly examining training data for biases or PII leakage. It simulates insider threats or deep analysis scenarios.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Black-Box, Gray Box, and White-Box Penetration Testing", + "url": "https://www.eccouncil.org/cybersecurity-exchange/penetration-testing/black-box-gray-box-and-white-box-penetration-testing-importance-and-uses/", + "type": "article" + }, + { + "title": "White-Box Adversarial Examples (OpenAI Blog)", + "url": "https://openai.com/research/adversarial-robustness-toolbox", + "type": "article" + }, + { + "title": "LLM red teaming guide (open source)", + "url": "https://www.promptfoo.dev/docs/red-team/", + "type": "article" + } + ] }, "ZVNAMCP68XKRXVxF2-hBc": { "title": "Grey Box Testing", - "description": "Grey-box AI Red Teaming involves testing with partial knowledge of the system, such as knowing the model type (e.g., GPT-4), having access to some documentation, or understanding the general system architecture but not having full model weights or source code. This allows for more targeted testing than black-box while still simulating realistic external attacker scenarios where some information might be gleaned.\n\nLearn more from the following resources:\n\n* [@article@AI Transparency: Connecting AI Red Teaming and Compliance | SplxAI Blog](https://splx.ai/blog/ai-transparency-connecting-ai-red-teaming-and-compliance) - Discusses the value of moving towards gray-box testing in AI.\n* [@article@Black-Box, Gray Box, and White-Box Penetration Testing - EC-Council](https://www.eccouncil.org/cybersecurity-exchange/penetration-testing/black-box-gray-box-and-white-box-penetration-testing-importance-and-uses/) - Comparison of testing types.\n* [@article@Understanding Black Box, White Box, and Grey Box Testing - Frugal Testing](https://www.frugaltesting.com/blog/understanding-black-box-white-box-and-grey-box-testing-in-software-testing) - General definitions.", - "links": [] + "description": "Grey-box AI Red Teaming involves testing with partial knowledge of the system, such as knowing the model type (e.g., GPT-4), having access to some documentation, or understanding the general system architecture but not having full model weights or source code. 
This allows for more targeted testing than black-box while still simulating realistic external attacker scenarios where some information might be gleaned.\n\nLearn more from the following resources:", + "links": [ + { + "title": "AI Transparency: Connecting AI Red Teaming and Compliance", + "url": "https://splx.ai/blog/ai-transparency-connecting-ai-red-teaming-and-compliance", + "type": "article" + }, + { + "title": "Black-Box, Gray Box, and White-Box Penetration Testing", + "url": "https://www.eccouncil.org/cybersecurity-exchange/penetration-testing/black-box-gray-box-and-white-box-penetration-testing-importance-and-uses/", + "type": "article" + }, + { + "title": "Understanding Black Box, White Box, and Grey Box Testing", + "url": "https://www.frugaltesting.com/blog/understanding-black-box-white-box-and-grey-box-testing-in-software-testing", + "type": "article" + } + ] }, "LVdYN9hyCyNPYn2Lz1y9b": { "title": "Automated vs Manual", - "description": "AI Red Teaming typically employs a blend of automated tools (for large-scale scanning, fuzzing prompts, generating basic adversarial examples) and manual human testing (for creative jailbreaking, complex multi-stage attacks, evaluating nuanced safety issues like bias). Automation provides scale, while manual testing provides depth and creativity needed to find novel vulnerabilities.\n\nLearn more from the following resources:\n\n* [@article@Automation Testing vs. Manual Testing: Which is the better approach? - Opkey](https://www.opkey.com/blog/automation-testing-vs-manual-testing-which-is-better) - General comparison.\n* [@article@Manual Testing vs Automated Testing: What's the Difference? - Leapwork](https://www.leapwork.com/blog/manual-vs-automated-testing) - General comparison.\n* [@guide@LLM red teaming guide (open source) - Promptfoo](https://www.promptfoo.dev/docs/red-team/) - Discusses using both automated generation and human ingenuity for red teaming.", - "links": [] + "description": "AI Red Teaming typically employs a blend of automated tools (for large-scale scanning, fuzzing prompts, generating basic adversarial examples) and manual human testing (for creative jailbreaking, complex multi-stage attacks, evaluating nuanced safety issues like bias). Automation provides scale, while manual testing provides depth and creativity needed to find novel vulnerabilities.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Automation Testing vs. Manual Testing: Which is the better approach?", + "url": "https://www.opkey.com/blog/automation-testing-vs-manual-testing-which-is-better", + "type": "article" + }, + { + "title": "Manual Testing vs Automated Testing: What's the Difference?", + "url": "https://www.leapwork.com/blog/manual-vs-automated-testing", + "type": "article" + }, + { + "title": "LLM red teaming guide (open source)", + "url": "https://www.promptfoo.dev/docs/red-team/", + "type": "article" + } + ] }, "65Lo60JQS5YlvvQ6KevXt": { "title": "Continuous Testing", - "description": "Applying continuous testing principles to AI security involves integrating automated red teaming checks into the development pipeline (CI/CD). This allows for regular, automated assessment of model safety, robustness, and alignment as the model or application code evolves, catching regressions or new vulnerabilities early. 
Tools facilitating Continuous Automated Red Teaming (CART) are emerging.\n\nLearn more from the following resources:\n\n* [@article@Continuous Automated Red Teaming (CART) - FireCompass](https://www.firecompass.com/continuous-automated-red-teaming/) - Explains the concept of CART.\n* [@article@What is Continuous Penetration Testing? Process and Benefits - Qualysec Technologies](https://qualysec.com/continuous-penetration-testing/) - Related concept applied to pen testing.\n* [@guide@What is Continuous Testing and How Does it Work? - Black Duck](https://www.blackduck.com/glossary/what-is-continuous-testing.html) - General definition and benefits.", - "links": [] + "description": "Applying continuous testing principles to AI security involves integrating automated red teaming checks into the development pipeline (CI/CD). This allows for regular, automated assessment of model safety, robustness, and alignment as the model or application code evolves, catching regressions or new vulnerabilities early. Tools facilitating Continuous Automated Red Teaming (CART) are emerging.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Continuous Automated Red Teaming (CART)", + "url": "https://www.firecompass.com/continuous-automated-red-teaming/", + "type": "article" + }, + { + "title": "What is Continuous Penetration Testing? Process and Benefits", + "url": "https://qualysec.com/continuous-penetration-testing/", + "type": "article" + }, + { + "title": "What is Continuous Testing and How Does it Work?", + "url": "https://www.blackduck.com/glossary/what-is-continuous-testing.html", + "type": "article" + } + ] }, "c8n8FcYKDOgPLQvV9xF5J": { "title": "Testing Platforms", - "description": "Platforms used by AI Red Teamers range from general penetration testing OS distributions like Kali Linux to specific AI red teaming tools/frameworks like Microsoft's PyRIT or Promptfoo, and vulnerability scanners like OWASP ZAP adapted for API testing of AI services. These platforms provide the toolsets needed to conduct assessments.\n\nLearn more from the following resources:\n\n* [@tool@AI Red Teaming Agent - Azure AI Foundry | Microsoft Learn](https://learn.microsoft.com/en-us/azure/ai-foundry/concepts/ai-red-teaming-agent) - Microsoft's tool leveraging PyRIT.\n* [@tool@Kali Linux](https://www.kali.org/) - Standard pentesting distribution.\n* [@tool@OWASP Zed Attack Proxy (ZAP)](https://owasp.org/www-project-zap/) - Widely used for web/API security testing.\n* [@tool@Promptfoo](https://www.promptfoo.dev/) - Open-source tool for testing and evaluating LLMs, includes red teaming features.\n* [@tool@PyRIT (Python Risk Identification Tool for generative AI) - GitHub](https://github.com/Azure/PyRIT) - Open-source framework from Microsoft.", - "links": [] + "description": "Platforms used by AI Red Teamers range from general penetration testing OS distributions like Kali Linux to specific AI red teaming tools/frameworks like Microsoft's PyRIT or Promptfoo, and vulnerability scanners like OWASP ZAP adapted for API testing of AI services. 
These platforms provide the toolsets needed to conduct assessments.\n\nLearn more from the following resources:", + "links": [ + { + "title": "AI Red Teaming Agent - Azure AI Foundry | Microsoft Learn", + "url": "https://learn.microsoft.com/en-us/azure/ai-foundry/concepts/ai-red-teaming-agent", + "type": "article" + }, + { + "title": "Kali Linux", + "url": "https://www.kali.org/", + "type": "article" + }, + { + "title": "OWASP Zed Attack Proxy (ZAP)", + "url": "https://owasp.org/www-project-zap/", + "type": "article" + }, + { + "title": "Promptfoo", + "url": "https://www.promptfoo.dev/", + "type": "article" + }, + { + "title": "PyRIT (Python Risk Identification Tool for generative AI)", + "url": "https://github.com/Azure/PyRIT", + "type": "article" + } + ] }, "59lkLcoqV4gq7f8Zm0X2p": { "title": "Monitoring Solutions", - "description": "AI Red Teamers interact with monitoring tools primarily to test their effectiveness (evasion) or potentially exploit vulnerabilities within them. Understanding tools like IDS (Snort, Suricata), network analyzers (Wireshark), and SIEMs helps red teamers simulate attacks that might bypass or target these defensive systems.\n\nLearn more from the following resources:\n\n* [@article@Open Source IDS Tools: Comparing Suricata, Snort, Bro (Zeek), Linux - LevelBlue](https://levelblue.com/blogs/security-essentials/open-source-intrusion-detection-tools-a-quick-overview) - Comparison of common open source monitoring tools.\n* [@tool@Snort](https://www.snort.org/) - Open source IDS/IPS.\n* [@tool@Suricata](https://suricata.io/) - Open source IDS/IPS/NSM.\n* [@tool@Wireshark](https://www.wireshark.org/) - Network protocol analyzer.\n* [@tool@Zeek (formerly Bro)](https://zeek.org/) - Network security monitoring framework.", - "links": [] + "description": "AI Red Teamers interact with monitoring tools primarily to test their effectiveness (evasion) or potentially exploit vulnerabilities within them. Understanding tools like IDS (Snort, Suricata), network analyzers (Wireshark), and SIEMs helps red teamers simulate attacks that might bypass or target these defensive systems.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Open Source IDS Tools: Comparing Suricata, Snort, Bro (Zeek), Linux", + "url": "https://levelblue.com/blogs/security-essentials/open-source-intrusion-detection-tools-a-quick-overview", + "type": "article" + }, + { + "title": "Snort", + "url": "https://www.snort.org/", + "type": "article" + }, + { + "title": "Suricata", + "url": "https://suricata.io/", + "type": "article" + }, + { + "title": "Wireshark", + "url": "https://www.wireshark.org/", + "type": "article" + }, + { + "title": "Zeek (formerly Bro)", + "url": "https://zeek.org/", + "type": "article" + } + ] }, "et1Xrr8ez-fmB0mAq8W_a": { "title": "Benchmark Datasets", - "description": "AI Red Teamers may use or contribute to benchmark datasets specifically designed to evaluate AI security. 
These datasets (like SecBench, NYU CTF Bench, CySecBench) contain prompts or scenarios targeting vulnerabilities, safety issues, or specific cybersecurity capabilities, allowing for standardized testing of models.\n\nLearn more from the following resources:\n\n* [@dataset@CySecBench: Generative AI-based CyberSecurity-focused Prompt Dataset - GitHub](https://github.com/cysecbench/dataset) - Dataset of cybersecurity prompts for benchmarking LLMs.\n* [@dataset@NYU CTF Bench: A Scalable Open-Source Benchmark Dataset for Evaluating LLMs in Offensive Security](https://proceedings.neurips.cc/paper_files/paper/2024/hash/69d97a6493fbf016fff0a751f253ad18-Abstract-Datasets_and_Benchmarks_Track.html) - Using CTF challenges to evaluate LLMs.\n* [@dataset@SecBench: A Comprehensive Multi-Dimensional Benchmarking Dataset for LLMs in Cybersecurity - arXiv](https://arxiv.org/abs/2412.20787) - Benchmarking LLMs on cybersecurity tasks.", - "links": [] + "description": "AI Red Teamers may use or contribute to benchmark datasets specifically designed to evaluate AI security. These datasets (like SecBench, NYU CTF Bench, CySecBench) contain prompts or scenarios targeting vulnerabilities, safety issues, or specific cybersecurity capabilities, allowing for standardized testing of models.\n\nLearn more from the following resources:", + "links": [ + { + "title": "CySecBench: Generative AI-based CyberSecurity-focused Prompt Dataset", + "url": "https://github.com/cysecbench/dataset", + "type": "article" + }, + { + "title": "NYU CTF Bench: A Scalable Open-Source Benchmark Dataset for Evaluating LLMs in Offensive Security", + "url": "https://proceedings.neurips.cc/paper_files/paper/2024/hash/69d97a6493fbf016fff0a751f253ad18-Abstract-Datasets_and_Benchmarks_Track.html", + "type": "article" + }, + { + "title": "SecBench: A Comprehensive Multi-Dimensional Benchmarking Dataset for LLMs in Cybersecurity", + "url": "https://arxiv.org/abs/2412.20787", + "type": "article" + } + ] }, "C1zO2xC0AqyV53p2YEPWg": { "title": "Custom Testing Scripts", - "description": "AI Red Teamers frequently write custom scripts (often in Python) to automate bespoke attacks, interact with specific AI APIs, generate complex prompt sequences, parse model outputs at scale, or implement novel exploit techniques not found in standard tools. Proficiency in scripting is essential for advanced AI red teaming.\n\nLearn more from the following resources:\n\n* [@guide@Python for Cybersecurity: Key Use Cases and Tools - Panther](https://panther.com/blog/python-for-cybersecurity-key-use-cases-and-tools) - Discusses Python's role in automation, pen testing, etc.\n* [@guide@Python for cybersecurity: use cases, tools and best practices - SoftTeco](https://softteco.com/blog/python-for-cybersecurity) - Covers using Python for various security tasks.\n* [@tool@Scapy](https://scapy.net/) - Powerful Python library for packet manipulation.", - "links": [] + "description": "AI Red Teamers frequently write custom scripts (often in Python) to automate bespoke attacks, interact with specific AI APIs, generate complex prompt sequences, parse model outputs at scale, or implement novel exploit techniques not found in standard tools. 
Proficiency in scripting is essential for advanced AI red teaming.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Python for Cybersecurity: Key Use Cases and Tools", + "url": "https://panther.com/blog/python-for-cybersecurity-key-use-cases-and-tools", + "type": "article" + }, + { + "title": "Python for cybersecurity: use cases, tools and best practices", + "url": "https://softteco.com/blog/python-for-cybersecurity", + "type": "article" + }, + { + "title": "Scapy", + "url": "https://scapy.net/", + "type": "article" + } + ] }, "BLnfNlA0C4yzy1dvifjwx": { "title": "Reporting Tools", - "description": "AI Red Teamers use reporting techniques and potentially tools to clearly document their findings, including discovered vulnerabilities, successful exploit steps (e.g., effective prompts), assessed impact, and actionable recommendations tailored to AI systems. Good reporting translates technical findings into understandable risks for stakeholders.\n\nLearn more from the following resources:\n\n* [@article@The Complete Red Teaming Checklist \\[PDF\\]: 5 Key Steps - Mindgard AI](https://mindgard.ai/blog/red-teaming-checklist) (Mentions reporting and templates)\n* [@guide@Penetration Testing Report: 6 Key Sections and 4 Best Practices - Bright Security](https://brightsec.com/blog/penetration-testing-report/) - General best practices for reporting security findings.\n* [@guide@Penetration testing best practices: Strategies for all test types - Strike Graph](https://www.strikegraph.com/blog/pen-testing-best-practices) - Includes tips on documentation.", - "links": [] + "description": "AI Red Teamers use reporting techniques and potentially tools to clearly document their findings, including discovered vulnerabilities, successful exploit steps (e.g., effective prompts), assessed impact, and actionable recommendations tailored to AI systems. Good reporting translates technical findings into understandable risks for stakeholders.\n\nLearn more from the following resources:", + "links": [ + { + "title": "The Complete Red Teaming Checklist [PDF]: 5 Key Steps - Mindgard AI", + "url": "https://mindgard.ai/blog/red-teaming-checklist", + "type": "article" + }, + { + "title": "Penetration Testing Report: 6 Key Sections and 4 Best Practices", + "url": "https://brightsec.com/blog/penetration-testing-report/", + "type": "article" + }, + { + "title": "Penetration testing best practices: Strategies for all test types", + "url": "https://www.strikegraph.com/blog/pen-testing-best-practices", + "type": "article" + } + ] }, "s1xKK8HL5-QGZpcutiuvj": { "title": "Specialized Courses", - "description": "Targeted training is crucial for mastering AI Red Teaming. 
Look for courses covering adversarial ML, prompt hacking, LLM security, ethical hacking for AI, and specific red teaming methodologies applied to AI systems offered by platforms like Learn Prompting, Coursera, or security training providers.\n\nLearn more from the following resources:\n\n* [@course@AI Red Teaming Courses - Learn Prompting](https://learnprompting.org/blog/ai-red-teaming-courses) - Curated list including free and paid options.\n* [@course@AI Security | Coursera](https://www.coursera.org/learn/ai-security) - Covers AI security risks and governance.\n* [@course@Exploring Adversarial Machine Learning - NVIDIA](https://www.nvidia.com/en-us/training/instructor-led-workshops/exploring-adversarial-machine-learning/) - Focused training on adversarial ML (paid).\n* [@course@Free Online Cyber Security Courses with Certificates in 2025 - EC-Council](https://www.eccouncil.org/cybersecurity-exchange/cyber-novice/free-cybersecurity-courses-beginners/) - Offers foundational cybersecurity courses.", - "links": [] + "description": "Targeted training is crucial for mastering AI Red Teaming. Look for courses covering adversarial ML, prompt hacking, LLM security, ethical hacking for AI, and specific red teaming methodologies applied to AI systems offered by platforms like Learn Prompting, Coursera, or security training providers.\n\nLearn more from the following resources:", + "links": [ + { + "title": "AI Red Teaming Courses - Learn Prompting", + "url": "https://learnprompting.org/blog/ai-red-teaming-courses", + "type": "course" + }, + { + "title": "AI Security | Coursera", + "url": "https://www.coursera.org/learn/ai-security", + "type": "course" + }, + { + "title": "Exploring Adversarial Machine Learning", + "url": "https://www.nvidia.com/en-us/training/instructor-led-workshops/exploring-adversarial-machine-learning/", + "type": "course" + }, + { + "title": "Free Online Cyber Security Courses with Certificates in 2025", + "url": "https://www.eccouncil.org/cybersecurity-exchange/cyber-novice/free-cybersecurity-courses-beginners/", + "type": "course" + } + ] }, "HHjsFR6wRDqUd66PMDE_7": { "title": "Industry Credentials", - "description": "Beyond formal certifications, recognition in the AI Red Teaming field comes from practical achievements like finding significant vulnerabilities (responsible disclosure), winning AI-focused CTFs or hackathons (like HackAPrompt), contributing to AI security research, or building open-source testing tools.\n\nLearn more from the following resources:\n\n* [@community@DEF CON - Wikipedia (Mentions Black Badge)](https://en.wikipedia.org/wiki/DEF_CON#Black_Badge) - Example of a high-prestige credential from CTFs.\n* [@community@HackAPrompt (Learn Prompting)](https://learnprompting.org/hackaprompt) - Example of a major AI Red Teaming competition.", - "links": [] + "description": "Beyond formal certifications, recognition in the AI Red Teaming field comes from practical achievements like finding significant vulnerabilities (responsible disclosure), winning AI-focused CTFs or hackathons (like HackAPrompt), contributing to AI security research, or building open-source testing tools.\n\nLearn more from the following resources:", + "links": [ + { + "title": "DEF CON - Wikipedia (Mentions Black Badge)", + "url": "https://en.wikipedia.org/wiki/DEF_CON#Black_Badge", + "type": "article" + }, + { + "title": "HackAPrompt (Learn Prompting)", + "url": "https://learnprompting.org/hackaprompt", + "type": "article" + } + ] }, "MmwwRK4I9aRH_ha7duPqf": { "title": "Lab Environments", - 
"description": "AI Red Teamers need environments to practice attacking vulnerable systems safely. While traditional labs (HTB, THM, VulnHub) build general pentesting skills, platforms are emerging with labs specifically focused on AI/LLM vulnerabilities, prompt injection, or adversarial ML challenges.\n\nLearn more from the following resources:\n\n* [@platform@Gandalf AI Prompt Injection Lab](https://gandalf.lakera.ai/) - A popular web-based lab for prompt injection practice.\n* [@platform@Hack The Box: Hacking Labs](https://www.hackthebox.com/hacker/hacking-labs) - General pentesting labs.\n* [@platform@TryHackMe: Learn Cyber Security](https://tryhackme.com/) - Gamified cybersecurity training labs.\n* [@platform@VulnHub](https://www.vulnhub.com/) - Provides vulnerable VM images for practice.", - "links": [] + "description": "AI Red Teamers need environments to practice attacking vulnerable systems safely. While traditional labs (HTB, THM, VulnHub) build general pentesting skills, platforms are emerging with labs specifically focused on AI/LLM vulnerabilities, prompt injection, or adversarial ML challenges.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Gandalf AI Prompt Injection Lab", + "url": "https://gandalf.lakera.ai/", + "type": "article" + }, + { + "title": "Hack The Box: Hacking Labs", + "url": "https://www.hackthebox.com/hacker/hacking-labs", + "type": "article" + }, + { + "title": "TryHackMe: Learn Cyber Security", + "url": "https://tryhackme.com/", + "type": "article" + }, + { + "title": "VulnHub", + "url": "https://www.vulnhub.com/", + "type": "article" + } + ] }, "2Imb64Px3ZQcBpSQjdc_G": { "title": "CTF Challenges", - "description": "Capture The Flag competitions increasingly include AI/ML security challenges. Participating in CTFs (tracked on CTFtime) or platforms like picoCTF helps AI Red Teamers hone skills in reverse engineering, web exploitation, and cryptography applied to AI systems, including specialized AI safety CTFs.\n\nLearn more from the following resources:\n\n* [@article@Capture the flag (cybersecurity) - Wikipedia](https://en.wikipedia.org/wiki/Capture_the_flag_\\(cybersecurity\\)) - Overview of CTFs.\n* [@article@Progress from our Frontier Red Team - Anthropic](https://www.anthropic.com/news/strategic-warning-for-ai-risk-progress-and-insights-from-our-frontier-red-team) - Mentions using CTFs (Cybench) for evaluating AI model security.\n* [@platform@CTFtime.org](https://ctftime.org/) - Global CTF event tracker.\n* [@platform@picoCTF](https://picoctf.org/) - Beginner-friendly CTF platform.", - "links": [] + "description": "Capture The Flag competitions increasingly include AI/ML security challenges. 
Participating in CTFs (tracked on CTFtime) or platforms like picoCTF helps AI Red Teamers hone skills in reverse engineering, web exploitation, and cryptography applied to AI systems, including specialized AI safety CTFs.\n\nLearn more from the following resources:",
    "links": [
      {
        "title": "Capture the flag (cybersecurity) - Wikipedia",
        "url": "https://en.wikipedia.org/wiki/Capture_the_flag_(cybersecurity)",
        "type": "article"
      },
      {
        "title": "Progress from our Frontier Red Team",
        "url": "https://www.anthropic.com/news/strategic-warning-for-ai-risk-progress-and-insights-from-our-frontier-red-team",
        "type": "article"
      },
      {
        "title": "CTFtime.org",
        "url": "https://ctftime.org/",
        "type": "article"
      },
      {
        "title": "picoCTF",
        "url": "https://picoctf.org/",
        "type": "article"
      }
    ]
  },
  "DpYsL0du37n40toH33fIr": {
    "title": "Red Team Simulations",
    "description": "Participating in or conducting structured red team simulations against AI systems (or components) provides the most realistic practice. 
This involves applying methodologies, TTPs (Tactics, Techniques, and Procedures), reconnaissance, exploitation, and reporting within a defined scope and objective, specifically targeting AI vulnerabilities.\n\nLearn more from the following resources:", + "links": [ + { + "title": "A Simple Guide to Successful Red Teaming", + "url": "https://www.cobaltstrike.com/resources/guides/a-simple-guide-to-successful-red-teaming", + "type": "article" + }, + { + "title": "The Complete Guide to Red Teaming: Process, Benefits & More", + "url": "https://mindgard.ai/blog/red-teaming", + "type": "article" + }, + { + "title": "The Complete Red Teaming Checklist [PDF]: 5 Key Steps - Mindgard AI", + "url": "https://mindgard.ai/blog/red-teaming-checklist", + "type": "article" + } + ] }, "LuKnmd9nSz9yLbTU_5Yp2": { "title": "Conferences", - "description": "Attending major cybersecurity conferences (DEF CON, Black Hat, RSA) and increasingly specialized AI Safety/Security conferences allows AI Red Teamers to learn about cutting-edge research, network with peers, and discover new tools and attack/defense techniques.\n\nLearn more from the following resources:\n\n* [@conference@Black Hat Events](https://www.blackhat.com/) - Professional security conference with AI tracks.\n* [@conference@DEF CON Hacking Conference](https://defcon.org/) - Major hacking conference with relevant villages/talks.\n* [@conference@Global Conference on AI, Security and Ethics 2025 - UNIDIR](https://unidir.org/event/global-conference-on-ai-security-and-ethics-2025/) - Example of a specialized AI security/ethics conference.\n* [@conference@RSA Conference](https://www.rsaconference.com/) - Large industry conference covering AI security.", - "links": [] + "description": "Attending major cybersecurity conferences (DEF CON, Black Hat, RSA) and increasingly specialized AI Safety/Security conferences allows AI Red Teamers to learn about cutting-edge research, network with peers, and discover new tools and attack/defense techniques.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Black Hat Events", + "url": "https://www.blackhat.com/", + "type": "article" + }, + { + "title": "DEF CON Hacking Conference", + "url": "https://defcon.org/", + "type": "article" + }, + { + "title": "Global Conference on AI, Security and Ethics 2025", + "url": "https://unidir.org/event/global-conference-on-ai-security-and-ethics-2025/", + "type": "article" + }, + { + "title": "RSA Conference", + "url": "https://www.rsaconference.com/", + "type": "article" + } + ] }, "ZlR03pM-sqVFZNhD1gMSJ": { "title": "Research Groups", - "description": "Following and potentially contributing to research groups at universities (like CMU, Stanford, Oxford), non-profits (like OpenAI, Anthropic), or government bodies (like UK's AISI) focused on AI safety, security, and alignment provides deep insights into emerging threats and mitigation strategies relevant to AI Red Teaming.\n\nLearn more from the following resources:\n\n* [@group@AI Cybersecurity | Global Cyber Security Capacity Centre (Oxford)](https://gcscc.ox.ac.uk/ai-security) - Academic research center.\n* [@group@Anthropic Research](https://www.anthropic.com/research) - AI safety research lab.\n* [@group@Center for AI Safety](https://www.safe.ai/) - Non-profit research organization.\n* [@group@The AI Security Institute (AISI)](https://www.aisi.gov.uk/) - UK government institute focused on AI safety/security research.", - "links": [] + "description": "Following and potentially contributing to research groups 
at universities (like CMU, Stanford, Oxford), non-profits (like OpenAI, Anthropic), or government bodies (like UK's AISI) focused on AI safety, security, and alignment provides deep insights into emerging threats and mitigation strategies relevant to AI Red Teaming.\n\nLearn more from the following resources:", + "links": [ + { + "title": "AI Cybersecurity | Global Cyber Security Capacity Centre (Oxford)", + "url": "https://gcscc.ox.ac.uk/ai-security", + "type": "article" + }, + { + "title": "Anthropic Research", + "url": "https://www.anthropic.com/research", + "type": "article" + }, + { + "title": "Center for AI Safety", + "url": "https://www.safe.ai/", + "type": "article" + }, + { + "title": "The AI Security Institute (AISI)", + "url": "https://www.aisi.gov.uk/", + "type": "article" + } + ] }, "Smncq-n1OlnLAY27AFQOO": { "title": "Forums", - "description": "Engaging in online forums, mailing lists, Discord servers, or subreddits dedicated to AI security, adversarial ML, prompt engineering, or general cybersecurity helps AI Red Teamers exchange knowledge, ask questions, learn about new tools/techniques, and find collaboration opportunities.\n\nLearn more from the following resources:\n\n* [@community@List of Cybersecurity Discord Servers - DFIR Training](https://www.dfir.training/dfir-groups/discord?category%5B0%5D=17&category_children=1) - List including relevant servers.\n* [@community@Reddit - r/MachineLearning](https://www.reddit.com/r/MachineLearning/) - ML specific discussion.\n* [@community@Reddit - r/artificial](https://www.reddit.com/r/artificial/) - General AI discussion.\n* [@community@Reddit - r/cybersecurity](https://www.reddit.com/r/cybersecurity/) - General cybersecurity forum.", - "links": [] + "description": "Engaging in online forums, mailing lists, Discord servers, or subreddits dedicated to AI security, adversarial ML, prompt engineering, or general cybersecurity helps AI Red Teamers exchange knowledge, ask questions, learn about new tools/techniques, and find collaboration opportunities.\n\nLearn more from the following resources:", + "links": [ + { + "title": "List of Cybersecurity Discord Servers", + "url": "https://www.dfir.training/dfir-groups/discord?category%5B0%5D=17&category_children=1", + "type": "article" + }, + { + "title": "Reddit - r/MachineLearning", + "url": "https://www.reddit.com/r/MachineLearning/", + "type": "article" + }, + { + "title": "Reddit - r/artificial", + "url": "https://www.reddit.com/r/artificial/", + "type": "article" + }, + { + "title": "Reddit - r/cybersecurity", + "url": "https://www.reddit.com/r/cybersecurity/", + "type": "article" + } + ] }, "xJYTRbPxMn0Xs5ea0Ygn6": { "title": "LLM Security Testing", - "description": "The core application area for many AI Red Teamers today involves specifically testing Large Language Models for vulnerabilities like prompt injection, jailbreaking, harmful content generation, bias, and data privacy issues using specialized prompts and evaluation frameworks.\n\nLearn more from the following resources:\n\n* [@course@AI Red Teaming Courses - Learn Prompting](https://learnprompting.org/blog/ai-red-teaming-courses) - Courses focused on testing LLMs.\n* [@dataset@SecBench: A Comprehensive Multi-Dimensional Benchmarking Dataset for LLMs in Cybersecurity - arXiv](https://arxiv.org/abs/2412.20787) - Dataset for evaluating LLMs on security tasks.\n* [@guide@The Ultimate Guide to Red Teaming LLMs and Adversarial Prompts (Kili 
Technology)](https://kili-technology.com/large-language-models-llms/red-teaming-llms-and-adversarial-prompts) - Guide specifically on red teaming LLMs.", - "links": [] + "description": "The core application area for many AI Red Teamers today involves specifically testing Large Language Models for vulnerabilities like prompt injection, jailbreaking, harmful content generation, bias, and data privacy issues using specialized prompts and evaluation frameworks.\n\nLearn more from the following resources:", + "links": [ + { + "title": "AI Red Teaming Courses - Learn Prompting", + "url": "https://learnprompting.org/blog/ai-red-teaming-courses", + "type": "course" + }, + { + "title": "SecBench: A Comprehensive Multi-Dimensional Benchmarking Dataset for LLMs in Cybersecurity", + "url": "https://arxiv.org/abs/2412.20787", + "type": "article" + }, + { + "title": "The Ultimate Guide to Red Teaming LLMs and Adversarial Prompts (Kili Technology)", + "url": "https://kili-technology.com/large-language-models-llms/red-teaming-llms-and-adversarial-prompts", + "type": "article" + } + ] }, "FVsKivsJrIb82B0lpPmgw": { "title": "Agentic AI Security", - "description": "As AI agents capable of autonomous action become more common, AI Red Teamers must test their unique security implications. This involves assessing risks related to goal hijacking, unintended actions through tool use, exploitation of planning mechanisms, and ensuring agents operate safely within their designated boundaries.\n\nLearn more from the following resources:\n\n* [@article@AI Agents - Learn Prompting](https://learnprompting.org/docs/intermediate/ai_agents) (Background on agents)\n* [@article@Reasoning models don't always say what they think - Anthropic](https://www.anthropic.com/research/reasoning-models-dont-always-say-what-they-think) (Discusses agent alignment challenges)\n* [@course@Certified AI Red Team Operator – Autonomous Systems (CAIRTO-AS) from Tonex, Inc.](https://niccs.cisa.gov/education-training/catalog/tonex-inc/certified-ai-red-team-operator-autonomous-systems-cairto) - Certification focusing on autonomous AI security.", - "links": [] + "description": "As AI agents capable of autonomous action become more common, AI Red Teamers must test their unique security implications. 
This involves assessing risks related to goal hijacking, unintended actions through tool use, exploitation of planning mechanisms, and ensuring agents operate safely within their designated boundaries.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Certified AI Red Team Operator – Autonomous Systems (CAIRTO-AS) from Tonex, Inc.", + "url": "https://niccs.cisa.gov/education-training/catalog/tonex-inc/certified-ai-red-team-operator-autonomous-systems-cairto", + "type": "course" + }, + { + "title": "AI Agents - Learn Prompting", + "url": "https://learnprompting.org/docs/intermediate/ai_agents", + "type": "article" + }, + { + "title": "Reasoning models don't always say what they think", + "url": "https://www.anthropic.com/research/reasoning-models-dont-always-say-what-they-think", + "type": "article" + } + ] }, "KAcCZ3zcv25R6HwzAsfUG": { "title": "Responsible Disclosure", - "description": "A critical practice for AI Red Teamers is responsible disclosure: privately reporting discovered AI vulnerabilities (e.g., a successful jailbreak, data leak method, or severe bias) to the model developers or system owners, allowing them time to remediate before any public discussion, thus preventing malicious exploitation.\n\nLearn more from the following resources:\n\n* [@guide@Responsible Disclosure of AI Vulnerabilities - Preamble AI](https://www.preamble.com/blog/responsible-disclosure-of-ai-vulnerabilities) - Discusses the process specifically for AI vulnerabilities.\n* [@guide@Vulnerability Disclosure Program | CISA](https://www.cisa.gov/resources-tools/programs/vulnerability-disclosure-program-vdp) - Government VDP example.\n* [@policy@Google Vulnerability Reward Program (VRP)](https://bughunters.google.com/) - Example of a major tech company's VDP/bug bounty program.", - "links": [] + "description": "A critical practice for AI Red Teamers is responsible disclosure: privately reporting discovered AI vulnerabilities (e.g., a successful jailbreak, data leak method, or severe bias) to the model developers or system owners, allowing them time to remediate before any public discussion, thus preventing malicious exploitation.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Responsible Disclosure of AI Vulnerabilities", + "url": "https://www.preamble.com/blog/responsible-disclosure-of-ai-vulnerabilities", + "type": "article" + }, + { + "title": "Vulnerability Disclosure Program", + "url": "https://www.cisa.gov/resources-tools/programs/vulnerability-disclosure-program-vdp", + "type": "article" + }, + { + "title": "Google Vulnerability Reward Program (VRP)", + "url": "https://bughunters.google.com/", + "type": "article" + } + ] }, "-G8v_CNa8wO_g-46_RFQo": { "title": "Emerging Threats", - "description": "AI Red Teamers must stay informed about potential future threats enabled by more advanced AI, such as highly autonomous attack agents, AI-generated malware that evades detection, sophisticated deepfakes for social engineering, or large-scale exploitation of interconnected AI systems. 
Anticipating these helps shape current testing priorities.\n\nLearn more from the following resources:\n\n* [@article@AI Security Risks Uncovered: What You Must Know in 2025 - TTMS](https://ttms.com/uk/ai-security-risks-explained-what-you-need-to-know-in-2025/) - Discusses future AI-driven cyberattacks.\n* [@article@Why Artificial Intelligence is the Future of Cybersecurity - Darktrace](https://www.darktrace.com/blog/why-artificial-intelligence-is-the-future-of-cybersecurity) - Covers AI misuse and the future threat landscape.\n* [@report@AI Index 2024 - Stanford University](https://aiindex.stanford.edu/report/) - Annual report tracking AI capabilities and societal implications, including risks.", - "links": [] + "description": "AI Red Teamers must stay informed about potential future threats enabled by more advanced AI, such as highly autonomous attack agents, AI-generated malware that evades detection, sophisticated deepfakes for social engineering, or large-scale exploitation of interconnected AI systems. Anticipating these helps shape current testing priorities.\n\nLearn more from the following resources:", + "links": [ + { + "title": "AI Security Risks Uncovered: What You Must Know in 2025", + "url": "https://ttms.com/uk/ai-security-risks-explained-what-you-need-to-know-in-2025/", + "type": "article" + }, + { + "title": "Why Artificial Intelligence is the Future of Cybersecurity", + "url": "https://www.darktrace.com/blog/why-artificial-intelligence-is-the-future-of-cybersecurity", + "type": "article" + }, + { + "title": "AI Index 2024", + "url": "https://aiindex.stanford.edu/report/", + "type": "article" + } + ] }, "soC-kcem1ISbnCQMa6BIB": { "title": "Advanced Techniques", - "description": "The practice of AI Red Teaming itself will evolve. Future techniques may involve using AI adversaries to automatically discover complex vulnerabilities, developing more sophisticated methods for testing AI alignment and safety properties, simulating multi-agent system failures, and creating novel metrics for evaluating AI robustness against unknown future attacks.\n\nLearn more from the following resources:\n\n* [@article@AI red-teaming in critical infrastructure: Boosting security and trust in AI systems - DNV](https://www.dnv.com/article/ai-red-teaming-for-critical-infrastructure-industries/) - Discusses applying red teaming to complex systems.\n* [@article@Advanced Techniques in AI Red Teaming for LLMs | NeuralTrust](https://neuraltrust.ai/blog/advanced-techniques-in-ai-red-teaming) - Discusses techniques like adversarial ML and automated threat intelligence for red teaming.\n* [@paper@Diverse and Effective Red Teaming with Auto-generated Rewards and Multi-step Reinforcement Learning - arXiv](https://arxiv.org/html/2412.18693v1) - Research on using RL for more advanced automated red teaming.", - "links": [] + "description": "The practice of AI Red Teaming itself will evolve. 
Future techniques may involve using AI adversaries to automatically discover complex vulnerabilities, developing more sophisticated methods for testing AI alignment and safety properties, simulating multi-agent system failures, and creating novel metrics for evaluating AI robustness against unknown future attacks.\n\nLearn more from the following resources:", + "links": [ + { + "title": "AI red-teaming in critical infrastructure: Boosting security and trust in AI systems", + "url": "https://www.dnv.com/article/ai-red-teaming-for-critical-infrastructure-industries/", + "type": "article" + }, + { + "title": "Advanced Techniques in AI Red Teaming for LLMs", + "url": "https://neuraltrust.ai/blog/advanced-techniques-in-ai-red-teaming", + "type": "article" + }, + { + "title": "Diverse and Effective Red Teaming with Auto-generated Rewards and Multi-step Reinforcement Learning", + "url": "https://arxiv.org/html/2412.18693v1", + "type": "article" + } + ] }, "VmaIHVsCpq2um_0cA33V3": { "title": "Research Opportunities", - "description": "AI Red Teaming relies on ongoing research. Key areas needing further investigation include scalable methods for finding elusive vulnerabilities, understanding emergent behaviors in complex models, developing provable safety guarantees, creating better benchmarks for AI security, and exploring the socio-technical aspects of AI misuse and defense.\n\nLearn more from the following resources:\n\n* [@article@Cutting-Edge Research on AI Security bolstered with new Challenge Fund - GOV.UK](https://www.gov.uk/government/news/cutting-edge-research-on-ai-security-bolstered-with-new-challenge-fund-to-ramp-up-public-trust-and-adoption) - Highlights government funding for AI security research priorities.\n* [@research@Careers | The AI Security Institute (AISI)](https://www.aisi.gov.uk/careers) - Outlines research focus areas for the UK's AISI.\n* [@research@Research - Anthropic](https://www.anthropic.com/research) - Example of research areas at a leading AI safety lab.", - "links": [] + "description": "AI Red Teaming relies on ongoing research. 
Key areas needing further investigation include scalable methods for finding elusive vulnerabilities, understanding emergent behaviors in complex models, developing provable safety guarantees, creating better benchmarks for AI security, and exploring the socio-technical aspects of AI misuse and defense.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Cutting-Edge Research on AI Security bolstered with new Challenge Fund", + "url": "https://www.gov.uk/government/news/cutting-edge-research-on-ai-security-bolstered-with-new-challenge-fund-to-ramp-up-public-trust-and-adoption", + "type": "article" + }, + { + "title": "Careers | The AI Security Institute (AISI)", + "url": "https://www.aisi.gov.uk/careers", + "type": "article" + }, + { + "title": "Research - Anthropic", + "url": "https://www.anthropic.com/research", + "type": "article" + } + ] }, "WePO66_4-gNcSdE00WKmw": { "title": "Industry Standards", - "description": "As AI matures, AI Red Teamers will increasingly need to understand and test against emerging industry standards and regulations for AI safety, security, and risk management, such as the NIST AI RMF, ISO/IEC 42001, and sector-specific guidelines, ensuring AI systems meet compliance requirements.\n\nLearn more from the following resources:\n\n* [@article@ISO 42001: The New Compliance Standard for AI Management Systems - Bright Defense](https://www.brightdefense.com/resources/iso-42001-compliance/) - Overview of ISO 42001 requirements.\n* [@article@ISO 42001: What it is & why it matters for AI management - IT Governance](https://www.itgovernance.co.uk/iso-42001) - Explanation of the standard.\n* [@framework@NIST AI Risk Management Framework (AI RMF)](https://www.nist.gov/itl/ai-risk-management-framework) - Voluntary framework gaining wide adoption.\n* [@standard@ISO/IEC 42001: Information technology — Artificial intelligence — Management system](https://www.iso.org/standard/81230.html) - International standard for AI management systems.", - "links": [] + "description": "As AI matures, AI Red Teamers will increasingly need to understand and test against emerging industry standards and regulations for AI safety, security, and risk management, such as the NIST AI RMF, ISO/IEC 42001, and sector-specific guidelines, ensuring AI systems meet compliance requirements.\n\nLearn more from the following resources:", + "links": [ + { + "title": "ISO 42001: The New Compliance Standard for AI Management Systems", + "url": "https://www.brightdefense.com/resources/iso-42001-compliance/", + "type": "article" + }, + { + "title": "ISO 42001: What it is & why it matters for AI management", + "url": "https://www.itgovernance.co.uk/iso-42001", + "type": "article" + }, + { + "title": "NIST AI Risk Management Framework (AI RMF)", + "url": "https://www.nist.gov/itl/ai-risk-management-framework", + "type": "article" + }, + { + "title": "ISO/IEC 42001: Information technology — Artificial intelligence — Management system", + "url": "https://www.iso.org/standard/81230.html", + "type": "article" + } + ] } } \ No newline at end of file diff --git a/public/roadmap-content/cyber-security.json b/public/roadmap-content/cyber-security.json index 846aee499..4cfaade79 100644 --- a/public/roadmap-content/cyber-security.json +++ b/public/roadmap-content/cyber-security.json @@ -584,7 +584,7 @@ "type": "course" }, { - "title": "Linux Roadmap", + "title": "Visit Linux Roadmap", "url": "https://roadmap.sh/linux", "type": "article" }, @@ -615,8 +615,8 @@ "description": "**macOS** is an operating 
system developed by Apple Inc. for its line of Mac computers. Known for its user-friendly interface and integration with other Apple products, macOS features a Unix-based architecture, offering stability, security, and performance. It includes a suite of built-in applications, such as Safari, Mail, and Finder, and supports a wide range of third-party software. macOS provides seamless integration with services like iCloud, Continuity, and Handoff, enhancing productivity and connectivity across Apple devices. Regular updates and a focus on design and usability make macOS a popular choice for both personal and professional use.\n\nLearn more from the following resources:", "links": [ { - "title": "MacOS Website", - "url": "https://www.apple.com/uk/macos/macos-sequoia/", + "title": "macOS", + "url": "https://www.apple.com/macos/macos-sequoia/", "type": "article" }, { @@ -874,7 +874,7 @@ "description": "**Loopback** refers to a special network interface used to send traffic back to the same device for testing and diagnostic purposes. The loopback address for IPv4 is `127.0.0.1`, while for IPv6 it is `::1`. When a device sends a request to the loopback address, the network data does not leave the local machine; instead, it is processed internally, allowing developers to test applications or network services without requiring external network access. Loopback is commonly used to simulate network traffic, check local services, or debug issues locally.\n\nLearn more from the following resources:", "links": [ { - "title": "Understanding the loopback address and loopback interfaces", + "title": "Understanding the Loopback Address and Loopback Interfaces", "url": "https://study-ccna.com/loopback-interface-loopback-address/", "type": "article" } @@ -964,6 +964,11 @@ "title": "ARP", "description": "Address Resolution Protocol (ARP) is a crucial mechanism used in networking that allows the Internet Protocol (IP) to map an IP address to a corresponding physical address, commonly known as a Media Access Control (MAC) address. This protocol is essential for enabling devices within a Local Area Network (LAN) to communicate by translating IP addresses into specific hardware addresses.\n\nWhen one device on a LAN wants to communicate with another, it needs to know the MAC address associated with the target device’s IP address. ARP facilitates this by sending out an ARP request, which broadcasts the target IP to all devices in the network. Each device checks the requested IP against its own. The device that recognizes the IP as its own responds with an ARP reply, which includes its MAC address.\n\nOnce the requesting device receives the MAC address, it updates its ARP cache—a table that stores IP-to-MAC address mappings—allowing it to send data directly to the correct hardware address.\n\nLearn more from the following resources:", "links": [ + { + "title": "ARP - Wikipedia", + "url": "https://en.wikipedia.org/wiki/Address_Resolution_Protocol", + "type": "article" + }, { "title": "What is Address Resolution Protocol?", "url": "https://www.fortinet.com/resources/cyberglossary/what-is-arp", @@ -1304,12 +1309,12 @@ "description": "Mesh topology is a network architecture where devices or nodes are interconnected with multiple direct, point-to-point links to every other node in the network. This structure allows data to travel from source to destination through multiple paths, enhancing reliability and fault tolerance. 
In a full mesh topology, every node is connected to every other node, while in a partial mesh, only some nodes have multiple connections. Mesh networks are highly resilient to failures, as traffic can be rerouted if a link goes down. They're commonly used in wireless networks, IoT applications, and critical infrastructure where redundancy and self-healing capabilities are crucial. However, mesh topologies can be complex and expensive to implement, especially in large networks due to the high number of connections required.\n\nLearn more from the following resources:", "links": [ { - "title": "What is mesh topology?", + "title": "What is Mesh Topology?", "url": "https://www.lenovo.com/gb/en/glossary/mesh-topology", "type": "article" }, { - "title": "Mesh topology explained", + "title": "Mesh Topology explained", "url": "https://www.computerhope.com/jargon/m/mesh.htm", "type": "article" } @@ -1712,6 +1717,11 @@ "title": "arp", "description": "ARP is a protocol used by the Internet Protocol (IP) to map an IP address to a physical address, also known as a Media Access Control (MAC) address. ARP is essential for routing data between devices in a Local Area Network (LAN) as it allows for the translation of IP addresses to specific hardware on the network. When a device wants to communicate with another device on the same LAN, it needs to determine the corresponding MAC address for the target IP address. ARP helps in this process by broadcasting an ARP request containing the target IP address. All devices within the broadcast domain receive this ARP request and compare the target IP address with their own IP address. If a match is found, the device with the matching IP address sends an ARP reply which contains its MAC address. The device that initiated the ARP request can now update its ARP cache (a table that stores IP-to-MAC mappings) with the new information, and then proceed to send data to the target's MAC address.\n\nLearn more from the following resources:", "links": [ + { + "title": "ARP - Wikipedia", + "url": "https://en.wikipedia.org/wiki/Address_Resolution_Protocol", + "type": "article" + }, { "title": "What is Address Resolution Protocol?", "url": "https://www.fortinet.com/resources/cyberglossary/what-is-arp", @@ -1896,8 +1906,14 @@ }, "vYvFuz7lAJXZ1vK_4999a": { "title": "Local Auth", - "description": "Local authentication refers to the process of verifying a user's identity on a specific device or system without relying on external servers or networks. It typically involves storing and checking credentials directly on the device itself. Common methods include username/password combinations, biometrics (fingerprint, face recognition), or PIN codes. Local authentication is often used for device access, offline applications, or as a fallback when network-based authentication is unavailable. While it offers quick access and works without internet connectivity, it can be less secure than centralized authentication systems and more challenging to manage across multiple devices. Local authentication is commonly used in personal devices, standalone systems, and scenarios where network-based authentication is impractical or unnecessary.", - "links": [] + "description": "Local authentication refers to the process of verifying a user's identity on a specific device or system without relying on external servers or networks. It typically involves storing and checking credentials directly on the device itself. 
Common methods include username/password combinations, biometrics (fingerprint, face recognition), or PIN codes. Local authentication is often used for device access, offline applications, or as a fallback when network-based authentication is unavailable. While it offers quick access and works without internet connectivity, it can be less secure than centralized authentication systems and more challenging to manage across multiple devices. Local authentication is commonly used in personal devices, standalone systems, and scenarios where network-based authentication is impractical or unnecessary.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Local authentication, registration, and other settings", + "url": "https://learn.microsoft.com/en-us/power-pages/security/authentication/set-authentication-identity", + "type": "article" + } + ] }, "_hYN0gEi9BL24nptEtXWU": { "title": "Security Skills and Knowledge", @@ -2256,7 +2272,7 @@ }, "WG7DdsxESm31VcLFfkVTz": { "title": "Authentication vs Authorization", - "description": "Authentication vs Authorization\n-------------------------------\n\n**Authentication** is the process of validating the identity of a user, device, or system. It confirms that the entity attempting to access the resource is who or what they claim to be. The most common form of authentication is the use of usernames and passwords. Other methods include:\n\n**Authorization** comes into play after the authentication process is complete. It involves granting or denying access to a resource, based on the authenticated user's privileges. Authorization determines what actions the authenticated user or entity is allowed to perform within a system or application.", + "description": "**Authentication** is the process of validating the identity of a user, device, or system. It confirms that the entity attempting to access the resource is who or what they claim to be. The most common form of authentication is the use of usernames and passwords. Other methods include:\n\n**Authorization** comes into play after the authentication process is complete. It involves granting or denying access to a resource, based on the authenticated user's privileges. Authorization determines what actions the authenticated user or entity is allowed to perform within a system or application.\n\nLearn more from the following resources:", "links": [ { "title": "Two-factor authentication (2FA)", @@ -2430,7 +2446,7 @@ }, "v7CD_sHqLWbm9ibXXESIK": { "title": "Learn how Malware works and Types", - "description": "Malware, short for malicious software, refers to any software intentionally created to cause harm to a computer system, server, network, or user. It is a broad term that encompasses various types of harmful software created by cybercriminals for various purposes. In this guide, we will delve deeper into the major types of malware and their characteristics.\n\nVirus\n-----\n\nA computer virus is a type of malware that, much like a biological virus, attaches itself to a host (e.g., a file or software) and replicates when the host is executed. Viruses can corrupt, delete or modify data, and slow down system performance.\n\nWorm\n----\n\nWorms are self-replicating malware that spread through networks without human intervention. They exploit system vulnerabilities, consuming bandwidth and sometimes carrying a payload to infect target machines.\n\nTrojan Horse\n------------\n\nA trojan horse is a piece of software disguised as a legitimate program but contains harmful code. 
Users unknowingly download and install it, giving the attacker unauthorized access to the computer or network. Trojans can be used to steal data, create a backdoor, or launch additional malware attacks.\n\nRansomware\n----------\n\nRansomware is a type of malware that encrypts its victims' files and demands a ransom, typically in the form of cryptocurrency, for the decryption key. If the victim refuses or fails to pay within a specified time, the encrypted data may be lost forever.\n\nSpyware\n-------\n\nSpyware is a type of malware designed to collect and relay information about a user or organization without their consent. It can capture keystrokes, record browsing history, and access personal data such as usernames and passwords.\n\nAdware\n------\n\nAdware is advertising-supported software that automatically displays or downloads advertising materials, often in the form of pop-up ads, on a user's computer. While not always malicious, adware can be intrusive and open the door for other malware infections.\n\nRootkit\n-------\n\nA rootkit is a type of malware designed to hide or obscure the presence of other malicious programs on a computer system. This enables it to maintain persistent unauthorized access to the system and can make it difficult for users or security software to detect and remove infected files.\n\nKeylogger\n---------\n\nKeyloggers are a type of malware that monitor and record users' keystrokes, allowing attackers to capture sensitive information, such as login credentials or financial information entered on a keyboard.\n\nUnderstanding the different types of malware can help you better identify and protect against various cyber threats. As the cyber landscape continues to evolve, it's essential to stay informed about emerging malware and equip yourself with the necessary security skills and knowledge.", + "description": "Malware, short for malicious software, refers to any software intentionally created to cause harm to a computer system, server, network, or user. It is a broad term that encompasses various types of harmful software created by cybercriminals for various purposes. In this guide, we will delve deeper into the major types of malware and their characteristics.\n\nVirus\n-----\n\nA computer virus is a type of malware that, much like a biological virus, attaches itself to a host (e.g., a file or software) and replicates when the host is executed. Viruses can corrupt, delete or modify data, and slow down system performance.\n\nWorm\n----\n\nWorms are self-replicating malware that spread through networks without human intervention. They exploit system vulnerabilities, consuming bandwidth and sometimes carrying a payload to infect target machines.\n\nTrojan Horse\n------------\n\nA trojan horse is a piece of software disguised as a legitimate program but contains harmful code. Users unknowingly download and install it, giving the attacker unauthorized access to the computer or network. Trojans can be used to steal data, create a backdoor, or launch additional malware attacks.\n\nRansomware\n----------\n\nRansomware is a type of malware that encrypts its victims' files and demands a ransom, typically in the form of cryptocurrency, for the decryption key. If the victim refuses or fails to pay within a specified time, the encrypted data may be lost forever.\n\nSpyware\n-------\n\nSpyware is a type of malware designed to collect and relay information about a user or organization without their consent. 
It can capture keystrokes, record browsing history, and access personal data such as usernames and passwords.\n\nAdware\n------\n\nAdware is advertising-supported software that automatically displays or downloads advertising materials, often in the form of pop-up ads, on a user's computer. While not always malicious, adware can be intrusive and open the door for other malware infections.\n\nRootkit\n-------\n\nA rootkit is a type of malware designed to hide or obscure the presence of other malicious programs on a computer system. This enables it to maintain persistent unauthorized access to the system and can make it difficult for users or security software to detect and remove infected files.\n\nKeylogger\n---------\n\nKeyloggers are a type of malware that monitor and record users' keystrokes, allowing attackers to capture sensitive information, such as login credentials or financial information entered on a keyboard.\n\nLearn more from the following resources:", "links": [] }, "Hoou7kWyfB2wx_yFHug_H": { @@ -2438,7 +2454,7 @@ "description": "**Nmap** (Network Mapper) is an open-source network scanning tool used to discover hosts and services on a network, identify open ports, and detect vulnerabilities. It provides detailed information about networked devices, including their IP addresses, operating systems, and running services. Nmap supports various scanning techniques such as TCP SYN scan, UDP scan, and service version detection. It's widely used for network security assessments, vulnerability scanning, and network inventory management, helping administrators and security professionals understand and secure their network environments.\n\nLearn more from the following resources:", "links": [ { - "title": "NMAP Website", + "title": "NMAP", "url": "https://nmap.org/", "type": "article" }, @@ -2446,6 +2462,11 @@ "title": "NMAP Cheat Sheet", "url": "https://www.tutorialspoint.com/nmap-cheat-sheet", "type": "article" + }, + { + "title": "Nmap Tutorial to find Network Vulnerabilities", + "url": "https://www.youtube.com/watch?v=4t4kBkMsDbQ", + "type": "video" } ] }, @@ -2538,6 +2559,11 @@ "title": "arp", "description": "ARP is a protocol used by the Internet Protocol (IP) to map an IP address to a physical address, also known as a Media Access Control (MAC) address. ARP is essential for routing data between devices in a Local Area Network (LAN) as it allows for the translation of IP addresses to specific hardware on the network. When a device wants to communicate with another device on the same LAN, it needs to determine the corresponding MAC address for the target IP address. ARP helps in this process by broadcasting an ARP request containing the target IP address. All devices within the broadcast domain receive this ARP request and compare the target IP address with their own IP address. If a match is found, the device with the matching IP address sends an ARP reply which contains its MAC address. 
The device that initiated the ARP request can now update its ARP cache (a table that stores IP-to-MAC mappings) with the new information, and then proceed to send data to the target's MAC address.\n\nLearn more from the following resources:", "links": [ + { + "title": "ARP - Wikipedia", + "url": "https://en.wikipedia.org/wiki/Address_Resolution_Protocol", + "type": "article" + }, { "title": "What is Address Resolution Protocol?", "url": "https://www.fortinet.com/resources/cyberglossary/what-is-arp", @@ -2660,6 +2686,11 @@ "title": "memdump", "description": "**memdump** is a tool or process used to capture the contents of a computer's physical memory (RAM) for analysis. This \"memory dump\" can be useful in digital forensics, debugging, or incident response to identify active processes, open files, network connections, or potentially malicious code running in memory. By analyzing a memory dump, security professionals can investigate malware, recover encryption keys, or gather evidence in case of a breach. Tools like `memdump` (Linux utility) or `DumpIt` (Windows) are commonly used to perform this process.\n\nLearn more from the following resources:", "links": [ + { + "title": "memdump - Github", + "url": "https://github.com/tchebb/memdump", + "type": "opensource" + }, { "title": "memdump", "url": "https://www.kali.org/tools/memdump/", @@ -2688,7 +2719,7 @@ "description": "Autopsy is a versatile and powerful open-source digital forensics platform that is primarily used for incident response, cyber security investigations, and data recovery. As an investigator, you can utilize Autopsy to quickly and efficiently analyze a compromised system, extract crucial artifacts, and generate comprehensive reports. Integrated with The Sleuth Kit and other plug-ins, Autopsy allows examiners to automate tasks and dig deep into a system's structure to discover the root cause of an incident.\n\nLearn more from the following resources:", "links": [ { - "title": "Autopsy Website", + "title": "Autopsy", "url": "https://www.autopsy.com/", "type": "article" }, @@ -2917,7 +2948,7 @@ "description": "**NIST (National Institute of Standards and Technology)** is a U.S. federal agency that develops and promotes measurement standards, technology, and best practices. In the context of cybersecurity, NIST provides widely recognized guidelines and frameworks, such as the **NIST Cybersecurity Framework (CSF)**, which offers a structured approach to managing and mitigating cybersecurity risks. NIST also publishes the **NIST Special Publication (SP) 800 series**, which includes standards and guidelines for securing information systems, protecting data, and ensuring system integrity. These resources are essential for organizations seeking to enhance their security posture and comply with industry regulations.\n\nLearn more from the following resources:", "links": [ { - "title": "NIST Website", + "title": "NIST", "url": "https://www.nist.gov/", "type": "article" }, @@ -3060,7 +3091,7 @@ "description": "**LOLBAS** (Living Off the Land Binaries and Scripts) refers to a collection of legitimate system binaries and scripts that can be abused by attackers to perform malicious actions while evading detection. These tools, which are often part of the operating system or installed software, can be leveraged for various purposes, such as executing commands, accessing data, or modifying system configurations, thereby allowing attackers to carry out their activities without deploying custom malware. 
The use of LOLBAS techniques makes it harder for traditional security solutions to detect and prevent malicious activities since the binaries and scripts used are typically trusted and deemed legitimate.\n\nLearn more from the following resources:", "links": [ { - "title": "LOLBAS project", + "title": "LOLBAS Project", "url": "https://lolbas-project.github.io/#", "type": "article" }, @@ -3113,7 +3144,7 @@ "description": "**NetFlow** is a network protocol developed by Cisco for collecting and analyzing network traffic data. It provides detailed information about network flows, including the source and destination IP addresses, ports, and the amount of data transferred. NetFlow data helps network administrators monitor traffic patterns, assess network performance, and identify potential security threats. By analyzing flow data, organizations can gain insights into bandwidth usage, detect anomalies, and optimize network resources. NetFlow is widely supported across various network devices and often integrated with network management and security tools for enhanced visibility and control.\n\nLearn more from the following resources:", "links": [ { - "title": "Cisco NetFlow Website", + "title": "Cisco NetFlow", "url": "https://www.cisco.com/c/en/us/products/ios-nx-os-software/ios-netflow/index.html", "type": "article" }, @@ -3174,7 +3205,7 @@ }, "6oAzYfwsHQYNVbi7c2Tly": { "title": "NAC-based", - "description": "Network Access Control (NAC) based hardening is a crucial component in enhancing the security of your network infrastructure. NAC provides organizations with the ability to control and manage access to the network resources, ensuring that only authorized users and devices can connect to the network. It plays a vital role in reducing the attack surface and preventing unauthorized access to sensitive data and resources. By implementing NAC-based hardening in your cybersecurity strategy, you protect your organization from threats and maintain secure access to critical resources.\n\nLearn more from the following resouces:", + "description": "Network Access Control (NAC) based hardening is a crucial component in enhancing the security of your network infrastructure. NAC provides organizations with the ability to control and manage access to the network resources, ensuring that only authorized users and devices can connect to the network. It plays a vital role in reducing the attack surface and preventing unauthorized access to sensitive data and resources. By implementing NAC-based hardening in your cybersecurity strategy, you protect your organization from threats and maintain secure access to critical resources.\n\nLearn more from the following resources:", "links": [ { "title": "What is Network Access Control", @@ -3407,12 +3438,12 @@ "description": "Antivirus software is a specialized program designed to detect, prevent, and remove malicious software, such as viruses, worms, and trojans, from computer systems. It works by scanning files and programs for known malware signatures, monitoring system behavior for suspicious activity, and providing real-time protection against potential threats. Regular updates are essential for antivirus software to recognize and defend against the latest threats. 
While it is a critical component of cybersecurity, antivirus solutions are often part of a broader security strategy that includes firewalls, anti-malware tools, and user education to protect against a wide range of cyber threats.\n\nLearn more from the following resources:", "links": [ { - "title": "What is antivirus software?", + "title": "What is Antivirus Software?", "url": "https://www.webroot.com/gb/en/resources/tips-articles/what-is-anti-virus-software", "type": "article" }, { - "title": "What is an antivirus and how does it keep us safe?", + "title": "What is an Antivirus and how does it keep us safe?", "url": "https://www.youtube.com/watch?v=jW626WMWNAE", "type": "video" } @@ -3423,7 +3454,12 @@ "description": "Anti-malware is a type of software designed to detect, prevent, and remove malicious software, such as viruses, worms, trojans, ransomware, and spyware, from computer systems. By continuously scanning files, applications, and incoming data, anti-malware solutions protect devices from a wide range of threats that can compromise system integrity, steal sensitive information, or disrupt operations. Advanced anti-malware programs utilize real-time monitoring, heuristic analysis, and behavioral detection techniques to identify and neutralize both known and emerging threats, ensuring that systems remain secure against evolving cyber attacks.\n\nLearn more from the following resources:", "links": [ { - "title": "What is antimalware?", + "title": "Anti-malware Definition", + "url": "https://www.computertechreviews.com/definition/anti-malware/", + "type": "article" + }, + { + "title": "What is Antimalware?", "url": "https://riskxchange.co/1006974/cybersecurity-what-is-anti-malware/", "type": "article" }, @@ -3811,7 +3847,7 @@ "description": "ANY.RUN is an interactive online malware analysis platform that allows users to safely execute and analyze suspicious files and URLs in a controlled, virtualized environment. This sandbox service provides real-time insights into the behavior of potentially malicious software, such as how it interacts with the system, what files it modifies, and what network connections it attempts to make. Users can observe and control the analysis process, making it a valuable tool for cybersecurity professionals to identify and understand new threats, assess their impact, and develop appropriate countermeasures. ANY.RUN is particularly useful for dynamic analysis, enabling a deeper understanding of malware behavior in real-time.\n\nLearn more from the following resources:", "links": [ { - "title": "ANY.RUN Website", + "title": "Any.run", "url": "https://any.run/", "type": "article" }, @@ -4162,7 +4198,7 @@ "description": "A Man-in-the-Middle (MITM) attack occurs when a malicious actor intercepts communication between two parties, such as a user and a website, without their knowledge. The attacker can eavesdrop, alter, or inject false information into the communication, often to steal sensitive data like login credentials or manipulate transactions. 
MITM attacks are commonly executed through compromised Wi-Fi networks or by exploiting security vulnerabilities in protocols.\n\nVisit the following resources to learn more:", "links": [ { - "title": "Wikipedia - Man-in-the-middle attack", + "title": "Man-in-the-middle attack", "url": "https://en.wikipedia.org/wiki/Man-in-the-middle_attack", "type": "article" } @@ -4276,7 +4312,7 @@ }, "nOND14t7ISgSH3zNpV3F8": { "title": "Memory Leak", - "description": "A Memory Leak occurs when a computer program consumes memory but fails to release it back to the operating system after it is no longer needed. Over time, this can lead to reduced system performance, increased memory usage, and, in severe cases, the program or system may crash due to the exhaustion of available memory.", + "description": "A Memory Leak occurs when a computer program consumes memory but fails to release it back to the operating system after it is no longer needed. Over time, this can lead to reduced system performance, increased memory usage, and, in severe cases, the program or system may crash due to the exhaustion of available memory.\n\nLearn more from the following resources:", "links": [ { "title": "What are memory leaks?", @@ -4443,7 +4479,7 @@ "description": "A legal department within an organization is responsible for handling all legal matters that affect the business, ensuring compliance with laws and regulations, and providing advice on various legal issues. Its primary functions include managing contracts, intellectual property, employment law, and regulatory compliance, as well as addressing disputes, litigation, and risk management. The legal department also plays a crucial role in corporate governance, ensuring that the company operates within the boundaries of the law while minimizing legal risks. In some cases, they work with external legal counsel for specialized legal matters, such as mergers and acquisitions or complex litigation.\n\nLearn more from the following resources:", "links": [ { - "title": "Key functions of a legal team", + "title": "Key Functions of a Legal Team", "url": "https://uk.practicallaw.thomsonreuters.com/w-009-3932?transitionType=Default&contextData=(sc.Default)&firstPage=true", "type": "article" }, @@ -4703,12 +4739,12 @@ "type": "course" }, { - "title": "AWS Roadmap", + "title": "Visit Dedicated AWS Roadmap", "url": "https://roadmap.sh/aws", "type": "article" }, { - "title": "AWS Website", + "title": "AWS", "url": "https://aws.amazon.com", "type": "article" }, @@ -4745,7 +4781,7 @@ "description": "Azure is Microsoft's comprehensive cloud computing platform that offers a wide range of services for building, deploying, and managing applications. It provides infrastructure as a service (IaaS), platform as a service (PaaS), and software as a service (SaaS) solutions, supporting various programming languages, tools, and frameworks. Azure's services include virtual machines, storage, databases, AI and machine learning, IoT, and more. It offers global data center coverage, integrated DevOps tools, and robust security features, making it a versatile platform for businesses of all sizes to innovate, scale, and transform their operations in the cloud.\n\nLearn more from the following resources:", "links": [ { - "title": "Azure Website", + "title": "Azure", "url": "https://azure.microsoft.com", "type": "article" },