Merge branch 'master' into dansholds/add-shift-left-devops-guide

dansholds/add-shift-left-devops-guide
Kamran Ahmed 5 months ago committed by GitHub
commit a0b309b1cf
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
  1. 55
      contributing.md
  2. 2
      public/roadmap-content/api-design.json
  3. 22
      public/roadmap-content/cyber-security.json
  4. 5
      public/roadmap-content/nodejs.json
  5. 259
      src/data/guides/devops-lifecycle.md
  6. 191
      src/data/guides/full-stack-job-description.md
  7. 4
      src/data/projects/portfolio-website.md
  8. 2
      src/data/roadmaps/api-design/content/cookies@UFuX8wcxZQ7dvaQF_2Yp8.md
  9. 3
      src/data/roadmaps/cyber-security/content/basics-of-subnetting@E8Z7qFFW-I9ivr0HzoXCq.md
  10. 2
      src/data/roadmaps/cyber-security/content/ldap@lV3swvD6QGLmD9iVfbKIF.md
  11. 2
      src/data/roadmaps/data-analyst/data-analyst.json
  12. 6
      src/data/roadmaps/devops/faqs.astro
  13. 12
      src/data/roadmaps/full-stack/faqs.astro
  14. 3
      src/data/roadmaps/java/content/100-java-fundamentals/100-basic-syntax.md
  15. 6
      src/data/roadmaps/linux/content/104-text-processing/109-pipe.md
  16. 1
      src/data/roadmaps/nodejs/content/winston@Oe9cQCz2lwSaot7miZOta.md
  17. 6
      src/data/roadmaps/prompt-engineering/content/100-basic-llm/100-what-are-llms.md
  18. 6
      src/data/roadmaps/prompt-engineering/content/100-basic-llm/101-llm-types.md
  19. 6
      src/data/roadmaps/prompt-engineering/content/100-basic-llm/102-how-llms-built.md
  20. 6
      src/data/roadmaps/prompt-engineering/content/101-prompting-introduction/100-basic-prompting.md
  21. 5
      src/data/roadmaps/prompt-engineering/content/101-prompting-introduction/101-need-for-prompting.md
  22. 3
      src/data/roadmaps/prompt-engineering/content/101-prompting-introduction/index.md
  23. 9
      src/data/roadmaps/prompt-engineering/content/102-prompts/101-parts-of-a-prompt.md
  24. 7
      src/data/roadmaps/prompt-engineering/content/102-prompts/index.md
  25. 7
      src/data/roadmaps/prompt-engineering/content/102-prompts/prompting-techniques/100-role-prompting.md
  26. 8
      src/data/roadmaps/prompt-engineering/content/102-prompts/prompting-techniques/101-few-shot-prompting.md
  27. 8
      src/data/roadmaps/prompt-engineering/content/102-prompts/prompting-techniques/102-chain-of-thought.md
  28. 9
      src/data/roadmaps/prompt-engineering/content/102-prompts/prompting-techniques/103-zeroshot-chain-of-thought.md
  29. 58
      src/data/roadmaps/prompt-engineering/content/102-prompts/prompting-techniques/104-least-to-most.md
  30. 5
      src/data/roadmaps/prompt-engineering/content/102-prompts/prompting-techniques/105-dual-prompt.md
  31. 10
      src/data/roadmaps/prompt-engineering/content/102-prompts/prompting-techniques/106-combining-techniques.md
  32. 65
      src/data/roadmaps/prompt-engineering/content/103-real-world/100-structured-data.md
  33. 12
      src/data/roadmaps/prompt-engineering/content/103-real-world/101-inferring.md
  34. 6
      src/data/roadmaps/prompt-engineering/content/103-real-world/102-writing-emails.md
  35. 11
      src/data/roadmaps/prompt-engineering/content/103-real-world/103-coding-assistance.md
  36. 5
      src/data/roadmaps/prompt-engineering/content/103-real-world/104-study-buddy.md
  37. 7
      src/data/roadmaps/prompt-engineering/content/103-real-world/105-designing-chatbots.md
  38. 17
      src/data/roadmaps/prompt-engineering/content/103-real-world/index.md
  39. 7
      src/data/roadmaps/prompt-engineering/content/104-llm-pitfalls/100-citing-sources.md
  40. 9
      src/data/roadmaps/prompt-engineering/content/104-llm-pitfalls/101-bias.md
  41. 29
      src/data/roadmaps/prompt-engineering/content/104-llm-pitfalls/102-hallucinations.md
  42. 6
      src/data/roadmaps/prompt-engineering/content/104-llm-pitfalls/103-math.md
  43. 16
      src/data/roadmaps/prompt-engineering/content/104-llm-pitfalls/104-prompt-hacking.md
  44. 75
      src/data/roadmaps/prompt-engineering/content/104-llm-pitfalls/index.md
  45. 1
      src/data/roadmaps/spring-boot/content/109-spring-mvc/103-components.md
  46. 31
      src/pages/devops/lifecycle.astro
  47. 31
      src/pages/full-stack/job-description.astro

@ -2,7 +2,6 @@
First of all, thank you for considering to contribute. Please look at the details below:
- [Hacktoberfest Contributions](#hacktoberfest-contributions)
- [New Roadmaps](#new-roadmaps)
- [Existing Roadmaps](#existing-roadmaps)
- [Adding Projects](#adding-projects)
@ -10,60 +9,6 @@ First of all, thank you for considering to contribute. Please look at the detail
- [Guidelines](#guidelines)
- [Good vs. Not So Good Contributions](#good-vs-not-so-good-contributions)
## Hacktoberfest Contributions
We are taking part in [Hacktoberfest 11](https://hacktoberfest.com/)!
Before you start to contribute to our project in order to satisfy [Hacktoberfest requirements](https://hacktoberfest.com/participation/#contributors), please bear in mind the following:
- There is *not* a Hacktoberfest t-shirt this year [(see their FAQ)](https://hacktoberfest.com/participation/#faq)
- There is *not* an infinite opportunity to contribute to the roadmap.sh project.
### Hacktoberfest Specific Contribution Rules
As Hacktoberfest attracts a lot of contributors (which is awesome), it does require a more rigid and strictly enforced set of guidelines than the average contribution.
These are as follows:
1. No single file contributions, please contribute to a minimum of two.
Whilst single file contributions, such as adding one link to a single topic, are perfectly fine outside of Hacktoberfest, this can (and probably will) result in an easy 4 pull requests for everyone and we will just become a Hacktoberfest farming project.
***Note: If you contribute the entire contents of a topic i.e. the topic has 0 copy and 0 links, this will count.***
2. Typo fixes will not count (by themselves).
Whilst fixing typos is a great thing to do, let's bundle them in with actual contributions if we see them!
3. The same basic rules apply.
- Content must be in English.
- Maximum of 8 links per topic.
- Follow the below style guide for content.
Here is an example of a **fully complete** topic:
```markdown
# Redis
Redis is an open-source, in-memory data structure store known for its speed and versatility. It supports various data types, including strings, lists, sets, hashes, and sorted sets, and provides functionalities such as caching, session management, real-time analytics, and message brokering. Redis operates as a key-value store, allowing for rapid read and write operations, and is often used to enhance performance and scalability in applications. It supports persistence options to save data to disk, replication for high availability, and clustering for horizontal scaling. Redis is widely used for scenarios requiring low-latency access to data and high-throughput performance.
Learn more from the following resources:
[@official@Link 1](https://google.com)
[@article@Link 2](https://google.com)
[@article@Link 3](https://google.com)
[@course@Link 4](https://google.com)
[@course@Link 5](https://google.com)
[@video@Link 6](https://google.com)
[@video@Link 7](https://google.com)
[@video@Link 8](https://google.com)
```
Contributions to the project that meet these requirements will be given the label `hacktoberfest-accepted` and merged, contributions that do not meet the requirements will simply be closed.
Any attempts at spam PRs will be given the `spam` tag. If you receive 2 `spam` tags against you, you will be [disqualified from Hacktoberfest](https://hacktoberfest.com/participation/#spam).
## New Roadmaps
For new roadmaps, you can either:

@ -176,7 +176,7 @@
"type": "article"
},
{
"title": "Cookes - Mozilla",
"title": "Cookies - Mozilla",
"url": "https://developer.mozilla.org/en-US/docs/Mozilla/Add-ons/WebExtensions/API/cookies",
"type": "article"
}

@ -811,13 +811,8 @@
"type": "article"
},
{
"title": "Lets subnet your home network!",
"url": "https://www.youtube.com/watch?v=mJ_5qeqGOaI&list=PLIhvC56v63IKrRHh3gvZZBAGvsvOhwrRF&index=6",
"type": "video"
},
{
"title": "Subnetting for hackers",
"url": "https://www.youtube.com/watch?v=o0dZFcIFIAw",
"title": "Subnetting",
"url": "https://www.youtube.com/playlist?list=PLIhvC56v63IKrRHh3gvZZBAGvsvOhwrRF",
"type": "video"
}
]
@ -1403,11 +1398,16 @@
},
"LKK1A5-xawA7yCIAWHS8P": {
"title": "SSL / TLS",
"description": "Single Sign-On (SSO) is an authentication method that allows users to access multiple applications or systems with one set of login credentials. It enables users to log in once and gain access to various connected systems without re-entering credentials. SSO enhances user experience by reducing password fatigue, streamlines access management for IT departments, and can improve security by centralizing authentication controls. It typically uses protocols like SAML, OAuth, or OpenID Connect to securely share authentication information across different domains. While SSO offers convenience and can strengthen security when implemented correctly, it also presents a single point of failure if compromised, making robust security measures for the SSO system critical.\n\nLearn more from the following resources:",
"description": "Secure Sockets Layer (SSL) and Transport Layer Security (TLS) are cryptographic protocols used to provide security in internet communications. These protocols encrypt the data that is transmitted over the web, so anyone who tries to intercept packets will not be able to interpret the data. One difference that is important to know is that SSL is now deprecated due to security flaws, and most modern web browsers no longer support it. But TLS is still secure and widely supported, so preferably use TLS.\n\nLearn more from the following resources:",
"links": [
{
"title": "What’s the Difference Between SSL and TLS?",
"url": "https://aws.amazon.com/compare/the-difference-between-ssl-and-tls/",
"title": "What is SSL? | SSL definition",
"url": "https://www.cloudflare.com/en-gb/learning/ssl/what-is-ssl/",
"type": "article"
},
{
"title": "TLS Basics",
"url": "https://www.internetsociety.org/deploy360/tls/basics/",
"type": "article"
},
{
@ -1805,7 +1805,7 @@
"type": "article"
},
{
"title": "",
"title": "What is LDAP",
"url": "https://www.youtube.com/watch?v=vy3e6ekuqqg",
"type": "video"
}

@ -1892,6 +1892,11 @@
"title": "winston Website",
"url": "https://github.com/winstonjs/winston",
"type": "opensource"
},
{
"title": "A Complete Guide to Winston Logging in Node.js",
"url": "https://betterstack.com/community/guides/logging/how-to-install-setup-and-use-winston-and-morgan-to-log-node-js-applications/",
"type": "article"
}
]
},

@ -0,0 +1,259 @@
---
title: 'What Are the 7 Key Phases of the DevOps Lifecycle?'
description: 'Master the DevOps lifecycle by exploring its 7 phases, designed to enhance collaboration, streamline processes, and deliver software with agility.'
authorId: william
excludedBySlug: '/devops/lifecycle'
seo:
title: 'What Are the 7 Key Phases of the DevOps Lifecycle?'
description: 'Master the DevOps lifecycle by exploring its 7 phases, designed to enhance collaboration, streamline processes, and deliver software with agility.'
ogImageUrl: 'https://assets.roadmap.sh/guest/key-phases-of-devops-lifecycle-788fa.jpg'
isNew: true
type: 'textual'
date: 2024-11-01
sitemap:
priority: 0.7
changefreq: 'weekly'
tags:
- 'guide'
- 'textual-guide'
- 'guide-sitemap'
---
![Learn the key phases of the DevOps lifecycle.](https://assets.roadmap.sh/guest/key-phases-of-devops-lifecycle-788fa.jpg)
Whether you’re an experienced DevOps engineer or trying to expand your expertise, you’ll likely adopt (or are already using) parts of the **7 key phases of the DevOps lifecycle** as a core **process** for developing, testing, and deploying software projects.
But what are these phases, and do they really need to be followed in a specific order to truly matter?
As a [DevOps engineer](https://roadmap.sh/devops), your primary role is to help the development and operations teams operate better. You’ll do this by collaborating closely with software engineers, quality assurance teams, and other stakeholders to set up **processes**, implement tools, and create standards to achieve the overall goal of the project.
In this guide, you’ll learn about these phases, how they're implemented, and the sequence in which they are applied in software development.
**TL;DR:** 7 key phases of the DevOps lifecycle are:
- Continuous development
- Continuous integration (CI)
- Continuous testing
- Continuous deployment (CD)
- Continuous monitoring
- Continuous feedback
- Continuous operations
Let’s look at the DevOps lifecycle in detail.
## What is the DevOps lifecycle?
DevOps lifecycle is a set of stages that software development (Dev) and IT operations (Ops) teams use to deliver software applications in an efficient and reliable manner. It is a continuous and iterative process that facilitates integration and collaboration between these teams.
In traditional software development, developers don’t just build and deploy applications. They must also accommodate changes, fix bugs, consider feature requests, and handle various administrative tasks. The same approach to continuous improvement applies in DevOps, which has led to industries adopting DevOps to factor in the lifecycle processes into their operations.
The primary goal of the DevOps lifecycle is to streamline your development and delivery process and ensure applications are reliable and efficiently deployed.
It follows a range of continuous development, integration, testing, monitoring, and feedback gathering, with each section using sets of best practices and tools to ensure the overall project goal.
![DevOps Lifecycle](https://assets.roadmap.sh/guest/7-key-phases-of-the-devops-lifecycle-4zoj6.png)
## 7 key phases of the DevOps lifecycle
The 7 key phases of the DevOps lifecycle, also known as the 7 C’s of DevOps, are sets of interconnected stages that work together in a continuous loop to help you develop, test, and deploy applications quickly. Below are the key phases of the DevOps lifecycle:
### 1. Continuous development
This phase is about planning and coding the software application. Developers plan the software and break the entire development process into smaller cycles that add value to the overall software development goal.
![Continuous Development](https://assets.roadmap.sh/guest/continuous-development-xbxj3.png)
By following this process, DevOps teams can easily map out the **software development lifecycle (SDLC)** to other stakeholders regarding expectations, responsibilities, and timelines. Additionally, because the development teams, testers, and other stakeholders build software piece-by-piece, the development process is fast, large-scale risk is minimal, and the process can easily adapt to changing requirements and business needs.
**Tools used for continuous development**
1. **Planning:** DevOps teams use project management tools like Jira, Linear, and ClickUp to help teams plan, track, and release software.
2. **Coding**: DevOps teams can use version control systems like Git, editors like Visual Studio Code, and pair programming tools like Tuple to effectively collaborate with other development teams when building software.
### 2. Continuous integration (CI)
After writing the code and storing it in a shared repository, DevOps teams can set up a CI pipeline on the repository so that when developers commit changes to the source code, they can do the following:
- Detect changes to existing code and initiate actions like unit testing, integration testing, and the build process.
- Perform code quality analysis.
- Generate deployment artifacts.
![Continuous Integration](https://assets.roadmap.sh/guest/continuous-integration-b4lef.png)
This is particularly important because the development team will continue to push updates into the source code to build new features, fix bugs, perform code improvement, and refactoring.
**Tools used**
Jenkins, CircleCI, Travis CI, and GitHub Actions are some automation tools DevOps teams use to build, test, and deploy code changes.
### 3. Continuous testing
Continuous testing involves automating tests on the developed code to ensure that changes are validated at each step of the development cycle, catch defects, and provide feedback without the need for human intervention.
![Continuous testing](https://assets.roadmap.sh/guest/continuous-testing-d90gb.png)
If an error or bug occurs, the code is returned to the previous phase (integration) for correction and possible fixes. Automated testing improves the overall workflow by saving time and resources.
**Tools used**
Selenium, JUnit, TestNG, and Cucumber are some automation testing tools that DevOps teams use to automate testing at scale.
### 4. Continuous deployment (CD)
This is the phase when the code that has passed all tests is automatically deployed to the staging or production environment. Continuous deployment's overall goals are:
- Reduce the time between development and deployment.
- Facilitate the deployment of finished code to production servers.
- Ensure consistency across development, testing, staging, and production environments.
![Continuous Deployment](https://assets.roadmap.sh/guest/continuous-deployment-bprfv.png)
**Tools used**
1. **Configuration tools**: The DevOps team uses configuration management tools like Ansible, Puppet, Chef, and SaltStack to automate the provisioning, configuration, management, and continuous delivery of IT infrastructure. These tools help the DevOps team increase efficiency, maintain consistency across environments, and reduce errors.
2. **Containerization and orchestration tools**: The DevOps team uses tools like [Docker](https://roadmap.sh/docker), Vagrant, and [Kubernetes](https://roadmap.sh/kubernetes) to build and test applications. These tools help applications respond to demand (scaling up and scaling down) and maintain consistency across environments.
### 5. Continuous monitoring
This is the phase where you keep an eye on the deployed application to monitor performance, security, and other helpful data. It involves the collection of metrics and other application usage-related data to detect issues such as system errors, server downtime, application errors, and security vulnerabilities. Additionally, it involves collaboration with the operation teams to monitor bugs and identify improper system behavior.
![Continuous Monitoring](https://assets.roadmap.sh/guest/continuous-monitoring-cydj0.png)
Continuous monitoring improves the productivity and reliability of the system while reducing IT support costs. Any issues detected during this phase can be promptly reported and addressed in the continuous development phase, creating a more efficient feedback loop.
**Tools used**
Prometheus, Grafana, ELK Stack (Elasticsearch, Logstash, Kibana), and Datadog are some tools DevOps teams use to continuously monitor the application and infrastructure to identify and resolve issues.
### 6. Continuous feedback
Continuous feedback is about gathering information from users and stakeholders to understand how the software performs in real-life scenarios. The feedback is then continuously analyzed and used to make informed decisions and improve the overall development process.
![Feedback](https://assets.roadmap.sh/guest/continuous-feedback-eg1tr.png)
**Tools used**
DevOps teams use tools like Datadog and LogRocket to gather and gain insights into how users interact with their products.
### 7. Continuous operations
In the traditional software development process, developers might need to pull down the server when they want to update and maintain applications. This approach disrupts the development process, potentially increases organizational costs, and can lead to user service interruptions.
![Continuous operations](https://assets.roadmap.sh/guest/continuous-operations-h2yrj.png)
Continuous operations address these challenges, among others. It ensures the software remains available and operational with minimal downtime. This phase involves tasks such as:
- Performing zero-downtime deployments.
- Automating backups and recovery.
- Using infrastructure management to provision and scale resources.
- Distributing traffic across multiple servers to maintain performance during updates or high-traffic periods.
- Implementing strategies like database replication and rolling updates to maintain data availability.
**Tools used**
Puppet, Terraform, and Chef are some tools DevOps teams use to automate resource provisioning and ensure system reliability.
The DevOps lifecycle is a continuous process that involves development, integration, testing, deployment, monitoring, feedback, and operations. Beyond the improvement it brings, you’ll also notice that organizations are extending DevOps and further advancing its capability.
Let’s explore some of these extensions and how they’re changing the development process.
## Key DevOps extensions to watch in 2024
Below are some extensions that build on the core principles of DevOps, like automation, collaboration, and continuous improvement:
- DevSecOps
- GitOps
- DataOps
- FinOps
- MLOps
- AIOps
![DevOps extension](https://assets.roadmap.sh/guest/devops-extensions-8fy7t.png)
### DevSecOps
DevSecOps stands for **Development**, **Security**, and **Operations**. It’s an extension of DevOps that continuously integrates security practices into every phase of the software development lifecycle rather than treating them as an afterthought.
With the increase in cybersecurity threats and regulatory requirements, it has become more important to use DevSecOps to embed security into the pipeline so that organizations can deliver secure software faster.
DevSecOps uses tools like HashiCorp Vault, Snyk, OWASP ZAP, and Aqua Security to:
- Automate security testing.
- Perform continuous compliance.
- Enforce secure coding practices
- Perform vulnerability assessment.
### GitOps
GitOps stands for **Git Operations**. It’s an extension of DevOps that uses Git as a source of truth for managing infrastructure and application development. This means the DevOps teams can make changes to infrastructure through Git pull requests, which are then automatically applied via the CI/CD pipelines.
By adopting GitOps, organizations can improve the reliability of their systems, enforce standards for the team, and accelerate software delivery.
GitOps involves using tools like Jenkins X, Flux, and ArgoCD to automate the delivery and deployment of applications.
### DataOps
DataOps stands for **Data Operations**. It’s an extension of DevOps methodology designed to improve data pipeline communication, integration, and automation across the data and IT operations teams. DataOps aims to ensure that the data pipeline is fast, scalable, and reliable.
DataOps uses tools like Apache NiFi, data build tool (dbt), and Prefect to:
- Perform data versioning.
- Automate data testing.
- Automate the delivery of data pipelines.
### FinOps
FinOps stands for **Financial Operations**. It’s an extension of DevOps that enables organizations that use cloud services to efficiently manage their cloud costs and financial operations. The goal of FinOps is to optimize cloud-related costs by encouraging close collaboration between finance, operations, and engineering teams.
![finOps](https://assets.roadmap.sh/guest/1tvyy8hg-f4fd8.png)
FinOps also uses a lifecycle approach to optimize organization costs. It involves:
1. **Inform**: This phase involves gaining visibility into cloud spending by tracking cloud costs, setting budgets, and leveraging discounts or other freebies offered by cloud providers. Basically, it provides the team insights into where the money is being spent.
2. **Optimize**: This phase is all about optimizing cloud costs. It involves sizing resources, identifying areas of inefficiency, and other cost-improvement tasks that will help make cost-effective decisions without compromising performance.
3. **Operate:** This phase is about monitoring cloud spending, enforcing policies, and making needed adjustments to ensure the budget is not exceeded.
FinOps leverages tools like Azure Cost Management, AWS Cost Explorer, Cloudability, and CloudHealth to achieve the organization's cloud-related financial goals.
### MLOps
MLOps stands for **Machine Learning Operations**. It’s an extension of DevOps workflow that streamlines and automates the deployment, monitoring, and management of ML models in a production environment. It promotes collaboration between the data science and IT operations teams so that models can be versioned, continuously delivered, and retrained when needed.
Tools used include TensorFlow Extended (TFX), Kubeflow, KitOps, and MLflow.
### AIOps
AIOps stands for **Artificial Intelligence for IT Operations**. It’s an extension of DevOps that promotes using artificial intelligence, machine learning, and data analytics to automate and improve IT operations processes. When AIOps is integrated into DevOps processes, the organization benefits from enhanced efficiency, faster issue resolution, and proactive system monitoring.
Tools used include IBM Watson AIOps and Dynatrace.
The extension of DevOps workflow is a response to modern software challenges, driven by the ongoing shift in the DevOps ecosystem and the need for specialized practices across different software engineering fields.
## Essential DevOps lifecycle best practices
An essential part of DevOps culture is the lifecycle phases. While the lifecycle phases streamline the operational process and help you build reliable software, there are still some gotchas that you need to consider when integrating this process into your SDLC. Below are some best practices you should consider:
1. **Promote collaboration**: As a DevOps engineer, you need to encourage cross-functional collaboration and shared responsibilities among direct teams and other stakeholders. This will help you and your team avoid the traditional siloed approach, break communication barriers, and promote DevOps culture.
2. **Show empathy and support**: Implementing DevOps lifecycle into your development process may take time and require some adjustment for you and your team members. You need to support the team with resources and any helpful training material to help facilitate the process. Most importantly, allow time for everyone to adapt to the new process.
3. **Set metrics or milestones**: As the popular saying goes, **"You can't manage what you can't measure."** You must set clear objectives and define performance metrics at the beginning or during the adoption of a new process. This will help you and your team know what success looks like.
4. **Invest in tools**: At the heart of DevOps are the toolchains that automate toils and enable easy collaboration between development and operations teams. You should invest in DevOps tools that your team needs to automate their DevOps workflow. Below are some DevOps tools that can help you automate processes:
- **CI/CD tools**: Tools like Jenkins, GitLab CI/CD, CircleCI, Azure Pipeline, and GitHub Actions help automate the integration and deployment of code changes.
- **Infrastructure as Code (IaC) tools**: Tools like Terraform, Ansible, Pulumi, Chef, AWS CloudFormation, and Vagrant help automate the provisioning and management of infrastructure.
- **Containerization and orchestration tools**: Tools like Docker, Kubernetes, OpenShift, Docker Swarm, and Amazon ECS (Elastic Container Service) help manage and orchestrate containers at scale.
- **Monitoring and logging tools**: Tools like Prometheus, ELK Stack (Elasticsearch, Logstash, and Kibana), Datadog, Splunk, and Grafana help track system performance, logging, and alerting.
- **Configuration management tools**: Tools like Chef, Puppet, CFEngine, SaltStack, and Ansible help ensure that system configurations remain consistent across environments.
- **Security and compliance tools**: Tools like HashiCorp Vault, OWASP ZAP, Snyk, SonarQube, and Aqua Security help enforce security policies, scanning, and compliance checks.
- **Collaboration and communication tools**: Tools like Slack, Microsoft Teams, Trello, Jira, and Confluence help facilitate communication and collaboration between teams.
5. **Continuous improvement**: Encourage your teams to share knowledge across teams, conduct service failure postmortem, and experiment with new ideas and potential solutions.
## Key takeaways
At the core of the DevOps lifecycle is continuity. By following these key phases in an iterative pattern, you’ll be able to take advantage of the lifecycle process to build applications that are maintainable, scalable, and reliable.
Use the [DevOps roadmap](https://roadmap.sh/devops) to stay up to date with the latest developments and extensions in the DevOps ecosystem. Additionally, you can create a [custom roadmap](https://roadmap.sh/teams) for your team to plan, track, and document the team's skills and growth.

@ -0,0 +1,191 @@
---
title: 'Full Stack Developer Job Description [2024 Template]'
description: 'Looking to hire a Fullstack Engineer? Get the complete job description, skills, and responsibilities right here!'
authorId: william
excludedBySlug: '/full-stack/job-description'
seo:
title: 'Full Stack Developer Job Description [2024 Template]'
description: 'Looking to hire a Fullstack Engineer? Get the complete job description, skills, and responsibilities right here!'
ogImageUrl: 'https://assets.roadmap.sh/guest/fullstack-job-h15x6.jpg'
isNew: true
type: 'textual'
date: 2024-11-01
sitemap:
priority: 0.7
changefreq: 'weekly'
tags:
- 'guide'
- 'textual-guide'
- 'guide-sitemap'
---
![Full Stack developer job description example and template.](https://assets.roadmap.sh/guest/fullstack-job-h15x6.jpg)
One of the main challenges I face as a hiring manager looking for a full stack engineer is assessing the versatility of potential candidates. With tons of applications to review, I need to make decisions about potential hires and ascertain that they are knowledgeable in both front-end and back-end languages, frameworks, and tools.
This guide will discuss who a [full stack engineer](https://roadmap.sh/full-stack) is, their job description, roles, and objectives. It will also cover the essential skills and qualifications I look out for when hiring candidates for a full stack developer role.
Here is a summary of the full stack developer job description:
- Design and develop the user interface of the application using technologies such as HTML, CSS, and JavaScript.
- Build and manage server-side logic, databases, and application programming interfaces (APIs) using technologies such as JavaScript, Python, Java, Go, and Rust.
- Connect the frontend application to the backend services and ensure a seamless data flow from the client to the server.
- Solve business problems by writing clean, maintainable, and reusable code.
- Collaborate with other stakeholders in the project to ensure the go-live of the project.
## Full stack engineer job description template
The complexity of a project, the technology adopted, and the domain knowledge are some factors that might influence the job description of a full stack engineer. Based on my experience as a full stack engineer recruiter and an analysis of full stack engineer job descriptions on popular platforms like LinkedIn and Indeed, here is a template of a full stack developer job description you can adopt during your hiring process:
**Job title: Full stack engineer.**
**Company**: [Company Name].
**Location**: [Supported location, region, hybrid, or remote].
**Job Type**: [Full-time, Part-time, or Contract].
**About Us**: [Company Name] is [give a brief description of the company’s history and goals].
**Job Description**
[**Company Name**] is looking for an experienced full stack engineer. As a full stack engineer, you will develop and manage [**company products and features**] and collaborate closely with [**company teams**]. The ideal candidate will have a solid understanding of frontend and backend technologies.
**Responsibilities**
- Development of new business applications based on detailed specifications.
- Working with project stakeholders to shape project scope, approach, and structure.
- Identify and fix bugs on both frontend and backend codebases.
- Designing project specifications and translating them into implementation details.
- Write clean, maintainable, and reusable code based on [best practices](https://roadmap.sh/best-practices/backend-performance).
- Performing code reviews and mentoring junior frontend, backend, and full stack developers to support the organization's growth.
**Requirements**
- Professional experience in full stack engineering.
- Built APIs and microservices with Python.
- Strong proficiency in frontend technologies like HTML, CSS, JavaScript, and modern frameworks like React.
- Good understanding of databases and data management systems.
- Basic knowledge of CI/CD pipelines.
- Experience with debugging and automation tools like Jenkins and Ansible.
- Bachelor’s degree in computer science, computer engineering, or a related field (or equivalent experience).
**Nice to have**
- Experience with Docker.
- Familiarity with server-side events and streaming services.
- Prior experience in a similar role within a distributed team.
**What we offer**: [Company’s offer like workspace setup allowance, training, and other perks].
**How to apply**: [Mode of application (email or job portal), resumes, cover letters, and any other required information].
## What skills should I look for in a full stack engineer?
A full stack engineer requires a diverse set of skills spanning across technical knowledge and other complementary skills. These are some required skills I look out for when hiring:
- Frontend development skills.
- Backend development skills.
- Basic DevOps skills.
- Testing and caching skills.
- Soft skills.
### Frontend development skills
Full stack engineers must have a good understanding of [frontend development skills](https://roadmap.sh/frontend/developer-skills). These include proficiency in languages like HTML, CSS, and [JavaScript](https://roadmap.sh/javascript), which are essential for creating structure, responsive design, and implementing interactive web functionalities. Additionally, they should be skilled in leading JavaScript libraries and frameworks like [React](https://roadmap.sh/react), [Vue](https://roadmap.sh/vue), and [Angular](https://roadmap.sh/angular), which can be used to develop medium to large applications.
![frontend skills](https://assets.roadmap.sh/guest/frontend-development-common-languages-o88e3.png)
### Backend development skills
A full stack engineer must possess strong [backend developer skills](https://roadmap.sh/backend/developer-skills). These include a deep understanding of [API design and development](https://roadmap.sh/api-design), database management, and [security best practices](https://roadmap.sh/best-practices/api-security). Additionally, proficiency in server-side programming languages such as [JavaScript](https://roadmap.sh/javascript), [Java](https://roadmap.sh/java), [Python](https://roadmap.sh/python), C#, [Go](https://roadmap.sh/golang), and [Rust](https://roadmap.sh/rust) is important.
![Backend skills](https://assets.roadmap.sh/guest/backend-programming-common-languages-gzcyd.png)
### DevOps skills
In full stack development, [**DevOps skills**](https://roadmap.sh/devops) are highly valuable. A basic understanding of how to package software using Docker or Kubernetes, deploy and automate software delivery, and familiarity with cloud providers such as [AWS](https://roadmap.sh/aws), Google Cloud Platform, and Azure will come in handy.
![DevOps](https://assets.roadmap.sh/guest/devops-p4c0y.png)
### Testing and performance improvement skills
In full stack development, proficiency in automated testing and debugging is essential for identifying and resolving bugs in both frontend and backend code. Moreover, a strong grasp of caching techniques and technologies such as Redis can significantly enhance application performance and improve the overall user experience.
![testing and performance](https://assets.roadmap.sh/guest/testing-and-performance-improvement-skills-1z2hm.png)
### Soft skills
While technical skills are essential, full stack engineers must also possess a strong soft skill set. Full stack developers must have good communication skills (written and spoken), organization skills, and the ability to collaborate effectively with other team members to ensure the project's success.
![Soft skills](https://assets.roadmap.sh/guest/soft-skills-qnz1f.png)
## Additional skills to consider when hiring full stack engineers
As a hiring manager in the current job market, you will get to review multiple CVs and profiles when hiring a full stack engineer. It is important to identify additional skills to help narrow the search and pick the right candidate for the role. Below are some additional skills to look for:
- Problem-solving
- Stakeholder communication
- Adaptability
- Project management
- Community and networking
### Problem-solving
A full stack engineer should be able to tackle complex problems spanning both the client and server sides of applications. They must demonstrate a solid problem-solving mindset and creative solutions through projects, open-source contributions, and other endeavors.
### Stakeholder communication
Beyond collaborating with other teams to ensure a smooth software development process, it is even more important for full stack engineers to articulate technical concepts to non-technical stakeholders, as the success or failure of the project depends on them.
### Adaptability
Frameworks, libraries, design principles, and so on will continue to evolve. Full stack engineer candidates must demonstrate a track record of quickly acquiring new skills and technologies.
### Project management skills
Working on multiple projects simultaneously is inevitable. Full stack engineer candidates should have a basic understanding of project management principles and methodologies like Agile and Scrum. Additionally, they should be able to manage their time, prioritize tasks, and meet deadlines.
### Community and networking
As a hiring manager looking for a full stack developer, you should seek candidates who actively participate in developer communities, attend meetups, contribute to open-source projects, and join hackathons. This shows that they have a growth mindset, can easily unblock themselves by leveraging community engagement, and can increase their skills.
## Common interview questions when hiring for a full stack engineer role
While CVs, resumes, and portfolios give you an idea of what a potential candidate is capable of, you still need to conduct interviews to determine if the candidate fits the role. Check out these interview questions that can help you check if they're a good fit:
### What programming languages and frameworks are you most comfortable with?
Look for proficiency in languages and frameworks related to your company’s current tech stack.
### What types of databases have you worked with?
Assess the candidate's understanding of SQL and NoSQL databases, ability to explain pros and cons, and what influences their decision to use a particular database.
### What's your approach to ensuring responsive design across different devices?
Look for knowledge of design principles, mobile-first approach, and familiarity with CSS frameworks.
### How do you handle API security and authentication in your projects?
Look for proficiency in authentication methods (like JWT and OAuth) and security best practices.
### How do you collaborate with non-technical team members?
Look for strong communication skills, ability to explain technical concepts in simple terms, and empathy.
### Ask scenario-based questions like “If our main application went down, what steps would you take to diagnose and resolve the issue?”
Look for their approach to troubleshooting, ability to remain calm under pressure, and knowledge of debugging.
## Wrapping up
The possibilities offered by the web will continue to evolve, and the role of software engineers building for it will also change. While the internet is filled with resources such as courses, articles, and blogs on front-end, back-end, and full stack engineering skills and job descriptions, these often become outdated quickly. Therefore, a reliable source of truth is needed. [The full stack developer roadmap](https://roadmap.sh/full-stack) is a source of truth for hiring managers looking for full stack engineers.
Additionally, roadmap.sh has a [supportive community](https://roadmap.sh/discord), a goldmine for connecting with full stack engineers and spotting potential employees.

@ -28,7 +28,7 @@ roadmapIds:
In this project, you will style the HTML website structure you created previously [in a different project](/projects/basic-html-website). The focus will be on learning how to use CSS to create responsive layouts, apply color and typography, and enhance the overall design of your website.
Rough mockups of the website structure for mobile and desctop devices are given below. Do not worry about the design details i.e. colors backgrounds etc at this stage; we are not looking for a beautiful design, but a well-structured and responsive website. Just focus on making the layout same as the mockup and ensuring it looks good on different screen sizes.
Rough mockups of the website structure for mobile and desktop devices are given below. Do not worry about the design details i.e. colors backgrounds etc at this stage; we are not looking for a beautiful design, but a well-structured and responsive website. Just focus on making the layout same as the mockup and ensuring it looks good on different screen sizes.
[![Styled HTML Website](https://assets.roadmap.sh/guest/portfolio-template-xdhki.png)](https://assets.roadmap.sh/guest/portfolio-template-xdhki.png)
@ -51,4 +51,4 @@ For bonus points, you can:
---
After completing this project, you will have a solid understanding of how to style a basic HTML website using CSS. You can move on to more advanced topics, such as CSS animations, transitions, and grid layouts, in future projects.
After completing this project, you will have a solid understanding of how to style a basic HTML website using CSS. You can move on to more advanced topics, such as CSS animations, transitions, and grid layouts, in future projects.

@ -5,4 +5,4 @@ Cookies play an instrumental role in the field of API (Application Programming I
Learn more from the following resources:
- [@article@What Are API Cookies? How to Send it?](https://apidog.com/articles/what-are-api-cookies/)
- [@article@Cookes - Mozilla](https://developer.mozilla.org/en-US/docs/Mozilla/Add-ons/WebExtensions/API/cookies)
- [@article@Cookies - Mozilla](https://developer.mozilla.org/en-US/docs/Mozilla/Add-ons/WebExtensions/API/cookies)

@ -5,5 +5,4 @@ Subnetting is a technique used in computer networking to divide a large network
Learn more from the following resources:
- [@article@Networking Basics: What is IPv4 Subnetting?](https://www.cbtnuggets.com/blog/technology/networking/networking-basics-what-is-ipv4-subnetting)
- [@video@Lets subnet your home network!](https://www.youtube.com/watch?v=mJ_5qeqGOaI&list=PLIhvC56v63IKrRHh3gvZZBAGvsvOhwrRF&index=6)
- [@video@Subnetting for hackers](https://www.youtube.com/watch?v=o0dZFcIFIAw)
- [@video@Subnetting](https://www.youtube.com/playlist?list=PLIhvC56v63IKrRHh3gvZZBAGvsvOhwrRF)

@ -5,4 +5,4 @@ LDAP (Lightweight Directory Access Protocol) is a standardized application proto
Learn more from the following resources:
- [@article@What Is LDAP & How Does It Work?](https://www.okta.com/uk/identity-101/what-is-ldap/)
- [@video@](https://www.youtube.com/watch?v=vy3e6ekuqqg)
- [@video@What is LDAP](https://www.youtube.com/watch?v=vy3e6ekuqqg)

@ -4754,7 +4754,7 @@
"oldId": "dOrnsftQKhJAmOmsSzDI0"
},
"zIndex": 999,
"width": 402,
"width": 430,
"height": 30,
"dragging": false,
"positionAbsolute": {

@ -47,5 +47,11 @@ export const faqs: FAQType[] = [
'Shift left is a software development practice that involves moving tasks and processes earlier in the development lifecycle. By shifting tasks left, teams can identify and address issues earlier in the development process, reducing the cost and effort required to fix them. [Shift left](https://roadmap.sh/devops/shift-left-testing) practices include implementing automated testing, code reviews, and security checks early in the development process to catch and fix issues before they become more costly to address.',
],
},
{
question: 'What is a DevOps Lifecycle?',
answer: [
'A [DevOps lifecycle](https://roadmap.sh/devops/lifecycle) is a set of practices and processes that help organizations deliver software more quickly and reliably. The DevOps lifecycle typically includes planning, development, testing, deployment, monitoring, and feedback. By automating and integrating these stages, organizations can improve collaboration between development and operations teams, increase the speed of software delivery, and enhance the quality of software products.',
],
},
];
---

@ -0,0 +1,12 @@
---
import type { FAQType } from '../../../components/FAQs/FAQs.astro';
export const faqs: FAQType[] = [
{
question: 'What does a Full Stack Developer do?',
answer: [
'A Full Stack Developer is a developer who is comfortable working with both the front-end and back-end of a web application. They are responsible for developing and maintaining the entire application, from the user interface to the server-side logic and a [full stack developer job description](https://roadmap.sh/full-stack/job-description) can vary depending on the company and the project they are working on.',
],
},
];
---

@ -1,11 +1,12 @@
# Basic Syntax
Understanding the basics is the key to a solid foundation. In this section, learn the basic terminologies, naming conventions, reserved words, conditions, functions, data structures, OOP, packages, etc.
Understanding the basics is the key to a solid foundation. In this section, learn the basic terminologies, naming conventions, reserved keywords, expressions, statements, data structures, OOP, packages, etc.
- To print output use --> System.out.println();
- To take input from user --> Scanner or BufferedReader class can be used
Visit the following resources to learn more:
- [@official@Java Language Basics](https://dev.java/learn/language-basics)
- [@video@Java - Basic Syntax](https://www.youtube.com/watch?v=81piDKqPxjQ)
- [@video@Java Tutorial for Beginners](https://www.youtube.com/watch?v=RRubcjpTkks)

@ -5,11 +5,11 @@ The pipe (`|`) is a powerful feature in Linux used to connect two or more comman
Here is a simple example of piping two commands, `ls` and `grep`, to list all the text files in the current directory:
```bash
ls | grep .txt
ls | grep '\.txt$'
```
In this example, `ls` lists the files in the current directory and `grep .txt` filters out any files that don't end with `.txt`. The pipe command, `|`, takes the output from `ls` and uses it as the input to `grep .txt`. The output of the entire command is the list of text files in the current directory.
In this example, `ls` lists the files in the current directory and `grep '\.txt$'` filters out any files that don't end with `.txt`. The pipe command, `|`, takes the output from `ls` and uses it as the input to `grep '\.txt$'`. The output of the entire command is the list of text files in the current directory.
Visit the following resources to learn more:
- [@article@Piping and Redirection](https://ryanstutorials.net/linuxtutorial/piping.php#piping)
- [@article@Piping and Redirection](https://ryanstutorials.net/linuxtutorial/piping.php#piping)

@ -5,3 +5,4 @@ winston is designed to be a simple and universal logging library with support fo
Visit the following resources to learn more:
- [@opensource@winston Website](https://github.com/winstonjs/winston)
- [@article@A Complete Guide to Winston Logging in Node.js](https://betterstack.com/community/guides/logging/how-to-install-setup-and-use-winston-and-morgan-to-log-node-js-applications/)

@ -6,6 +6,12 @@ LLMs have the ability to achieve state-of-the-art performance in multiple Natura
As an example, OpenAI's GPT-3 is a prominent LLM that has gained significant attention due to its capability to generate high-quality text and perform a variety of language tasks with minimal fine-tuning.
Learn more from the following resources:
- [@roadmap.sh@Introduction to LLMs](https://roadmap.sh/guides/introduction-to-llms)
- [@article@Large language model](https://en.wikipedia.org/wiki/Large_language_model)
- [@video@Intro to Large Language Models](https://www.youtube.com/watch?v=zjkBMFhNj_g)
- [@video@Large Language Model Operations (LLMOps) Explained](https://www.youtube.com/watch?v=cvPEiPt7HXo)
- [@video@How Large Language Models Work](https://youtu.be/5sLYAQS9sWQ)
- [@feed@Explore top posts about LLM](https://app.daily.dev/tags/llm?ref=roadmapsh)

@ -17,3 +17,9 @@ Instruction Tuned LLMs = Base LLMs + Further Tuning + RLHF
```
To build an Instruction Tuned LLM, a Base LLM is taken and is further trained using a large dataset covering sample "Instructions" and how the model should perform as a result of those instructions. The model is then fine-tuned using a technique called "Reinforcement Learning from Human Feedback" (RLHF) which allows the model to learn from human feedback and improve its performance over time.
Learn more from the following resources:
- [@article@Understanding AI Models: Base Language Learning Models vs. Instruction Tuned Language Learning Models - Olivier Mills](https://oliviermills.com/articles/understanding-ai-models-base-language-learning-models-vs-instruction-tuned-language-learning-models)
- [@video@Why Are There So Many Foundation Models?](https://www.youtube.com/watch?v=QPQy7jUpmyA)
- [@video@How to Pick the Right AI Foundation Model](https://www.youtube.com/watch?v=pePAAGfh-IU)

@ -9,3 +9,9 @@ On a high level, training an LLM model involves three steps i.e. data collection
- **Evaluation**: The final step is to evaluate the performance of the model to see how well it performs on various tasks such as question answering, summarization, translation etc.
The output from the training Pipeline is an LLM model which is simply the parameters or weights which capture the knowledge learned during the training process. These parameters or weights are typically serialized and stored in a file, which can then be loaded into any application that requires language processing capabilities e.g. text generation, question answering, language processing etc.
Learn more from the following resources:
- [@article@What is LLM & How to Build Your Own Large Language Models?](https://www.signitysolutions.com/blog/how-to-build-large-language-models)
- [@guides@Large language model](https://en.wikipedia.org/wiki/Large_language_model)
- [@video@Five Steps to Create a New AI Model](https://youtu.be/jcgaNrC4ElU)

@ -26,3 +26,9 @@ Write me an introductory guide about Prompt Engineering.
```
However, using plain text as prompts i.e. without using any best practices you may not be able to fully utilise the power of LLMs. That's where "Prompt Engineering" or knowing the best practices for writing better prompts and getting the most out of LLMs comes in.
- [@guides@Basics of Prompting | Prompt Engineering Guide](https://www.promptingguide.ai/introduction/basics)
- [@article@Prompting Basics](https://learnprompting.org/docs/basics/prompting)
- [@official@Prompt engineering - OpenAI API](https://platform.openai.com/docs/guides/prompt-engineering)
- [@official@Prompt engineering overview - Anthropic](https://docs.anthropic.com/en/docs/build-with-claude/prompt-engineering/overview)
- [@course@Introduction to Prompt Engineering (Playlist)](https://youtube.com/playlist?list=PLYio3GBcDKsPP2_zuxEp8eCulgFjI5a3g&si=n3Ot-tFECp4axL8L)

@ -24,4 +24,7 @@ Prompts can help reduce inaccuracies and ambiguities in the AI's responses. By p
In conclusion, the need for prompting stems from its role in guiding AI model behavior, improving text quality and relevance, eliciting a specific output, aligning AI and human intent, and reducing inaccuracies and ambiguity in generated content. By understanding and mastering the art of prompting, users can unlock the true potential of AI language models.
- [@article@Prompting Basics](https://learnprompting.org/docs/basics/prompting)
- [@article@Prompting Basics](https://learnprompting.org/docs/basics/prompting)
- [@video@AI prompt engineering: A deep dive](https://youtu.be/T9aRN5JkmL8?si=3uW2BQuNHLcHjqTv)
- [@video@What is Prompt Tuning?](https://www.youtube.com/watch?v=yu27PWzJI_Y)
- [@guides@What is Prompt Engineering? A Detailed Guide For 2024](https://www.datacamp.com/blog/what-is-prompt-engineering-the-future-of-ai-communication)

@ -24,4 +24,5 @@ Hello, how are you?
But it's one of the best practices to be clear and use delimiters to separate the content in prompt from the instructions. You will learn more about it in the "Best Practices" nodes of the roadmap.
- [@article@Basic Prompting](https://learnprompting.org/docs/basics/intro)
- [@article@Basic Prompting - Learn Prompting](https://learnprompting.org/docs/basics/intro)
- [@guides@Basics of Prompting - Prompt Engineering Guide](https://www.promptingguide.ai/introduction/basics)

@ -14,4 +14,11 @@ When constructing a prompt, it's essential to understand the different parts tha
*Example: To further clarify the email-writing task, you might provide a brief example of the tone or structure you want, such as "Dear [Recipient], Thank you for taking the time to meet with me yesterday. We discussed [topic 1], [topic 2], and [topic 3]. I look forward to our future collaboration."*
By considering these three parts of a prompt — context, instruction, and example — you can create effective and well-formed prompts that produce targeted and accurate responses from the AI.
By considering these three parts of a prompt — context, instruction, and example — you can create effective and well-formed prompts that produce targeted and accurate responses from the AI.
Learn more from the following resources:
- [@guides@Understanding Prompt Structure: Key Elements of a Prompt](https://learnprompting.org/docs/basics/formalizing)
- [@guides@Elements of a Prompt | Prompt Engineering Guide](https://www.promptingguide.ai/introduction/elements)
- [@video@Building with Anthropic Claude: Prompt Workshop ](https://youtu.be/hkhDdcM5V94?si=mW-GLSvi7TMg33wx)
- [@guides@General Tips for Designing Prompts | Prompt Engineering Guide](https://www.promptingguide.ai/introduction/tips)

@ -1,3 +1,8 @@
# Prompts
At this point, you probably already know what the Prompts are and the importance of writing good prompts. This section covers the best practices for writing good prompts as well as covering some of the commonly used prompting techniques.
At this point, you probably already know what the Prompts are and the importance of writing good prompts. This section covers the best practices for writing good prompts as well as covering some of the commonly used prompting techniques.
Learn more from the following resources:
- [@guides@Prompt Engineering Guide: The Ultimate Guide to Generative AI](https://learnprompting.org/docs/introduction)
- [@guides@Optimizing Prompts | Prompt Engineering Guide](https://www.promptingguide.ai/guides/optimizing-prompts)

@ -14,4 +14,9 @@ Role prompting is a technique used in prompt engineering to encourage the AI to
Imagine you want to explore solutions to an environmental problem. You can use role prompting to elicit diverse perspectives and insights. Here's an example prompt with role prompting:
_As a climate scientist, how would you approach the problem of deforestation to minimize its impact on the environment?_
_As a climate scientist, how would you approach the problem of deforestation to minimize its impact on the environment?_
Learn more from the following resources:
- [@article@Assigning Roles to Chatbots](https://learnprompting.org/docs/basics/roles)
- [@article@Revisiting Roles in Prompting](https://learnprompting.org/docs/intermediate/revisiting_roles)

@ -29,4 +29,10 @@ The book was hard to put down. - {sentiment_label}
This prompt structure assists the model in grasping the sentiment analysis task and increases the likelihood of getting the correct output (i.e., "Positive").
Remember to experiment with the number of examples and their content to find the optimal balance for your specific task. Additionally, you can use inline instructions to guide the model further, such as asking it to classify the sentiment of a given sentence.
Remember to experiment with the number of examples and their content to find the optimal balance for your specific task. Additionally, you can use inline instructions to guide the model further, such as asking it to classify the sentiment of a given sentence.
Learn more from the following resources:
- [@article@Few-Shot Prompting | Prompt Engineering Guide](https://www.promptingguide.ai/techniques/fewshot)
- [@article@Shot-Based Prompting: Zero-Shot, One-Shot, and Few-Shot Prompting](https://learnprompting.org/docs/basics/few_shot)
- [@guides@Introduction to Advanced Few-Shot Prompting Techniques](https://learnprompting.org/docs/advanced/few_shot/introduction)

@ -21,4 +21,10 @@ We can approach the topic through a series of connected prompts:
- "Now, consider the challenges of working from home. Identify three potential disadvantages or negative effects."
- "For each of these challenges, discuss how individuals and organizations can mitigate or address them."
By employing the Chain of Thought technique, we have directed the model to provide a thorough and systematic analysis of the subject in question, ultimately resulting in a more meaningful and accurate response.
By employing the Chain of Thought technique, we have directed the model to provide a thorough and systematic analysis of the subject in question, ultimately resulting in a more meaningful and accurate response.
Learn more from the following resources:
- [@article@Chain-of-Thought Prompting | Prompt Engineering Guide](https://www.promptingguide.ai/techniques/cot)
- [@article@Chain-of-Thought Prompting](https://learnprompting.org/docs/intermediate/chain_of_thought)
- [@guides@Introduction to Thought Generation Prompting Techniques for LLMs](https://learnprompting.org/docs/advanced/thought_generation/introduction)

@ -17,4 +17,11 @@ Suppose you want the model to explain how a solar panel works. A zeroshot chain
Please explain the process of how a solar panel works, starting with sunlight hitting the panel's surface and ending with electricity being produced. Structure your response as a step-by-step chain of thought, taking care to clarify how each step leads to the next.
```
By designing prompts that explicitly request step-by-step reasoning, the zeroshot chain of thought technique can lead to more comprehensive and insightful answers that go beyond simple factual statements.
By designing prompts that explicitly request step-by-step reasoning, the zeroshot chain of thought technique can lead to more comprehensive and insightful answers that go beyond simple factual statements.
Learn more from the following resources:
- [@article@Zero-Shot Prompting | Prompt Engineering Guide](https://www.promptingguide.ai/techniques/zeroshot)
- [@article@Zero-Shot CoT Prompting: Improving AI with Step-by-Step Reasoning](https://learnprompting.org/docs/intermediate/zero_shot_cot)
- [@article@Shot-Based Prompting: Zero-Shot, One-Shot, and Few-Shot Prompting](https://learnprompting.org/docs/basics/few_shot)
- [@guides@Introduction to Advanced Zero-Shot Prompting Techniques](https://learnprompting.org/docs/advanced/zero_shot/introduction)

@ -10,30 +10,43 @@ Let's ask a slightly complicated customer service question:
```
CUSTOMER INQUIRY:
I just bought a T-shirt from your Arnold collection on March 1st. I saw that it was on discount, so bought a shirt that was originally $30, and got 40% off. I saw that you have a new discount for shirts at 50%. I'm wondering if I can return the shirt and have enough store credit to buy two of your shirts?
I just bought a T-shirt from your Arnold collection on March 1st.
I saw that it was on discount, so bought a shirt that was originally $30, and got 40% off.
I saw that you have a new discount for shirts at 50%.
I'm wondering if I can return the shirt and have enough store credit to buy two of your shirts?
INSTRUCTIONS:
You are a customer service agent tasked with kindly responding to customer inquiries. Returns are allowed within 30 days. Today's date is March 29th. There is currently a 50% discount on all shirts. Shirt prices range from $18-$100 at your store. Do not make up any information about discount policies.CUSTOMER INQUIRY:
I just bought a T-shirt from your Arnold collection on March 1st. I saw that it was on discount, so bought a shirt that was originally $30, and got 40% off. I saw that you have a new discount for shirts at 50%. I'm wondering if I can return the shirt and have enough store credit to buy two of your shirts?
INSTRUCTIONS:
You are a customer service agent tasked with kindly responding to customer inquiries. Returns are allowed within 30 days. Today's date is March 29th. There is currently a 50% discount on all shirts. Shirt prices range from $18-$100 at your store. Do not make up any information about discount policies.
You are a customer service agent tasked with kindly responding to customer inquiries.
Returns are allowed within 30 days.
Today's date is March 29th.
There is currently a 50% discount on all shirts. Shirt prices range from $18-$100 at your store.
Do not make up any information about discount policies.
```
Output from the model would be:
```
Thank you for your inquiry. Unfortunately, since you purchased the shirt on March 1st, it is outside of our 30-day return policy. However, we do have a 50% discount on all shirts currently, so you could purchase two shirts with the same amount of store credit. Prices for our shirts range from $18-$100. Please let us know if you have any other questions.
Thank you for your inquiry. Unfortunately, since you purchased the shirt on March 1st, it is outside of our 30-day return policy.
However, we do have a 50% discount on all shirts currently, so you could purchase two shirts with the same amount of store credit.
Prices for our shirts range from $18-$100. Please let us know if you have any other questions.
```
That failed (we are within the return time), so let's try to break it down into subproblems:
```
CUSTOMER INQUIRY:
I just bought a T-shirt from your Arnold collection on March 1st. I saw that it was on discount, so bought a shirt that was originall $30, and got 40% off. I saw that you have a new discount for shirts at 50%. I'm wondering if I can return the shirt and have enough store credit to buy two of your shirts?
I just bought a T-shirt from your Arnold collection on March 1st.
I saw that it was on discount, so bought a shirt that was originally $30, and got 40% off.
I saw that you have a new discount for shirts at 50%. I'm wondering if
I can return the shirt and have enough store credit to buy two of your shirts?
INSTRUCTIONS:
You are a customer service agent tasked with kindly responding to customer inquiries. Returns are allowed within 30 days. Today's date is March 29th. There is currently a 50% discount on all shirts. Shirt prices range from $18-$100 at your store. Do not make up any information about discount policies.
You are a customer service agent tasked with kindly responding to customer inquiries.
Returns are allowed within 30 days.
Today's date is March 29th.
There is currently a 50% discount on all shirts.
Shirt prices range from $18-$100 at your store.
Do not make up any information about discount policies.
What subproblems must be solved before answering the inquiry?
```
@ -50,19 +63,34 @@ Let's try to solve the first subproblem:
```
CUSTOMER INQUIRY:
I just bought a T-shirt from your Arnold collection on March 1st. I saw that it was on discount, so bought a shirt that was originally $30, and got 40% off. I saw that you have a new discount for shirts at 50%. I'm wondering if I can return the shirt and have enough store credit to buy two of your shirts?
I just bought a T-shirt from your Arnold collection on March 1st.
I saw that it was on discount, so bought a shirt that was originally $30, and got 40% off.
I saw that you have a new discount for shirts at 50%.
I'm wondering if I can return the shirt and have enough store credit to buy two of your shirts?
INSTRUCTIONS:
You are a customer service agent tasked with kindly responding to customer inquiries. Returns are allowed within 30 days. Today's date is March 29th. There is currently a 50% discount on all shirts. Shirt prices range from $18-$100 at your store. Do not make up any information about discount policies.
Determine if the customer is within the 30-day return window. Let's go step by step.
You are a customer service agent tasked with kindly responding to customer inquiries.
Returns are allowed within 30 days.
Today's date is March 29th.
There is currently a 50% discount on all shirts.
Shirt prices range from $18-$100 at your store.
Do not make up any information about discount policies.
Determine if the customer is within the 30-day return window.
Let's go step by step.
```
Output from the model would be:
```
Yes, the customer is within the 30-day return window. The customer purchased the shirt on March 1st, and today's date is March 29th, which is within the 30-day return window.
Yes, the customer is within the 30-day return window.
The customer purchased the shirt on March 1st, and today's date is March 29th, which is within the 30-day return window.
Therefore, the customer is eligible to return the shirt and receive store credit for the purchase. The customer can use the store credit to purchase two shirts at the current 50% discount.
Therefore, the customer is eligible to return the shirt and receive store credit for the purchase.
The customer can use the store credit to purchase two shirts at the current 50% discount.
```
By just solving the first subproblem, we were able to solve the entire problem. If GPT-3 did not return an answer immediately, we could have solved the next subproblem and so on until it did return an answer. Note that we use "Let's go step by step." The addition of this phrase is not always necessary, but it helps for this example.
By just solving the first subproblem, we were able to solve the entire problem. If GPT-3 did not return an answer immediately, we could have solved the next subproblem and so on until it did return an answer. Note that we use "Let's go step by step." The addition of this phrase is not always necessary, but it helps for this example.
Learn more from the following resources:
- [@article@Least-to-Most Prompting](https://learnprompting.org/docs/intermediate/least_to_most)

@ -27,3 +27,8 @@ Or you could also combine these prompts into a single prompt as follows:
```
I am writing a guide about SEO. Take the 10 key topics about SEO and write a detailed introduction to each.
```
Learn more from the following resources:
- [@article@Generated Knowledge in Prompts: Boosting AI Accuracy and Reliability](https://learnprompting.org/docs/intermediate/generated_knowledge#dual-prompt-approach)
- [@article@Recitation-augmented and Dual Prompting](https://aimlapi.com/academy-articles/recitation-augmented-and-dual-prompting)

@ -1,3 +1,11 @@
# Combining Techniques
All the techniques we've covered so far are useful on their own, but they're even more powerful when combined. For example, you can combine "Role Prompting" and any other prompting technique e.g. Chain of Thought, Dual Prompt, etc. to get more specific responses.
All the techniques we've covered so far are useful on their own, but they're even more powerful when combined. For example, you can combine "Role Prompting" and any other prompting technique e.g. Chain of Thought, Dual Prompt, etc. to get more specific responses.
Learn more from the following resources:
- [@article@Combining Prompting Techniques: Enhance AI Outputs](https://learnprompting.org/docs/basics/combining_techniques)
- [@course@Combining prompting techniques - Hyperskill](https://hyperskill.org/learn/step/47989)
- [@guides@The Ultimate Guide to LLM Prompting, Fine-tuning, and Data Management ](https://medium.com/@subhraj07/the-ultimate-guide-to-llm-prompting-fine-tuning-and-data-management-933bbd2d05f4)
- [@video@4 Methods of Prompt Engineering - IBM](https://www.youtube.com/watch?v=1c9iyoVIwDs)

@ -1,18 +1,26 @@
# Structured Data
Structured data helps in organizing information. It is especially useful in applications like e-commerce where you need to convert user input into a structured format.
Structured data is a powerful tool for organizing information effectively, particularly in real-world applications like e-commerce, mobile, and search. It helps convert user inputs into structured formats such as JSON, Markdown tables, HTML, and YAML, enhancing data management and retrieval. Recent improvements in this field include Google's expanded product information types, the increasing use of AI to automate and enhance structured data processes, the continued preference for JSON-LD as a structured data format, and enhanced mobile experiences through rich media and detailed information in search results.
### Example 1
## Examples
Let's say a user searches for `Birthday gift for my 18 months old daughter`. You can use a prompt to generate a JSON object from this input:
### Example: JSON Object
```
Print a JSON object containing `gender` ("male", "female"), `occasion` (one of "party", "birthday", "anniversary"), `age_years` (numeric value) from the text delimited by triple quotes:
"""Birthday gift for my 18 months old daughter"""
```
Consider a user searching for a "Birthday gift for my 18 months old daughter." Using prompts, you can generate structured data in various formats.
JSON Example:
```json
Print a JSON object containing:
- `gender` ("male", "female"),
- `occasion` (one of "party", "birthday", "anniversary"),
- `age_years` (numeric value)
from the text delimited by triple quotes:
"""Birthday gift for my 18 months old daughter"""
### Output
```
**Output**
The model would generate the following JSON object:
```json
@ -21,25 +29,38 @@ The model would generate the following JSON object:
"occasion": "birthday",
"age_years": 1.5
}
```
### Example 2
The following examples extend the original Print JSON prompt to convert one structured data format into others, demonstrating another capability of structured data: data format interchange from a single source prompt.
Consider a user input `Anniversary gift for my husband`. You can use a prompt to generate a JSON object from this input:
### Example: Markdown Table
```
Print a JSON object containing `gender` ("male", "female"), `occasion` (one of "party", "birthday", "anniversary"), `age_years` (numeric value) from the text delimited by triple quotes:
"""Anniversary gift for my husband"""
From JSON to Markdown table
```markdown
| Gender | Occasion | Age (years) |
|--------|----------|-------------|
| Female | Birthday | 1.5 |
```
### Output
### Example: HTML Table
The model would generate the following JSON object:
From JSON to HTML table
```json
{
"gender": "male",
"occasion": "anniversary",
"age_years": null
}
```
<table>
<tr>
<th>Gender</th>
<th>Occasion</th>
<th>Age (years)</th>
</tr>
<tr>
<td>Female</td>
<td>Birthday</td>
<td>1.5</td>
</tr>
</table>
Learn more from the following resources:
- [@article@How to Use AI to Organize Data into Tables for Reports and Articles](https://learnprompting.org/docs/basic_applications/table_generation)

@ -1,6 +1,10 @@
# Inferring
Making use of LLMs to infer information from text is another common use case. Imagine you are running a famous e-commerce store with thousands of reviews. You want to group the reviews by the sentiment of the review. You can use LLMs to infer the sentiment of the review.
Making use of LLMs to infer information from text, i.e. text analysis, is another common use case. One frequent application is sentiment analysis, such as grouping e-commerce reviews by sentiment. Beyond this, LLMs have diverse real-world applications, including customer service automation, market research, fraud detection, compliance monitoring, speech-to-text transcription, and anomaly detection. These applications demonstrate the versatility of LLMs in extracting valuable insights from text data, automating processes, and enhancing decision-making across different sectors. By leveraging LLMs, businesses can improve operational efficiency, gain deeper customer insights, and tackle complex challenges in areas such as cybersecurity and regulatory compliance.
### Example
Imagine you are running a famous e-commerce store with thousands of reviews. You want to group the reviews by the sentiment of the review. You can use LLMs to infer the sentiment of the review.
Here are some prompts showing the use of inference:
@ -21,3 +25,9 @@ Identify a list of emotions that the writer of the following review is expressin
```
Output: `happy, satisfied, pleased, content, enthusiastic`
Learn more from the following resources:
- [@article@Generating Data | Prompt Engineering Guide](https://www.promptingguide.ai/applications/generating)
- [@article@Every Day Prompt Engineering - Text Analysis](https://www.ikangai.com/every-day-prompt-engineering-part-5-text-analysis/)
- [@course@Prompt Engineering Course — Inferring, Transforming, and Expanding with ChatGPT ](https://medium.com/geekculture/prompt-engineering-course-openai-inferring-transforming-expanding-chatgpt-chatgpt4-e5f63132f422)

@ -26,3 +26,9 @@ We value your business and are committed to ensuring your complete satisfaction.
Best regards,
AI Customer Service
```
Learn more from the following resources:
- [@article@Write Emails Faster Using AI: Sick Days, Cold Outreach, and More](https://learnprompting.org/docs/basic_applications/writing_emails)
- [@guides@AI Tools for Business Writing](https://communicationprogram.wharton.upenn.edu/library/ai-tools/)
- [@article@Everyday Prompt Engineering -Emails](https://www.ikangai.com/everyday-prompt-engineering-part-one-emails/)

@ -1,6 +1,6 @@
# Coding Assistance
You can use ChatGPT for debugging, code generation, reformatting, commenting, and more.
Large Language Models (LLMs) tailored for coding have become invaluable tools for developers, offering a wide range of functionalities to assist in coding tasks. These include debugging, code generation, reformatting and reviewing, translating between programming languages, refactoring code, program simulations, and generating unit tests. Recent advancements have expanded their capabilities to include code optimization, documentation generation, bug fixing, project management assistance, security analysis, and version control support. New features also allow users to interact with outputs directly, such as editing or refining code suggestions with or without additional prompts, like OpenAI's Canvas or Anthropic's Artifacts. These advancements make LLMs' capabilities available across the software development process, enhancing productivity and streamlining workflows across various stages of coding and project management.
## Code Generation
@ -26,6 +26,7 @@ noun = random.choice(nouns)
madlib = f"I {verb} {preposition} the {noun}."
print(madlib)
```
## Commenting and Reformatting Code
@ -62,6 +63,7 @@ for i in range(num_points):
# Print a space for padding, followed by an asterisk at the appropriate position on the wave
print(' ' * (amplitude + y) + '*')
```
## Debugging
@ -111,7 +113,9 @@ for i in numbers:
if j in numbers:
numbers.remove(j)
print(numbers)
```
Note that we asked the bot to "act like a senior developer" to optimize the script. You can also dictate that it have a certain area of expertise (e.g., sorting algorithms) or number of years of experience. Alternatively, if you have a script that seems overly complicated, you can ask ChatGPT to write that script "as a very junior developer."
## Translating Between Programming Languages
@ -147,8 +151,13 @@ salary = float(input("Enter employee's salary: "))
ss_tax = salary * 0.062
print("Social Security tax is: $", round(ss_tax, 2))
```
Learn more from the following resources:
- [@article@LLM Coding Assistance](https://learnprompting.org/docs/basic_applications/coding_assistance)
- [@guides@Generating Code | Prompt Engineering Guide](https://www.promptingguide.ai/applications/coding)
- [@article@Prompt engineering 101 for developers](https://www.pluralsight.com/resources/blog/software-development/prompt-engineering-for-developers)
- [@guide@Prompt Engineering for Coding Tasks ](https://towardsdatascience.com/prompt-engineering-llms-coding-chatgpt-artificial-intelligence-c16620503e4e)
- [@article@How GenAI Transforms Software Development: 11 Use Cases](https://www.index.dev/blog/11-generative-ai-use-cases-software-development)

@ -1,6 +1,6 @@
# Study Buddy
One of our favorite ways to use LLMs is as a study tool! In particular, it is useful for explaining confusing terms as well as quizzing you on test content.
LLMs have become powerful study & tutoring tools, offering various applications in education. They can explain complex terms, generate quizzes, and provide personalized learning experiences adapted to individual needs, including neurodivergent learning styles. Recent developments have expanded their capabilities to include multilingual support, automated grading and feedback, content creation for diverse educational materials, and improved accessibility for all students. These advancements enable LLMs to enhance comprehension, retention, and overall educational outcomes while allowing educators to focus on more complex tasks.
## Explaining Terms
@ -46,4 +46,7 @@ And the model will generate 5 questions for you to answer.
There are probably other ways to use LLMs to study. Many learning companies like Duolingo and Quizlet are implementing LLMs into their products.
Learn more from the following resources:
- [@article@LLMs as a Study Buddy](https://learnprompting.org/docs/basic_applications/study_tool)
-

@ -1,4 +1,9 @@
# Designing Chatbots
Building chatbots to offer customer support, sales, or other services is a hot topic in the tech industry. LLMs make it possible to build chatbots that can respond to a wide variety of user inputs, and can be trained to respond to new inputs with minimal effort.
Designing chatbots and other interfaces for real-world use cases remains a key focus in the tech industry. Large language models (LLMs) continue to enhance chatbot capabilities, allowing them to respond effectively to diverse user inputs with minimal effort and training. The use cases for new user interfaces are expanding beyond traditional chatbots to include: voice interfaces, gesture-based interfaces, multimodal AI systems using various input types, AI agents performing complex tasks autonomously, and extended reality (XR) combining AI with AR/VR for immersive experiences. These advancements highlight the evolving nature of user interfaces, driven by technological innovations that promise more engaging and efficient interactions.
Learn more from the following resources:
- [@video@What is a Chatbot?](https://www.youtube.com/watch?v=o9-ObGgfpEk)
- [@video@Do Chatbots Need AI?](https://www.youtube.com/watch?v=93l-2Usc08U)
- [@video@Generative vs Rules-Based Chatbots](https://www.youtube.com/watch?v=lZjUS_8btEo)

@ -1,11 +1,24 @@
# Real World Usage Examples
LLMs are used in a variety of ways. Here are some examples of how LLMs are used in the real world.
LLMs are being used in an increasing number and variety of applications and embedded approaches. These use cases are dynamic and ever evolving.
### Examples
Here are some examples of how LLMs are used in the real world, though not exhaustive:
- Natural Language Processing
- Information Retrieval
- Internet Search & Citations
- Question Answering
- Text Generation
- Text Summarization
- Learning Tools
- Chatbots
- Chatbots and Assistants
- Agents & Automation
- Multimedia Content
Follow the resources and latest updates to learn more:
- [@article@Solving Everyday Tasks with GenAI: Emails, Contracts, and More](https://learnprompting.org/docs/basic_applications/introduction)
- [@article@LLM Applications & Guides | Prompt Engineering Guide](https://www.promptingguide.ai/applications)
- [@article@7 Large Language Model (LLM) Use Cases and Applications: How AI is Reshaping Industries ](https://collabnix.com/7-top-large-language-model-llm-use-cases-and-applications-how-ai-is-reshaping-industries/)

@ -1,5 +1,8 @@
# Citing Sources
LLMs for the most part cannot accurately cite sources. This is because they do not have access to the Internet, and do not exactly remember where their information came from. They will frequently generate sources that look good, but are entirely inaccurate.
While advancements have been made in the ability of Large Language Models (LLMs) to cite sources — particularly through real-time API access, search-augmented generation, and specialized training — significant limitations persist. LLMs continue to struggle with hallucinations, generating inaccurate or fictitious citations. Many LLMs lack real-time API access, which hampers their ability to provide up-to-date information, or are limited by their knowledge cutoff dates. They sometimes cannot independently verify sources or fully grasp the contextual relevance of citations, raising concerns regarding plagiarism and intellectual property. To address these challenges, ongoing efforts focus on improving real-time retrieval (RAG) methods, enhancing training, and integrating human oversight to ensure accuracy in citations.
Strategies like search augmented LLMs (LLMs that can search the Internet and other sources) can often fix this problem though.
Learn more from the following resources:
- [@guides@Why Don’t Large Language Models Share URL References in Their Responses](https://medium.com/@gcentulani/why-dont-large-language-models-share-url-references-in-their-responses-bf427e513861)
- [@article@Effective large language model adaptation for improved grounding](https://research.google/blog/effective-large-language-model-adaptation-for-improved-grounding/)

@ -1,4 +1,11 @@
# Bias
LLMs are often biased towards generating stereotypical responses. Even with safe guards in place, they will sometimes say sexist/racist/homophobic things. Be careful when using LLMs in consumer-facing applications, and also be careful when using them in research (they can generate biased results).
Bias in Large Language Models (LLMs) remains a significant challenge, with models often generating stereotypical or discriminatory responses despite advancements in mitigation techniques. These biases can manifest in various forms, including gender, racial, and cultural prejudices, potentially leading to underfitting or overfitting in model outputs. Recent studies have highlighted persistent biases in LLM-generated content, emphasizing the need for caution when deploying these models in consumer-facing applications or research settings. Efforts to address this issue include developing diverse training datasets, implementing regulatory frameworks, and creating new evaluation tools. However, the challenge remains substantial as LLMs continue to influence societal perceptions. Developers and users must be aware of these pitfalls to avoid reputational damage and unintended negative impacts on individuals or communities.
Learn more from the following resources:
- [@guides@Biases in Prompts: Learn how to tackle them](https://mindfulengineer.ai/understanding-biases-in-prompts/)
- [@guides@Bias in AI: tackling the issues through regulations and standards](https://publicpolicy.ie/papers/bias-in-ai-tackling-the-issues-through-regulations-and-standards/)
- [@article@What Is AI Bias?](https://www.ibm.com/topics/ai-bias)
- [@article@What Is Algorithmic Bias?](https://www.ibm.com/think/topics/algorithmic-bias)
- [@article@AI Bias Examples](https://www.ibm.com/think/topics/shedding-light-on-ai-bias-with-real-world-examples)

@ -1,27 +1,14 @@
# Hallucinations
LLMs will frequently generate falsehoods when asked a question that they do not know the answer to. Sometimes they will state that they do not know the answer, but much of the time they will confidently give a wrong answer.
Large Language Model (LLM) hallucinations in 2024 can be broadly categorized into faithfulness and factuality issues. **Faithfulness hallucinations** occur when the model's output deviates from provided sources or context, including problems with source-reference divergence, context retrieval, dialogue history misinterpretation, and erroneous summarization. **Factuality hallucinations**, on the other hand, involve the generation of incorrect or unsupported information, encompassing factual inaccuracies, entity errors, overclaims, unverifiable statements, nonsensical responses, contradictions, and fabricated data.
### Causes of Hallucinations
These hallucinations stem from various causes such as training data issues, model limitations, prompt-related problems, and overfitting. To mitigate these challenges, strategies like Retrieval-Augmented Generation (RAG), improved training data, rigorous evaluation, clear user communication, advanced prompt engineering, model fine-tuning, output filtering, and multi-model approaches are being employed. As the field progresses, understanding and addressing these hallucination types remains crucial for enhancing the reliability and trustworthiness of LLM-generated content.
There are several factors contributing to hallucinations in LMs:
Learn more from the following resources:
1. **Inherent limitations**: The training data for the LMs are massive, yet they still cannot contain the entire knowledge about the world. As a result, LMs have inherent limitations in handling certain facts or details, which leads to hallucinations in the generated text.
- [@article@What are AI hallucinations?](https://www.ibm.com/topics/ai-hallucinations)
- [@article@Hallucination (artificial intelligence) - Wikipedia](https://en.wikipedia.org/wiki/Hallucination_(artificial_intelligence))
- [@video@Why Large Language Models Hallucinate - IBM](https://www.youtube.com/watch?v=cfqtFvWOfg0)
- [@video@Risks of Large Language Models - IBM](https://www.youtube.com/watch?v=r4kButlDLUc)
- [@guides@Key Strategies to Minimize LLM Hallucinations](https://www.turing.com/resources/minimize-llm-hallucinations-strategy)
2. **Training data biases**: If the training data contains biases or errors, it may lead to hallucinations in the output as LMs learn from the data they've been exposed to.
3. **Token-based scoring**: The default behavior of many LMs, like GPT models, is to generate text based on token probabilities. Sometimes this can lead to high-probability tokens being selected even if it doesn't make sense with the given prompt.
### Mitigating Hallucinations
To reduce the occurrence of hallucinations in the generated text, consider the following strategies:
1. **Specify instructions**: Make the prompt more explicit with clear details and constraints. This can help guide the model to generate more accurate and coherent responses.
2. **Step-by-step approach**: Instead of asking the model to generate a complete response in one go, break down the task into smaller steps and iteratively generate the output. This can help in maintaining better control over the generated content.
3. **Model adjustments**: Tweak various parameters, such as `temperature` or `top_p`, to adjust the randomness and control of the generated text. Lower values will make the output more conservative, which can help reduce hallucinations.
4. **Validating and filtering**: Develop post-processing steps to validate and filter the generated text based on specific criteria or rules to minimize the prevalence of hallucinations in the output.
Remember that even with these strategies, it's impossible to completely eliminate hallucinations. However, being aware of their existence and employing methods to mitigate them can significantly improve the quality and reliability of LM-generated content.

@ -1,3 +1,7 @@
# Math
LLMs are often bad at math. They have difficulty solving simple math problems, and they are often unable to solve more complex math problems.
LLMs struggle with math. While they have improved at solving simple math problems, they often come up short on more complex ones, especially when minor semantic variations are introduced. This is particularly relevant for mathematical reasoning. Studies show that LLMs rely heavily on pattern recognition rather than genuine logical reasoning, leading to significant performance drops when faced with minor changes in problem wording or irrelevant information. This highlights a critical limitation in their reasoning capabilities.
Learn more from the following resources:
- [@article@Apple Says AI’s Math Skills Fall Short](https://www.pymnts.com/artificial-intelligence-2/2024/apple-says-ais-math-skills-fall-short/)

@ -1,13 +1,11 @@
# Prompt Hacking
Prompt hacking is a term used to describe a situation where a model, specifically a language model, is tricked or manipulated into generating outputs that violate safety guidelines or are off-topic. This could include content that's harmful, offensive, or not relevant to the prompt.
Prompt hacking is a form of adversarial prompting where language models are manipulated to generate outputs that violate safety guidelines or are off-topic. Common techniques include manipulating keywords, exploiting grammar and negations, and using leading questions. To combat this, developers implement safety mechanisms such as content filters, continual analysis, and carefully designed prompt templates. As language models become more integrated into digital infrastructure, concerns about prompt injection, data leakage, and potential misuse have grown. In response, evolving defense strategies like prompt shields, enhanced input validation, and fine-tuning for adversarial detection are being developed. Continuous monitoring and improvement of these safety measures are crucial to ensure responsible model behaviour and output alignment with desired guidelines.
There are a few common techniques employed by users to attempt "prompt hacking," such as:
Learn more from the following resources:
1. **Manipulating keywords**: Users may introduce specific keywords or phrases that are linked to controversial, inappropriate, or harmful content in order to trick the model into generating unsafe outputs.
2. **Playing with grammar**: Users could purposely use poor grammar, spelling, or punctuation to confuse the model and elicit responses that might not be detected by safety mitigations.
3. **Asking leading questions**: Users can try to manipulate the model by asking highly biased or loaded questions, hoping to get a similar response from the model.
To counteract prompt hacking, it's essential for developers and researchers to build in safety mechanisms such as content filters and carefully designed prompt templates to prevent the model from generating harmful or unwanted outputs. Constant monitoring, analysis, and improvement to the safety mitigations in place can help ensure the model's output aligns with the desired guidelines and behaves responsibly.
Read more about prompt hacking here [Prompt Hacking](https://learnprompting.org/docs/category/-prompt-hacking).
- [@article@Prompt Hacking](https://learnprompting.org/docs/category/-prompt-hacking)
- [@article@LLM Security Guide - Understanding the Risks of Prompt Injections and Other Attacks on Large Language Models ](https://www.mlopsaudits.com/blog/llm-security-guide-understanding-the-risks-of-prompt-injections-and-other-attacks-on-large-language-models)
- [@guides@OWASP Top 10 for LLM & Generative AI Security](https://genai.owasp.org/llm-top-10/)
- [@video@Explained: The OWASP Top 10 for Large Language Model Applications](https://www.youtube.com/watch?v=cYuesqIKf9A)
- [@video@Artificial Intelligence: The new attack surface](https://www.youtube.com/watch?v=_9x-mAHGgC4)

@ -1,27 +1,76 @@
# Pitfalls of LLMs
LLMs are extremely powerful, but they are by no means perfect. There are many pitfalls that you should be aware of when using them.
LLMs are extremely powerful. There are many pitfalls, safety challenges and risks that you should be aware of when using them.
### Model Guessing Your Intentions
### Language Translation
Sometimes, LLMs might not fully comprehend the intent of your prompt and may generate generic or safe responses. To mitigate this, make your prompts more explicit or ask the model to think step-by-step before providing a final answer.
There are several risks associated with LLMs in language translation.
### Sensitivity to Prompt Phrasing
- Inaccurate translations
- Contextual misinterpretation
- Biased translations
- Deepfakes
- Privacy and data security
- Legal and regulatory compliance
LLMs can be sensitive to the phrasing of your prompts, which might result in completely different or inconsistent responses. Ensure that your prompts are well-phrased and clear to minimize confusion.
### Text Generation
### Model Generating Plausible but Incorrect Answers
Text generation is a powerful capability of LLMs but also introduces certain risks and challenges.
In some cases, LLMs might generate answers that sound plausible but are actually incorrect. One way to deal with this is by adding a step for the model to verify the accuracy of its response or by prompting the model to provide evidence or a source for the given information.
- Misinformation and fake news
- Bias amplification
- Offensive or inappropriate content
- Plagiarism and copyright infringement
- Lack of transparency
- Privacy breaches
### Verbose or Overly Technical Responses
### Question Answering
LLMs, especially larger ones, may generate responses that are unnecessarily verbose or overly technical. To avoid this, explicitly guide the model by making your prompt more specific, asking for a simpler response, or requesting a particular format.
LLMs present several risks in the domain of question answering.
### LLMs Not Asking for Clarification
- Hallucination
- Outdated information
- Bias
- Harmful answers
- Lack of contextual understanding
- Privacy and security concerns
- Lack of transparency and explainability
When faced with an ambiguous prompt, LLMs might try to answer it without asking for clarification. To encourage the model to seek clarification, you can prepend your prompt with "If the question is unclear, please ask for clarification."
### Text Summarization
### Model Failure to Perform Multi-part Tasks
Text summarization is a powerful application of LLMs but also introduces certain risks and challenges.
Sometimes, LLMs might not complete all parts of a multi-part task or might only focus on one aspect of it. To avoid this, consider breaking the task into smaller, more manageable sub-tasks or ensure that each part of the task is clearly identified in the prompt.
- Information loss
- Bias amplification
- Contextual misinterpretation
### Sentiment Analysis
Sentiment analysis, the process of determining a piece of text’s sentiment or emotional tone, is an application where LLMs are frequently employed.
- Biased sentiment analysis
- Cultural and contextual nuances
- Limited domain understanding
- Misinterpretation of negation and ambiguity
- Overgeneralization and lack of individual variation
### Code Assistance
Code assistance and generation is an area where LLMs have shown promising capabilities.
- Security vulnerabilities
- Performance and efficiency challenges
- Quality and reliability concerns
- Insufficient understanding of business or domain context
- Intellectual property concerns
Read more from [Risks of Large Language Models: A comprehensive guide](https://www.deepchecks.com/risks-of-large-language-models/).
Learn more from the following resources:
- [@video@Risks of Large Language Models - IBM](https://www.youtube.com/watch?v=r4kButlDLUc)
- [@article@Risks of Large Language Models: A comprehensive guide](https://www.deepchecks.com/risks-of-large-language-models/)
- [@article@Limitations of LLMs: Bias, Hallucinations, and More](https://learnprompting.org/docs/basics/pitfalls)
- [@guides@Risks & Misuses | Prompt Engineering Guide](https://www.promptingguide.ai/risks)
- [@guides@OWASP Top 10 for LLM & Generative AI Security](https://genai.owasp.org/llm-top-10/)
- [@guides@LLM Security Guide - Understanding the Risks of Prompt Injections and Other Attacks on Large Language Models](https://www.mlopsaudits.com/blog/llm-security-guide-understanding-the-risks-of-prompt-injections-and-other-attacks-on-large-language-models)

@ -16,4 +16,3 @@ There are other supporting components that are used to manage the lifecycle of t
For more information, visit the following links:
- [@article@Spring Boot vs Spring MVC: How do They Compare?](https://www.javatpoint.com/spring-vs-spring-boot-vs-spring-mvc)
- [@article@Spring MVC components](https://www.javatpoint.com/spring-vs-spring-boot-vs-spring-mvc)

@ -0,0 +1,31 @@
---
import GuideContent from '../../components/Guide/GuideContent.astro';
import GuideHeader from '../../components/GuideHeader.astro';
import BaseLayout from '../../layouts/BaseLayout.astro';
import { getGuideById } from '../../lib/guide';
import { getOpenGraphImageUrl } from '../../lib/open-graph';
import { replaceVariables } from '../../lib/markdown';

// Page that renders the "devops-lifecycle" guide by its id.
const guideId = 'devops-lifecycle';
const guide = await getGuideById(guideId);
const { frontmatter: guideData } = guide!;

// Prefer an OG image explicitly set in the guide's frontmatter;
// otherwise generate one for this guide resource.
const ogImageUrl =
  guideData.seo.ogImageUrl ||
  getOpenGraphImageUrl({
    group: 'guide',
    resourceId: guideId,
  });
---

<BaseLayout
  title={replaceVariables(guideData.seo.title)}
  description={replaceVariables(guideData.seo.description)}
  permalink={guideData.excludedBySlug}
  canonicalUrl={guideData.canonicalUrl}
  ogImageUrl={ogImageUrl}
>
  <GuideHeader guide={guide!} />
  <GuideContent guide={guide!} />
</BaseLayout>

@ -0,0 +1,31 @@
---
import GuideContent from '../../components/Guide/GuideContent.astro';
import GuideHeader from '../../components/GuideHeader.astro';
import BaseLayout from '../../layouts/BaseLayout.astro';
import { getGuideById } from '../../lib/guide';
import { getOpenGraphImageUrl } from '../../lib/open-graph';
import { replaceVariables } from '../../lib/markdown';

// Page that renders the "full-stack-job-description" guide by its id.
const guideId = 'full-stack-job-description';
const guide = await getGuideById(guideId);
const { frontmatter: guideData } = guide!;

// Prefer an OG image explicitly set in the guide's frontmatter;
// otherwise generate one for this guide resource.
const ogImageUrl =
  guideData.seo.ogImageUrl ||
  getOpenGraphImageUrl({
    group: 'guide',
    resourceId: guideId,
  });
---

<BaseLayout
  title={replaceVariables(guideData.seo.title)}
  description={replaceVariables(guideData.seo.description)}
  permalink={guideData.excludedBySlug}
  canonicalUrl={guideData.canonicalUrl}
  ogImageUrl={ogImageUrl}
>
  <GuideHeader guide={guide!} />
  <GuideContent guide={guide!} />
</BaseLayout>
Loading…
Cancel
Save