chore: update roadmap content json (#7322)

Co-authored-by: kamranahmedse <4921183+kamranahmedse@users.noreply.github.com>
pull/7327/head
github-actions[bot] committed by GitHub
parent 487145b9a4
commit c40cda13d8
  1. public/roadmap-content/ai-engineer.json (802 changes)
  2. public/roadmap-content/computer-science.json (2 changes)
  3. public/roadmap-content/devops.json (19 changes)
  4. public/roadmap-content/frontend.json (265 changes)
  5. public/roadmap-content/python.json (15 changes)
  6. public/roadmap-content/redis.json (15 changes)

@@ -0,0 +1,802 @@
{
"_hYN0gEi9BL24nptEtXWU": {
"title": "Introduction",
"description": "An AI Engineer uses pre-trained models and existing AI tools to improve user experiences. They focus on applying AI in practical ways, without building models from scratch. This is different from AI Researchers and ML Engineers, who focus more on creating new models or developing AI theory.",
"links": []
},
"GN6SnI7RXIeW8JeD-qORW": {
"title": "What is an AI Engineer?",
"description": "An AI Engineer uses pre-trained models and existing AI tools to improve user experiences. They focus on applying AI in practical ways, without building models from scratch. This is different from AI Researchers and ML Engineers, who focus more on creating new models or developing AI theory.",
"links": []
},
"jSZ1LhPdhlkW-9QJhIvFs": {
"title": "AI Engineer vs ML Engineer",
"description": "AI Engineer differs from an AI Researcher or ML Engineer. AI Engineers focus on leveraging pre-trained models and existing AI technologies to enhance user experiences without the need to train models from scratch.",
"links": []
},
"wf2BSyUekr1S1q6l8kyq6": {
"title": "LLMs",
"description": "LLM or Large Language Models are AI models that are trained on a large amount of text data to understand and generate human language. They are the core of applications like ChatGPT, and are used for a variety of tasks, including language translation, question answering, and more.",
"links": []
},
"KWjD4xEPhOOYS51dvRLd2": {
"title": "Inference",
"description": "Inference involves using models developed through machine learning to make predictions or decisions. As part of the AI Engineer Roadmap, an AI engineer might create an inference engine, which uses rules and logic to infer new information based on existing data. Often used in natural language processing, image recognition, and similar tasks, inference can help AI systems provide useful outputs based on their training. Working with inference involves understanding different models, how they work, and how to apply them to new data to achieve reliable results.",
"links": []
},
"xostGgoaYkqMO28iN2gx8": {
"title": "Training",
"description": "Training in the field of AI involves feeding an algorithm or a neural network with data, and allowing it to adjust its parameters in order to improve its predictions over time. The main objective is to design the system to accurately recognize the patterns within the data set and make accurate predictions or decisions when confronted with new data. In the roadmap to becoming an AI engineer, understanding and implementing various training methodologies is a critical step. AI engineers require this skill to develop accurate and efficient algorithms that can learn from and make decisions based on data.",
"links": []
},
"XyEp6jnBSpCxMGwALnYfT": {
"title": "Embeddings",
"description": "Embedding refers to the conversion or mapping of discrete objects such as words, phrases, or even entire sentences into vectors of real numbers. It's an essential part of data preprocessing where high-dimensional data is transformed into a lower-dimensional equivalent. This dimensional reduction helps to preserve the semantic relationships between objects. In AI engineering, embedding techniques are often used in language-orientated tasks like sentiment analysis, text classification, and Natural Language Processing (NLP) to provide an understanding of the vast linguistic inputs AI models receive.",
"links": []
},
"LnQ2AatMWpExUHcZhDIPd": {
"title": "Vector Databases",
"description": "Vector databases are specialized databases that are capable of handling and processing data in the form of vectors. Unlike traditional relational databases, which store data in tables, vector databases work with data that can be represented as mathematical vectors. This makes them particularly well-suited for dealing with large, multi-dimensional datasets, which are commonplace in the field of artificial intelligence. As an AI Engineer, understanding and utilizing vector databases can come in handy for numerous tasks such as similarity search, image recognition, and other machine learning applications where large-scale vector data needs to be quickly queried.",
"links": []
},
"9JwWIK0Z2MK8-6EQQJsCO": {
"title": "RAG",
"description": "RAG is a paradigm for applying transformer-based generative models in Natural Language Processing tasks. It leverages a hybrid approach, i.e. it combines the capabilities of pre-trained language models and powerful retrieval methods to generate responses. For an AI Engineer, RAG forms an essential part of the NLP (Natural Language Processing) toolkit. This model operates by first retrieving relevant context from a large corpus, and then utilizing this context to generate detailed and contextually rich responses. Its successful application spans across a multitude of NLP tasks including machine translation, dialogue systems, QnA systems, and more. Therefore, RAG is a significant stop on the route to becoming an accomplished AI engineer as it equips them with skills to deal with complex NLP tasks efficiently.",
"links": []
},
"Dc15ayFlzqMF24RqIF_-X": {
"title": "Prompt Engineering",
"description": "Prompt Engineering refers to the process of carefully designing and shaping queries or prompts to extract specific responses or behaviors from artificial intelligence models. These prompts are often thought of as the gateway to exploiting these AI models and essential tools for machine testing and performance evaluations. They can affect the model's response, making it invaluable to AI Engineers who are developing AI systems and need to test model's reaction and adaptability with diverse prompts.",
"links": []
},
"9XCxilAQ7FRet7lHQr1gE": {
"title": "AI Agents",
"description": "AI Agents are a type of LLM that can be used to automate complex workflows that involve multiple steps and decisions.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "What are AI Agents?",
"url": "https://aws.amazon.com/what-is/ai-agents/",
"type": "article"
},
{
"title": "What are AI Agents?",
"url": "https://www.youtube.com/watch?v=F8NKVhkZZWI",
"type": "video"
}
]
},
"5QdihE1lLpMc3DFrGy46M": {
"title": "AI vs AGI",
"description": "AI (Artificial Intelligence) refers to systems designed to perform specific tasks, like image recognition or language translation, often excelling in those narrow areas. In contrast, AGI (Artificial General Intelligence) would be a system capable of understanding, learning, and applying intelligence across a wide range of tasks, much like a human, and could adapt to new situations without specific programming.",
"links": []
},
"qJVgKe9uBvXc-YPfvX_Y7": {
"title": "Impact on Product Development",
"description": "Incorporating Artificial Intelligence (AI) can transform the process of creating, testing, and delivering products. This could range from utilizing AI for enhanced data analysis to inform product design, use of AI-powered automation in production processes, or even AI as a core feature of the product itself.",
"links": []
},
"K9EiuFgPBFgeRxY4wxAmb": {
"title": "Roles and Responsiblities",
"description": "An AI Engineer is entrusted with the task of designing and implementing AI models. This involves working closely with Data Scientists to transform machine learning models into APIs, ensuring that the models are equipped to interact with software applications. AI Engineers are proficient in a variety of programming languages, work with vast datasets, and utilize AI-related applications. Additionally, they often handle tasks such as data preprocessing, data analysis, and machine learning algorithm deployment. They also troubleshoot any issues that might emerge during the AI lifecycle, while maintaining a high level of knowledge about the latest industry trends and technological advancements.",
"links": []
},
"d7fzv_ft12EopsQdmEsel": {
"title": "Pre-trained Models",
"description": "Pre-trained models are simply models created by some machine learning engineers to solve a problem. Such models are often shared and other machine learning engineers use these models for similar problems. These models are called pre-trained models because they have been previously trained by using large datasets. These pre-trained models can be used as the starting point for a variety of AI tasks, often as part of transfer learning, to save on the resources that would be needed to start a learning process from scratch. This hastens the journey of becoming an AI engineer as one gets to understand how to improve and fine-tune pre-existing models to specific tasks, making them an essential part of an AI engineer's development plan.",
"links": []
},
"1Ga6DbOPc6Crz7ilsZMYy": {
"title": "Benefits of Pre-trained Models",
"description": "LLM models are not only difficult to train, but they are also expensive. Pre-trained models are a cost-effective solution for developers and organizations looking to leverage the power of AI without the need to train models from scratch.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Why you should use Pre-trained Models",
"url": "https://cohere.com/blog/pre-trained-vs-in-house-nlp-models",
"type": "article"
}
]
},
"MXqbQGhNM3xpXlMC2ib_6": {
"title": "Limitations and Considerations",
"description": "Pre-trained Models are AI models that are previously trained on a large benchmark dataset and provide a starting point for AI developers. They help in saving training time and computational resources. However, they also come with certain limitations and considerations. These models can sometimes fail to generalize well to tasks outside of their original context due to issues like dataset bias or overfitting. Furthermore, using them without understanding their internal working can lead to problematic consequences. Finally, transfer learning, which is the mechanism to deploy these pre-trained models, might not always be the optimum solution for every AI project. Thus, an AI Engineer must be aware of these factors while working with pre-trained models.",
"links": []
},
"2WbVpRLqwi3Oeqk1JPui4": {
"title": "Open AI Models",
"description": "Open AI Models are a set of pre-designed, predefined models provided by OpenAI. These models are trained using Machine Learning algorithms to perform artificial intelligence tasks without any need of explicit programming. OpenAI's models are suited for various applications such as text generation, classification and extraction, allowing AI engineers to leverage them for effective implementations. Therefore, understanding and utilizing these models becomes an essential aspect in the roadmap for an AI engineer to develop AI-powered solutions with more efficiency and quality.",
"links": []
},
"vvpYkmycH0_W030E-L12f": {
"title": "Capabilities / Context Length",
"description": "OpenAI's capabilities include processing complex tasks like language understanding, code generation, and problem-solving. However, context length limits how much information the model can retain and reference during a session, affecting long conversations or documents. Advances aim to increase this context window for more coherent and detailed outputs over extended interactions.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "OpenAI Website",
"url": "https://platform.openai.com/docs/guides/fine-tuning/token-limits",
"type": "article"
}
]
},
"LbB2PeytxRSuU07Bk0KlJ": {
"title": "Cut-off Dates / Knowledge",
"description": "OpenAI models have a knowledge cutoff date, meaning they only have access to information available up until a specific time. For example, my knowledge is up to date until September 2023. As a result, I may not be aware of recent developments, events, or newly released technology. Additionally, these models don’t have real-time internet access, so they can't retrieve or update information beyond their training data. This can limit the ability to provide the latest details or react to rapidly changing topics.",
"links": []
},
"hy6EyKiNxk1x84J63dhez": {
"title": "Anthropic's Claude",
"description": "Claude is a family of large language models developed by Anthropic. Claude 3.5 Sonnet is the latest model (at the time of this writing) in the series, known for its advanced reasoning and multi-modality capabilities.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Clause Website",
"url": "https://claude.ai/",
"type": "article"
}
]
},
"oe8E6ZIQWuYvHVbYJHUc1": {
"title": "Google's Gemini",
"description": "Gemini, formerly known as Bard, is a generative artificial intelligence chatbot developed by Google. Based on the large language model of the same name, it was launched in 2023 after being developed as a direct response to the rise of OpenAI's ChatGPT",
"links": []
},
"3PQVZbcr4neNMRr6CuNzS": {
"title": "Azure AI",
"description": "Azure AI is a comprehensive set of AI services and tools provided by Microsoft. It includes a range of capabilities such as natural language processing, computer vision, speech recognition, and more. Azure AI is designed to help developers and organizations build, deploy, and scale AI solutions quickly and easily.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Azure Website",
"url": "https://azure.microsoft.com/en-us/products/ai-services/",
"type": "article"
}
]
},
"OkYO-aSPiuVYuLXHswBCn": {
"title": "AWS Sagemaker",
"description": "AWS Sagemaker is a fully managed platform that provides every developer and data scientist with the ability to build, train, and deploy machine learning (ML) models quickly. Sagemaker takes care of the underlying infrastructure, allowing developers to focus on building and improving their models.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "AWS Website",
"url": "https://aws.amazon.com/sagemaker/",
"type": "article"
}
]
},
"8XjkRqHOdyH-DbXHYiBEt": {
"title": "Hugging Face Models",
"description": "Hugging Face has a wide range of pre-trained models that can be used for a variety of tasks, including language understanding and generation, translation, chatbots, and more. Anyone can create an account and use their models, and the models are organized by task, provider, and other criteria.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Hugging Face",
"url": "https://huggingface.co/models",
"type": "article"
}
]
},
"n-Ud2dXkqIzK37jlKItN4": {
"title": "Mistral AI",
"description": "Mistral AI is a French startup founded in 2023, specializing in open-source large language models (LLMs). Created by former Meta and Google DeepMind researchers, it focuses on efficient, customizable AI solutions that promote transparency. Its flagship models, Mistral Large and Mixtral, offer state-of-the-art performance with lower resource requirements, gaining significant attention in the AI field.",
"links": []
},
"a7qsvoauFe5u953I699ps": {
"title": "Cohere",
"description": "Cohere is an AI platform that provides natural language processing (NLP) models and tools, enabling developers to integrate powerful language understanding capabilities into their applications. It offers features like text generation, semantic search, classification, and embeddings. Cohere focuses on scalability and ease of use, making it popular for tasks such as content creation, customer support automation, and building search engines with advanced semantic understanding. It also provides a user-friendly API for custom NLP applications.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Cohere",
"url": "https://cohere.com/",
"type": "website"
}
]
},
"5ShWZl1QUqPwO-NRGN85V": {
"title": "OpenAI Models",
"description": "OpenAI is an artificial intelligence research lab that is known for its cutting-edge models. These models, like GPT-3, are pre-trained on vast amounts of data and perform remarkably well on tasks like language translation, question-answering, and more without needing any specific task training. Using these pre-trained models can give a massive head-start in building AI applications, as it saves the substantial time and resources that are required for training models from scratch. For an AI Engineer, understanding and leveraging these pre-trained models can greatly accelerate development and lead to superior AI systems.",
"links": []
},
"zdeuA4GbdBl2DwKgiOA4G": {
"title": "OpenAI API",
"description": "OpenAI API is a powerful language model developed by OpenAI, a non-profit artificial intelligence research organization. It uses machine learning to generate text from a given set of keywords or sentences, presenting the capability to learn, understand, and generate human-friendly content. As an AI Engineering aspirant, familiarity with tools like the OpenAI API positions you on the right track. It can help with creating AI applications that can analyze and generate text, which is particularly useful in AI tasks such as data extraction, summarization, translation, and natural language processing.",
"links": []
},
"_bPTciEA1GT1JwfXim19z": {
"title": "Chat Completions API",
"description": "The Chat Completions API allows developers to create conversational agents by sending user inputs (prompts) and receiving model-generated responses. It supports multiple-turn dialogues, maintaining context across exchanges to deliver relevant responses. This API is often used for chatbots, customer support, and interactive applications where maintaining conversation flow is essential.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "OpenAI Website",
"url": "https://platform.openai.com/docs/api-reference/chat/completions",
"type": "article"
}
]
},
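As a rough illustration of the entry above, here is a minimal sketch of calling the Chat Completions API with the official `openai` Python package (v1+); the model name and prompts are placeholders, and an `OPENAI_API_KEY` environment variable is assumed.

```python
# Minimal Chat Completions sketch (assumes the `openai` v1+ package and an
# OPENAI_API_KEY environment variable; model name is illustrative).
from openai import OpenAI

client = OpenAI()  # picks up OPENAI_API_KEY from the environment

response = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Explain retrieval-augmented generation in one sentence."},
    ],
)

print(response.choices[0].message.content)
```

Multi-turn conversations are handled by appending each user and assistant message to the `messages` list before the next call, which is how context is maintained across exchanges.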
"9-5DYeOnKJq9XvEMWP45A": {
"title": "Writing Prompts",
"description": "Writing Prompts are specific instructions or guides provided to OpenAI API to produce desired texts. They can range from simple, straight sentences intended for generating specific outputs to more complex, creative ones aiming for more open-ended, artificial intelligent responses. While the OpenAI API is capable of executing an extensive variety of tasks, how it performs is strongly influenced by how these writing prompts are crafted and constructed. During the journey to become an AI Engineer, understanding and designing effective prompts becomes vital for proper system training and interaction.",
"links": []
},
"nyBgEHvUhwF-NANMwkRJW": {
"title": "Open AI Playground",
"description": "Open AI Playground is an interactive platform, provided by OpenAI, that enables developers to experiment with and understand the capabilities of OpenAI's offerings. Here, you can try out several cutting-edge language models like GPT-3 or Codex. This tool is crucial in the journey of becoming an AI Engineer, because it provides a hands-on experience in implementing and testing language models. Manipulating models directly helps you get a good grasp on how AI models can influence the results based on input parameters. Therefore, Open AI Playground holds significance on the AI Engineer's roadmap not only as a learning tool, but also as a vital platform for rapid prototyping and debugging.",
"links": []
},
"15XOFdVp0IC-kLYPXUJWh": {
"title": "Fine-tuning",
"description": "OpenAI API allows you to fine-tune and adapt pre-trained models to specific tasks or datasets, improving performance on domain-specific problems. By providing custom training data, the model learns from examples relevant to the intended application, such as specialized customer support, unique content generation, or industry-specific tasks.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "OpenAI Docs",
"url": "https://platform.openai.com/docs/guides/fine-tuning",
"type": "article"
}
]
},
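A hedged sketch of how a fine-tuning job can be started with the `openai` Python package; the file name `training_data.jsonl` and the base model name are placeholders, and the current docs should be checked for supported base models and the required JSONL format.

```python
# Fine-tuning sketch: upload chat-formatted JSONL examples, then start a job.
# File path and base model name are placeholders.
from openai import OpenAI

client = OpenAI()

# 1. Upload a JSONL file of training examples.
training_file = client.files.create(
    file=open("training_data.jsonl", "rb"),
    purpose="fine-tune",
)

# 2. Kick off the fine-tuning job on a base model.
job = client.fine_tuning.jobs.create(
    training_file=training_file.id,
    model="gpt-4o-mini-2024-07-18",  # illustrative base model
)

print(job.id, job.status)  # poll the job until it reports "succeeded"
```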
"qzvp6YxWDiGakA2mtspfh": {
"title": "Maximum Tokens",
"description": "Number of Maximum tokens in OpenAI API depends on the model you are using.\n\nFor example, the `gpt-4o` model has a maximum of 128,000 tokens.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "OpenAI API Documentation",
"url": "https://platform.openai.com/docs/api-reference/completions/create",
"type": "article"
}
]
},
"FjV3oD7G2Ocq5HhUC17iH": {
"title": "Token Counting",
"description": "Token counting refers to the process of quantifying the occurrence of tokens—unique instances of meaningful words, symbols, or other atomic units in a dataset. In artificial intelligence (AI), specifically in natural language processing (NLP), token counting serves as a foundational building block to understand and analyze text data. It helps in performing tasks such as text classification, sentiment analysis, and other language modeling tasks. It also forms the basis of techniques like Bag of Words, Term Frequency-Inverse Document Frequency (TF-IDF), and word embeddings, which are crucial in developing effective AI models.",
"links": []
},
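For example, tokens can be counted locally with the `tiktoken` library before a request is sent; the snippet below is a sketch, and the model-to-encoding mapping assumes a reasonably recent `tiktoken` release.

```python
# Count tokens locally with tiktoken to stay within a model's context window
# and estimate cost before calling the API.
import tiktoken

encoding = tiktoken.encoding_for_model("gpt-4o")  # needs a recent tiktoken
prompt = "How many tokens will this prompt consume?"

num_tokens = len(encoding.encode(prompt))
print(num_tokens, "tokens")
```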
"DZPM9zjCbYYWBPLmQImxQ": {
"title": "Pricing Considerations",
"description": "Pricing Considerations refer to the factors and elements that need to be taken into account when setting the price for a product or service. It includes aspects such as cost of production, market demand, competition, and perceived value. In the AI Engineer Roadmap, it can denote the determination of the cost involved in AI model development, implementation, maintenance, and upgrades. Various factors such as the complexity of models, the resources required, timeline, expertise needed, and the value provided to the user play a significant role in pricing considerations.",
"links": []
},
"8ndKHDJgL_gYwaXC7XMer": {
"title": "AI Safety and Ethics",
"description": "Learn about the principles and guidelines for building safe and ethical AI systems.",
"links": []
},
"cUyLT6ctYQ1pgmodCKREq": {
"title": "Prompt Injection Attacks",
"description": "Prompt Injection Attacks refer to a cyber threat where nefarious actors manipulate or inject malicious codes into the system using various techniques like SQL injection, Cross-Site Scripting (XSS), or Command Injection. This practice aims to exploit a software system's vulnerabilities, allowing unauthorized access to sensitive information. In the AI Engineer Roadmap, understanding these attack types is essential. Knowledge about such attacks can help developers in AI to build robust and secure AI systems that can resist potential threats and ensure system integrity. Better understanding of threat landscape can guide engineers toward implementing additional security measures during the design and development process of AI applications.",
"links": []
},
"lhIU0ulpvDAn1Xc3ooYz_": {
"title": "Bias and Fareness",
"description": "Bias and fairness in AI arise when models produce skewed or unequal outcomes for different groups, often reflecting imbalances in the training data. This can lead to discriminatory effects in critical areas like hiring, lending, and law enforcement. Addressing these concerns involves ensuring diverse and representative data, implementing fairness metrics, and ongoing monitoring to prevent biased outcomes. Techniques like debiasing algorithms and transparency in model development help mitigate bias and promote fairness in AI systems.",
"links": []
},
"sWBT-j2cRuFqRFYtV_5TK": {
"title": "Security and Privacy Concerns",
"description": "Security and Privacy Concerns encapsulates the understanding and addressing of potential risks associated with AI systems. These include, but are not limited to, data protection, access control, regulatory compliance, and the ethically complex area of how AI impacts individual privacy. As an aspiring AI Engineer, it is essential to acknowledge these concerns alongside technical skills as they influence the design, implementation, and application of AI technologies. Familiarity with this area helps in designing AI solutions that align with standards of security and privacy while effectively addressing the needs of the user.",
"links": []
},
"Pt-AJmSJrOxKvolb5_HEv": {
"title": "Conducting adversarial testing",
"description": "Adversarial testing involves creating malicious inputs to test the robustness of AI models. This includes testing for prompt injection, evasion, and other adversarial attacks.",
"links": []
},
"ljZLa3yjQpegiZWwtnn_q": {
"title": "OpenAI Moderation API",
"description": "OpenAI Moderation API is a feature or service provided by OpenAI that helps in controlling or filtering the output generated by an AI model. It is highly useful in identifying and preventing content that violates OpenAI’s usage policies from being shown. As an AI engineer, learning to work with this API helps implement a layer of security to ensure that the AI models developed are producing content that aligns with the ethical and moral guidelines set in place. Thus, it becomes a fundamental aspect of the AI Engineer Roadmap when dealing with user-generated content or creating AI-based services that interact with people.",
"links": []
},
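A minimal sketch of screening user input with the Moderation endpoint via the `openai` Python package before passing it on to a model; how a flagged input is handled afterwards is application-specific.

```python
# Screen untrusted text with the Moderation endpoint before further processing.
from openai import OpenAI

client = OpenAI()

result = client.moderations.create(input="some user-submitted text")

if result.results[0].flagged:
    print("Input violates usage policies; refusing to process it.")
else:
    print("Input looks fine; continue with the normal pipeline.")
```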
"4Q5x2VCXedAWISBXUIyin": {
"title": "Adding end-user IDs in prompts",
"description": "Sending end-user IDs in your requests can be a useful tool to help OpenAI monitor and detect abuse. This allows OpenAI to provide you with more actionable feedback in the event that they may detect any policy violations in applications.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "OpenAI Documentation",
"url": "https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids",
"type": "article"
}
]
},
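A sketch of passing a stable, anonymised end-user identifier through the `user` field of a Chat Completions request; hashing an email address with SHA-256 is just one way to produce an opaque, stable ID and is an assumption of this example.

```python
# Attach an anonymised end-user ID so OpenAI can attribute abuse signals.
import hashlib

from openai import OpenAI

client = OpenAI()

def anonymised_user_id(email: str) -> str:
    # Hash instead of sending raw personal data; any stable opaque ID works.
    return hashlib.sha256(email.encode()).hexdigest()

response = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Hello!"}],
    user=anonymised_user_id("jane@example.com"),
)
print(response.choices[0].message.content)
```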
"qmx6OHqx4_0JXVIv8dASp": {
"title": "Robust prompt engineering",
"description": "Robust prompt engineering refers to designing, refining, and optimizing the instructions or queries given to an AI model to execute specific tasks. Originally, AI models were trained on a wide range of internet text without any specific commands. However, it can be more effective to provide these models with explicit prompts to guide their responses or actions. Prompt engineering aids in shaping the output of an AI model, significantly improving the accuracy of its responses. This becomes particularly valuable for AI engineers when working on state-of-the-art models like GPT-3, where the output's quality and relevance can be heavily influenced by innovative and well-structured prompts. With robust prompt engineering, AI practitioners can better channel the model's raw capabilities into desired outcomes, marking a crucial skill in an AI Engineer's journey.",
"links": []
},
"t1SObMWkDZ1cKqNNlcd9L": {
"title": "Know your Customers / Usecases",
"description": "Understanding your target customers and use-cases helps making informed decisions during the development to ensure that the final AI solution appropriately meets the relevant needs of the users. You can use this knowledge to choose the right tools, frameworks, technologies, design the right architecture, and even prevent abuse.",
"links": []
},
"ONLDyczNacGVZGojYyJrU": {
"title": "Constraining outputs and inputs",
"description": "Constraining outputs and inputs is important for controlling the behavior of AI models. This includes techniques like output filtering, input validation, and rate limiting.",
"links": []
},
"a_3SabylVqzzOyw3tZN5f": {
"title": "OpenSource AI",
"description": "OpenSource AI refers to artificial intelligence tools, software, libraries and platforms that are freely available to the public, allowing individuals and organizations to use, modify and distribute them as per their requirements. The OpenSource AI initiatives provide an ecosystem for AI developers to innovate, collaborate and mutually learn by sharing their codebase and datasets. Specifically, in the AI engineer's roadmap, OpenSource AI aids in accelerating the AI application development process, provides access to pre-trained models, and promotes the understanding of AI technology through transparency.",
"links": []
},
"RBwGsq9DngUsl8PrrCbqx": {
"title": "Open vs Closed Source Models",
"description": "Open source models are types of software whose source code is available for the public to view, modify, and distribute. They encourage collaboration and transparency, often resulting in rapid improvements and innovations. Closed source models, on the other hand, do not make their source code available and are typically developed and maintained by specific companies or teams. They often provide more stability, support, and consistency. Within the AI Engineer Roadmap, both open and closed source models play a unique role. While open source models allow for customization, experimentation and a broader understanding of underlying algorithms, closed source models might offer proprietary algorithms and structures that could lead to more efficient or unique solutions. Therefore, understanding the differences, advantages, and drawbacks of both models is essential for an aspiring AI engineer.",
"links": []
},
"97eu-XxYUH9pYbD_KjAtA": {
"title": "Popular Open Source Models",
"description": "Open-source models consist of pre-made algorithms and mathematical models that are freely available for anyone to use, modify, and distribute. In the realm of Artificial Intelligence, these models often include frameworks for machine learning, deep learning, natural language processing, and other AI methodologies. Thanks to their openly accessible nature, AI engineers often utilize these open-source models during project execution, fostering increased efficiency by reducing the need to create complex models from scratch. They serve as a valuable resource, often speeding up the development phase and promoting collaboration among the global AI community. Popular examples include TensorFlow, PyTorch, and Keras, each offering unique strengths and capabilities for different areas of AI engineering.",
"links": []
},
"v99C5Bml2a6148LCJ9gy9": {
"title": "Hugging Face",
"description": "Hugging Face is the platform where the machine learning community collaborates on models, datasets, and applications.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Hugging Face",
"url": "https://huggingface.co/",
"type": "article"
}
]
},
"YLOdOvLXa5Fa7_mmuvKEi": {
"title": "Hugging Face Hub",
"description": "Hugging Face Hub is a platform where you can share, access and collaborate upon a wide array of machine learning models, primarily focused on Natural Language Processing (NLP) tasks. It is a central repository that facilitates storage and sharing of models, reducing the time and overhead usually associated with these tasks. For an AI Engineer, leveraging Hugging Face Hub can accelerate model development and deployment, effectively allowing them to work on structuring efficient AI solutions instead of worrying about model storage and accessibility issues.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Hugging Face",
"url": "https://huggingface.co/",
"type": "article"
}
]
},
"YKIPOiSj_FNtg0h8uaSMq": {
"title": "Hugging Face Tasks",
"description": "Hugging face has a section where they have a list of tasks with the popular models for that task.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Hugging Face",
"url": "https://huggingface.co/tasks",
"type": "article"
}
]
},
"3kRTzlLNBnXdTsAEXVu_M": {
"title": "Inference SDK",
"description": "Inference is the process of using a trained model to make predictions on new data. As this process can be compute-intensive, running on a dedicated server can be an interesting option. The huggingface\\_hub library provides an easy way to call a service that runs inference for hosted models. There are several services you can connect to:\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Hugging Face Inference Client",
"url": "https://huggingface.co/docs/huggingface_hub/en/package_reference/inference_client",
"type": "article"
},
{
"title": "Hugging Face Inference API",
"url": "https://huggingface.co/docs/api-inference/en/index",
"type": "article"
}
]
},
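As a sketch of the Inference Client mentioned above, the `huggingface_hub` package can call a hosted model in a few lines; the model ID is illustrative, and some models require a Hugging Face access token.

```python
# Call a hosted model through huggingface_hub's InferenceClient.
from huggingface_hub import InferenceClient

client = InferenceClient(model="mistralai/Mistral-7B-Instruct-v0.2")  # illustrative model ID

reply = client.text_generation(
    "Explain embeddings in one sentence.",
    max_new_tokens=60,
)
print(reply)
```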
"bGLrbpxKgENe2xS1eQtdh": {
"title": "Transformers.js",
"description": "Transformers.js is a JavaScript library providing the ability to use machine learning transformers in applications. It is based on the concept of attention mechanism - \"transformers\" in artificial intelligence, which allows the model to focus on different words in a sentence during translation or text generation tasks. The benefits of using such transformers are their capacity to handle long sequences of data and their parallelization abilities which offer faster computational time. In the AI Engineer's learning pathway, understanding and working with transformers can be pivotal as they form a fundamental part of natural language processing tasks, often used within AI solutions. With JavaScript being a language of choice for many web applications, having an understanding of Transformers.js provides an AI engineer with the knowledge necessary to integrate powerful language models directly within web-applications.",
"links": []
},
"rTT2UnvqFO3GH6ThPLEjO": {
"title": "Ollama",
"description": "Ollama is an open-source tool for running large language models (LLMs) locally on personal computers. It supports various models like Llama 2, Mistral, and Code Llama, bundling weights, configurations, and data into a single package. Ollama offers a user-friendly interface, API access, and integration capabilities, allowing users to leverage AI capabilities while maintaining data privacy and control. It's designed for easy installation and use on macOS and Linux, with Windows support in development.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Ollama",
"url": "https://ollama.com",
"type": "article"
}
]
},
"ro3vY_sp6xMQ-hfzO-rc1": {
"title": "Ollama Models",
"description": "Ollama supports a wide range of language models, including but not limited to Llama, Phi, Mistral, Gemma and more.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Ollama Models",
"url": "https://ollama.com/library",
"type": "article"
}
]
},
"TsG_I7FL-cOCSw8gvZH3r": {
"title": "Ollama SDK",
"description": "Ollama SDK can be used to develop applications locally.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Ollama SDK",
"url": "https://ollama.com",
"type": "article"
}
]
},
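A sketch using the official `ollama` Python package against a locally running Ollama server; it assumes the model has already been pulled (for example with `ollama pull llama3`).

```python
# Chat with a locally running model via the official ollama Python package.
import ollama

response = ollama.chat(
    model="llama3",  # must already be pulled locally
    messages=[{"role": "user", "content": "Why is the sky blue?"}],
)
print(response["message"]["content"])
```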
"--ig0Ume_BnXb9K2U7HJN": {
"title": "What are Embeddings",
"description": "Embeddings are a way of representing complex and high-dimensional data in a low-dimensional space, typically a vector. For example, words in a language can be represented as multi-dimensional vectors through word embedding techniques, such as Word2Vec or GloVe. These representations capture semantic relationships among words that machines can understand and process. In the roadmap of becoming an AI Engineer, handling and understanding embeddings is vital because they are essential to natural language processing, recommendation systems and any AI component that deals with complex data in a compact, meaningful manner.",
"links": []
},
"eMfcyBxnMY_l_5-8eg6sD": {
"title": "Semantic Search",
"description": "Semantic Search is an information retrieval approach which leverages not just the keywords in a search query, but also the intent and contextual meaning behind them to produce highly relevant results. In other words, it makes search engines more intelligent and precise, understanding user intent and making connections like a human brain would. It's an important technique that an AI Engineer might utilize, especially when dealing with large amounts of data or if they're involved in creating intelligent search systems. From natural language processing to relationship mapping, semantic search plays a key role in advancing artificial intelligence search capabilities.",
"links": []
},
"HQe9GKy3p0kTUPxojIfSF": {
"title": "Recommendation Systems",
"description": "Recommendation systems are a subclass of information filtering systems that are meant to predict the preferences or ratings that a user would give to a particular item. Broadly speaking, these systems are primarily used in applications where a user receives suggestions for the products or services they might be interested in, such as Netflix's movie recommendations or Amazon's product suggestions. In terms of an AI Engineer Roadmap, building recommendation systems is a fundamental skill, as these systems typically utilize concepts of machine learning and data mining, and their purpose primarily revolves around making predictions based on large volumes of data. These skills make an integral part of AI-related fields like natural language processing, robotics, and deep learning.",
"links": []
},
"AglWJ7gb9rTT2rMkstxtk": {
"title": "Anomaly Detection",
"description": "Embeddings transform complex data (like text or behavior) into numerical vectors, capturing relationships between data points. These vectors are stored in a vector database, which allows for efficient similarity searches. Anomalies can be detected by measuring the distance between a data point's vector and its nearest neighbors—if a point is significantly distant, it's likely anomalous. This approach is scalable, adaptable to various data types, and effective for tasks like fraud detection, predictive maintenance, and cybersecurity.",
"links": []
},
"06Xta-OqSci05nV2QMFdF": {
"title": "Data Classification",
"description": "Embeddings are used in data classification by converting data (like text or images) into numerical vectors that capture underlying patterns and relationships. These vector representations make it easier for machine learning models to distinguish between different classes based on the similarity or distance between vectors in high-dimensional space. By training a classifier on these embeddings, tasks like sentiment analysis, document categorization, and image classification can be performed more accurately and efficiently. Embeddings simplify complex data and enhance classification by highlighting key features relevant to each class.",
"links": []
},
"l6priWeJhbdUD5tJ7uHyG": {
"title": "Open AI Embeddings API",
"description": "Open AI Embeddings API is a powerful system that is used to generate high-quality word and sentence embeddings. With this API, it becomes a breeze to convert textual data into a numerical format that Machine Learning models can process. This conversion of text into numerical data is crucial for Natural Language Processing (NLP) tasks that an AI Engineer often encounters. Understanding and harnessing the capabilities of the Open AI Embeddings API, therefore, forms an essential part of the AI Engineer's roadmap.",
"links": []
},
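A minimal sketch of generating an embedding with the `openai` Python package; the model name is illustrative, and the vector length depends on the chosen model.

```python
# Generate a text embedding to store in a vector database or compare directly.
from openai import OpenAI

client = OpenAI()

resp = client.embeddings.create(
    model="text-embedding-3-small",
    input="Vector databases store embeddings for similarity search.",
)
vector = resp.data[0].embedding
print(len(vector), "dimensions")
```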
"y0qD5Kb4Pf-ymIwW-tvhX": {
"title": "Open AI Embedding Models",
"description": "Open AI embedding models refer to the artificial intelligence variants designed to reformat or transcribe input data into compact, dense numerical vectors. These models simplify and reduce the input data from its original complex nature, creating a digital representation that is easier to manipulate. This data reduction technique is critical in the AI Engineer Roadmap because it paves the way for natural language processing tasks. It helps in making precise predictions, clustering similar data, and producing accurate search results based on contextual relevance.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Open AI Embedding Models",
"url": "https://platform.openai.com/docs/guides/embeddings",
"type": "article"
}
]
},
"4GArjDYipit4SLqKZAWDf": {
"title": "Pricing Considerations",
"description": "OpenAI Embeddings API allows users to compute and extract textual embeddings from large-scale models that OpenAI trains. The pricing for this API can vary based on multiple factors like the number of requests, number of tokens in the text, total computation time, throughput, and others. Understanding the pricing model for the OpenAI Embeddings API is vital for AI Engineers to effectively manage costs while using the API. They should be aware of any limitations or additional costs associated with high volume requests, speed of processing, or special features they plan to use. This knowledge helps the engineers to optimize costs, which is important for the budgeting of AI projects and the overall roadmap of an AI Engineer.",
"links": []
},
"apVYIV4EyejPft25oAvdI": {
"title": "Open-Source Embeddings",
"description": "Open-source embeddings, such as Word2Vec, GloVe, and FastText, are essentially vector representations of words or phrases. These representations capture the semantic relationships between words and their surrounding context in a multi-dimensional space, making it easier for machine learning models to understand and process textual data. In the AI Engineer Roadmap, gaining knowledge of open-source embeddings is critical. These embeddings serve as a foundation for natural language processing tasks, ranging from sentiment analysis to chatbot development, and are widely used in the AI field for their ability to enhance the performance of machine learning models dealing with text data.",
"links": []
},
"ZV_V6sqOnRodgaw4mzokC": {
"title": "Sentence Transformers",
"description": "Sentence Transformers refer to a variant of the popular Transformers model that is specifically designed and optimized for creating meaningful and effective sentence embeddings. It enables developers to easily convert paragraphs and sentences into dense vector representations that can be compared for semantic similarity. In the AI engineer's journey, getting familiar with Sentence Transformers is important because it allows the modelling of natural language in AI systems to provide richer, more nuanced interactions. This can be especially valuable in designing and implementing AI applications such as chatbots, sentiment analysis tools, and more.",
"links": []
},
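A sketch using the `sentence-transformers` library to embed two sentences and compare them with cosine similarity; `all-MiniLM-L6-v2` is a commonly used small model, chosen here only for illustration.

```python
# Embed sentences locally and compare them with cosine similarity.
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("all-MiniLM-L6-v2")
embeddings = model.encode([
    "How do I reset my password?",
    "I forgot my login credentials.",
])

score = util.cos_sim(embeddings[0], embeddings[1])
print(float(score))  # closer to 1.0 means more semantically similar
```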
"dLEg4IA3F5jgc44Bst9if": {
"title": "Models on Hugging Face",
"description": "Hugging Face has a wide range of pre-trained models that can be used for a variety of tasks, including language understanding and generation, translation, chatbots, and more. Anyone can create an account and use their models, and the models are organized by task, provider, and other criteria.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Hugging Face",
"url": "https://huggingface.co/models",
"type": "article"
}
]
},
"tt9u3oFlsjEMfPyojuqpc": {
"title": "Vector Databases",
"description": "Vector databases are a type of database system specifically designed to handle vector space model data, typically used for high-dimensional data sets. With the explosion of data in AI applications, these databases have become an integral part of the infrastructure, providing an efficient and scalable way to manage and query large volumes of vector data. For AI engineers, understanding how to use and optimize vector databases can significantly improve the performance of AI models which use vector-based data, such as natural language processing (NLP) and image recognition models. Proficiency in vector databases is hence a crucial skill in the AI Engineer Roadmap.",
"links": []
},
"WcjX6p-V-Rdd77EL8Ega9": {
"title": "Purpose and Functionality",
"description": "The Purpose and Functionality are fundamental concepts in the AI Engineer Roadmap. To put simply, 'Purpose' refers to the intended goal or desired result that an AI engineer wants to achieve in an AI project. These goals can range from building neural networks to creating self-driving cars. 'Functionality', on the other hand, pertains to the behaviors and actions that an AI program can perform to fulfill its purpose. This could involve machine learning algorithms, language processing techniques, or data analysis methods among others. Understanding the purpose and functionality of an AI project allows an AI engineer to strategically plan, develop, and manage AI systems effectively.",
"links": []
},
"dSd2C9lNl-ymmCRT9_ZC3": {
"title": "Chroma",
"description": "Chroma is a vector database designed to efficiently store, index, and query high-dimensional embeddings. It’s optimized for AI applications like semantic search, recommendation systems, and anomaly detection by allowing fast retrieval of similar vectors based on distance metrics (e.g., cosine similarity). Chroma enables scalable and real-time processing, making it a popular choice for projects involving embeddings from text, images, or other data types.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Chroma Website",
"url": "https://docs.trychroma.com/",
"type": "article"
}
]
},
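A sketch of indexing and querying a couple of documents with the `chromadb` client; the collection name and documents are placeholders, and by default Chroma embeds documents with a built-in embedding function.

```python
# Index two documents in an in-memory Chroma collection and query by meaning.
import chromadb

client = chromadb.Client()  # in-memory; use PersistentClient(path=...) to persist
collection = client.create_collection("docs")

collection.add(
    ids=["1", "2"],
    documents=[
        "Ollama runs large language models locally.",
        "Chroma is a vector database for embeddings.",
    ],
)

results = collection.query(query_texts=["local language models"], n_results=1)
print(results["documents"][0])
```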
"_Cf7S1DCvX7p1_3-tP3C3": {
"title": "Pinecone",
"description": "Pinecone is a vector database designed specifically for machine learning applications. It facilitates the process of transforming data into a vector and indexing it for quick retrieval. As a cloud-based service, it allows AI Engineers to easily handle high-dimensional data and utilize it for building models. As part of an AI Engineer's Roadmap, understanding and using vector databases like Pinecone can help streamline the development and deployment of AI and ML applications. This is particularly useful in building recommendation systems, personalized search and similarity search which are important components of an AI-based service.",
"links": []
},
"VgUnrZGKVjAAO4n_llq5-": {
"title": "Weaviate",
"description": "Weaviate is an open-source, GraphQL and RESTful API-based, knowledge graph that allows you to store, search, and retrieve data. One of its core features is machine learning algorithms that enhance information handling. For an AI Engineer, mastering Weaviate becomes relevant as it bridges the gap between unstructured data and structured data, which is a common challenge when working with AI and machine learning models. By understanding this, an AI engineer can leverage these abilities to manipulate structured data more effectively, optimize data searchability, and improve the efficiency of data-dependent processes in AI projects.",
"links": []
},
"JurLbOO1Z8r6C3yUqRNwf": {
"title": "FAISS",
"description": "FAISS stands for Facebook AI Similarity Search, it is a database management library developed by Facebook's AI team. Primarily used for efficient similarity search and clustering of dense vectors, it allows users to search through billions of feature vectors swiftly and efficiently. As an AI engineer, learning FAISS is beneficial because these vectors represent objects that are typically used in machine learning or AI applications. For instance, in an image recognition task, a dense vector might be a list of pixels from an image, and FAISS allows a quick search of similar images in a large database.",
"links": []
},
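A sketch of exact nearest-neighbour search with FAISS over random vectors; in practice the vectors would be embeddings produced by a model, and larger collections would typically use an approximate index rather than `IndexFlatL2`.

```python
# Exact L2 nearest-neighbour search with FAISS over random demo vectors.
import faiss
import numpy as np

dim = 128
index = faiss.IndexFlatL2(dim)

vectors = np.random.random((1000, dim)).astype("float32")
index.add(vectors)

query = np.random.random((1, dim)).astype("float32")
distances, ids = index.search(query, k=5)
print(ids[0])  # indices of the 5 closest stored vectors
```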
"rjaCNT3Li45kwu2gXckke": {
"title": "LanceDB",
"description": "LanceDB is a relatively new, multithreaded, high-speed data warehouse optimized for AI and machine learning data processing. It's designed to handle massive amounts of data, enables quick storage and retrieval, and supports lossless data compression. For an AI engineer, learning LanceDB could be beneficial as it can be integrated with machine learning frameworks for collecting, processing and analyzing large datasets. These functionalities can help to streamline the process for AI model training, which requires extensive data testing and validation.",
"links": []
},
"DwOAL5mOBgBiw-EQpAzQl": {
"title": "Qdrant",
"description": "Qdrant is a high-performance vector similarity search engine with extensive restful API and distributed support, written in Rust. It allows efficiently storing, handling, and retrieving large amounts of vector data. Integrating Qdrant as a part of the AI Engineer's toolkit can drastically improve functionality and efficiency for AI Engineers, as they often work with vectors during data preprocessing, feature extraction, and modeling. Qdrant's flexibility and control over data indexing and query processing make it particularly handy when dealing with large datasets prevalent in AI projects.",
"links": []
},
"9kT7EEQsbeD2WDdN9ADx7": {
"title": "Supabase",
"description": "Supabase is an open-source Firebase alternative that offers a suite of tools for database management, realtime subscriptions, and automating tasks. As an AI Engineer, you'll often have to manage and work with data to develop and test AI models. With Supabase, you can build databases more efficiently and interact with your data in real-time. It also supports user authentication and provides serverless functions which can be crucial in AI development workflows.",
"links": []
},
"j6bkm0VUgLkHdMDDJFiMC": {
"title": "MongoDB Atlas",
"description": "MongoDB Atlas is a fully managed cloud-based NoSQL database service by MongoDB. It simplifies database deployment and management across platforms like AWS, Azure, and Google Cloud. Using a flexible document model, Atlas automates tasks such as scaling, backups, and security, allowing developers to focus on building applications. With features like real-time analytics and global clusters, it offers a powerful solution for scalable and resilient app development.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "MongoDB Atlas Vector Search",
"url": "https://www.mongodb.com/products/platform/atlas-vector-search",
"type": "article"
}
]
},
"5TQnO9B4_LTHwqjI7iHB1": {
"title": "Indexing Embeddings",
"description": "This step involves converting data (such as text, images, or other content) into numerical vectors (embeddings) using a pre-trained model. These embeddings capture the semantic relationships between data points. Once generated, the embeddings are stored in a vector database, which organizes them in a way that enables efficient retrieval based on similarity. This indexed structure allows fast querying and comparison of vectors, facilitating tasks like semantic search, recommendation systems, and anomaly detection.",
"links": []
},
"ZcbRPtgaptqKqWBgRrEBU": {
"title": "Performing Similarity Search",
"description": "This step involves querying the vector database to find the most similar embeddings to a given input vector. When a query is made, the system computes the distance between the input vector and stored embeddings using metrics like cosine similarity or Euclidean distance. The closest matches—those with the smallest distances—are retrieved as results, allowing for accurate semantic search, recommendations, or content retrieval based on similarity in the embedded space. This process enables highly efficient and relevant searches across large datasets.",
"links": []
},
"lVhWhZGR558O-ljHobxIi": {
"title": "RAG & Implementation",
"description": "RAG (Relation and Graph) is a mechanism used in artificial intelligence that represents the structured relationships existing between different data entities. Programming languages such as Python provide libraries for RAG implementation, making it simpler for AI engineers. In the AI Engineer roadmap, understanding and implementing RAG models can prove beneficial especially while working with AI algorithms that extensively deal with relational and structured data, such as graph-based Deep Learning algorithms, or while creating knowledge graphs in contexts like Natural Language Processing (NLP). Implementing RAG efficiently can lead to more accurate, efficient, and interpretable AI models.",
"links": []
},
"GCn4LGNEtPI0NWYAZCRE-": {
"title": "RAG Usecases",
"description": "Retrieval-Augmented Generation (RAG) is a type of sequence-to-sequence model with documents retrievers in their architecture. This method integrates the power of pre-trained language models and extractive question answering methods to answer any queries with high precision. In the AI Engineer Roadmap, this tool has practical applications, such as enabling machines to provide detailed responses based on large-scale databases instead of generating responses only from a fixed context. This feature is highly beneficial in developing advanced AI systems with extensive knowledge recall capabilities. RAG's use-cases cover areas like customer service chatbots, automated legal assistance, healthcare advice systems, and other areas where comprehensive information retrieval is crucial.",
"links": []
},
"qlBEXrbV88e_wAGRwO9hW": {
"title": "RAG vs Fine-tuning",
"description": "RAG (Retrieval-Augmented Generation) and Fine-tuning are two distinct techniques utilized in Natural Language Processing (NLP). RAG introduces an approach where the model retrieves documents and faqs from a database to enhance the content generation process. It enables more factual accuracy and relevant context in the outputs. On the other hand, Fine-tuning involves modifying a pre-trained Neural Network model on a new or different task. Adjustments are made to the model's parameters to enhance performance on the new task. Typically, an AI engineer might use RAG for tasks requiring contextual understanding and factual accuracy, while implementing fine-tuning techniques to leverage existing pre-trained models for optimizing new tasks and projects.",
"links": []
},
"mX987wiZF7p3V_gExrPeX": {
"title": "Chunking",
"description": "In Retrieval-Augmented Generation (RAG), **chunking** refers to breaking large documents or data into smaller, manageable pieces (chunks) to improve retrieval and generation efficiency. This process helps the system retrieve relevant information more accurately by indexing these chunks in a vector database. During a query, the model retrieves relevant chunks instead of entire documents, which enhances the precision of the generated responses and allows better handling of long-form content within the context length limits.",
"links": []
},
"grTcbzT7jKk_sIUwOTZTD": {
"title": "Embedding",
"description": "Embedding refers to the conversion or mapping of discrete objects such as words, phrases, or even entire sentences into vectors of real numbers. It's an essential part of data preprocessing where high-dimensional data is transformed into a lower-dimensional equivalent. This dimensional reduction helps to preserve the semantic relationships between objects. In AI engineering, embedding techniques are often used in language-orientated tasks like sentiment analysis, text classification, and Natural Language Processing (NLP) to provide an understanding of the vast linguistic inputs AI models receive.",
"links": []
},
"zZA1FBhf1y4kCoUZ-hM4H": {
"title": "Vector Database",
"description": "A Vector Database is a tool that specializes in storing and efficiently retrieving vector representations (or embeddings). These vectors often represent embeddings of items or entities in high-dimensional space. This indexation process enables search and clustering algorithms. In this step of implementing RAG, we use a Vector Database to store the embeddings of the documents.",
"links": []
},
"OCGCzHQM2LQyUWmiqe6E0": {
"title": "Retrieval Process",
"description": "In this step of implementing RAG, we clean up the user's query by removing any extra information, we then generate an embedding for the query and look for the most similar embeddings in the vector database.",
"links": []
},
"2jJnS9vRYhaS69d6OxrMh": {
"title": "Generation",
"description": "In this step of implementing RAG, we use the found chunks to generate a response to the user's query using an LLM.",
"links": []
},
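Tying the chunking, embedding, retrieval, and generation steps above together, here is a hedged end-to-end RAG sketch using `chromadb` for retrieval and the `openai` package for generation; the documents, collection name, and model name are all placeholders.

```python
# Minimal RAG sketch: index chunks, retrieve the closest ones, generate an answer.
import chromadb
from openai import OpenAI

openai_client = OpenAI()
chroma = chromadb.Client()
collection = chroma.create_collection("kb")

# Index: store document chunks (Chroma embeds them with its default model).
collection.add(
    ids=["1", "2"],
    documents=[
        "Our refund policy allows returns within 30 days of purchase.",
        "Support is available Monday to Friday, 9am to 5pm.",
    ],
)

# Retrieve: find the chunks most similar to the user's question.
question = "Can I return a product after two weeks?"
retrieved = collection.query(query_texts=[question], n_results=2)
context = "\n".join(retrieved["documents"][0])

# Generate: answer using only the retrieved context.
answer = openai_client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[
        {"role": "system", "content": f"Answer using only this context:\n{context}"},
        {"role": "user", "content": question},
    ],
)
print(answer.choices[0].message.content)
```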
"WZVW8FQu6LyspSKm1C_sl": {
"title": "Using SDKs Directly",
"description": "Software Development Kits, often referred to as SDKs, are a collection of development tools bundled together. These tools assist in creating innovative applications for specific software frameworks or hardware platforms. In the AI Engineer Roadmap, using SDKs directly implies that AI engineers leverage these kits to interact directly with AI-related services or platforms. This approach provides a lower level control, allowing engineers to customize applications according to their unique requirements. Therefore, acquiring skill in using SDKs directly forms an instrumental part of the AI Engineer Roadmap, enabling practitioners to build and enhance AI applications effectively and efficiently.",
"links": []
},
"ebXXEhNRROjbbof-Gym4p": {
"title": "Langchain",
"description": "LangChain is a software framework that helps facilitate the integration of large language models into applications. As a language model integration framework, LangChain's use-cases largely overlap with those of language models in general, including document analysis and summarization, chatbots, and code analysis.",
"links": []
},
"d0ontCII8KI8wfP-8Y45R": {
"title": "Llama Index",
"description": "LlamaIndex is a simple, flexible data framework for connecting custom data sources to large language models.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "LlamaIndex Official Website",
"url": "https://llamaindex.ai/",
"type": "article"
}
]
},
"eOqCBgBTKM8CmY3nsWjre": {
"title": "Open AI Assistant API",
"description": "OpenAI Assistant API is a tool provided by OpenAI that allows developers to integrate the same AI used in ChatGPT into their own applications, products or services. This AI conducts dynamic, interactive and context-aware conversations useful for building AI assistants in various applications. In the AI Engineer Roadmap, mastering the use of APIs like the Open AI Assistant API is a crucial skill, as it allows engineers to harness the power and versatility of pre-trained algorithms and use them for their desired tasks. AI Engineers can offload the intricacies of model training and maintenance, focusing more on product development and innovation.",
"links": []
},
"c0RPhpD00VIUgF4HJgN2T": {
"title": "Replicate",
"description": "Replicate is a version-control tool specifically designed for machine learning. It enables effective tracking of experiments, facilitating the comparison of different models and parameters. As an AI Engineer, knowing how to use Replicate provides you with the ability to save versions of data and model files, thereby preventing loss of work and confusion. It also contributes to smoother teamwork and collaborations by allowing effective sharing and reproduction of experiments, which is crucial in an AI project life cycle.",
"links": []
},
"AeHkNU-uJ_gBdo5-xdpEu": {
"title": "AI Agents",
"description": "AI Agents are a type of LLM that can be used to automate complex workflows that involve multiple steps and decisions.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "What are AI Agents?",
"url": "https://aws.amazon.com/what-is/ai-agents/",
"type": "article"
},
{
"title": "What are AI Agents?",
"url": "https://www.youtube.com/watch?v=F8NKVhkZZWI",
"type": "video"
}
]
},
"778HsQzTuJ_3c9OSn5DmH": {
"title": "Agents Usecases",
"description": "AI Agents allow you to automate complex workflows that involve multiple steps and decisions.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "What are AI Agents?",
"url": "https://aws.amazon.com/what-is/ai-agents/",
"type": "article"
},
{
"title": "What are AI Agents?",
"url": "https://www.youtube.com/watch?v=F8NKVhkZZWI",
"type": "video"
}
]
},
"voDKcKvXtyLzeZdx2g3Qn": {
"title": "ReAct Prompting",
"description": "ReAct prompting is a tactical approach employed in Conversational AI to generate textual responses. It is essentially utilized in scenarios where a chatbot or an AI-generated persona is required to carry on a conversation. This strategy adds a layer of intelligence to the conversation, maneuvering the AI to generate responses that are contextually sensitive and relevant. In an AI Engineer's Roadmap, an understanding of React Prompting becomes significant during the design of AI interaction models. Using this technique, AI Engineers are capable of creating more intuitive, engaging, and user-friendly conversational AI agents.",
"links": []
},
"6xaRB34_g0HGt-y1dGYXR": {
"title": "Manual Implementation",
"description": "You can build the AI agents manually by coding the logic from scratch without using any frameworks or libraries. For example, you can use the OpenAI API and write the looping logic yourself to keep the agent running until it has the answer.",
"links": []
},
"Sm0Ne5Nx72hcZCdAcC0C2": {
"title": "OpenAI Functions / Tools",
"description": "OpenAI, a leading organization in the field of artificial intelligence, provides a suite of functions and tools to enable developers and AI engineers to design, test, and deploy AI models. These tools include robust APIs for tasks like natural language processing, vision, and reinforcement learning, and platforms like GPT-3, CLIP, and Codex that provide pre-trained models. Utilization of these OpenAI components allows AI engineers to get a head-start in application development, simplifying the process of integration and reducing the time required for model training and tuning. Understanding and being adept at these tools forms a crucial part of the AI Engineer's roadmap to build impactful AI-driven applications.",
"links": []
},
"mbp2NoL-VZ5hZIIblNBXt": {
"title": "OpenAI Assistant API",
"description": "OpenAI Assistant API is a tool developed by OpenAI which allows developers to establish interaction between their applications, products or services and state-of-the-art AI models. By integrating this API in their software architecture, artificial intelligence engineers can leverage the power of advanced language models developed by the OpenAI community. These integrated models can accomplish a multitude of tasks, like writing emails, generating code, answering questions, tutoring in different subjects and even creating conversational agents. For an AI engineer, mastery over such APIs means they can deploy and control highly sophisticated AI models with just a few lines of code.",
"links": []
},
"W7cKPt_UxcUgwp8J6hS4p": {
"title": "Multimodal AI",
"description": "Multimodal AI refers to artificial intelligence systems capable of processing and integrating multiple types of data inputs simultaneously, such as text, images, audio, and video. Unlike traditional AI models that focus on a single data type, multimodal AI combines various inputs to achieve a more comprehensive understanding and generate more robust outputs. This approach mimics human cognition, which naturally integrates information from multiple senses to form a complete perception of the world. By leveraging diverse data sources, multimodal AI can perform complex tasks like image captioning, visual question answering, and cross-modal content generation.",
"links": []
},
"sGR9qcro68KrzM8qWxcH8": {
"title": "Multimodal AI Usecases",
"description": "Multimodal AI integrates various data types for diverse applications. In human-computer interaction, it enhances interfaces using speech, gestures, and facial expressions. In healthcare, it combines medical scans and records for accurate diagnoses. For autonomous vehicles, it processes data from sensors for real-time navigation. Additionally, it generates images from text and summarizes videos in content creation, while also analyzing satellite and sensor data for climate insights.",
"links": []
},
"fzVq4hGoa2gdbIzoyY1Zp": {
"title": "Image Understanding",
"description": "Image Understanding involves extracting meaningful information from images, such as photos or videos. This process includes tasks like image recognition, where an AI system is trained to recognize certain objects within an image, and image segmentation, where an image is divided into multiple regions according to some criteria. For an AI engineer, mastering techniques in Image Understanding is crucial because it forms the basis for more complex tasks such as object detection, facial recognition, or even whole scene understanding, all of which play significant roles in various AI applications. As AI technologies continue evolving, the ability to analyze and interpret visual data becomes increasingly important in fields ranging from healthcare to autonomous vehicles.",
"links": []
},
"49BWxYVFpIgZCCqsikH7l": {
"title": "Image Generation",
"description": "Image Generation often refers to the process of creating new images from an existing dataset or completely from scratch. For an AI Engineer, understanding image generation is crucial as it is one of the key aspects of machine learning and deep learning related to computer vision. It often involves techniques like convolutional neural networks (CNN), generative adversarial networks (GANs), and autoencoders. These technologies are used to generate artificial images that closely resemble original input, and can be applied in various fields such as healthcare, entertainment, security and more.",
"links": []
},
"TxaZCtTCTUfwCxAJ2pmND": {
"title": "Video Understanding",
"description": "Video Understanding is the process of analyzing videos to comprehend its content and context. Leveraging Machine Learning and AI technologies, this branch is responsible for extracting valuable information from video data. In the AI Engineer's Roadmap, video understanding comes into play when building AI models that can interpret video inputs. These engines need to recognize patterns and actions within the video, can track object's movements, and may also infer the future actions from the video stream. Training an AI model in video understanding requires knowledge of convolutional neural networks (CNN), recurring neural networks (RNN), and preferably some experience with Long Short-Term Memory (LSTM) networks.",
"links": []
},
"mxQYB820447DC6kogyZIL": {
"title": "Audio Processing",
"description": "Using Multimodal AI, audio data can be processed with other types of data, such as text, images, or video, to enhance understanding and analysis. For example, it can synchronize audio with corresponding visual inputs, like lip movements in video, to improve speech recognition or emotion detection. This fusion of modalities enables more accurate transcription, better sentiment analysis, and enriched context understanding in applications such as virtual assistants, multimedia content analysis, and real-time communication systems.",
"links": []
},
"GCERpLz5BcRtWPpv-asUz": {
"title": "Text-to-Speech",
"description": "Text-to-Speech (TTS) is a type of assistive technology that reads digital text out loud. It is a technology widely used in various programming fields, including AI Engineering. Traditionally, TTS has been employed in accessibility applications, but with the advent of AI, it's now being used to develop voice assistants, audio book narrators, and many more intelligent applications. For AI engineers, knowledge of TTS techniques opens up new possibilities for user interaction and can form an essential part of any AI application which interacts with its users primarily through spoken language.",
"links": []
},
"jQX10XKd_QM5wdQweEkVJ": {
"title": "Speech-to-Text",
"description": "Speech-to-Text is a type of technology that converts spoken language into written text. This technology is often incorporated in virtual assistants, transcription services and many other applications where transforming voice into text can facilitate better user interaction or communication. For an AI engineer, this falls under the wider ambit of Natural Language Processing (NLP), making it an important skill to understand and comprehend. The ability to design and implement speech-to-text models can allow AI engineers to create more interactive and adaptive machine learning systems, improve accessibility, and expand the scope of potential AI applications.",
"links": []
},
"CRrqa-dBw1LlOwVbrZhjK": {
"title": "OpenAI Vision API",
"description": "OpenAI Vision API is an API provided by OpenAI that is designed to analyze and generate insights from images. By feeding it an image, the Vision API can provide information about the objects and activities present in the image. For AI Engineers, this tool can be particularly useful for conducting Computer Vision tasks effortlessly. Using this API can support in creating applications that need image recognition, object detection and similar functionality, saving AI Engineers from having to create complex image processing algorithms from scratch. Understanding how to work with APIs, especially ones as advanced as the OpenAI Vision API, is an essential skill in the AI Engineer's roadmap.",
"links": []
},
"LKFwwjtcawJ4Z12X102Cb": {
"title": "DALL-E API",
"description": "The DALL-E API allows developers to integrate OpenAI's image generation model into their applications. Using text-based prompts, the API generates unique images that match the descriptions provided by users. This makes it useful for tasks like creative design, marketing, product prototyping, and content creation. The API is highly customizable, enabling developers to adjust parameters such as image size and style. DALL-E excels at creating visually rich content from textual descriptions, expanding the possibilities for AI-driven creative workflows.",
"links": []
},
"OTBd6cPUayKaAM-fLWdSt": {
"title": "Whisper API",
"description": "Whisper API is an interface primarily used for interacting with OpenAI's Whisper ASR system. It's a system designed to convert spoken language into written text, a technique that is commonly known as Automatic Speech Recognition (ASR). As an AI engineer, understanding and using Whisper API in the roadmap is key as it fuses with several other machine learning principles to improve an application's ability to understand and transcribe spoken language, which is becoming increasingly significant in domains like virtual assistants, transcription services, voice biometrics and more.",
"links": []
},
"EIDbwbdolR_qsNKVDla6V": {
"title": "Hugging Face Models",
"description": "Hugging Face has a wide range of pre-trained models that can be used for a variety of tasks, including language understanding and generation, translation, chatbots, and more. Anyone can create an account and use their models, and the models are organized by task, provider, and other criteria.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Hugging Face",
"url": "https://huggingface.co/models",
"type": "article"
}
]
},
"j9zD3pHysB1CBhLfLjhpD": {
"title": "LangChain for Multimodal Apps",
"description": "LangChain is a software framework that helps facilitate the integration of large language models into applications. As a language model integration framework, LangChain's use-cases largely overlap with those of language models in general, including document analysis and summarization, chatbots, and code analysis.",
"links": []
},
"akQTCKuPRRelj2GORqvsh": {
"title": "LlamaIndex for Multimodal Apps",
"description": "LlamaIndex is a simple, flexible data framework for connecting custom data sources to large language models.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "LlamaIndex Official Website",
"url": "https://llamaindex.ai/",
"type": "article"
}
]
},
"NYge7PNtfI-y6QWefXJ4d": {
"title": "Development Tools",
"description": "A lot of developer related tools have popped up since the AI revolution. It's being used in the coding editors, in the terminal, in the CI/CD pipelines, and more.",
"links": []
},
"XcKeQfpTA5ITgdX51I4y-": {
"title": "AI Code Editors",
"description": "AI code editors have the first-class support for AI in the editor. You can use AI to generate code, fix bugs, chat with your code, and more.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Cursor",
"url": "https://cursor.com/",
"type": "website"
},
{
"title": "Zed AI",
"url": "https://zed.dev/ai",
"type": "website"
}
]
},
"TifVhqFm1zXNssA8QR3SM": {
"title": "Code Completion Tools",
"description": "AI Code Completion Tools are software tools that use AI models to assist with code generation and editing. These tools help developers write code more quickly and efficiently by providing suggestions, completing code snippets, and suggesting improvements. AI Code Completion Tools can also be used to generate documentation, comments, and other code-related content.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "GitHub Copilot",
"url": "https://copilot.github.com/",
"type": "website"
},
{
"title": "Codeium",
"url": "https://codeium.com/",
"type": "website"
},
{
"title": "Supermaven",
"url": "https://supermaven.com/",
"type": "website"
},
{
"title": "TabNine",
"url": "https://www.tabnine.com/",
"type": "website"
}
]
}
}

@ -3786,7 +3786,7 @@
},
"7r7o8pYhFHVAJIv0wNT6X": {
"title": "Hashing / Encryption / Encoding",
"description": "Hashing is a one-way function that takes an input and produces a fixed-length output. The output is called a hash. The hash is a unique representation of the input. The hash is deterministic, meaning that the same input will always produce the same hash. The hash is irreversible, meaning that it is impossible to go from the hash back to the original input. The hash is collision-resistant, meaning that it is impossible to find two different inputs that produce the same hash.\n\nEncryption is a two-way function that takes an input and produces an output. The output is called ciphertext. The ciphertext is a unique representation of the input. The ciphertext is deterministic, meaning that the same input will always produce the same ciphertext. The ciphertext is reversible, meaning that it is possible to go from the ciphertext back to the original input. The ciphertext is collision-resistant, meaning that it is impossible to find two different inputs that produce the same ciphertext.\n\nEncoding is a two-way function that takes an input and produces an output. The output is called encoded text. The encoded text is a unique representation of the input. The encoded text is deterministic, meaning that the same input will always produce the same encoded text. The encoded text is reversible, meaning that it is possible to go from the encoded text back to the original input. The encoded text is not collision-resistant, meaning that it is possible to find two different inputs that produce the same encoded text.\n\nVisit the following resources to learn more:",
"description": "Hashing is a one-way function that takes an input and produces a fixed-length output. The output is called a hash. The hash is a unique representation of the input. The hash is deterministic, meaning that the same input will always produce the same hash. The hash is irreversible, meaning that it is impossible to go from the hash back to the original input. The hash is not collision-resistant, meaning that it is possible to find two different inputs that produce the same hash.\n\nEncryption is a two-way function that takes an input and produces an output. The output is called ciphertext. The ciphertext is a unique representation of the input. The ciphertext is deterministic, meaning that the same input will always produce the same ciphertext. The ciphertext is reversible, meaning that it is possible to go from the ciphertext back to the original input. The ciphertext is collision-resistant, meaning that it is impossible to find two different inputs that produce the same ciphertext.\n\nEncoding is a two-way function that takes an input and produces an output. The output is called encoded text. The encoded text is a unique representation of the input. The encoded text is deterministic, meaning that the same input will always produce the same encoded text. The encoded text is reversible, meaning that it is possible to go from the encoded text back to the original input. The encoded text is collision-resistant, meaning that it is impossible to find two different inputs that produce the same encoded text.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Explore top posts about Encryption",

@ -117,6 +117,11 @@
"title": "Rust",
"description": "Rust is a systems programming language that focuses on safety, concurrency, and performance. Developed by Mozilla Research, Rust combines low-level control over system resources with high-level abstractions, preventing common programming errors like null or dangling pointer references at compile-time. It features a borrow checker for managing memory and preventing data races, making it ideal for building reliable and efficient software. Rust's zero-cost abstractions, pattern matching, and trait-based generics offer powerful tools for expressing complex ideas clearly. While primarily used for systems programming, web assembly, and game development, Rust's growing ecosystem supports various domains. Its emphasis on memory safety without sacrificing performance makes it increasingly popular for developing secure, high-performance applications in fields ranging from operating systems to web services.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Rust by Practice - Interactive Rust Course",
"url": "https://practice.course.rs/",
"type": "course"
},
{
"title": "The Rust Programming Language - online book",
"url": "https://doc.rust-lang.org/book/",
@ -189,9 +194,14 @@
"title": "Operating System",
"description": "Operating systems (OS) are fundamental software that manage computer hardware and software resources, providing common services for computer programs. They act as an intermediary between applications and hardware, handling tasks like memory management, process scheduling, file system management, and device control. Common desktop operating systems include Microsoft Windows, macOS, and various Linux distributions. Mobile devices typically run iOS or Android. Server environments often use Linux distributions like Ubuntu Server, Red Hat Enterprise Linux, or Windows Server. Each OS type offers distinct features, user interfaces, and compatibility with different software and hardware. Operating systems play a crucial role in system security, performance optimization, and providing a consistent user experience across diverse computing devices and environments.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Operating Systems - Wiki",
"url": "https://en.wikipedia.org/wiki/Operating_system",
"type": "article"
},
{
"title": "All you need to know about OS.",
"url": "https://www.javatpoint.com/os-tutorial",
"url": "https://www.javatpoint.com/operating-system",
"type": "article"
},
{
@ -205,7 +215,7 @@
"type": "video"
},
{
"title": "Operating Systems!",
"title": "Operating Systems",
"url": "https://www.youtube.com/watch?v=vBURTt97EkA&list=PLBlnK6fEyqRiVhbXDGLXDk_OQAeuVcp2O",
"type": "video"
}
@ -636,6 +646,11 @@
"title": "Git",
"description": "Git is a distributed version control system designed to track changes in source code during software development. It allows multiple developers to work on the same project simultaneously, maintaining a complete history of modifications. Git features local repositories on each developer's machine, enabling offline work and fast operations. It supports non-linear development through branching and merging, facilitating parallel work streams. Git's distributed nature enhances collaboration, backup, and experimentation. Key concepts include commits, branches, merges, and remote repositories. With its speed, flexibility, and robust branching and merging capabilities, Git has become the standard for version control in modern software development, powering platforms like GitHub and GitLab.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Git by Example - Learn Version Control with Bite-sized Lessons",
"url": "https://antonz.org/git-by-example/",
"type": "course"
},
{
"title": "Learn Git & GitHub",
"url": "https://roadmap.sh/git-github",

@ -1,13 +1,29 @@
{
"VlNNwIEDWqQXtqkHWJYzC": {
"title": "Internet",
"description": "The Internet is a global network of interconnected computer networks that use the Internet Protocol Suite (TCP/IP) to communicate. It enables the exchange of data, information, and services across the world, connecting billions of devices and users. The Internet has revolutionized communication, commerce, education, and entertainment, becoming an integral part of modern society. It supports various applications and services, from web browsing and instant messaging to streaming media and online gaming. While offering unprecedented access to information and connectivity, the Internet also raises concerns about privacy, security, and digital divide issues.",
"links": []
"description": "The Internet is a global network of interconnected computer networks that use the Internet Protocol Suite (TCP/IP) to communicate. It enables the exchange of data, information, and services across the world, connecting billions of devices and users. The Internet has revolutionized communication, commerce, education, and entertainment, becoming an integral part of modern society. It supports various applications and services, from web browsing and instant messaging to streaming media and online gaming. While offering unprecedented access to information and connectivity, the Internet also raises concerns about privacy, security, and digital divide issues.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Introduction to Internet",
"url": "https://roadmap.sh/guides/what-is-internet",
"type": "article"
},
{
"title": "The Internet",
"url": "https://en.wikipedia.org/wiki/Internet",
"type": "article"
}
]
},
"yCnn-NfSxIybUQ2iTuUGq": {
"title": "How does the internet work?",
"description": "The Internet works through a global network of interconnected computers and servers, communicating via standardized protocols. Data is broken into packets and routed through various network nodes using the Internet Protocol (IP). These packets travel across different physical infrastructures, including fiber optic cables, satellites, and wireless networks. The Transmission Control Protocol (TCP) ensures reliable delivery and reassembly of packets at their destination. Domain Name System (DNS) servers translate human-readable website names into IP addresses. When you access a website, your device sends a request to the appropriate server, which responds with the requested data. This process, facilitated by routers, switches, and other networking equipment, enables the seamless exchange of information across vast distances, forming the backbone of our digital communications.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Introduction to Internet",
"url": "https://roadmap.sh/guides/what-is-internet",
"type": "article"
},
{
"title": "How does the Internet Work?",
"url": "https://cs.fyi/guide/how-does-internet-work",
@ -18,11 +34,6 @@
"url": "https://developer.mozilla.org/en-US/docs/Learn/Common_questions/How_does_the_Internet_work",
"type": "article"
},
{
"title": "Introduction to Internet",
"url": "/guides/what-is-internet",
"type": "article"
},
{
"title": "How the Internet Works in 5 Minutes",
"url": "https://www.youtube.com/watch?v=7_LPdttKXPc",
@ -87,7 +98,7 @@
"description": "Web hosting is a service that allows individuals and organizations to make their websites accessible on the internet. It involves storing website files on powerful computers called servers, which are connected to a high-speed network. When users enter a domain name in their browser, the web host serves the website's content. Hosting services range from shared hosting (where multiple websites share server resources) to dedicated hosting (where a server is exclusively used by one client). Other types include VPS (Virtual Private Server) hosting, cloud hosting, and managed WordPress hosting. Web hosts typically provide additional services such as email hosting, domain registration, SSL certificates, and technical support. The choice of web hosting impacts a website's performance, security, and scalability, making it a crucial decision for any online presence.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "What is web hosting?",
"title": "What is Web Hosting?",
"url": "https://www.namecheap.com/hosting/what-is-web-hosting-definition/",
"type": "article"
},
@ -209,7 +220,7 @@
},
"z8-556o-PaHXjlytrawaF": {
"title": "Writing Semantic HTML",
"description": "Semantic HTML refers to the use of HTML markup to reinforce the meaning of web content, rather than merely defining its appearance. It involves using HTML elements that clearly describe their purpose and content. Semantic HTML improves accessibility, SEO, and code readability. Key elements include `<header>`, `<nav>`, `<main>`, `<article>`, `<section>`, `<aside>`, and `<footer>`. It also encompasses using appropriate heading levels (`<h1>` to `<h6>`), lists (`<ul>`, `<ol>`, `<li>`), and data tables (`<table>`, `<th>`, `<td>`). Semantic HTML helps screen readers interpret page content, enables better browser rendering, and provides clearer structure for developers. By using semantically correct elements, developers create more meaningful, accessible, and maintainable web documents that are easier for both humans and machines to understand and process.\n\nVisit the following resources to learn more:",
"description": "Semantic HTML refers to the use of HTML markup to reinforce the meaning of web content, rather than merely defining its appearance. It involves using HTML elements that clearly describe their purpose and content. Semantic HTML improves accessibility, SEO, and code readability. Key elements include `<header>`, `<nav>`, `<main>`, `<article>`, `<section>`, `<aside>`, and `<footer>`. It also encompasses using appropriate heading levels (`<h1>` to `<h6>`), lists (`<ul>`, `<ol>`,`<li>`), and data tables (`<table>`, `<th>`, `<td>`). Semantic HTML helps screen readers interpret page content, enables better browser rendering, and provides clearer structure for developers. By using semantically correct elements, developers create more meaningful, accessible, and maintainable web documents that are easier for both humans and machines to understand and process.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Guide to Writing Semantic HTML",
@ -227,7 +238,7 @@
"type": "article"
},
{
"title": "Why & When to Use Semantic HTML Elements over Divs",
"title": "Why & When to Use Semantic HTML Elements over Div(s)",
"url": "https://www.youtube.com/watch?v=bOUhq46fd5g",
"type": "video"
}
@ -515,7 +526,7 @@
"links": [
{
"title": "Learn Git & Github",
"url": "/git-github",
"url": "https://roadmap.sh/git-github",
"type": "article"
},
{
@ -530,7 +541,7 @@
},
{
"title": "GitLab Website",
"url": "https://gitlab.com",
"url": "https://about.gitlab.com",
"type": "article"
},
{
@ -545,8 +556,8 @@
"description": "Version Control Systems (VCS) are tools that help developers track and manage changes to code over time. They allow multiple people to work on a project simultaneously, maintaining a history of modifications. Git is the most popular VCS, known for its distributed nature and branching model. Other systems include Subversion (SVN) and Mercurial. VCS enables features like branching for parallel development, merging to combine changes, and reverting to previous states. They facilitate collaboration through remote repositories, pull requests, and code reviews. VCS also provides backup and recovery capabilities, conflict resolution, and the ability to tag specific points in history. By maintaining a detailed record of changes and supporting non-linear development, VCS has become an essential tool in modern software development, enhancing productivity, code quality, and team collaboration.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Learn Git and GitHub",
"url": "/git-github",
"title": "Learn Git & Github",
"url": "https://roadmap.sh/git-github",
"type": "article"
},
{
@ -571,8 +582,8 @@
"description": "Git is a distributed version control system designed to handle projects of any size with speed and efficiency. Created by Linus Torvalds in 2005, Git tracks changes in source code during software development, allowing multiple developers to work together on non-linear development. It provides strong support for branching, merging, and distributed development workflows. Git maintains a complete history of all changes, enabling easy rollbacks and comparisons between versions. Its distributed nature means each developer has a full copy of the repository, allowing for offline work and backup. Git's speed, flexibility, and robust branching and merging capabilities have made it the most widely used version control system in software development, particularly for open-source projects.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Learn Git & GitHub",
"url": "/git-github",
"title": "Visit Dedicated Git & Github Roadmap",
"url": "https://roadmap.sh/git-github",
"type": "article"
},
{
@ -602,8 +613,8 @@
"description": "Package managers are tools that automate the process of installing, updating, configuring, and removing software packages in a consistent manner. They handle dependency resolution, version management, and package distribution for programming languages and operating systems. Popular package managers include npm for JavaScript, pip for Python, and apt for Debian-based Linux distributions. These tools maintain a centralized repository of packages, allowing developers to easily share and reuse code. Package managers simplify project setup, ensure consistency across development environments, and help manage complex dependency trees. They play a crucial role in modern software development by streamlining workflow, enhancing collaboration, and improving code reusability.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "An Absolute Beginners Guide to Using npm",
"url": "https://nodesource.com/blog/an-absolute-beginners-guide-to-using-npm/",
"title": "PNPM: The Faster, More Performant NPM",
"url": "https://pnpm.io/",
"type": "article"
},
{
@ -611,6 +622,11 @@
"url": "https://yarnpkg.com/en/docs/getting-started",
"type": "article"
},
{
"title": "An Absolute Beginners Guide to Using npm",
"url": "https://nodesource.com/blog/an-absolute-beginners-guide-to-using-npm/",
"type": "article"
},
{
"title": "NPM Crash Course",
"url": "https://www.youtube.com/watch?v=jHDhaSSKmB0",
@ -628,8 +644,8 @@
"description": "GitHub has become a central hub for open-source projects and is widely used by developers, companies, and organizations for both private and public repositories. It was acquired by Microsoft in 2018 but continues to operate as a relatively independent entity. GitHub's popularity has made it an essential tool in modern software development workflows and a key platform for showcasing coding projects and contributing to open-source software.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Learn Git & GitHub",
"url": "/git-github",
"title": "Visit Dedicated Git & Github Roadmap",
"url": "https://roadmap.sh/git-github",
"type": "article"
},
{
@ -659,13 +675,13 @@
"description": "GitLab is a web-based DevOps platform that provides a complete solution for the software development lifecycle. GitLab emphasizes an all-in-one approach, integrating various development tools into a single platform. It's available as both a cloud-hosted service and a self-hosted solution, giving organizations flexibility in deployment. GitLab's focus on DevOps practices and its comprehensive feature set make it popular among enterprises and teams seeking a unified platform for their entire development workflow. While similar to GitHub in many respects, GitLab's integrated CI/CD capabilities and self-hosting options are often cited as key differentiators.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "GitLab Documentation",
"url": "https://docs.gitlab.com/",
"title": "GitLab Website",
"url": "https://gitlab.com/",
"type": "article"
},
{
"title": "GitLab Website",
"url": "https://gitlab.com/",
"title": "GitLab Documentation",
"url": "https://docs.gitlab.com/",
"type": "article"
},
{
@ -727,7 +743,7 @@
"description": "pnpm (performant npm) is a fast, disk-space efficient package manager for JavaScript and Node.js projects. It addresses inefficiencies in npm and Yarn by using a unique approach to storing and linking dependencies. pnpm creates a single, global store for all packages and uses hard links to reference them in project node\\_modules, significantly reducing disk space usage and installation time. It strictly adheres to package.json specifications, ensuring consistent installs across environments. pnpm offers features like workspace support for monorepos, side-by-side versioning, and improved security through better isolation of dependencies. While less widely adopted than npm or Yarn, pnpm's performance benefits and efficient disk usage are attracting increasing attention in the JavaScript community.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Official Website",
"title": "PNPM Website",
"url": "https://pnpm.io",
"type": "article"
},
@ -752,6 +768,11 @@
"url": "https://github.com/workshopper/how-to-npm",
"type": "opensource"
},
{
"title": "NPM Website",
"url": "https://www.npmjs.com/https://www.npmjs.com/",
"type": "article"
},
{
"title": "Modern JavaScript for Dinosaurs",
"url": "https://peterxjang.com/blog/modern-javascript-explained-for-dinosaurs.html",
@ -774,12 +795,12 @@
"description": "Web frameworks are designed to write web applications. Frameworks are collections of libraries that aid in the development of a software product or website. Frameworks for web application development are collections of various tools. Frameworks vary in their capabilities and functions, depending on the tasks set. They define the structure, establish the rules, and provide the development tools required.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "15 crazy new JS framework features you don’t know yet",
"title": "15 Crazy New JS Framework Features You Don’t Know Yet",
"url": "https://www.youtube.com/watch?v=466U-2D86bc",
"type": "video"
},
{
"title": "Which JS Framework is best?",
"title": "Which JS Framework is Best?",
"url": "https://www.youtube.com/watch?v=cuHDQhDhvPE",
"type": "video"
}
@ -795,7 +816,7 @@
"type": "article"
},
{
"title": "Official - Getting started with Angular",
"title": "Getting started with Angular",
"url": "https://angular.io/start",
"type": "article"
},
@ -899,7 +920,7 @@
"description": "SolidJS is a declarative, efficient, and flexible JavaScript library for building user interfaces. It uses a fine-grained reactivity system that updates only what changes, resulting in high performance. SolidJS compiles templates to real DOM nodes and updates them in-place, avoiding the overhead of a virtual DOM. It offers a syntax similar to React, making it familiar to many developers, but with a different underlying mechanism. SolidJS supports JSX, provides built-in state management, and emphasizes composition over inheritance. Its small size and lack of runtime overhead make it particularly suitable for applications requiring high performance. While newer compared to some frameworks, SolidJS is gaining popularity for its simplicity, performance, and developer-friendly approach to reactive programming.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Official Website - SolidJS",
"title": "SolidJS Website",
"url": "https://www.solidjs.com/",
"type": "article"
},
@ -930,7 +951,7 @@
"type": "article"
},
{
"title": "Qwik… the world's first O(1) JavaScript framework?",
"title": "Qwik - The world's first O(1) JavaScript Framework?",
"url": "https://www.youtube.com/watch?v=x2eF3YLiNhY",
"type": "video"
}
@ -938,8 +959,24 @@
},
"XDTD8el6OwuQ55wC-X4iV": {
"title": "Writing CSS",
"description": "Modern CSS emphasizes responsive design with techniques like media queries and fluid typography. It also includes methodologies like CSS-in-JS and utility-first frameworks (e.g., Tailwind CSS). Features such as CSS Logical Properties improve internationalization, while CSS Houdini allows for more powerful custom styling. Modern CSS focuses on performance optimization, maintainability, and creating adaptive, accessible designs across various devices and screen sizes, significantly improving the capabilities and efficiency of web styling.",
"links": []
"description": "Modern CSS emphasizes responsive design with techniques like media queries and fluid typography. It also includes methodologies like CSS-in-JS and utility-first frameworks (e.g., Tailwind CSS). Features such as CSS Logical Properties improve internationalization, while CSS Houdini allows for more powerful custom styling. Modern CSS focuses on performance optimization, maintainability, and creating adaptive, accessible designs across various devices and screen sizes, significantly improving the capabilities and efficiency of web styling.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Modern CSS: A Comprehensive Guide",
"url": "https://moderncss.dev/",
"type": "article"
},
{
"title": "CSS-Tricks: Modern CSS",
"url": "https://css-tricks.com/modern-css/",
"type": "article"
},
{
"title": "Explore top posts about CSS",
"url": "https://app.daily.dev/tags/css?ref=roadmapsh",
"type": "article"
}
]
},
"eghnfG4p7i-EDWfp3CQXC": {
"title": "Tailwind",
@ -955,6 +992,11 @@
"url": "https://tailwindcss.com",
"type": "article"
},
{
"title": "Online Playground",
"url": "https://play.tailwindcss.com",
"type": "article"
},
{
"title": "Explore top posts about CSS",
"url": "https://app.daily.dev/tags/css?ref=roadmapsh",
@ -997,6 +1039,26 @@
"title": "CSS Preprocessors",
"description": "CSS preprocessors are scripting languages that extend the capabilities of standard CSS, allowing developers to write more maintainable and efficient stylesheets. They introduce features like variables, nesting, mixins, functions, and mathematical operations, which are then compiled into standard CSS. Popular preprocessors include Sass, Less, and Stylus. These tools enable developers to organize styles more logically, reuse code more effectively, and create complex CSS structures with less repetition. Preprocessors often support features like partials for modular stylesheets and built-in color manipulation functions. By using a preprocessor, developers can write more DRY (Don't Repeat Yourself) code, manage large-scale projects more easily, and potentially improve the performance of their stylesheets through optimization during the compilation process.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Sass Website",
"url": "https://sass-lang.com/",
"type": "article"
},
{
"title": "Less Website",
"url": "https://lesscss.org/",
"type": "article"
},
{
"title": "Stylus Website",
"url": "https://stylus-lang.com/",
"type": "article"
},
{
"title": "PostCSS Website",
"url": "https://postcss.org/",
"type": "article"
},
{
"title": "Explore top posts about CSS",
"url": "https://app.daily.dev/tags/css?ref=roadmapsh",
@ -1051,7 +1113,7 @@
"description": "PostCSS is a tool for transforming CSS with JavaScript plugins. It allows developers to enhance their CSS workflow by automating repetitive tasks, adding vendor prefixes, and implementing future CSS features. PostCSS works as a preprocessor, but unlike Sass or Less, it's highly modular and customizable. Users can choose from a wide range of plugins or create their own to suit specific needs. Popular plugins include Autoprefixer for adding vendor prefixes, cssnext for using future CSS features, and cssnano for minification. PostCSS integrates well with various build tools and can be used alongside traditional CSS preprocessors. Its flexibility and performance make it a popular choice for optimizing CSS in modern web development workflows.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Official Website",
"title": "PostCSS Website",
"url": "https://postcss.org/",
"type": "article"
},
@ -1135,7 +1197,7 @@
"description": "Parcel is a zero-configuration web application bundler that simplifies the process of building and deploying web projects. It supports multiple programming languages and file types out of the box, including JavaScript, CSS, HTML, and various image formats. Parcel automatically analyzes dependencies, transforms code, and optimizes assets without requiring a complex configuration file. It offers features like hot module replacement, code splitting, and tree shaking by default. Parcel's main selling point is its ease of use and fast build times, achieved through parallel processing and caching. While it may lack some advanced features of more established bundlers like Webpack, Parcel's simplicity and performance make it an attractive option for rapid prototyping and smaller projects.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Official Website and Docs",
"title": "Parcel Website",
"url": "https://parceljs.org/plugin-system/bundler/",
"type": "article"
},
@ -1156,7 +1218,7 @@
"description": "Rollup is a module bundler for JavaScript that compiles small pieces of code into larger, more complex structures. It specializes in producing smaller, more efficient bundles for ES modules. Rollup excels at tree-shaking, eliminating unused code for leaner outputs. It's particularly well-suited for libraries and applications using the ES module format. Rollup supports various output formats, including UMD and CommonJS, making it versatile for different deployment scenarios. While it may require more configuration than some alternatives, Rollup's focus on ES modules and its efficient bundling make it popular for projects prioritizing small bundle sizes and modern JavaScript practices.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Official Website and Docs",
"title": "Rollup Website and Docs",
"url": "https://rollupjs.org/",
"type": "article"
},
@ -1177,7 +1239,7 @@
"description": "Webpack is a popular open-source JavaScript module bundler that transforms, bundles, or packages resources for the web. It takes modules with dependencies and generates static assets representing those modules. Webpack can handle not just JavaScript, but also other assets like CSS, images, and fonts. It uses loaders to preprocess files and plugins to perform a wider range of tasks like bundle optimization. Webpack's key features include code splitting, lazy loading, and a rich ecosystem of extensions. It supports hot module replacement for faster development and tree shaking to eliminate unused code. While it has a steeper learning curve compared to some alternatives, Webpack's flexibility and powerful features make it a standard tool in many modern JavaScript development workflows, especially for complex applications.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Webpack Official Website",
"title": "Webpack Website",
"url": "https://webpack.js.org/",
"type": "article"
},
@ -1203,7 +1265,7 @@
"type": "course"
},
{
"title": "Esbuild Official Website",
"title": "Esbuild Website",
"url": "https://esbuild.github.io/",
"type": "article"
},
@ -1224,10 +1286,15 @@
"description": "Vite is a modern build tool and development server designed for fast and lean development of web applications. Created by Evan You, the author of Vue.js, Vite leverages native ES modules in the browser to enable near-instantaneous server start and lightning-fast hot module replacement (HMR). It supports various frameworks including Vue, React, and Svelte out of the box. Vite uses Rollup for production builds, resulting in highly optimized bundles. It offers features like CSS pre-processor support, TypeScript integration, and plugin extensibility. Vite's architecture, which separates dev and build concerns, allows for faster development cycles and improved developer experience, particularly for large-scale projects where traditional bundlers might struggle with performance.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Vite Website",
"title": "Vite - The Build Tool for the Web",
"url": "https://vitejs.dev",
"type": "article"
},
{
"title": "Vite Documentation",
"url": "https://vitejs.dev/guide/",
"type": "article"
},
{
"title": "Explore top posts about Vite",
"url": "https://app.daily.dev/tags/vite?ref=roadmapsh",
@ -1271,7 +1338,7 @@
"description": "ESLint is a popular open-source static code analysis tool for identifying and fixing problems in JavaScript code. It enforces coding standards, detects potential errors, and promotes consistent coding practices across projects. ESLint is highly configurable, allowing developers to define custom rules or use preset configurations. It supports modern JavaScript features, JSX, and TypeScript through plugins. ESLint can be integrated into development workflows through IDE extensions, build processes, or git hooks, providing real-time feedback to developers. Its ability to automatically fix many issues it detects makes it a valuable tool for maintaining code quality and consistency, especially in large teams or projects. ESLint's extensibility and wide adoption in the JavaScript ecosystem have made it a standard tool in modern JavaScript development.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "ESLint Official Website",
"title": "ESLint Website",
"url": "https://eslint.org/",
"type": "article"
},
@ -1297,12 +1364,7 @@
"description": "Testing apps involves systematically evaluating software to ensure it meets requirements, functions correctly, and maintains quality. Key testing types include:\n\n1. Unit testing: Verifying individual components or functions\n2. Integration testing: Checking interactions between different parts of the app\n3. Functional testing: Ensuring the app meets specified requirements\n4. UI/UX testing: Evaluating the user interface and experience\n5. Performance testing: Assessing app speed, responsiveness, and stability\n6. Security testing: Identifying vulnerabilities and ensuring data protection\n7. Accessibility testing: Verifying usability for people with disabilities\n8. Compatibility testing: Checking functionality across different devices and platforms\n\nModern testing often incorporates automated testing tools and continuous integration/continuous deployment (CI/CD) pipelines. Test-driven development (TDD) and behavior-driven development (BDD) are popular methodologies that emphasize writing tests before or alongside code. Effective testing strategies help identify bugs early, improve code quality, and ensure a reliable user experience.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "A comprehensive dive into software testing.",
"url": "https://www.softwaretestingmaterial.com/software-testing/",
"type": "article"
},
{
"title": "The different types of software tests",
"title": "The Different Types of Software Tests",
"url": "https://www.atlassian.com/continuous-delivery/software-testing/types-of-software-testing",
"type": "article"
},
@ -1312,7 +1374,7 @@
"type": "article"
},
{
"title": "How to test web applications - dotJS 2024",
"title": "How to Test Web Applications - dotJS 2024",
"url": "https://www.youtube.com/watch?v=l3qjQpYBR8c",
"type": "video"
}
@ -1323,12 +1385,17 @@
"description": "Vitest is a fast and lightweight testing framework for JavaScript and TypeScript projects, designed as a Vite-native alternative to Jest. It leverages Vite's transformation pipeline and config resolution, offering near-instant test execution and hot module replacement (HMR) for tests. Vitest provides a Jest-compatible API, making migration easier for projects already using Jest. It supports features like snapshot testing, mocking, and code coverage out of the box. Vitest's architecture allows for parallel test execution and watch mode, significantly speeding up the testing process. Its integration with Vite's ecosystem makes it particularly well-suited for projects already using Vite, but it can be used in any JavaScript project. Vitest's focus on speed and developer experience has made it an increasingly popular choice for modern web development workflows.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Vitest Website",
"title": "Vitest - Next Generation Testing Framework",
"url": "https://vitest.dev/",
"type": "article"
},
{
"title": "Vitets simplified",
"title": "Vitest Documentation",
"url": "https://vitest.dev/guide/",
"type": "article"
},
{
"title": "Vitest Simplified",
"url": "https://www.youtube.com/watch?v=snCLQmINqCU",
"type": "video"
}
@ -1381,13 +1448,13 @@
"description": "Cypress framework is a JavaScript-based end-to-end testing framework built on top of Mocha – a feature-rich JavaScript test framework running on and in the browser, making asynchronous testing simple and convenient. It also uses a BDD/TDD assertion library and a browser to pair with any JavaScript testing framework.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Official Website",
"title": "Cypress Website",
"url": "https://www.cypress.io/",
"type": "article"
},
{
"title": "Official Documentation",
"url": "https://docs.cypress.io/guides/overview/why-cypress#Other",
"title": "Cypress Documentation",
"url": "https://docs.cypress.io/",
"type": "article"
},
{
@ -1606,15 +1673,31 @@
},
"hwPOGT0-duy3KfI8QaEwF": {
"title": "Type Checkers",
"description": "Type checkers are tools that analyze code to detect and prevent type-related errors without executing the program. They enforce type consistency, helping developers catch mistakes early in the development process. Popular type checkers include TypeScript for JavaScript, Flow for JavaScript, and mypy for Python. These tools add static typing to dynamically typed languages, offering benefits like improved code reliability, better documentation, and enhanced developer tooling support. Type checkers can infer types in many cases and allow for gradual adoption in existing projects. They help prevent common runtime errors, facilitate refactoring, and improve code maintainability. While adding some overhead to the development process, type checkers are widely adopted in large-scale applications for their ability to catch errors before runtime and improve overall code quality.",
"links": []
"description": "Type checkers are tools that analyze code to detect and prevent type-related errors without executing the program. They enforce type consistency, helping developers catch mistakes early in the development process. Popular type checkers include TypeScript for JavaScript, Flow for JavaScript, and mypy for Python. These tools add static typing to dynamically typed languages, offering benefits like improved code reliability, better documentation, and enhanced developer tooling support. Type checkers can infer types in many cases and allow for gradual adoption in existing projects. They help prevent common runtime errors, facilitate refactoring, and improve code maintainability. While adding some overhead to the development process, type checkers are widely adopted in large-scale applications for their ability to catch errors before runtime and improve overall code quality.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Flow - Static Type Checker for JavaScript",
"url": "https://flow.org/",
"type": "article"
},
{
"title": "TypeScript",
"url": "https://www.typescriptlang.org/",
"type": "article"
},
{
"title": "Mypy - Static Type Checker for Python",
"url": "https://mypy.readthedocs.io/en/stable/",
"type": "article"
}
]
},
"VxiQPgcYDFAT6WgSRWpIA": {
"title": "Custom Elements",
"description": "One of the key features of the Web Components standard is the ability to create custom elements that encapsulate your functionality on an HTML page, rather than having to make do with a long, nested batch of elements that together provide a custom page feature.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Using custom elements | MDN web docs",
"title": "Using Custom Elements - MDN",
"url": "https://developer.mozilla.org/en-US/docs/Web/Web_Components/Using_custom_elements",
"type": "article"
},
@ -1635,12 +1718,12 @@
"description": "The `<template>` HTML element is a mechanism for holding HTML that is not to be rendered immediately when a page is loaded but may be instantiated subsequently during runtime using JavaScript. Think of a template as a content fragment that is being stored for subsequent use in the document.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Using templates and slots | MDN web docs",
"title": "Using Templates and Slots - MDN",
"url": "https://developer.mozilla.org/en-US/docs/Web/Web_Components/Using_templates_and_slots",
"type": "article"
},
{
"title": "HTML Template tag",
"title": "HTML Template Tag",
"url": "https://www.w3schools.com/tags/tag_template.asp",
"type": "article"
},
@ -1678,14 +1761,19 @@
"links": [
{
"title": "TypeScript Roadmap",
"url": "/typescript",
"url": "https://roadmap.sh/typescript",
"type": "article"
},
{
"title": "Official Website",
"title": "TypeScript Website",
"url": "https://www.typescriptlang.org/",
"type": "article"
},
{
"title": "TypeScript Playground",
"url": "https://www.typescriptlang.org/play",
"type": "article"
},
{
"title": "The TypeScript Handbook",
"url": "https://www.typescriptlang.org/docs/handbook/intro.html",
@ -1838,17 +1926,17 @@
"description": "React Router is a popular routing library for React applications that enables dynamic, client-side routing. It allows developers to create single-page applications with multiple views, managing the URL and history of the browser while keeping the UI in sync with the URL. React Router provides a declarative way to define routes, supporting nested routes, route parameters, and programmatic navigation. It offers components like BrowserRouter, Route, and Link to handle routing logic and navigation. The library also supports features such as lazy loading of components, route guards, and custom history management. React Router's integration with React's component model makes it a go-to solution for managing navigation and creating complex, multi-view applications in React ecosystems.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Official Website",
"title": "React Router Website",
"url": "https://reactrouter.com/en/main",
"type": "article"
},
{
"title": "A complete guide to routing in react",
"title": "A Complete Guide to Routing in React",
"url": "https://hygraph.com/blog/routing-in-react",
"type": "article"
},
{
"title": "React router - Complete tutorial",
"title": "React Router - Complete tutorial",
"url": "https://www.youtube.com/watch?v=oTIJunBa6MA",
"type": "video"
}
@ -1906,7 +1994,7 @@
"description": "SvelteKit is a framework for building web applications using Svelte, a component-based JavaScript framework. It provides a full-stack development experience, handling both server-side and client-side rendering. SvelteKit offers features like file-based routing, code-splitting, and server-side rendering out of the box. It supports both static site generation and server-side rendering, allowing developers to choose the most appropriate approach for each page. SvelteKit emphasizes simplicity and performance, leveraging Svelte's compile-time approach to generate highly optimized JavaScript. It includes built-in development tools, easy deployment options, and integrates well with various backend services. SvelteKit's efficient development experience and flexibility make it an attractive option for building modern, performant web applications.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Svelte Kit Official Website",
"title": "Svelte Kit Website",
"url": "https://kit.svelte.dev/",
"type": "article"
},
@ -1932,8 +2020,8 @@
"description": "GraphQL is a query language and runtime for APIs, developed by Facebook. GraphQL's flexibility and efficiency make it popular for building complex applications, especially those with diverse client requirements. It's particularly useful for mobile applications where bandwidth efficiency is crucial. While it requires a paradigm shift from REST, many developers and organizations find GraphQL's benefits outweigh the learning curve, especially for large-scale or rapidly evolving APIs.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "GraphQL Roadmap",
"url": "/graphql",
"title": "visit Dedicated GraphQL Roadmap",
"url": "https://roadmap.sh/graphql",
"type": "article"
},
{
@ -1973,12 +2061,12 @@
"type": "article"
},
{
"title": "Official Docs",
"title": "Apollo Docs",
"url": "https://www.apollographql.com/docs/",
"type": "article"
},
{
"title": "",
"title": "Visit Dedicated GraphQL Roadmap",
"url": "https://roadmap.sh/graphql",
"type": "article"
},
@ -1999,7 +2087,7 @@
"type": "opensource"
},
{
"title": "Official Website",
"title": "Relay Website",
"url": "https://relay.dev/",
"type": "article"
},
@ -2015,7 +2103,7 @@
"description": "Static site generators (SSGs) are tools that create HTML websites from raw data and templates, producing pre-rendered pages at build time rather than at runtime. They combine the benefits of static websites (speed, security, simplicity) with the flexibility of dynamic sites. SSGs typically use markup languages like Markdown for content, templating engines for layouts, and generate a fully static website that can be hosted on simple web servers or content delivery networks. Popular SSGs include Jekyll, Hugo, Gatsby, and Eleventy. They're well-suited for blogs, documentation sites, and content-focused websites. SSGs offer advantages in performance, version control integration, and reduced server-side complexity, making them increasingly popular for a wide range of web projects.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "What is a static site generator?",
"title": "What is a Static Site Generator?",
"url": "https://www.cloudflare.com/learning/performance/static-site-generator/",
"type": "article"
},
@ -2097,6 +2185,11 @@
"url": "https://astro.build/",
"type": "article"
},
{
"title": "Getting Started with Astro",
"url": "https://docs.astro.build/en/getting-started/",
"type": "article"
},
{
"title": "What is Astro?",
"url": "https://www.contentful.com/blog/what-is-astro/",
@ -2130,7 +2223,7 @@
"description": "Next.js is a React-based open-source framework for building server-side rendered and statically generated web applications. It provides features like automatic code splitting, optimized performance, and simplified routing out of the box. Next.js supports both static site generation (SSG) and server-side rendering (SSR), allowing developers to choose the most appropriate rendering method for each page. The framework offers built-in CSS support, API routes for backend functionality, and easy deployment options. Next.js is known for its developer-friendly experience, with features like hot module replacement and automatic prefetching. Its ability to create hybrid apps that combine static and server-rendered pages makes it popular for building scalable, SEO-friendly web applications.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Official Website",
"title": "Next.js Website",
"url": "https://nextjs.org/",
"type": "article"
},
@ -2156,7 +2249,7 @@
"type": "article"
},
{
"title": "MDN Web Docs: Progressive Web Apps ",
"title": "MDN - Progressive Web Apps",
"url": "https://developer.mozilla.org/en-US/docs/Web/Progressive_web_apps/",
"type": "article"
},
@ -2208,12 +2301,12 @@
"description": "React Native is an open-source mobile application development framework created by Facebook. It allows developers to build native mobile apps for iOS and Android using JavaScript and React. React Native translates JavaScript code into native components, providing near-native performance and a genuine native user interface. It enables code reuse across platforms, speeding up development and reducing costs. The framework offers hot reloading for quick iterations, access to native APIs, and a large ecosystem of third-party plugins. React Native's \"learn once, write anywhere\" philosophy and its ability to bridge web and mobile development make it popular for creating cross-platform mobile applications, especially among teams already familiar with React for web development.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "React Native Roadmap",
"url": "/react-native",
"title": "Visit Dedicated React Roadmap",
"url": "https://roadmap.sh/react-native",
"type": "article"
},
{
"title": "Official Website",
"title": "React Native Website",
"url": "https://reactnative.dev/",
"type": "article"
},
@ -2270,12 +2363,12 @@
"type": "opensource"
},
{
"title": "Official Website",
"title": "Ionic Framework Website",
"url": "https://ionicframework.com/",
"type": "article"
},
{
"title": "Ionic 8 Announcment",
"title": "Ionic 8 Announcement",
"url": "https://ionic.io/blog/ionic-8-is-here",
"type": "article"
},
@ -2288,8 +2381,24 @@
},
"KMA7NkxFbPoUDtFnGBFnj": {
"title": "Desktop Apps",
"description": "Desktop applications applications typically use frameworks like Electron, NW.js (Node-WebKit), or Tauri, which combine a JavaScript runtime with a native GUI toolkit. This approach allows developers to use their web development skills to create cross-platform desktop apps. Electron, developed by GitHub, is particularly popular, powering applications like Visual Studio Code, Atom, and Discord. These frameworks provide APIs to access native system features, enabling JavaScript to interact with the file system, system tray, and other OS-specific functionalities. While offering rapid development and cross-platform compatibility, JavaScript desktop apps can face challenges in terms of performance and resource usage compared to traditional native applications. However, they benefit from the vast ecosystem of JavaScript libraries and tools, making them an attractive option for many developers and businesses.",
"links": []
"description": "Desktop applications applications typically use frameworks like Electron, NW.js (Node-WebKit), or Tauri, which combine a JavaScript runtime with a native GUI toolkit. This approach allows developers to use their web development skills to create cross-platform desktop apps. Electron, developed by GitHub, is particularly popular, powering applications like Visual Studio Code, Atom, and Discord. These frameworks provide APIs to access native system features, enabling JavaScript to interact with the file system, system tray, and other OS-specific functionalities. While offering rapid development and cross-platform compatibility, JavaScript desktop apps can face challenges in terms of performance and resource usage compared to traditional native applications. However, they benefit from the vast ecosystem of JavaScript libraries and tools, making them an attractive option for many developers and businesses.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Electron Website",
"url": "https://www.electronjs.org/",
"type": "article"
},
{
"title": "NW.js Website",
"url": "https://nwjs.io/",
"type": "article"
},
{
"title": "Tauri Website",
"url": "https://tauri.app/",
"type": "article"
}
]
},
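
A minimal Electron main-process sketch in TypeScript, assuming `electron` (and Node type definitions) are installed and an `index.html` sits next to the compiled script; it only illustrates the basic pattern of opening a native window that renders web content, not a production setup.

```typescript
// Electron main process: runs under Node and controls native windows.
import { app, BrowserWindow } from 'electron';

function createWindow(): void {
  // A native OS window whose contents are rendered by Chromium.
  const win = new BrowserWindow({ width: 800, height: 600 });
  // Assumed to exist alongside the compiled script.
  win.loadFile('index.html');
}

// Electron resolves this promise once its internals are initialised.
app.whenReady().then(createWindow);

// Quit when all windows are closed, except on macOS where apps
// conventionally stay active until the user quits explicitly.
app.on('window-all-closed', () => {
  if (process.platform !== 'darwin') {
    app.quit();
  }
});
```

Tauri and NW.js follow the same broad idea of a web UI inside a native shell, but with different runtimes and APIs.
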
"mQHpSyMR4Rra4mqAslgiS": {
"title": "Electron",
@ -2408,7 +2517,7 @@
},
"X0Y3-IpPiFUCsNDK4RFxw": {
"title": "Performance Metrics",
"description": "Web performance metrics are quantitative measures of the performance of a web page or application. They are used to assess the speed and efficiency of a web page, and they can help identify areas for improvement. Some common web performance metrics include:\n\n* Load time: The time it takes for a web page to fully load and become interactive.\n* First contentful paint (FCP): The time it takes for the first content to appear on the page.\n* Time to interactive (TTI): The time it takes for the page to become fully interactive.\n* First input delay (FID): The time it takes for the page to respond to the first user input.\n* Total blocking time (TBT): The time it takes for the page to become fully interactive, taking into account the time spent blocking the main thread.\n\nThere are many tools and techniques available for measuring web performance metrics, including browser dev tools, performance monitoring tools, and web performance APIs. By tracking these metrics and analyzing the results, web developers can identify areas for improvement and optimize the performance of their web pages.\n\nVisit the following resources to learn more:",
"description": "Web performance metrics are quantitative measures of the performance of a web page or application. They are used to assess the speed and efficiency of a web page, and they can help identify areas for improvement. Some common web performance metrics include:\n\n* Load time: The time it takes for a web page to fully load and become interactive.\n* First contentful paint (FCP): The time it takes for the first content to appear on the page.\n* Time to interactive (TTI): The time it takes for the page to become fully interactive.\n* First input delay (FID): The time it takes for the page to respond to the first user input.\n* Total blocking time (TBT): The time it takes for the page to become fully interactive, taking into account the time spent blocking the main thread.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "Web Performance Metrics - Google Developers",

@ -58,6 +58,11 @@
"title": "Python for Beginners: Data Types",
"url": "https://thenewstack.io/python-for-beginners-data-types/",
"type": "article"
},
{
"title": "Python Variables and Data Types",
"url": "https://www.youtube.com/playlist?list=PLBlnK6fEyqRhN-sfWgCU1z_Qhakc1AGOn",
"type": "video"
}
]
},
@ -237,6 +242,11 @@
"title": "Python \"for\" Loops (Definite Iteration)",
"url": "https://realpython.com/python-for-loop/#the-guts-of-the-python-for-loop",
"type": "article"
},
{
"title": "Python For Loops",
"url": "https://www.youtube.com/watch?v=KWgYha0clzw",
"type": "video"
}
]
},
@ -618,6 +628,11 @@
"title": "OOP in Python One Shot",
"url": "https://www.youtube.com/watch?v=Ej_02ICOIgs",
"type": "video"
},
{
"title": "Python OOP Tutorial",
"url": "https://www.youtube.com/watch?v=IbMDCwVm63M",
"type": "video"
}
]
},

@ -23,8 +23,19 @@
},
"l2aXyO3STnhbFjvUXPpm2": {
"title": "Key-value Database",
"description": "",
"links": []
"description": "Key-value Database is a non-relational (NoSQL) database that stores data as a collection of key-value pairs. In this model, each piece of data is associated with a unique identifier (key) that is used to retrieve the corresponding value. This simple structure allows for high performance and scalability, making key-value databases ideal for certain use cases.\n\nVisit the following resources to learn more:",
"links": [
{
"title": "What is a Key-Value Database?",
"url": "https://redis.io/nosql/key-value-databases/",
"type": "article"
},
{
"title": "Key Value Store - System Design Interview Basics",
"url": "https://www.youtube.com/watch?v=ozJHmm05EXM",
"type": "video"
}
]
},
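
A minimal sketch of the key-value access pattern, using an in-memory `Map` in TypeScript rather than a real database such as Redis; the key name and stored value are made up for illustration.

```typescript
// Not a real database: an in-memory Map illustrating how every value
// is written and read through its unique key.
const store = new Map<string, string>();

// SET: associate a value with a key (overwrites any previous value).
store.set('session:42', JSON.stringify({ user: 'alice', ttl: 3600 }));

// GET: constant-time lookup by key; undefined when the key is absent.
const raw = store.get('session:42');
console.log(raw ? JSON.parse(raw) : 'cache miss');

// DELETE: remove the key and its value.
store.delete('session:42');
```
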
"eHuBz_zSZK3rubn7nkd7g": {
"title": "Cache",
