diff --git a/.github/workflows/refresh-roadmap-content-json.yml b/.github/workflows/refresh-roadmap-content-json.yml new file mode 100644 index 000000000..65a03348a --- /dev/null +++ b/.github/workflows/refresh-roadmap-content-json.yml @@ -0,0 +1,37 @@ +name: Refresh roadmap content JSON + +on: + workflow_dispatch: # allow manual run + schedule: + - cron: '0 0 * * *' # every day at midnight + +jobs: + generate-content-json: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-node@v3 + with: + node-version: 18 + - uses: pnpm/action-setup@v2.2.2 + with: + version: 7.13.4 + - name: Generate roadmap content JSON + run: | + pnpm install + npm run generate:roadmap-content-json + - name: Create PR + uses: peter-evans/create-pull-request@v4 + with: + delete-branch: false + branch: "chore/update-content-json" + base: "master" + labels: | + dependencies + automated pr + reviewers: kamranahmedse,arikchakma + commit-message: "chore: update roadmap content json" + title: "Update roadmap content json" + body: | + Updates the roadmap content JSON files in the `public` folder. + Please review the changes and merge if everything looks good. 
diff --git a/package.json b/package.json index 0bb1ddf70..39b316099 100644 --- a/package.json +++ b/package.json @@ -25,6 +25,7 @@ "generate:og": "node ./scripts/generate-og-images.mjs", "warm:urls": "sh ./scripts/warm-urls.sh https://roadmap.sh/sitemap-0.xml", "compress:images": "tsx ./scripts/compress-images.ts", + "generate:roadmap-content-json": "tsx ./scripts/editor-roadmap-content-json.ts", "test:e2e": "playwright test" }, "dependencies": { @@ -70,6 +71,7 @@ "slugify": "^1.6.6", "tailwind-merge": "^2.4.0", "tailwindcss": "^3.4.7", + "turndown": "^7.2.0", "unified": "^11.0.5", "zustand": "^4.5.4" }, @@ -80,6 +82,7 @@ "@types/js-cookie": "^3.0.6", "@types/prismjs": "^1.26.4", "@types/react-calendar-heatmap": "^1.6.7", + "@types/turndown": "^5.0.5", "csv-parser": "^3.0.0", "gh-pages": "^6.1.1", "js-yaml": "^4.1.0", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index a3f7cfaee..b81dd5cfa 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -134,6 +134,9 @@ importers: tailwindcss: specifier: ^3.4.7 version: 3.4.7 + turndown: + specifier: ^7.2.0 + version: 7.2.0 unified: specifier: ^11.0.5 version: 11.0.5 @@ -159,6 +162,9 @@ importers: '@types/react-calendar-heatmap': specifier: ^1.6.7 version: 1.6.7 + '@types/turndown': + specifier: ^5.0.5 + version: 5.0.5 csv-parser: specifier: ^3.0.0 version: 3.0.0 @@ -696,6 +702,9 @@ packages: '@jridgewell/trace-mapping@0.3.25': resolution: {integrity: sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==} + '@mixmark-io/domino@2.2.0': + resolution: {integrity: sha512-Y28PR25bHXUg88kCV7nivXrP2Nj2RueZ3/l/jdx6J9f8J4nsEGcgX0Qe6lt7Pa+J79+kPiJU3LguR6O/6zrLOw==} + '@nanostores/react@0.7.2': resolution: {integrity: sha512-e3OhHJFv3NMSFYDgREdlAQqkyBTHJM91s31kOZ4OvZwJKdFk5BLk0MLbh51EOGUz9QGX2aCHfy1RvweSi7fgwA==} engines: {node: ^18.0.0 || >=20.0.0} @@ -1172,6 +1181,9 @@ packages: '@types/sax@1.2.7': resolution: {integrity: 
sha512-rO73L89PJxeYM3s3pPPjiPgVVcymqU490g0YO5n5By0k2Erzj6tay/4lr1CHAAU4JyOWd1rpQ8bCf6cZfHU96A==} + '@types/turndown@5.0.5': + resolution: {integrity: sha512-TL2IgGgc7B5j78rIccBtlYAnkuv8nUQqhQc+DSYV5j9Be9XOcm/SKOVRuA47xAVI3680Tk9B1d8flK2GWT2+4w==} + '@types/unist@3.0.2': resolution: {integrity: sha512-dqId9J8K/vGi5Zr7oo212BGii5m3q5Hxlkwy3WpYuKPklmBEvsbMYYyLxAQpSffdLl/gdW0XUpKWFvYmyoWCoQ==} @@ -2965,6 +2977,9 @@ packages: engines: {node: '>=18.0.0'} hasBin: true + turndown@7.2.0: + resolution: {integrity: sha512-eCZGBN4nNNqM9Owkv9HAtWRYfLA4h909E/WGAWWBpmB275ehNhZyk87/Tpvjbp0jjNl9XwCsbe6bm6CqFsgD+A==} + tween-functions@1.2.0: resolution: {integrity: sha512-PZBtLYcCLtEcjL14Fzb1gSxPBeL7nWvGhO5ZFPGqziCcr8uvHp0NDmdjBchp6KHL+tExcg0m3NISmKxhU394dA==} @@ -3699,6 +3714,8 @@ snapshots: '@jridgewell/resolve-uri': 3.1.2 '@jridgewell/sourcemap-codec': 1.5.0 + '@mixmark-io/domino@2.2.0': {} + '@nanostores/react@0.7.2(nanostores@0.10.3)(react@18.3.1)': dependencies: nanostores: 0.10.3 @@ -4180,6 +4197,8 @@ snapshots: dependencies: '@types/node': 17.0.45 + '@types/turndown@5.0.5': {} + '@types/unist@3.0.2': {} '@ungap/structured-clone@1.2.0': {} @@ -6237,6 +6256,10 @@ snapshots: optionalDependencies: fsevents: 2.3.3 + turndown@7.2.0: + dependencies: + '@mixmark-io/domino': 2.2.0 + tween-functions@1.2.0: {} type-fest@2.19.0: {} diff --git a/public/roadmap-content/ai-data-scientist.json b/public/roadmap-content/ai-data-scientist.json new file mode 100644 index 000000000..098223ecc --- /dev/null +++ b/public/roadmap-content/ai-data-scientist.json @@ -0,0 +1,420 @@ +{ + "aStaDENn5PhEa-cFvNzXa": { + "title": "Mathematics", + "description": "Mathematics is the foundation of AI and Data Science. 
It is essential to have a good understanding of mathematics to excel in these fields.", + "links": [ + { + "title": "Mathematics for Machine Learning", + "url": "https://imp.i384100.net/baqMYv", + "type": "article" + }, + { + "title": "Algebra and Differential Calculus", + "url": "https://imp.i384100.net/LX5M7M", + "type": "article" + } + ] + }, + "4WZL_fzJ3cZdWLLDoWN8D": { + "title": "Statistics", + "description": "Statistics is the science of collecting, analyzing, interpreting, presenting, and organizing data. It is a branch of mathematics that deals with the collection, analysis, interpretation, presentation, and organization of data. It is used in a wide range of fields, including science, engineering, medicine, and social science. Statistics is used to make informed decisions, to predict future events, and to test hypotheses. It is also used to summarize data, to describe relationships between variables, and to make inferences about populations based on samples.\n\nLearn more from the resources given on the roadmap.", + "links": [] + }, + "gWMvD83hVXeTmCuHGIiOL": { + "title": "Linear Algebra, Calculus, Mathematical Analysis", + "description": "", + "links": [ + { + "title": "Mathematics for Machine Learning Specialization", + "url": "https://imp.i384100.net/baqMYv", + "type": "article" + }, + { + "title": "Explore top posts about Math", + "url": "https://app.daily.dev/tags/math?ref=roadmapsh", + "type": "article" + }, + { + "title": "Linear Algebra Youtube Course", + "url": "https://www.youtube.com/playlist?list=PLZHQObOWTQDPD3MizzM2xVFitgF8hE_ab", + "type": "video" + } + ] + }, + "mwPJh33MEUQ4Co_LiVEOb": { + "title": "Differential Calculus", + "description": "", + "links": [ + { + "title": "Algebra and Differential Calculus for Data Science", + "url": "https://imp.i384100.net/LX5M7M", + "type": "article" + } + ] + }, + "Y9YJdARIRqqCBCy3GVYdA": { + "title": "Statistics, CLT", + "description": "", + "links": [ + { + "title": "Introduction to Statistics", + 
"url": "https://imp.i384100.net/3eRv4v", + "type": "article" + } + ] + }, + "XJXIkWVDIrPJ-bVIvX0ZO": { + "title": "Hypothesis Testing", + "description": "", + "links": [ + { + "title": "Introduction to Statistical Analysis: Hypothesis Testing", + "url": "https://imp.i384100.net/vN0JAA", + "type": "article" + }, + { + "title": "Explore top posts about Testing", + "url": "https://app.daily.dev/tags/testing?ref=roadmapsh", + "type": "article" + } + ] + }, + "jxJtwbiCvxHqmkWkE7zdx": { + "title": "Probability and Sampling", + "description": "", + "links": [ + { + "title": "Probability and Statistics: To p or not to p?", + "url": "https://imp.i384100.net/daDM6Q", + "type": "article" + }, + { + "title": "Explore top posts about Statistics", + "url": "https://app.daily.dev/tags/statistics?ref=roadmapsh", + "type": "article" + } + ] + }, + "mJq9b50MJM9o9dLhx40iN": { + "title": "AB Testing", + "description": "", + "links": [ + { + "title": "Practitioner’s Guide to Statistical Tests", + "url": "https://vkteam.medium.com/practitioners-guide-to-statistical-tests-ed2d580ef04f#1e3b", + "type": "article" + }, + { + "title": "Step by Step Process for Planning an A/B Test", + "url": "https://towardsdatascience.com/step-by-step-for-planning-an-a-b-test-ef3c93143c0b", + "type": "article" + }, + { + "title": "Explore top posts about A/B Testing", + "url": "https://app.daily.dev/tags/ab-testing?ref=roadmapsh", + "type": "article" + } + ] + }, + "v68nwX914qCwHDSwY_ZhG": { + "title": "Increasing Test Sensitivity", + "description": "", + "links": [ + { + "title": "Minimum Detectable Effect (MDE)", + "url": "https://splitmetrics.com/resources/minimum-detectable-effect-mde/", + "type": "article" + }, + { + "title": "Improving the Sensitivity of Online Controlled Experiments: Case Studies at Netflix", + "url": "https://kdd.org/kdd2016/papers/files/adp0945-xieA.pdf", + "type": "article" + }, + { + "title": "Improving the Sensitivity of Online Controlled Experiments by Utilizing Pre-Experiment 
Data", + "url": "https://exp-platform.com/Documents/2013-02-CUPED-ImprovingSensitivityOfControlledExperiments.pdf", + "type": "article" + }, + { + "title": "How Booking.com increases the power of online experiments with CUPED", + "url": "https://booking.ai/how-booking-com-increases-the-power-of-online-experiments-with-cuped-995d186fff1d", + "type": "article" + }, + { + "title": "Improving Experimental Power through Control Using Predictions as Covariate — CUPAC", + "url": "https://doordash.engineering/2020/06/08/improving-experimental-power-through-control-using-predictions-as-covariate-cupac/", + "type": "article" + }, + { + "title": "Improving the Sensitivity of Online Controlled Experiments: Case Studies at Netflix", + "url": "https://www.researchgate.net/publication/305997925_Improving_the_Sensitivity_of_Online_Controlled_Experiments_Case_Studies_at_Netflix", + "type": "article" + } + ] + }, + "n2JFGwFxTuOviW6kHO1Uv": { + "title": "Ratio Metrics", + "description": "", + "links": [ + { + "title": "Applying the Delta Method in Metric Analytics: A Practical Guide with Novel Ideas", + "url": "https://arxiv.org/pdf/1803.06336.pdf", + "type": "article" + }, + { + "title": "Approximations for Mean and Variance of a Ratio", + "url": "https://www.stat.cmu.edu/~hseltman/files/ratio.pdf", + "type": "article" + } + ] + }, + "Gd2egqKZPnbPW1W2jw4j8": { + "title": "Econometrics", + "description": "Econometrics is the application of statistical methods to economic data. It is a branch of economics that aims to give empirical content to economic relations. 
More precisely, it is \"the quantitative analysis of actual economic phenomena based on the concurrent development of theory and observation, related by appropriate methods of inference.\" Econometrics can be described as something that allows economists \"to sift through mountains of data to extract simple relationships.\"", + "links": [] + }, + "y6xXsc-uSAmRDnNuyhqH2": { + "title": "Pre-requisites of Econometrics", + "description": "", + "links": [ + { + "title": "10 Fundamental Theorems for Econometrics", + "url": "https://bookdown.org/ts_robinson1994/10EconometricTheorems/", + "type": "article" + } + ] + }, + "h19k9Fn5XPh3_pKEC8Ftp": { + "title": "Regression, Timeseries, Fitting Distributions", + "description": "", + "links": [ + { + "title": "Blockchain.com Data Scientist TakeHome Test", + "url": "https://github.com/stalkermustang/bcdc_ds_takehome", + "type": "opensource" + }, + { + "title": "10 Fundamental Theorems for Econometrics", + "url": "https://bookdown.org/ts_robinson1994/10EconometricTheorems/", + "type": "article" + }, + { + "title": "Dougherty Intro to Econometrics 4th edition", + "url": "https://www.academia.edu/33062577/Dougherty_Intro_to_Econometrics_4th_ed_small", + "type": "article" + }, + { + "title": "Econometrics: Methods and Applications", + "url": "https://imp.i384100.net/k0krYL", + "type": "article" + }, + { + "title": "Kaggle - Learn Time Series", + "url": "https://www.kaggle.com/learn/time-series", + "type": "article" + }, + { + "title": "Time series Basics : Exploring traditional TS", + "url": "https://www.kaggle.com/code/jagangupta/time-series-basics-exploring-traditional-ts#Hierarchical-time-series", + "type": "article" + }, + { + "title": "How to Create an ARIMA Model for Time Series Forecasting in Python", + "url": "https://machinelearningmastery.com/arima-for-time-series-forecasting-with-python", + "type": "article" + }, + { + "title": "11 Classical Time Series Forecasting Methods in Python", + "url": 
"https://machinelearningmastery.com/time-series-forecasting-methods-in-python-cheat-sheet/", + "type": "article" + }, + { + "title": "Linear Regression for Business Statistics", + "url": "https://imp.i384100.net/9g97Ke", + "type": "article" + } + ] + }, + "XLDWuSt4tI4gnmqMFdpmy": { + "title": "Coding", + "description": "Programming is a fundamental skill for data scientists. You need to be able to write code to manipulate data, build models, and deploy solutions. The most common programming languages used in data science are Python and R. Python is a general-purpose programming language that is easy to learn and has a large number of libraries for data manipulation and machine learning. R is a programming language and free software environment for statistical computing and graphics. It is widely used for statistical analysis and data visualization.", + "links": [] + }, + "MVrAqizgkoAs2aghN8TgV": { + "title": "Learn Python Programming Language", + "description": "", + "links": [ + { + "title": "Kaggle — Python", + "url": "https://www.kaggle.com/learn/python", + "type": "article" + }, + { + "title": "Google's Python Class", + "url": "https://developers.google.com/edu/python", + "type": "article" + }, + { + "title": "Explore top posts about Python", + "url": "https://app.daily.dev/tags/python?ref=roadmapsh", + "type": "article" + } + ] + }, + "StBCykpzpM4g9PRFeSNXa": { + "title": "Data Structures and Algorithms (Python)", + "description": "", + "links": [ + { + "title": "Learn Algorithms", + "url": "https://leetcode.com/explore/learn/", + "type": "article" + }, + { + "title": "Leetcode - Study Plans", + "url": "https://leetcode.com/studyplan/", + "type": "article" + }, + { + "title": "Algorithms Specialization", + "url": "https://imp.i384100.net/5gqv4n", + "type": "article" + } + ] + }, + "Im0tXXn3GC-FUq2aMHgwm": { + "title": "Learn SQL", + "description": "", + "links": [ + { + "title": "SQL Tutorial", + "url": "https://www.sqltutorial.org/", + "type": "article" + }, 
+ { + "title": "Explore top posts about SQL", + "url": "https://app.daily.dev/tags/sql?ref=roadmapsh", + "type": "article" + } + ] + }, + "l1027SBZxTHKzqWw98Ee-": { + "title": "Exploratory Data Analysis", + "description": "Exploratory Data Analysis (EDA) is an approach to analyzing data sets to summarize their main characteristics, often with visual methods. EDA is used to understand what the data can tell us beyond the formal modeling or hypothesis testing task. It is a crucial step in the data analysis process.", + "links": [] + }, + "JaN8YhMeN3whAe2TCXvw9": { + "title": "Data understanding, Data Analysis and Visualization", + "description": "", + "links": [ + { + "title": "Exploratory Data Analysis With Python and Pandas", + "url": "https://imp.i384100.net/AWAv4R", + "type": "article" + }, + { + "title": "Exploratory Data Analysis for Machine Learning", + "url": "https://imp.i384100.net/GmQMLE", + "type": "article" + }, + { + "title": "Exploratory Data Analysis with Seaborn", + "url": "https://imp.i384100.net/ZQmMgR", + "type": "article" + } + ] + }, + "kBdt_t2SvVsY3blfubWIz": { + "title": "Machine Learning", + "description": "Machine learning is a field of artificial intelligence that uses statistical techniques to give computer systems the ability to \"learn\" (e.g., progressively improve performance on a specific task) from data, without being explicitly programmed. The name machine learning was coined in 1959 by Arthur Samuel. Evolved from the study of pattern recognition and computational learning theory in artificial intelligence, machine learning explores the study and construction of algorithms that can learn from and make predictions on data – such algorithms overcome following strictly static program instructions by making data-driven predictions or decisions, through building a model from sample inputs. 
Machine learning is employed in a range of computing tasks where designing and programming explicit algorithms with good performance is difficult or infeasible; example applications include email filtering, detection of network intruders, and computer vision.", + "links": [] + }, + "FdBih8tlGPPy97YWq463y": { + "title": "Classic ML (Sup., Unsup.), Advanced ML (Ensembles, NNs)", + "description": "", + "links": [ + { + "title": "Repository of notes, code and notebooks in Python for the book Pattern Recognition and Machine Learning by Christopher Bishop", + "url": "https://github.com/gerdm/prml", + "type": "opensource" + }, + { + "title": "Open Machine Learning Course", + "url": "https://mlcourse.ai/book/topic01/topic01_intro.html", + "type": "article" + }, + { + "title": "Coursera: Machine Learning Specialization", + "url": "https://imp.i384100.net/oqGkrg", + "type": "article" + }, + { + "title": "Pattern Recognition and Machine Learning by Christopher Bishop", + "url": "https://www.microsoft.com/en-us/research/uploads/prod/2006/01/Bishop-Pattern-Recognition-and-Machine-Learning-2006.pdf", + "type": "article" + }, + { + "title": "Explore top posts about Machine Learning", + "url": "https://app.daily.dev/tags/machine-learning?ref=roadmapsh", + "type": "article" + } + ] + }, + "cjvVLN0XjrKPn6o20oMmc": { + "title": "Deep Learning", + "description": "Deep Learning\n-------------\n\nDeep learning is a subset of machine learning that deals with algorithms inspired by the structure and function of the brain called artificial neural networks. Deep learning is a key technology behind driverless cars, enabling them to recognize a stop sign, or to distinguish a pedestrian from a lamppost. It is the key to voice control in consumer devices like phones, tablets, TVs, and hands-free speakers. Deep learning is getting lots of attention lately and for good reason. 
It’s achieving results that were not possible before.", + "links": [] + }, + "eOFoGKveaHaBm_6ppJUtA": { + "title": "Fully Connected, CNN, RNN, LSTM, Transformers, TL", + "description": "", + "links": [ + { + "title": "The Illustrated Transformer", + "url": "https://jalammar.github.io/illustrated-transformer/", + "type": "article" + }, + { + "title": "Attention is All you Need", + "url": "https://arxiv.org/pdf/1706.03762.pdf", + "type": "article" + }, + { + "title": "Deep Learning Book", + "url": "https://www.deeplearningbook.org/", + "type": "article" + }, + { + "title": "Deep Learning Specialization", + "url": "https://imp.i384100.net/Wq9MV3", + "type": "article" + } + ] + }, + "Qa85hEVe2kz62k9Pj4QCA": { + "title": "MLOps", + "description": "MLOps is a practice for collaboration and communication between data scientists and operations professionals to help manage production ML lifecycle. It is a set of best practices that aims to automate the ML lifecycle, including training, deployment, and monitoring. 
MLOps helps organizations to scale ML models and deliver business value faster.", + "links": [] + }, + "uPzzUpI0--7OWDfNeBIjt": { + "title": "Deployment Models, CI/CD", + "description": "", + "links": [ + { + "title": "Machine Learning Engineering for Production (MLOps) Specialization", + "url": "https://imp.i384100.net/nLA5mx", + "type": "article" + }, + { + "title": "Full Stack Deep Learning", + "url": "https://fullstackdeeplearning.com/course/2022/", + "type": "article" + }, + { + "title": "Explore top posts about CI/CD", + "url": "https://app.daily.dev/tags/cicd?ref=roadmapsh", + "type": "article" + } + ] + } +} \ No newline at end of file diff --git a/public/roadmap-content/android.json b/public/roadmap-content/android.json new file mode 100644 index 000000000..b3423de9d --- /dev/null +++ b/public/roadmap-content/android.json @@ -0,0 +1,587 @@ +{ + "Suws-7f_6Z1ChpfcnxX2M": { + "title": "Pick a Language", + "description": "When developing for Android, one crucial step is picking a programming language to use. There are multiple languages you can choose from, but the three most popular ones are Java, Kotlin, and C++. Java is the original language used for Android development and is widely used, making it a good choice for beginners due to the wealth of resources and developer communities. Kotlin is a newer option that is fully supported by Google and Android Studio, and addresses many of the drawbacks of Java, which makes it a popular choice for many developers. Lastly, C++ can be used in Android development through the Android Native Development Kit (NDK), though it comes with more complexities and is usually not recommended for beginners. 
Your selection might depend on your existing familiarity with these languages, the complexity and specific requirements of your project, and the resources or libraries you wish to use.", + "links": [] + }, + "qIzUv8-GgQnkqChEdgD50": { + "title": "Kotlin", + "description": "`Kotlin` is a cross-platform, statically typed general-purpose programming language with type inference. Developed by JetBrains, the makers of the world’s leading IDEs, Kotlin has a syntax, which is more expressive and concise. This allows for more readable and maintainable code. It is fully interoperable with Java and comes with no limitations. It can be used almost everywhere Java is used today, for server-side development, Android apps, and much more. Kotlin introduces several improvements for programmers over Java, which makes it a preferred choice for many developers. With more concise code base and modern programming concept support - it's certainly a future of Android app development.\n\nUse following resources to learn more:", + "links": [ + { + "title": "Learn Kotlin - w3schools", + "url": "https://www.w3schools.com/kotlin/", + "type": "article" + }, + { + "title": "Explore top posts about Kotlin", + "url": "https://app.daily.dev/tags/kotlin?ref=roadmapsh", + "type": "article" + }, + { + "title": "Learn Kotlin Programming for Beginners - Free Code Camp", + "url": "https://youtu.be/EExSSotojVI?si=4VPW8ZHa2UMX0HH1", + "type": "video" + } + ] + }, + "RBABbkzD_uNFwEO-hssZO": { + "title": "Java", + "description": "Java is a popular programming language used for Android development due to its robustness and ease of use. Its object-oriented structure allows developers to create modular programs and reusable code. The language was built with the philosophy of \"write once, run anywhere\" (WORA), meaning compiled Java code can run on all platforms without the need for recompilation. 
Android’s API and core libraries are primarily written in Java, therefore understanding Java is fundamental in creating diverse and powerful Android apps. Java is a statically-typed language, which can be beneficial for detecting errors at compile-time rather than at runtime. Oracle, who owns Java, provides comprehensive documentation and support for the language.", + "links": [] + }, + "HlUUGj3dOZ68t4gIjerXh": { + "title": "The Fundamentals", + "description": "\"The Fundamentals\" of Android primarily concentrate on 5 components; Activities, Services, Broadcast Receivers, Content Providers, and Intents. **Activities** are essentially what you see on your screen; each screen in an app is a separate activity. **Services** run in the background to perform long-running operations or to perform work for remote processes. They do not provide a user interface. **Broadcast Receivers** respond to broadcast messages from other applications or from the system itself. These messages are often in the form of Intents. **Content Providers** manage a shared set of app data that other apps can query or modify, through a structured interface. Finally, **Intents** are messaging objects which facilitate the communication between the aforementioned components. Understanding these five core concepts is key to mastering Android fundamentals.", + "links": [] + }, + "ZRGsokU313Ky-anWbWK6q": { + "title": "Development IDE", + "description": "\"Development IDE\" refers to Development Integrated Development Environment that is vital for Android App development. For Android, the primary IDE is **Android Studio**. This official IDE from Google includes everything you need to build an Android app, such as a code editor, code analysis tools, emulators for all of Android's supported OS versions and hardware configurations, and more. Other popular IDEs include **Eclipse** (with an Android Developer Tools plugin), **NetBeans**, and **IntelliJ IDEA**. 
Each of these IDEs tends to have its own set of specialized features, but all are designed to provide the tools and services needed for Android development. The choice of IDE often depends on the specific needs and preferences of the developer or development team.", + "links": [] + }, + "jl1FsQ5-WGKeFyaILNt_p": { + "title": "Basics of Kotlin", + "description": "Kotlin is a statically-typed programming language that runs on the Java Virtual Machine (JVM) and can be used to develop all types of Android apps. It is Google's preferred language for Android app development. Kotlin's syntax is more concise than Java, which means less code to write and read, and fewer opportunities for errors. It provides several high-level features like lambdas, coroutines and higher order functions that help making the code more clean and understandable. Key basics of Kotlin include control flow statements (if, when, for, while), variables (mutable and non-mutable), null safety, classes and objects, inheritance, interfaces, and exception handling. While learning Kotlin, experience with Java will certainly be helpful, but it's not a prerequisite.", + "links": [] + }, + "j69erqfosSZMDlmKcnnn0": { + "title": "Basics of OOP", + "description": "In Android development, understanding the `Basics of Object-Oriented Programming (OOP)` is crucial. OOP is a programming paradigm that uses \"Objects\" - entities that contain both data and functions that manipulate the data. Key concepts include `Classes`, which are blueprints from which objects are created; `Objects`, instances of a class; `Inheritance`, where one class acquires properties from another; `Polymorphism`, the ability of an object to take many forms; `Abstraction`, showing only necessary details and hiding implementation from the user; and `Encapsulation`, the concept of wrapping data and the methods that work on data within one unit. 
By understanding these fundamentals, you can create more efficient and effective Android apps.", + "links": [] + }, + "cNeT1dJDfgn0ndPzSxhSL": { + "title": "Data Structures and Algorithms", + "description": "In Android, **data structures** are primarily used to collect, organize and perform operations on the stored data more effectively. They are essential for designing advanced-level Android applications. Examples include Array, Linked List, Stack, Queue, Hash Map, and Tree.\n\nMeanwhile, **algorithms** are a sequence of instructions or rules for performing a particular task. In Android, algorithms can be used for data searching, sorting, or performing complex business logic. Some commonly used algorithms are Binary Search, Bubble Sort, Selection Sort, etc. A deep understanding of data structures and algorithms is crucial in optimizing the performance and the memory consumption of the Android applications.", + "links": [] + }, + "FVxNjbDBxgf6vkZWw1Awt": { + "title": "What is and how to use Gradle?", + "description": "**Using Gradle**: Gradle is a powerful build system used in Android development that allows you to define your project and dependencies, and distinguish between different build types and flavors. Gradle uses a domain-specific language (DSL) which gives developers almost complete control over the build process. When you trigger a build in Android Studio, Gradle is the tool working behind the scenes to compile and package your app. It looks at the dependencies you declared in your build.gradle files and create a build script accordingly. 
Using Gradle in Android development requires continuous editing of the build.gradle files to manage app dependencies, build variants, signing configurations, and other essential aspects related to building your app.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Get going with Gradle - PDF", + "url": "https://assets.gradlehero.com/get-going-with-gradle/get-going-with-gradle-book.pdf", + "type": "article" + }, + { + "title": "Explore top posts about Gradle", + "url": "https://app.daily.dev/tags/gradle?ref=roadmapsh", + "type": "article" + }, + { + "title": "Introduction to Gradle for Complete Beginners - 25minutes", + "url": "https://youtu.be/-dtcEMLNmn0?si=NuIP-3wNpUrxfTxA", + "type": "video" + }, + { + "title": "Gradle Course for Beginners - 55minutes", + "url": "https://www.youtube.com/watch?v=R6Z-Sxb837I", + "type": "video" + } + ] + }, + "5m_7DvInF8C_4Ml1xVI6L": { + "title": "Create a Basic Hello World App", + "description": "The \"Hello World\" app is a simple project that you can build when you're getting started with Android development. It's often the first program that beginners learn to build in a new system. It's usually considered the simplest form of program that displays a message to the user - \"Hello, World!\" In Android, this involves creating a new project from the Android Studio and setting up the main activity. The main activity file is primarily written in Java or Kotlin where you can code for the display message, while the layout design view can be created in the XML file.\n\nResources:", + "links": [ + { + "title": "Create your first android app", + "url": "https://developer.android.com/codelabs/basic-android-kotlin-compose-first-app", + "type": "article" + } + ] + }, + "5s1CqsYCOXjNroDHaGKGa": { + "title": "Version Control", + "description": "_Version Control_ is a system that records changes to a file or set of files over time so that you can recall specific versions later. 
An essential tool for software development, it helps to track changes, enhance collaboration, and manage different versions of a project. Two common types of version control systems are Centralized Version Control System (CVCS) and Distributed Version Control System (DVCS). CVCS uses a central server to store all versions of a project, with users getting snapshots from that server. Examples include SVN and Perforce. On the other hand, DVCS allows multiple developers to work on a single project simultaneously. Each user has a complete backup of all versions of the work. Examples include Git and Mercurial.", + "links": [] + }, + "rqSZ2ATeHbOdIQE9Jlb0B": { + "title": "Git", + "description": "`Git` is a highly efficient and flexible distributed version control system that was created by Linus Torvalds, the creator of Linux. It allows multiple developers to work on a project concurrently, providing tools for non-linear development and tracking changes in any set of files. Git has a local repository with a complete history and version-tracking capabilities, allowing offline operations, unlike SVN. It ensures data integrity and provides strong support for non-linear development with features such as branching and merging. Yet, Git has a high learning curve and can be complex for beginners to understand the command line interface. Furthermore, Git also allows you to create `tags` to reference certain points in your history for milestone or version releases.", + "links": [] + }, + "H-2eb8fLwz8IKYXbeSVKK": { + "title": "GitHub", + "description": "**GitHub** is a cloud-based hosting service for managing software version control using Git. It provides a platform for enabling multiple developers to work together on the same project at the same time. With GitHub, codes can be stored publicly, allowing for collaboration with other developers or privately for individual projects. Key features of GitHub include code sharing, task management, and version control, among others. 
GitHub also offers functionalities such as bug tracking, feature requests, and task management for the project. For Android development, it supports Gradle-based android projects, plugins for Android Studio and JetBrains IntelliJ IDEA, making version control operations more user-friendly.", + "links": [] + }, + "5LFZdUiFYYU_1sYsouyan": { + "title": "Bitbucket", + "description": "Bitbucket is a web-based hosting service that is owned by Atlassian. Bitbucket uses either Mercurial or Git revision control systems, allowing users to manage and maintain their code. This platform is mainly used for code and code review. Bitbucket provides both commercial plans and free accounts. It offers free accounts with an unlimited number of private repositories (which can have up to five users in the case of free accounts) as of September 2010. It originally offered only Mercurial support. Bitbucket integrates with other Atlassian software like JIRA, HipChat, Confluence and Bamboo.", + "links": [] + }, + "Q47BtQphp59NkkZoeNXmP": { + "title": "GitLab", + "description": "`Gitlab` is a web-based DevOps lifecycle tool which provides a Git-repository manager, along with continuous integration and deployment pipeline features, using an open-source license, developed by GitLab Inc. Users can manage and create their software projects and repositories, and collaborate on these projects with other members. `Gitlab` also allows users to view analytics and open issues of their project. It stands next to other version control tools like `GitHub` and `Bitbucket`, but comes with its own set of additional features and nuances. For Android development, `Gitlab` can be particularly useful owing to its continuous integration and deployment system which can automate large parts of the app testing and deployment.", + "links": [] + }, + "5Li8J5iR_ZuyIlxX0LYei": { + "title": "App Components", + "description": "Android apps are primarily made up of five different types of components:\n\n1. 
**Activities**: These are individual screens that a user can interact with. Any UI action like touching a button or swiping a screen will usually take place within an activity.\n \n2. **Services**: Unlike activities, services run in the background and don't have a user interface. They’re used for repetitive or long running operations, like playing music or pulling in a feed of data from a server.\n \n3. **Broadcast Receivers**: These are event listeners. The Android operating system uses them to respond to system-wide events.\n \n4. **Content Providers**: They manage and share app data with other apps installed on the device. For security, data is not generally shared across apps.\n \n5. **Intents**: These serve as messages or commands to the Android system. They're used to signal to the Android system that certain events have occurred.\n \n\nEach app component is designed to serve different purposes and to have a well-defined lifecycle which defines how the component is created and destroyed.", + "links": [] + }, + "nwuVlPmzwJ17mtVQ8Hi9w": { + "title": "Activity", + "description": "`Activity` in Android is a crucial component that represents a single screen with a user interface. It is just like a window in a desktop application. Android apps are typically made up of one or more activities, each having its interface which allows user interaction. When an app is launched, an instance of `Activity` is created, starting the lifecycle of that app. Every activity has its own lifecycle (create, start, resume, pause, stop, destroy) that keeps the state of a user's progress, and Android manages these states automatically. 
Activities can also have `Intent`, which allows them to interact with other components, such as starting another activity or getting a result from that activity.", + "links": [] + }, + "PcHmU1c9hqKyzSjwlRPHk": { + "title": "Services", + "description": "**Services**: A service in Android is an app component that performs operations in the background without a user interface. It can be started by an application component, like an activity, and it will continue to run in the background even if the user switches to another application. There are two types of services in Android, namely, `Started Service` and `Bound Service`. A `Started Service` is used to perform a single operation, such as downloading a large file. On the other hand, a `Bound Service` offers a client-server interface that allows components to interact with the service, send requests, receive results, and even perform interprocess communication (IPC).", + "links": [] + }, + "nkcdjrswv0WCzUs48BAt9": { + "title": "Content Provider", + "description": "A **Content Provider** in Android is a key component that allows applications to securely share data with other applications. They act as a layer between databases and applications to enhance data security. Content providers manage access to a structured set of data by handling data transactions, implementing data security, and maintaining isolation between applications. They provide an abstracted interface which is used to access data, while the underlying storage method (Like SQLite database, web, or any other method) remains hidden. This mechanism aids in retrieving data from a non-relational source in a structured way. They're used primarily when data needs to be shared between multiple applications, not just within a single application.", + "links": [] + }, + "tFuAToid1Fkmu96BDtW7K": { + "title": "Broadcast Receiver", + "description": "**Broadcast Receivers** in Android are components that respond to system-wide broadcast announcements. 
They can be registered to respond to a specific type of broadcasts or implement a user-defined broadcast. While you can initiate a broadcast from your app, they are generally used for receiving system notifications or communicating with other applications. However, keep in mind that they cannot display a user interface, but they can start activities if necessary, which do have a user interface. A `BroadcastReceiver` class must override the `onReceive()` method where each message is received as an `Intent` object parameter.", + "links": [] + }, + "hv_9imIQpthxEaMLXEUHI": { + "title": "Intent", + "description": "\"Intent\" in Android is a software mechanism used for late runtime binding between components, such as activities, content providers, and services. It is essentially a passive data structure holding an abstract description of an operation that the Android system is requested to perform. The Intent can be explicit, in which you specify the component to start or implicit, where you declare a general action to perform, allowing a component from another app to handle it. Implicit intents are often used to request another app's functionality, such as showing a user a location on a map or taking a photo. \"Intent Filters\" are then used by the components to advertise their capabilities to handle different types of intents.", + "links": [] + }, + "FVg438cVBBzqJFkGWVbQM": { + "title": "Activity LifeCycle", + "description": "The **Activity Lifecycle** in Android represents a series of states or events that an activity can go through from its creation to its destruction. The primary states or events are `onCreate()`, `onStart()`, `onResume()`, `onPause()`, `onStop()`, `onDestroy()`, and `onRestart()`. The method `onCreate()` is called when the activity is first created, followed by `onStart()` when the activity becomes visible to the user. The `onResume()` method executes when the user starts interacting with the application. 
`onPause()` and `onStop()` methods are invoked when the application is no longer in the foreground or visible to the user. The `onDestroy()` method is used when the activity is being completely removed from the memory. The `onRestart()` method is called after the system stops the activity and is about to start it again. The proper handling of these states ensures the efficient use of resources and a smooth user experience.", + "links": [] + }, + "oUjetA2eduvQIeLcQlLcu": { + "title": "State Changes", + "description": "In Android, an \"Activity\" is a crucial component that represents a single screen with a user interface. One or more active activities make up an Application. These activities can go through different states in their lifecycle, often due to user interaction or system interruption. The primary states of an Activity include `Created`, `Started`, `Resumed`, `Paused`, `Stopped`, `Restarted`, and `Destroyed`. The \"Created\" state occurs when an activity instance is being created. The \"Started\" state is when the activity is visible to the user, while \"Resumed\" is when the activity is interacting with the user. An activity is \"Paused\" when it loses focus but is partly visible, \"Stopped\" when it's not visible, \"Restarted\" when the activity is about to be started, and \"Destroyed\" when the activity is finished or the system is temporarily destroying it.", + "links": [] + }, + "-O-G9bg36ut8NnZcdOaop": { + "title": "Tasks & Backstack", + "description": "The **tasks backstack** in Android refers to the way Android manages and arranges tasks in a stack-like structure. Every task has a stack of activities, which is referred to as the task's back stack. The activities are placed in the order they are opened. When a new activity is started, it is placed at the top of the stack and becomes the running activity, while the previous activity is paused and put into the back stack. 
When you press the back button, the current activity is destroyed and the activity at the top of the back stack becomes active again. Android defines how to navigate between tasks and activities using this back stack concept.", + "links": [] + }, + "gGdz3j33x0gfrFDp_rw8Z": { + "title": "Implicit Intents", + "description": "In Android development, **Implicit Intents** do not specify the target component explicitly like Explicit Intents. Instead, they allow the system to find a suitable component matching the Intent description to handle the request. The system will find an activity that can handle this intent by comparing the `<intent-filter>` section in the `AndroidManifest.xml` of all apps installed on the device against the Implicit Intent. An ideal example of an implicit intent is opening a URL. You do not need to know the specific activity that can handle this request, you just declare an intent to view a web page and Android system will select the suitable app that can open the URL.", + "links": [] + }, + "TmIeCF3xVCe5Sy3ITmM31": { + "title": "Explicit Intents", + "description": "**Explicit Intents** are primarily used within an application's own boundaries. In explicit intents you specify the component that needs to respond to the intent. Therefore, the target component must be specified by calling methods such as `setComponent(ComponentName)`, `setClass(Context, Class)`, or `setClassName(String, String)`. This means that explicit intents are typically used for launching activities, broadcasting messages, starting services within the app. Explicit intents are not resolved by the system but are passed to the component identified in the intent.", + "links": [] + }, + "b-sfh6NoS-APqaNKm5L5S": { + "title": "Intent Filters", + "description": "`Intent Filters` in Android are essential components of the Android system where you can declare the capabilities of your activities, services, and broadcast receivers. 
An intent filter is an expression found in your app's manifest file, defined in the `<intent-filter>` XML element. Android uses these filters to determine the appropriate components for incoming intents, which can be either explicit or implicit. Your app's ability to respond to intents depends on the filters you define. The filters are a set of conditions comprised of `action`, `category`, and `data` which your activity or service is able to perform. If the incoming `Intent` matches with defined `Intent Filters`, Android system will permit that `Intent` to your Component (Activity, Service, or Broadcast Receiver).", + "links": [] + }, + "jePGzTejFe4ryA5qFFmjl": { + "title": "Design & Architecture", + "description": "In Android, **Design Architecture** refers to structuring the code in a way that increases its readability, maintainability, and testability. There are several ways to design architecture like [Model-View-Controller (MVC)](https://en.wikipedia.org/wiki/Model%E2%80%93view%E2%80%93controller), [Model-View-Presenter (MVP)](https://en.wikipedia.org/wiki/Model%E2%80%93view%E2%80%93presenter), [Model-View-ViewModel (MVVM)](https://en.wikipedia.org/wiki/Model%E2%80%93view%E2%80%93viewmodel), and [Model-View-Intent (MVI)](https://www.raywenderlich.com/817602-mvi-architecture-for-android-tutorial-getting-started). Each of these define the interaction between the data, the logic, and the UI layers. Google’s recommended architectural pattern is [Android Architecture Components](https://developer.android.com/topic/libraries/architecture) which follow the principles of the MVVM pattern. Bear in mind that an architecture is not a rigid structure that fits all solutions. 
Rather, it serves as a guideline and can be tweaked as necessary.", + "links": [] + }, + "Dp2DOX10u2xJUjB8Okhzh": { + "title": "Frame", + "description": "", + "links": [] + }, + "U8iMGGOd2EgPxSuwSG39Z": { + "title": "Linear", + "description": "", + "links": [] + }, + "yE0qAQZiEC9R8WvCdskpr": { + "title": "Relative", + "description": "", + "links": [] + }, + "3fFNMhQIuuh-NRzSXYpXO": { + "title": "Constraint", + "description": "", + "links": [] + }, + "xIvplWfe-uDr9iHjPT1Mx": { + "title": "RecyclerView", + "description": "RecyclerView is the most commonly used and powerful list management tool in Android development, which makes it easy to efficiently display large sets of data. You supply the data and define how each item looks, and the RecyclerView library dynamically creates the elements when they're needed.\n\nAs the name implies, RecyclerView recycles those individual elements. When an item scrolls off the screen, RecyclerView doesn't destroy its view. Instead, RecyclerView reuses the view for new items that have scrolled onscreen. 
RecyclerView improves performance and your app's responsiveness, and it reduces power consumption.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Android Developers: Create dynamic lists with RecyclerView", + "url": "https://developer.android.com/develop/ui/views/layout/recyclerview", + "type": "article" + } + ] + }, + "znvZp24L-PcQwkSObtixs": { + "title": "TextView", + "description": "", + "links": [] + }, + "2iDJrxjXOt7o2fPp2HfRl": { + "title": "Fragments", + "description": "", + "links": [] + }, + "boMz0HZlMAsLdCZlpUo-H": { + "title": "EditText", + "description": "", + "links": [] + }, + "Mtx0bY0drmaTw8sCM5YTl": { + "title": "Dialogs", + "description": "", + "links": [] + }, + "WhfzFOUpm0DFEj7Oeq21R": { + "title": "Buttons", + "description": "", + "links": [] + }, + "BVgO9n7tGlVdiS72-hFSd": { + "title": "Toast", + "description": "", + "links": [] + }, + "A4rtNULX_MoV93IH1Lgqw": { + "title": "ImageView", + "description": "", + "links": [] + }, + "Z4Tbd5ClnqCXGPGG09F-G": { + "title": "Bottom Sheet", + "description": "", + "links": [] + }, + "EzLjX4iRT7AxkAOsJYnSU": { + "title": "ListView", + "description": "", + "links": [] + }, + "amTxz7mS98lkhOrNMJXG_": { + "title": "Drawer", + "description": "", + "links": [] + }, + "pEBpXv3Jf1AzBNHlvVrG8": { + "title": "Tabs", + "description": "", + "links": [] + }, + "Xn1VQ-xOT67ZfJJTM4r1p": { + "title": "Animations", + "description": "", + "links": [] + }, + "60Vm-77rseUqpMiFvp-dA": { + "title": "Jetpack Compose", + "description": "`Jetpack Compose` is a modern toolkit for building native Android UI. It simplifies and accelerates UI development on Android with less code, powerful tools, and intuitive Kotlin APIs. `Jetpack Compose` offers a declarative approach to designing UI, where you can simply describe what your UI should look like at any given point of your app’s state, and `Compose` takes care of updating the view hierarchy, making UI development more efficient. 
It also integrates well with existing Android apps, letting you adopt its benefits at your own pace.", + "links": [ + { + "title": "Explore top posts about Jetpack Compose", + "url": "https://app.daily.dev/tags/jetpack-compose?ref=roadmapsh", + "type": "article" + }, + { + "title": "Jetpack Compose Crash Course", + "url": "https://www.youtube.com/watch?v=6_wK_Ud8--0", + "type": "video" + } + ] + }, + "xV475jHTlLuHtpHZeXb7P": { + "title": "App Shortcuts", + "description": "App shortcuts in Android are designed to provide quick and convenient routes to specific actions or functions within your app from the device home screen. To use them, long-press an app's icon and a pop-up menu will appear with the available shortcuts. Depending on the app, you might be able to send a message, make a booking, navigate home, or perform some other specific task without having to first open the app and navigate to the desired function. These shortcuts can also be moved and placed individually on the home screen for even faster access.", + "links": [] + }, + "o5rzmnaQeiSh9ocvfJPpK": { + "title": "Navigation Components", + "description": "The **Navigation Components** are part of Android Jetpack and are designed to simplify the implementation of navigation in your Android app. These components help you follow best practices, handle deep linking, and provide a consistent user experience across deep and conditional navigation. They also automate many common tasks, such as handling Up and Back actions correctly across many different types of devices. 
The Navigation component consists of three key parts which are Navigation graph, NavHost, and NavController.", + "links": [] + }, + "Bz-BkfzsDHAbAw3HD7WCd": { + "title": "MVI", + "description": "", + "links": [] + }, + "pSU-NZtjBh-u0WKTYfjk_": { + "title": "MVVM", + "description": "", + "links": [] + }, + "aF_xFIqTjQbENtC7pkXvJ": { + "title": "MVP", + "description": "", + "links": [] + }, + "w1A6wPKSd3Yh2luuHV-aE": { + "title": "MVC", + "description": "", + "links": [] + }, + "ZF5XgO7I_J9928bD3CVXo": { + "title": "Repository Pattern", + "description": "In Android design architecture, the `Repository Pattern` separates the data sources from the rest of the application. It acts as a mediator between different data sources, such as persistent models, web services, or caches. Instead of having the network and database calls spread out throughout your ViewModel, they are encapsulated within a Repository class. This separation will make the code clean, easy to read and testable. It provides a simple API for data access, the rest of the app doesn't need to know where the data is coming from it just asks the repository.", + "links": [] + }, + "784fiIdKrQDlIm3VIiJQN": { + "title": "Builder Pattern", + "description": "The **Builder Pattern** in Android is used to construct complex objects step by step. It separates the construction of an object from its representation. The pattern involves a `Builder` class which is used to construct the `Product` class. The Builder class typically has a method for each part that may be used to construct a Product. It also has a method for returning the final product. The Builder pattern is useful in situations where you might have multiple ways to create an object or when an object requires a large number of parameters for its constructor.", + "links": [] + }, + "DeOxj6RzQBYfEWV-M1Ybm": { + "title": "Factory Pattern", + "description": "The **Factory Pattern** is part of the Creational Design Patterns. 
This pattern provides an interface for creating objects in a superclass, but allows subclasses to alter the type of objects that will be created. It introduces an abstraction layer between the client code and the concrete objects. Normally, this is achieved by using a factory method to create objects instead of using constructors. The instance of the class is usually created by a method, referred to as a `factory method`, which is either specified in an interface and implemented in implementing classes or implemented in a base class which may be optionally overridden by derived classes. The Factory Method is used when we want to provide users with a way to create an instance of a class from one of several possible classes that share a common super class.\n\nHere is a basic example of the Factory Pattern:\n\n    public abstract class Animal {\n    public abstract String makeSound();\n    }\n    \n    public class Dog extends Animal {\n    @Override\n    public String makeSound() {\n    return \"Woof\";\n    }\n    }\n    \n    public class Cat extends Animal {\n    @Override\n    public String makeSound() {\n    return \"Meow\";\n    }\n    }\n    \n    public class AnimalFactory {\n    public Animal createAnimal(String type) {\n    if (\"Dog\".equals(type)) {\n    return new Dog();\n    } else if (\"Cat\".equals(type)) {\n    return new Cat();\n    }\n    return null;\n    }\n    }\n    \n\nIn the code above, `AnimalFactory` is the factory class responsible for creating and returning instances of `Dog` and `Cat` classes.", + "links": [] + }, + "N_auRfGKkeIIc-qiHLkR_": { + "title": "Observer Pattern", + "description": "The **Observer Pattern** is a software design pattern in which an object, known as the subject, maintains a list of its dependants, called observers, and notifies them automatically of any state changes. This is usually done by calling one of their methods. It's mainly used for implementing distributed event handling systems and is viewed as a good practice to follow, making your design more robust, flexible, and scalable. 
The subject to be observed triggers events and observers react to the change or the event that they are listening to. In Android, observable libraries like `LiveData`, `RxJava`, `Flow`, and other reactive streams allow the implementation of observer pattern.", + "links": [] + }, + "W-WTIiQml8dLK6i_V69JK": { + "title": "Flow", + "description": "`Flow` in Android development is part of the Kotlin Coroutines library. It is a type that can emit multiple values sequentially, making it perfect for working with any data streams or any streams of events that aren't instantaneous. Like Observables, `Flow` is also based on the observer pattern, meaning it can emit values and these emissions can be observed and reacted to. However, `Flow` comes with built-in backpressure handling and the ability to transform, filter, or combine these flows in a sequence. Along with Coroutines, `Flow` encourages a more predictable and simplified concurrency design without callback hell problem.", + "links": [] + }, + "xk0vnWr7uESdzYRxwFjoK": { + "title": "RxJava", + "description": "RxJava, standing for Reactive Extensions for the JVM, is a library in the Java and Android ecosystem that allows developers to write asynchronous, event-based programs. It is developed according to reactive programming principles and it supports multi-threading operations. One can create data streams from just about anything - variables, data structures, inputs, etc. and these streams could be used with functional programming methods to process the data.\n\nIn Android, RxJava is often used in combination with Retrofit, to handle API calls or with Room, to handle tasks involving the database. This library provides a set of operators such as `map()`, `filter()`, `concat()`, `merge()`, `flatmap()` etc. to handle tasks such as chain asynchronous operations, perform computations, or transform data.\n\nThere are three important entities in RxJava - 'Observable', 'Observer' and 'Subscription'. 
'Observable' is a data stream that does some work, 'Observer' watches the 'Observable' and does something when the 'Observable’ changes, and 'Subscription' is what ties an 'Observer' to an 'Observable' - linking their lifecycles and allowing 'Observer’s to unsubscribe (to stop receiving updates) when they’re destroyed. In other words, 'Subscription' is what allows you to manage memory and prevent crashes due to leaks.", + "links": [] + }, + "7rbsp1o5bzIJP11BRIoeG": { + "title": "RxKotlin", + "description": "`RxKotlin` is a lightweight library that adds convenient extension functions to `RxJava`. It allows developers to use RxJava with Kotlin in a more idiomatic way, thus making code more readable and understandable. While `RxJava` is a Java-based implementation of Reactive Extensions, `RxKotlin` is essentially a wrapper that includes extension functions and other constructs to take full advantage of Kotlin's features, such as its syntactic simplicity and ease of use.", + "links": [] + }, + "TiokceMGU9caqiR0lbFYL": { + "title": "LiveData", + "description": "`LiveData` is a data holder class that can be observed within a given lifecycle. This means that an `Observer` can be added in a pair with a `LifecycleOwner`, and this observer will be notified about modifications of the `LiveData` object only if the associated `LifecycleOwner` is in active state. `LiveData` respects the lifecycle state of app components, such as activities, fragments, or services, and it only updates app-component observers that are in an active lifecycle state. Furthermore, `LiveData` automatically removes the observers when their associated `LifecycleOwner` moves to the `Destroyed` state. 
This combination of `LiveData` and `LifecycleOwner` helps you to manage appropriate and efficient updates because `LiveData` takes into consideration the lifecycle state of your app components.", + "links": [] + }, + "qtXM9K7wyjOFuEMlZrB3C": { + "title": "Dependency Injection", + "description": "`Dependency Injection` is a technique where an object does not need to create its own dependencies; instead, dependencies are provided (or injected) at runtime. This technique is highly beneficial in Android Development. It helps in creating loosely coupled and easily testable code. For example, the `Retrofit` instance that your application requires to make network calls can be created somewhere else and can be injected whenever required using libraries like `Dagger`, `Koin` or `Hilt`. The `ViewModel` instances can also be injected rather than being created in the required classes directly. Through dependency injection, plugins ensure the code becomes easier to change, understand, and maintain, hence, improving the quality of the code.", + "links": [] + }, + "CK7Ce632fdTgxeFsRUVvd": { + "title": "Dagger", + "description": "[Dagger](https://dagger.dev/) is a fully static, compile-time dependency injection framework for both Java and Android. It is an adaptation of an earlier version created by Square that's focused on simplicity and speed. Dagger's primary focus is on compile-time analysis of dependencies, code simplicity, and clarity. It uses annotations to define dependencies, thus aiding in easy readability and understanding of the code. Dagger also eliminates the use of reflection to inject dependencies, thus boosting performance. 
It offers custom scopes to control the lifespan of instances and ensures that dependencies are Singleton across the entire lifespan of certain scopes.", + "links": [] + }, + "UMqZ-jmXKDXKuu8bzqDH_": { + "title": "Koin", + "description": "[Koin](https://insert-koin.io) is a lightweight dependency injection framework developed specifically for Kotlin developers. It uses functional resolution only - no proxy, no code generation, no reflection, and offers simplicity by leveraging Kotlin's language features. While Koin is not related to the Android platform, it provides specific extensions enabling an efficient integration into your Android applications, including [Android Architecture Components](https://developer.android.com/topic/libraries/architecture/index.html) and [Kotlin Coroutines](https://kotlinlang.org/docs/reference/coroutines-overview.html), amongst others.", + "links": [] + }, + "ooo_k2k_vUBR_jQ7Ke6Et": { + "title": "Hilt", + "description": "Hilt is a dependency injection library for Android that reduces the boilerplate code that you would normally need to write when setting up manual dependency injection in your project. It is based on the Dagger library, but it simplifies the implementation process and streamlines the use of Dagger in Android apps. To set it up, you must annotate your Android classes and Hilt will automatically generate and provide the necessary dependencies for you. Moreover, Hilt provides predefined set of components tied to the Android lifecycle which helps to handle scoped instances.", + "links": [] + }, + "dc7k50PjCYZcElHhCk66p": { + "title": "Kodein", + "description": "`Kodein` is one of the recommended dependency injection frameworks suitable for Android development. This open-source Kotlin library simplifies the DI process by allowing developers to bind various types of dependencies, such as singleton, factory, or provider bindings into containers or `Kodein` modules. 
It promotes dependency declaration where used, instead of prior declaration. It follows a \"Define in Use\" principle that enables easy-to-use, easy-to-debug and very idiomatic Kotlin code. It's also worth noting that `Kodein` works hand in hand with Android's lifecycle and provides easy integration with popular libraries such as Android Architecture Components, leveraging their functionality.", + "links": [] + }, + "0fNQWRxst8xRstIfPaPO6": { + "title": "Storage", + "description": "On Android devices, storage refers to where your data such as apps, photos, videos, and music are saved. It can be categorized into two types: internal and external storage. Internal Storage is where data is stored that's tied directly to your app. This data is private by default and not accessible by other apps. External Storage, on the other hand, is a shared space where all apps can read and write data. It can be further subcategorized into Public and Private directories. Public directories are shared among all apps while private directories are specific to your app but can be accessed by other apps if they have the appropriate permissions. To control the access to these storage types, Android provides a file-based and a Scoped Storage framework. The latter limits apps' access to external storage and gives users more control over their data.", + "links": [] + }, + "PKql1HY0PLMfp50FRELXL": { + "title": "Shared Preferences", + "description": "Shared Preferences in Android are used to store data in key-value pairs. It works similar to a tiny database where you can save small pieces of data such as settings or the state of an application. When data is saved to Shared Preferences, it persists across user sessions, even if your application is killed or gets deleted. Data in Shared Preferences is not typically used for large amounts of data. 
To perform actions such as saving, retrieving, or editing data in Shared Preferences, you use an instance of `SharedPreferences.Editor`.", + "links": [] + }, + "GWq3s1iTxQOp1BstHscJ9": { + "title": "DataStore", + "description": "`DataStore` is a new and improved data storage solution by Android, meant to supersede `SharedPreferences`. It is important to understand that it comes in two different implementations: `Preferences DataStore` and `Proto DataStore`. `Preferences DataStore` uses key-value pairs similar to `SharedPreferences`, but it's more robust and handles runtime exceptions more efficiently. On the other hand, `Proto DataStore` uses custom data types to provide type safety. It lets you leverage the power of Protocol Buffers, a language-neutral, platform-neutral mechanism for serializing structured data, as the data storage format. Operating on data in `DataStore` is transactional, meaning that if an error occurs during an operation, all changes are rolled back, so the data remains in a consistent state.", + "links": [] + }, + "Bfg4So5RlI09zFNcburJd": { + "title": "Room Database", + "description": "\"Room\" is a persistence library introduced by Google that provides an abstraction layer over SQLite to help with robust database access while harnessing the full power of SQLite. Room supports the creation of databases and defines queries in compile-time-checked SQL strings. These databases belong to the data classes that you create representing your app's data. Room comprises three main components: **Database**, a container that holds your app's data tables; **Entity**, representing a table within the database; and **DAO (Data Access Object)**, containing SQL query methods to interact with the database.", + "links": [] + }, + "A4kdaj6AFueUgPI7hwKi5": { + "title": "File System", + "description": "The Android operating system uses a specific File System structure to store and manage files. 
It’s primarily based on the Linux File system, with some specific Android features. The File System includes several key directories that are used for specific purposes. For instance, directories such as `/system` hold system apps and firmware, while `/data` contains user data, settings and installed applications, and `/sdcard` usually represents an internal or external SD card for additional storage. It's worth mentioning directories like `/proc`, `/dev`, and `/sys` which are virtual file systems and house important system files. As an Android developer, understanding these directories can help you interact with Android's file system more effectively. Note that access to some of these directories may be restricted depending on system permissions.", + "links": [] + }, + "Yb6aKJMMCxU1QVltWg3Dr": { + "title": "Network", + "description": "In Android, the `Network` component offers vital capabilities that enable communication and interaction among users, between users and remote servers, and between users and cloud services. Android provides a variety of APIs and services to interact with networks. The primary APIs are the `ConnectivityManager`, `WifiManager`, `TelephonyManager`, and `BluetoothManager` APIs, among others. ConnectivityManager, for example, can tell you about network connectivity changes. The **Network** component also involves tools for network monitoring and testing, which can be useful for optimizing app performance and dealing with connectivity issues. Be aware, some network operations are subject to system imposed restrictions and require specific permissions.", + "links": [] + }, + "dDMRYiqrKyOBnRRQc8zsp": { + "title": "Retro", + "description": "Retrofit is a type-safe HTTP client for Android and Java. It's designed to connect your application with an API or a back-end web service. Retrofit uses annotations to encode details about the API's operations and requests, such as the HTTP method (GET, POST, PUT, DELETE, HEAD) and the query parameters. 
The main advantage of Retrofit over other similar libraries is in its simplicity and intuitiveness, and it efficiently handles all network calls.\n\n Retrofit retrofit = new Retrofit.Builder()\n .baseUrl(\"https://api.example.com\")\n .addConverterFactory(GsonConverterFactory.create())\n .build();\n \n ExampleService service = retrofit.create(ExampleService.class);\n Call call = service.exampleCall();\n \n\nThe `baseUrl()` is your API base URL. The `addConverterFactory()` specifies a factory to use for serialization and deserialization. In the example above, the Gson library will handle the conversion of JSON data. The `build()` call finishes the builder and returns the retrofit instance. Finally, `create()` generates an implementation of the `ExampleService` interface.", + "links": [] + }, + "5pVuwOItAhUxxJX8ysAsn": { + "title": "OkHttp", + "description": "`OkHttp` is an HTTP client that's extremely efficient, enabling several advanced features in Android app or other platforms that use Java. Developed by Square, it's built for high efficiency and capacity, simplifying many networking tasks, including connection pooling, response caching, and request retries. OkHttp allows seamless recovery from network issues, minimizing the loss of data. The library ensures fewer errors and higher quality of service by using the modern TLS encryption, extensible request and response models, and a fluent API for ease of use and integration.", + "links": [] + }, + "ww0fTbdXwVr-QIOClU7ng": { + "title": "Apollo-Android", + "description": "**Apollo Android** is a set of tools for using GraphQL with Android, made by the Apollo community developers. It's fully written in Kotlin and it was designed to seamlessly integrate with any Android app, making fetching data across network and handling data in the client-side a breeze. Apollo Android runs your queries and mutations and returns results as generated Kotlin types. 
It also normalizes your data and caches your results for further speed enhancements. It operates both on Android and Kotlin/JVM backend environment. It's also coroutines-first making handling concurrency easy and effective. To use Apollo Android, you'll set up the plugin, point it at your GraphQL schema, and write GraphQL queries.", + "links": [] + }, + "cFYZ2C7yNnY6NHKUNP2Z4": { + "title": "Asynchronism", + "description": "Asynchronism in Android is a practice that defines operations, which can run independently from the main operation without following the program's linear flow. The Android system uses threads to handle asynchronous processes. These threads function independently, ensuring that complex or time-consuming operations do not interfere with the user interface or other essential parts of the application. Android provides various tools for carrying out asynchronous tasks, such as `Handler`, `ThreadPoolExecutor`, `IntentService`, `AsyncTask`, and `Loader` etc. These tools provide ways to execute tasks on different threads and communicate the results back to the main thread.", + "links": [] + }, + "i_cKmTnGAYw8xpHwZHjAd": { + "title": "Coroutines", + "description": "In Android, `coroutines` refer to a concurrency design pattern that you can use on Android to simplify code that executes asynchronously. `Coroutines` provide a way to write asynchronous, non-blocking code in a natural, sequential manner. The fundamental building blocks of `coroutines` are `suspend` functions which are simply functions that can be paused and resumed at later times. They are the key to writing non-blocking asynchronous code and represent a single unit of asynchronous computation. 
This aspect of `coroutines` makes them useful for managing long-running tasks that might otherwise block the main thread and cause your application to become unresponsive.", + "links": [] + }, + "BeGrA5BDBMZP1Jy7n-wl-": { + "title": "Threads", + "description": "In Android, a `Thread` is a concurrent unit of execution. It has its own call stack, but can share its state with other threads in the same process, i.e., they can share the same memory area. They're primarily used in Android to perform operations in the background. One important aspect to note is that Android UI operations are not thread-safe, meaning they should always be done on the UI thread. Operations on `Threads` are typically managed through `Handler`, `Looper` and `MessageQueue` classes. Android also provides high-level constructs like `AsyncTask` and `Loader` for managing threads in relation to the UI.", + "links": [] + }, + "zXsNEyRbb8UpEOAUv6FpY": { + "title": "RxJava", + "description": "RxJava (Reactive Extensions for the JVM) is a powerful library for composing asynchronous and event-based programs using observable sequences in Java. If you are an Android developer, you might be familiar with callbacks and async tasks to perform long-running operations in the background. However, handling multiple async tasks and nested callbacks can produce code that's complicated to read and maintain. To alleviate such complexities, RxJava provides tools and methods to create, transform and chain Observable sequences in a clean and declarative manner. In other words, it provides a way to manage asynchronicity and concurrency in your code.", + "links": [] + }, + "4h37WBpYxRRyw9oH8ge7o": { + "title": "RxKotlin", + "description": "`RxKotlin` is a lightweight language extension to Java for Android development, enabling Android apps to be built using Kotlin with Reactivex. It brings the power of reactive programming paradigm to Kotlin, extending its capabilities for processing asynchronous streams of data. 
It allows you to express static (e.g., already known) or dynamic (e.g., future unknown) data streams, and perform various operations on them easily. Key concepts of `RxKotlin` include Observables, Observers and Schedulers. Observables represent the data streams, Observers interact with the data stream, and Schedulers determine on which thread operations are performed. RxKotlin helps manage background tasks, handle asynchronous data streams, and implement complex UIs, among others. It is especially beneficial for mobile development where UI thread and background thread coordination is essential.", + "links": [] + }, + "OAb_JD64uGm2tPoue7w6t": { + "title": "WorkManager", + "description": "`WorkManager` is an Android library introduced by Google to execute tasks in a predictable and reliable manner. It's designed for tasks that require guaranteed execution, even if the app has been closed or the device restarts. It is backwards compatible up to API 14 and uses JobScheduler for API 23 and above, whilst using a combination of BroadcastReceiver + AlarmManager for APIs 14 and up. Regardless of the device API level, WorkManager works for all Android devices. Three types of work are supported by WorkManager - OneTimeWorkRequest, PeriodicWorkRequest, and DelayedWorkRequest. You can have constraints too for your work execution like work only when there's Internet or work only when the device is in idle state. It also supports work-chaining where you can create a chain of works to be performed in a specific order.", + "links": [] + }, + "ZEdn2yy-IwHN3kOYr2ZbC": { + "title": "Common Services", + "description": "In Android, \"Common Services\" are functional units or components provided by the Android system for use by developers. These services include things such as Location Services (used to determine the device's geographical location), Notification Services (handles the display and management of user notifications), and Sensor Services (interacts with hardware sensors). 
Other common services are Network and Connectivity Services, Account Manager, and Data Storage Services among others. They simplify the development process by handling complex functionalities behind the scenes, allowing developers to focus on the application's specific needs.", + "links": [] + }, + "Xv0es_z64vh-QzivMeAT3": { + "title": "Authentication", + "description": "Firebase Authentication in Android provides backend services, easy-to-use SDKs, and ready-made UI libraries to authenticate users to your app. It supports authentication using passwords, popular federated identity providers like Google, Facebook and Twitter, and more. Firebase also facilitates integration of functionality to sign in, sign up, and reset password. Moreover, it can be used to secure your database by implementing role-based access to data and to provide personalized experience according to the user's unique identity.\n\nFirebase Authentication offers two methods to authenticate. These are using an `email/password` login provided by Firebase Authentication or a `federated identity provider` like Google or Facebook. It also covers token-based authentication by creating custom tokens or verifying ID tokens. In addition to this, Firebase Authentication works with Firebase's client SDKs for practical use and works for long-running server processes for some of your users.\n\nFirebase Authentication provides a full suite of capabilities even beyond authentication to make your life easier, which includes Security Rules for Cloud Storage and Cloud Firestore, Firebase Dynamic Links, and Firebase Invites.\n\nRemember to always keep your development environment and Firebase console synced for smooth development operations. 
Any changes to the authentication method in the Firebase console should be reflected in your app and vice versa.", + "links": [] + }, + "xB4evbD07n1VrHOIpowV4": { + "title": "Crashlytics", + "description": "`Crashlytics` is a lightweight, real-time crash reporter that helps you track, prioritize, and fix stability issues that dismantle your app's quality. It delivers fast and precise information about crashes and helps you gain insights into your app's performance so you can pinpoint the exact line of code your app crashed on. Crashlytics offers a comprehensive suite of features like crash analysis, issue aggregation, and tracking user activities that led to a crash. This tool is now a part of Google's `Firebase` platform but can also be used independently.", + "links": [] + }, + "1Tz-Shj_Tuz2U8llEAcLr": { + "title": "Remote Config", + "description": "Firebase Remote Config is a cloud service offered by Firebase. It lets you change the behavior and appearance of your app without requiring a new app release. By using Firebase Remote config, you can customize your app for different user segments, manage the core aspects of your app by modifying parameters externally, and conduct A/B tests to improve your app. It works efficiently by using default values that control the behavior and appearance of your app. When your app needs configuration information, it makes a request to the Firebase Remote Config server. If the server has updated values, these values replace the default ones, hence modifying the app's behavior or appearance according to your needs.", + "links": [] + }, + "e3vHFaFFMV7kI9q6yf5e9": { + "title": "Cloud Messaging", + "description": "Firebase Cloud Messaging (FCM) is a powerful, battery-efficient messaging service that enables you to send messages reliably and securely to your Android applications. It enables you to send two types of messages: \"notification messages\" and \"data messages\". 
Notification messages are primarily meant for user notifications; they are displayed automatically by the system when the app is in the background and are delivered to the app's message handler only when it is in the foreground. On the other hand, data messages can be handled even when the app is in the background or killed and can be used to send custom key-value pairs.
Furthermore, AdMob provides analytical tools to better understand your users and maximize ad revenue.", + "links": [] + }, + "m5rumeynEbS8T27pelr0-": { + "title": "Google Play Services", + "description": "_Google Play Services_ is a proprietary background service and API package for Android devices from Google. Operated by Google, the service provides core functionalities like authentication for Google services, synchronized contacts, access to all the latest user privacy settings, and higher quality, lower-powered location-based services. It also speeds up offline searches, provides more immersive maps, and improves gaming experiences. Google Play Services play a crucial role in the operation of various other applications, including those not developed by Google. Moreover, it improves the overall Android experience by speeding up offline searches, providing more detailed maps, enhancing gaming experiences, and more.", + "links": [] + }, + "S5FVF9rMgVSSDKXJW2GYb": { + "title": "Google Maps", + "description": "Google Maps is a crucial service on Android, offering powerful, user-friendly mapping technology and local business information. Google Maps features include street maps, satellite imagery, 360° panoramic views of streets (Street View), real-time traffic conditions (Google Traffic), and route planning for traveling by foot, car, bicycle and air, or public transportation. The service's interface includes a function to overlay various layers such as traffic density, public transit lines, and cycling paths. Google Maps for Android also provides an API, which allows developers to interface with and control certain aspects of the Google Maps service in their applications. 
This capability is subject to certain usage limits and requirements set by Google.
It also checks for a variety of potential bugs and code smells, including issues with formatting, naming conventions, exception handling, and more. Moreover, Detekt is highly configurable, allowing you to enable, disable, or modify the behavior of its checks to suit your project's needs.", + "links": [] + }, + "6KbSUAoT_jTudFoIbwMpA": { + "title": "Debugging", + "description": "\"Debugging\" is a critical step in the app development process. In Android development, it includes identifying and fixing errors, or bugs, in your code. You can debug Android apps using several tools and techniques. For example, Android Studio, the primary integrated development environment (IDE) for Android, comes with a built-in debugging tool, the Android Debug Bridge (ADB). This command-line tool allows you to communicate your device and perform various actions like installing and debugging apps. Android Studio also supports step-by-step debugging, where you can set breakpoints in your code and inspect the application state at those points. Additionally, you can use log messages to monitor the behavior of your application, which is especially useful for understanding the state of your app during runtime. Remember that before debugging, you need to build your app in debug mode.", + "links": [] + }, + "VFOD4JrV8kZ2583G3oT95": { + "title": "Timber", + "description": "`Timber` is a logging utility tool that has been specifically extended from the `Log` class of Android. It has been built to simplify the logging process while aiming to reduce the amount of boilerplate code the developer has to write. It was designed and is maintained by Jake Wharton, a renowned contributor in the Android Developer community. In Timber, each log message is directed to the next available logger, reducing the redundancy of manually assigning log tags. The simplicity of Timber is highlighted by its ability to log without defining any tag. 
Most importantly, Timber only logs messages in debug builds by default, avoiding potential data leaks in your production application.", + "links": [] + }, + "3i4g9ZWgLxKb2UMgRJi4Q": { + "title": "Leak Canary", + "description": "LeakCanary is a powerful open-source memory leak detection library for Android and Java. It is integrated into your app, and once you run your app, LeakCanary immediately starts watching for memory leaks and captures a memory dump if it detects one. After investigation, it will present a full stack trace to help you pinpoint the exact location of the memory leak. With the LeakCanary's user-friendly interface, you can then analyze the memory leak right in your app. The most recent version of LeakCanary also includes other improvements like automatic detection of leaks in Activity, Fragment, View, ViewModel, LiveData, etc.", + "links": [] + }, + "7RKN1FNtRE_BE6QeAQrKb": { + "title": "Chucker", + "description": "`Chucker` is an open-source debugging library created for Android applications. It has been designed to be easy to use and convenient for developers. This library intercepts and records all HTTP requests and responses inside your application, which helps to visualize and share this information in an understandable and easy-to-read format. Using Chucker's distinct features, you can inspect all the HTTP and HTTPS traffic going in and out of your app directly. In addition, it provides other nifty features such as a user-friendly interface to view the server's raw response. It's like having a built-in network inspector in your debugging tool, enabling you to solve network-related issues more efficiently.", + "links": [] + }, + "ACUJlDDR0jqEohsFzWEoQ": { + "title": "Jetpack Benchmark", + "description": "Jetpack Benchmark is a library within the Android Jetpack Suite that allows developers to quickly and accurately measure the performance of their apps. This library can help measure CPU, memory, and IO performance of code in Android apps. 
Developers can define some specific code paths to be benchmarked by wrapping the code in `BenchmarkRule.measureRepeated {}`. In addition, it automatically takes care of warmup, measures your code performance, and outputs benchmarking results to the Android Studio's logcat.", + "links": [] + }, + "ZOQm5OlzCA-h_yxywwDrW": { + "title": "Testing", + "description": "_**Android Testing**_ is a crucial part of the app development process. It involves validating the functionality, performance, usability, and consistency of your app before deploying it to the Play Store. There are two types of testing methods notably used: **Unit testing** and **Instrumentation testing**. Unit testing, as the name suggests, tests each unit or segment of your code separately. It doesn't require Android dependencies and hence, runs faster. Instrumentation testing, on another hand, requires Android dependencies and is slower. Instrumentation testing tests the UIs, simulates user interactions and validates the navigation between different parts of your app. Android provides built-in testing frameworks like `JUnit` for unit testing and `Espresso` for Instrumentation testing. These can effectively test different parts of your app, assuring its robustness.", + "links": [] + }, + "-ONSC-ImGSELbamKmjIlH": { + "title": "Espresso", + "description": "Espresso is a testing framework provided by Android to create UI tests for Android applications. It automatically synchronizes your test actions with the UI of your application, ensuring that your test will only proceed when the necessary UI activities have been completed. In Espresso, you can programmatically simulate user interactions like clicking buttons, typing text, or swiping screens, and then examine the UI's state to confirm it's as expected. Espresso tests can run on devices running Android 4.3 (API level 18) or higher. 
It's important to note that Espresso tests are written based on what the user can see on the screen.", + "links": [] + }, + "gvGAwjk_nhEgxzZ_c3f6b": { + "title": "JUnit", + "description": "JUnit is a popular testing framework for Java programming. It forms the basis for many other testing libraries and tools in the Android ecosystem, making it important for any Android developer to become familiar with. The basic use of JUnit involves annotations such as `@Test`, indicating methods that represent a single test case. Other useful features include `@Before` and `@After` which allow for setup and teardown processes to be defined clearly. Another powerful feature in JUnit is the ability to create parameterized tests, effectively running the same test multiple times with different inputs.", + "links": [] + }, + "kc6buUsLAeZeUb4Tk0apM": { + "title": "Distribution", + "description": "Distribution in Android refers to the methods and channels you can use to get your Android application into the hands of users. You can choose to distribute your app on the Google Play Store, which is the official app store for the Android operating system. This platform makes your app available to users in various countries around the world. Additionally, you can also opt to distribute your app through other third-party app stores or even your own website. Furthermore, Google provides a range of distribution options such as country targeting, device targeting, and staged rollouts, which can be customized according to your distribution strategy. Remember, when you submit your application for distribution, you must adhere to the respective app store's policy and content guidelines.", + "links": [] + }, + "T7q_quNaIAuGi96OdnDT1": { + "title": "Firebase Distribution", + "description": "Firebase Distribution is a development tool within the Google Firebase suite that allows you to share pre-release versions of your Android apps with your development team. 
It enables you to distribute your app binaries to multiple testers and teams to get feedback before the app's official launch. Firebase Distribution supports both Android and iOS applications and works in tandem with other features, like Firebase Crashlytics, to effectively manage the testing and debugging aspects of your application's lifecycle.", + "links": [] + }, + "HgRdgi2Hu4C8YLG5PXfoo": { + "title": "Google Playstore", + "description": "**Google Play Store** is the official distribution channel for Android apps and other digital media content. It is a global online software store developed and operated by Google. Developers submit their applications to Google Play through the Play Console where Google Play's automated systems scan for potentially malicious code and content violations, before they are published on the Play Store. Users can then browse, download, and use these applications on their Android devices or via the web. Purchases, downloads, and user feedback can be tracked via the Google Play Console. Owners of Android devices can also configure automatic updates for the applications they have installed from the store. This platform supports multiple languages and multiple forms of payment methods, making it accessible and customer-friendly.", + "links": [] + }, + "_FSlD_qTz5Xo0x3pB6sZI": { + "title": "Signed APK", + "description": "A **Signed APK** is a version of your app that you prepare for distribution in the Play Store or other Android markets. When you sign your app using a private key, you authenticate your identity as the developer of the app. It is a required step by the Android system that ensures only updates to the APK that are from the original developer will be accepted. The Android system refuses to install an app if it's not signed appropriately, thereby protecting users from potential security risks. 
Please make sure that you keep your private key in a safe place for future updates to your app; losing it would mean that you can no longer update your app.
Components encapsulate the HTML, CSS, and TypeScript code needed to define a specific part of a web page, making it easier to manage and maintain complex applications. Angular also includes a powerful dependency injection system, which helps manage the dependencies between different parts of an application and promotes code reusability. Additionally, Angular provides tools for routing, form handling, and state management, making it a versatile framework for building a wide range of web applications.\n\nLearn more from the following resources:", + "links": [ + { + "title": " Angular website", + "url": "https://angularjs.org/", + "type": "article" + } + ] + }, + "DE3cMpeRYuUPw2ADtfS-3": { + "title": "Angular Architecture", + "description": "Visit the following resources to learn more:", + "links": [ + { + "title": "Angular coding style guide", + "url": "https://angular.dev/style-guide", + "type": "article" + } + ] + }, + "EbFRcy4s6yzzIApBqU77Y": { + "title": "Setting up a New Project", + "description": "Visit the following resources to learn more:", + "links": [ + { + "title": "Installation", + "url": "https://angular.dev/installation", + "type": "article" + }, + { + "title": "Setting up the local environment and workspace", + "url": "https://angular.dev/tools/cli/setup-local", + "type": "article" + }, + { + "title": "Build your first Angular app", + "url": "https://angular.dev/tutorials/first-app", + "type": "article" + } + ] + }, + "hpShWwL0M57ZAzqkB4I8t": { + "title": "Angular and History", + "description": "Angular is a web framework that empowers developers to build fast, reliable applications.\n\nMaintained by a dedicated team at Google, Angular provides a broad suite of tools, APIs, and libraries to simplify and streamline your development workflow. 
Angular gives you a solid platform on which to build fast, reliable applications that scale with both the size of your team and the size of your codebase.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is Angular?", + "url": "https://angular.dev/overview", + "type": "article" + } + ] + }, + "kGnKzCkQCNFEdgCBRtNuW": { + "title": "Components", + "description": "Components are the main building block for Angular applications. Each component consists of:\n\n* An HTML template that declares what renders on the page\n* A TypeScript class that defines the behavior\n* A CSS selector that defines how the component is used in a template\n* Optionally, CSS styles applied to the template\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Anatomy of a component", + "url": "https://angular.dev/guide/components", + "type": "article" + }, + { + "title": "Composing with Components in Angular", + "url": "https://angular.dev/essentials/components", + "type": "article" + }, + { + "title": "Explore top posts about Angular", + "url": "https://app.daily.dev/tags/angular?ref=roadmapsh", + "type": "article" + }, + { + "title": "Standalone Components in Angular", + "url": "https://www.youtube.com/watch?v=x5PZwb4XurU", + "type": "video" + } + ] + }, + "Mp056kNnwsRWeEXuhGPy-": { + "title": "Component Anatomy", + "description": "Angular components are the foundational building blocks of Angular applications, designed to encapsulate both the UI and the business logic.\n\nEvery component must have:\n\n* A TypeScript class with behaviors\n* An HTML template\n* A CSS selector\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Anatomy of a component", + "url": "https://angular.dev/guide/components", + "type": "article" + } + ] + }, + "dOMvz__EQjO-3p-Nzm-7P": { + "title": "Provider", + "description": "Configure the injector of component with a token that maps to a provider of a dependency.\n\nVisit the 
following resources to learn more:", + "links": [ + { + "title": "Configuring dependency providers", + "url": "https://angular.dev/guide/di/dependency-injection-providers", + "type": "article" + }, + { + "title": "Component - API", + "url": "https://angular.dev/api/core/Component#providers", + "type": "article" + } + ] + }, + "uYHy2yhtTm6fQkKpYx3lU": { + "title": "changeDetection", + "description": "The change-detection strategy to use for this component.\n\nWhen a component is instantiated, Angular creates a change detector, which is responsible for propagating the component's bindings. The strategy is one of:\n\n* `ChangeDetectionStrategy.OnPush` sets the strategy to CheckOnce (on demand).\n* `ChangeDetectionStrategy.Default` sets the strategy to CheckAlways.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Advanced component configuration", + "url": "https://angular.dev/guide/components/advanced-configuration#changedetectionstrategy", + "type": "article" + }, + { + "title": "Component - API", + "url": "https://angular.dev/api/core/Component#changeDetection", + "type": "article" + } + ] + }, + "-gUpm3OLUJl9iAyx6fmHN": { + "title": "Template", + "description": "`template` metadata is a property defined within the `@Component` decorator that specifies the HTML template for the component. It allows you to define the structure and layout of the component's view.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Template syntax", + "url": "https://angular.dev/guide/templates", + "type": "article" + }, + { + "title": "Component - API", + "url": "https://angular.dev/api/core/Component#template", + "type": "article" + } + ] + }, + "RcNHEh6kmbBK1PICbhAwr": { + "title": "Standalone", + "description": "A standalone component is a component that sets `standalone: true` in its component metadata. 
Standalone components directly import other components, directives, and pipes used in their templates\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Standalone components", + "url": "https://angular.dev/guide/components/importing#standalone-components", + "type": "article" + }, + { + "title": "Component - API", + "url": "https://angular.dev/api/core/Component#standalone", + "type": "article" + } + ] + }, + "doHDoAgp7T59KGSXPpQzZ": { + "title": "viewProvider", + "description": "Defines the set of injectable objects that are visible to its view DOM children.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Using the viewProviders array", + "url": "https://angular.dev/guide/di/hierarchical-dependency-injection#using-the-viewproviders-array", + "type": "article" + }, + { + "title": "Component - API", + "url": "https://angular.dev/api/core/Component#viewProviders", + "type": "article" + } + ] + }, + "ctigvSYeFa77y3v7m11gk": { + "title": "Encapsulation", + "description": "An encapsulation policy for the component's styling. 
Possible values:\n\n* `ViewEncapsulation.Emulated`: Apply modified component styles in order to emulate a native Shadow DOM CSS encapsulation behavior.\n* `ViewEncapsulation.None`: Apply component styles globally without any sort of encapsulation.\n* `ViewEncapsulation.ShadowDom`: Use the browser's native Shadow DOM API to encapsulate styles.\n\nIf not supplied, the value is taken from the CompilerOptions which defaults to `ViewEncapsulation.Emulated`.\n\nIf the policy is `ViewEncapsulation.Emulated` and the component has no styles nor {@link Component#styleUrls styleUrls}, the policy is automatically switched to `ViewEncapsulation.None`.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Style scoping", + "url": "https://angular.dev/guide/components/styling#style-scoping", + "type": "article" + }, + { + "title": "Component - API", + "url": "https://angular.dev/api/core/Component#encapsulation", + "type": "article" + } + ] + }, + "cDN0PGo-zkcLmttxCiAI-": { + "title": "Selector", + "description": "In Angular, the `selector` metadata is a crucial property defined within the `@Component` decorator that specifies how the component can be identified and used in HTML templates. 
It determines the way the component is rendered in the DOM, allowing developers to create reusable and easily identifiable components.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Component selectors", + "url": "https://angular.dev/guide/components/selectors", + "type": "article" + }, + { + "title": "Component - API", + "url": "https://angular.dev/api/core/Component#selector", + "type": "article" + } + ] + }, + "4XJKEmSrQfPxggHlAP30w": { + "title": "Styles", + "description": "This metadata allows developers to apply CSS styles directly to a component, enhancing its appearance and ensuring that styles are scoped to that particular component.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Styling components", + "url": "https://angular.dev/guide/components/styling", + "type": "article" + }, + { + "title": "Component - API", + "url": "https://angular.dev/api/core/Component#styles", + "type": "article" + } + ] + }, + "ghbrJhuGvscnNGCtVLh5_": { + "title": "Imports", + "description": "The `imports` property specifies the `standalone` component's template dependencies — those directives, components, and pipes that can be used within its template.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Importing and using components", + "url": "https://angular.dev/guide/components/importing", + "type": "article" + }, + { + "title": "Component - API", + "url": "https://angular.dev/api/core/Component#imports", + "type": "article" + } + ] + }, + "Szgr8dnZNi-z5i6raIJzW": { + "title": "Metadata", + "description": "Metadata in Angular components refers to the configuration information that is used to define and configure the behavior of a component. 
It is specified using decorators, which are functions that add metadata to classes, properties, and methods.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Component - API", + "url": "https://angular.dev/api/core/Component", + "type": "article" + } + ] + }, + "19c7D-fWIJ3vYFT6h8ZfN": { + "title": "Communication", + "description": "", + "links": [] + }, + "TDyFjKrIZJnCjEZsojPNQ": { + "title": "Parent-Child Interaction", + "description": "", + "links": [] + }, + "v0XaLNZ-YrRqP-xv8wS43": { + "title": "ViewChild", + "description": "", + "links": [] + }, + "oQl9etjoHiU2JgxieUOEH": { + "title": "ContentChild", + "description": "", + "links": [] + }, + "nCpfj_35ZvW-NTygg06XZ": { + "title": "Component Lifecycle", + "description": "A component instance has a lifecycle that starts when Angular instantiates the component class and renders the component view along with its child views. The lifecycle continues with change detection, as Angular checks to see when data-bound properties change, and updates both the view and the component instance as needed. The lifecycle ends when Angular destroys the component instance and removes its rendered template from the DOM. 
Directives have a similar lifecycle, as Angular creates, updates, and destroys instances in the course of execution.\n\nYour application can use lifecycle hook methods to tap into key events in the lifecycle of a component or directive to initialize new instances, initiate change detection when needed, respond to updates during change detection, and clean up before deletion of instances.\n\nThe following life cycle hooks of angular are :\n\n`OnChanges` , `OnInit` , `DoCheck` , `OnDestroy` , `AfterContentInit` , `AfterContentChecked` , `AfterViewInit` , `AfterViewChecked`\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Component Lifecycle", + "url": "https://angular.dev/guide/components/lifecycle", + "type": "article" + }, + { + "title": "The life cycle hooks of angular - Blog ", + "url": "https://blog.logrocket.com/angular-lifecycle-hooks/", + "type": "article" + }, + { + "title": "Explore top posts about React Hooks", + "url": "https://app.daily.dev/tags/react-hooks?ref=roadmapsh", + "type": "article" + } + ] + }, + "tC5ETtOuuUcybj1jI4CuG": { + "title": "Dynamic Components", + "description": "", + "links": [] + }, + "b_kdNS9PDupcUftslkf9i": { + "title": "Modules", + "description": "Modules in Angular act like a container where we can group the components, directives, pipes, and services, related to the application.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Importing and using components", + "url": "https://angular.dev/guide/components/importing", + "type": "article" + }, + { + "title": "Introduction to Modules", + "url": "https://angular.dev/guide/ngmodules", + "type": "article" + }, + { + "title": "Explore top posts about Angular", + "url": "https://app.daily.dev/tags/angular?ref=roadmapsh", + "type": "article" + } + ] + }, + "BCq5sgWQLiw0f7u7ZSAd2": { + "title": "Module Architecture", + "description": "", + "links": [] + }, + "ex8FOKrUlbu4MuEq2czyW": { + "title": "Creating Components", + 
"description": "You can either use Angular CLI to create the Angular components or create it manually.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Build your first Angular app", + "url": "https://angular.dev/tutorials/first-app", + "type": "article" + }, + { + "title": "Components", + "url": "https://angular.dev/essentials/components", + "type": "article" + }, + { + "title": "Angular CLI - ng generate components", + "url": "https://angular.dev/guide/components", + "type": "article" + } + ] + }, + "9YhTXybJw2gszlqFeBtW3": { + "title": "Creating Modules", + "description": "", + "links": [] + }, + "w_BazXvINFyxDCHmlznfy": { + "title": "Feature Modules", + "description": "", + "links": [] + }, + "bLERvEERmNI5AgxtEYokZ": { + "title": "Lazy Loading Modules", + "description": "", + "links": [] + }, + "5b590c7s-2XJ0rgdCYxLa": { + "title": "Dependencies", + "description": "", + "links": [] + }, + "6fhe9xAi_RSVfa-KKbcbV": { + "title": "Templates", + "description": "A Template is a form of HTML which tells Angular to go towards another component. To create many Angular features, special syntax within the templates is used.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Template Syntax", + "url": "https://angular.dev/guide/templates", + "type": "article" + }, + { + "title": "Explore top posts about Angular", + "url": "https://app.daily.dev/tags/angular?ref=roadmapsh", + "type": "article" + } + ] + }, + "XHpfHRIlFh19FJIE07u7i": { + "title": "Interpolation", + "description": "Interpolation refers to embedding expressions into marked up text. By default, interpolation uses the double curly braces {{ and }} as delimiters. 
Angular replaces currentCustomer with the string value of the corresponding component property.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Angular Official Website", + "url": "https://angular.dev/guide/templates/interpolation", + "type": "article" + }, + { + "title": "Displaying values with interpolation", + "url": "https://angular.dev/guide/templates/interpolation", + "type": "article" + } + ] + }, + "t2YOeMONlcnKBrVAo0JDc": { + "title": "Template Statements", + "description": "Template statements are methods or properties that you can use in your HTML to respond to user events. With template statements, your application can engage users through actions such as displaying dynamic content or submitting forms. Enclose the event in `()` which causes Angular to evaluate the right hand side of the assignment as one or more template statements chained together using semicolon `;`.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Angular Official Website", + "url": "https://angular.dev/guide/templates/template-statements", + "type": "article" + }, + { + "title": "Understanding Template Statements", + "url": "https://angular.dev/guide/templates/template-statements#", + "type": "article" + } + ] + }, + "WH5wlyOtrqFHBJx7RFJwS": { + "title": "Understand Binding", + "description": "In an Angular template, a binding creates a live connection between view and the model and keeps them both in sync.\n\n* **property**: helps you set values for properties of HTML elements or directives.\n* **attributes**: helps you set values for attributes of HTML elements directly.\n* **event**: lets you listen for and respond to user actions such as keystrokes, mouse movements, clicks, and touches.\n* **data**: It's a combination of property and event binding and helps you share data between components.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Angular Official Website", + "url": 
"https://angular.dev/guide/templates/binding", + "type": "article" + } + ] + }, + "5vZkiH7HDwONIABLfNJ06": { + "title": "Data Binding", + "description": "", + "links": [] + }, + "TJOZfHtsLfwA0CZ2bd1b2": { + "title": "Properties Binding", + "description": "Property binding helps you set values for properties of HTML elements or directives. To bind to an element's property, enclose it in square brackets `[]` which causes Angular to evaluate the right-hand side of the assignment as a dynamic expression.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Angular Official Website", + "url": "https://angular.dev/guide/templates/property-binding", + "type": "article" + } + ] + }, + "FgsSyM6To7irpbivtOLEE": { + "title": "Attributes Binding", + "description": "", + "links": [] + }, + "bKnpirSvex4oE4lAjiSSV": { + "title": "Events", + "description": "", + "links": [] + }, + "2UH79nCjgtY1Qz1YjUJYL": { + "title": "Two-way Binding", + "description": "", + "links": [] + }, + "VzvB_bads057YtG4ST4a2": { + "title": "Control Flow", + "description": "", + "links": [] + }, + "Wc2ybRw43uamEtno0FpDv": { + "title": "Template Ref Vars", + "description": "", + "links": [] + }, + "VsU6713jeIjAOEZnF6gWx": { + "title": "@Input & @Output", + "description": "`@Input()` and `@Output()` give a child component a way to communicate with its parent component. `@Input()` lets a parent component update data in the child component. 
Conversely, `@Output()` lets the child send data to a parent component.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Angular Official Website - inputs", + "url": "https://angular.dev/guide/components/inputs", + "type": "article" + }, + { + "title": "Angular Official Website - outputs", + "url": "https://angular.dev/guide/components/outputs", + "type": "article" + } + ] + }, + "nyDry6ZWyEUuTq4pw-lU3": { + "title": "Template Ref Vars", + "description": "Template reference variables help you use data from one part of a template in another part of the template. A template variable can refer to a DOM element within a template, component or directive. In the template, use the hash symbol, `#`, to declare a template reference variable.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Angular Official Website", + "url": "https://angular.dev/guide/templates/reference-variables", + "type": "article" + } + ] + }, + "VsC7UmE_AumsBP8fC6to1": { + "title": "Template Syntax", + "description": "", + "links": [] + }, + "U1Zy2T-2ki9pDkXn9hn-I": { + "title": "@if", + "description": "The @if block conditionally displays its content when its condition expression is truthy. 
Content is added and removed from the DOM based on the evaluation of conditional expressions in the @if and @else blocks.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Angular Official Docs - @if", + "url": "https://angular.dev/api/core/@if", + "type": "article" + }, + { + "title": "Narrow Down signal value type within an if statement", + "url": "https://egghead.io/lessons/angular-narrow-down-angular-s-signal-value-type-within-an-if-statement", + "type": "video" + } + ] + }, + "ORdPDad4HWJAfcZuS-7yM": { + "title": "@else", + "description": "", + "links": [] + }, + "ys5untkSppGMFK-VsfuRt": { + "title": "@else if", + "description": "", + "links": [] + }, + "2kYS9w1UzQFZ1zhf01m9L": { + "title": "@for", + "description": "", + "links": [] + }, + "nZuim4Fjq6jYOXcRTAEay": { + "title": "@switch", + "description": "", + "links": [] + }, + "cHC2MH50CbUSMRZV4QGJI": { + "title": "@case", + "description": "", + "links": [] + }, + "h4MMn0_qUN3YXEdMUJOyd": { + "title": "@default", + "description": "", + "links": [] + }, + "AwOM0ucg6W7TohdUd7KWT": { + "title": "@let", + "description": "", + "links": [] + }, + "ONy-0olujU_FGZM7Wvfr2": { + "title": "@defer", + "description": "", + "links": [] + }, + "j99WQxuTzGeBBVoReDp_y": { + "title": "Pipes", + "description": "Use pipes to transform strings, currency amounts, dates, and other data for display. Pipes are simple functions to use in template expressions to accept an input value and return a transformed value. 
Pipes are useful because you can use them throughout your application , some common pipes are\n\n`DatePipe` | `UpperCasePipe` | `LowerCasePipe` | `CurrencyPipe` | `DecimalPipe` | `PercentPipe`\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Understanding Pipes", + "url": "https://angular.dev/guide/pipes", + "type": "article" + }, + { + "title": "BuiltIn Pipes - examples", + "url": "https://codecraft.tv/courses/angular/pipes/built-in-pipes/", + "type": "article" + } + ] + }, + "_-mTs_FMeob-ZGK-bb3j-": { + "title": "Change Detection", + "description": "Change detection is the process through which Angular checks to see whether your application state has changed, and if any DOM needs to be updated. At a high level, Angular walks your components from top to bottom, looking for changes. Angular runs its change detection mechanism periodically so that changes to the data model are reflected in an application’s view. Change detection can be triggered either manually or through an asynchronous event\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Runtime performance optimization", + "url": "https://angular.dev/best-practices/runtime-performance", + "type": "article" + }, + { + "title": "ChangeDetectionStrategy", + "url": "https://angular.dev/guide/components/advanced-configuration#changedetectionstrategy", + "type": "article" + }, + { + "title": "4 Runtime Performance Optimizations ( Change detection )", + "url": "https://www.youtube.com/watch?v=f8sA-i6gkGQ", + "type": "video" + } + ] + }, + "i2taHzQ5KLHjkkpbH4Ytd": { + "title": "Common Pipes", + "description": "", + "links": [] + }, + "nZxZnzbQg9dz-SI65UHq9": { + "title": "Pipes Precedence", + "description": "The pipe operator has a higher precedence than the JavaScript ternary operator.\n\nYou should always use parentheses to be sure Angular evaluates the expression as you intend.\n\n (condition ? 
a : b) | pipe\n \n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Precedence", + "url": "https://angular.dev/guide/pipes/precedence", + "type": "article" + }, + { + "title": "What is the precedence between pipe and ternary operators?", + "url": "https://iq.js.org/questions/angular/what-is-the-precedence-between-pipe-and-ternary-operators", + "type": "article" + } + ] + }, + "BOYXGfULJRiP-XOo_lNX3": { + "title": "Custom Pipes", + "description": "Pipes to transform strings, currency amounts, dates, and other data for display. Pipes are simple functions in template expressions to accept an input value and return a transformed value. Pipes are helpful because you can use them throughout your application while only declaring each pipe once. For example, you would use a pipe to show the date as April 15, 1988, rather than the raw string format.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Custom pipes for new transforms", + "url": "https://angular.dev/guide/pipes/transform-data", + "type": "article" + }, + { + "title": "Create a custom pipe video for Beginners", + "url": "https://www.youtube.com/watch?v=P2587FN4Y0w", + "type": "video" + } + ] + }, + "mRB-0CRdGwvxPqZbz08yj": { + "title": "@else if", + "description": "", + "links": [] + }, + "kGzlumFdZFxTRZ3HnCGFO": { + "title": "Directives", + "description": "SKDirectives are classes that add additional behavior to elements in your Angular applications. Use Angular's built-in directives to manage forms, lists, styles, and what users see.\n\n`NgClass` Adds and removes a set of CSS classes. | `NgStyle` Adds and removes a set of HTML styles. 
| `NgModel` Adds two-way data binding to an HTML form element.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Built-in directives", + "url": "https://angular.dev/guide/directives/", + "type": "article" + }, + { + "title": "BuiltIn Directives Types", + "url": "https://thinkster.io/tutorials/angular-2-directives", + "type": "article" + } + ] + }, + "xk3v8p6vf8ntGj5c-IU4U": { + "title": "Structural Directives", + "description": "", + "links": [] + }, + "xvwby0FTdIolRrV2j88fY": { + "title": "Attribute Directives", + "description": "", + "links": [] + }, + "7GUvTMVzfdVEDBOz-tHUT": { + "title": "Custom Directives", + "description": "Directives are the functions that will execute whenever the Angular compiler finds them. Angular Directives enhance the capability of HTML elements by attaching custom behaviors to the DOM.\n\nFrom the core concept, Angular directives are categorized into three categories: Attribute Directives, Structural Directives, and Component Directives.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Create a custom directive video for Beginners", + "url": "https://www.youtube.com/watch?v=AoN56g6UAsE", + "type": "video" + } + ] + }, + "a74v78SvGtWduZpXs7wSq": { + "title": "Routing", + "description": "Routing in Angular allows the users to create a single-page application with multiple views and allows navigation between them.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Angular Routing", + "url": "https://angular.dev/guide/routing", + "type": "article" + }, + { + "title": "Common Routing Tasks", + "url": "https://angular.dev/guide/routing/common-router-tasks", + "type": "article" + } + ] + }, + "dbAS-hN1hoCsNJhkxXcGq": { + "title": "Configuration", + "description": "The configuration of routes in an Angular application involves defining route mappings in an array and providing these routes to the Angular router.\n\n### Example routes:\n\n const appRoutes: 
Routes = [\n { path: 'custom-path', component: CustomComponet },\n { path: 'custom-path/:id', component: CustomDetailComponet, data: { title: 'Details component' } },\n { path: '', redirectTo: '/heroes', pathMatch: 'full'},\n { path: '**', component: PageNotFoundComponent }\n ];\n \n\n* `'custom-path'`: defining a new url route.\n* `'custom-path/:id'` defining _**id**_ parameter.\n* `''` (empty path): instantiate a component without the need for defining a new url route.\n* `'**'`: for undefined paths.\n* The `data` property in the second route is a place to store arbitrary data associated with this specific route.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Router reference - Configuration", + "url": "https://angular.dev/guide/routing/router-reference#configuration", + "type": "article" + } + ] + }, + "ewbDdPYv2SJl_jW3RVHQs": { + "title": "Lazy Loading", + "description": "Lazy loading is a technique in Angular that allows you to load JavaScript components asynchronously when a specific route is activated. It improves the application load time speed by splitting the application into several bundles. The bundles are loaded as required when the user navigates through the app.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Lazy-loading feature modules", + "url": "https://angular.dev/guide/ngmodules/lazy-loading", + "type": "article" + }, + { + "title": "Angular Tutorial - Lazy Loading", + "url": "https://www.youtube.com/watch?v=JjIQq9lh-Bw", + "type": "video" + } + ] + }, + "1ZwdEL0Gx30Vv_Av3ZTGG": { + "title": "Router Outlets", + "description": "The router-outlet is a directive that's available from the @angular/router package and is used by the router to mark where in a template, a matched component should be inserted.\n\nThanks to the router outlet, your app will have multiple views/pages and the app template acts like a shell of your application. 
Any element, you add to the shell will be rendered in each view, only the part marked by the router outlet will be changed between views.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Router reference - Router outlet", + "url": "https://angular.dev/guide/routing/router-reference#router-outlet", + "type": "article" + }, + { + "title": "Router outlet - API", + "url": "https://angular.dev/api/router/RouterOutlet", + "type": "article" + } + ] + }, + "8lFyuSx4MUcYRY2L8bZrq": { + "title": "Router Links", + "description": "In Angular, routerLink when applied to an element in a template, makes that element a link that initiates navigation to a route. Navigation opens one or more routed components in one or more `` locations on the page.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Router reference - Router links", + "url": "https://angular.dev/guide/routing/router-reference#router-links", + "type": "article" + }, + { + "title": "Router link - API", + "url": "https://angular.dev/api/router/RouterLink", + "type": "article" + }, + { + "title": "Angular Router: Navigation Using RouterLink, Navigate, or NavigateByUrl", + "url": "https://www.digitalocean.com/community/tutorials/angular-navigation-routerlink-navigate-navigatebyurl", + "type": "article" + } + ] + }, + "YF_sG292HqawIX0siWhrv": { + "title": "Router Events", + "description": "The Angular Router raises events when it navigates from one route to another route. It raises several events such as `NavigationStart`, `NavigationEnd`, `NavigationCancel`, `NavigationError`, `ResolveStart`, etc. You can listen to these events and find out when the state of the route changes. 
Some of the useful events are route change start (NavigationStart) and route change end (NavigationEnd).", + "links": [ + { + "title": "Router reference - Router events", + "url": "https://angular.dev/guide/routing/router-reference#router-events", + "type": "article" + }, + { + "title": "Router event - API", + "url": "https://angular.dev/api/router/RouterEvent", + "type": "article" + } + ] + }, + "PmC4zeaLpa5LoL4FhYXcG": { + "title": "Guards", + "description": "Use route guards to prevent users from navigating to parts of an application without authorization.\n\nAngular route guards are interfaces provided by Angular that, when implemented, allow us to control the accessibility of a route based on conditions provided in function implementation of that interface.\n\nSome types of angular guards are `CanActivate`, `CanActivateChild`, `CanDeactivate`, `CanMatch` and `Resolve`.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Preventing unauthorized access", + "url": "https://angular.dev/guide/routing/common-router-tasks#preventing-unauthorized-access", + "type": "article" + }, + { + "title": "Resolve", + "url": "https://angular.dev/api/router/Resolve", + "type": "article" + } + ] + }, + "CpsoIVoCKaZnM_-BbXbCh": { + "title": "Services & Remote Data", + "description": "Services let you define code or functionalities that are then accessible and reusable in many other components in the Angular project. 
It also helps you with the abstraction of logic and data that is hosted independently but can be shared across other components.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Creating an injectable service", + "url": "https://angular.dev/guide/di/creating-injectable-service", + "type": "article" + }, + { + "title": "What is an Angular Service", + "url": "https://www.javatpoint.com/what-is-an-angular-service", + "type": "article" + }, + { + "title": "Service for API Calls", + "url": "https://www.knowledgehut.com/blog/web-development/make-api-calls-angular", + "type": "article" + }, + { + "title": "Service Tutorial with Example", + "url": "https://www.positronx.io/angular-service-tutorial-with-example/", + "type": "article" + } + ] + }, + "8u9uHCRt9RU57erBy79PP": { + "title": "Dependency Injection", + "description": "Dependency Injection is one of the fundamental concepts in Angular. DI is wired into the Angular framework and allows classes with Angular decorators, such as Components, Directives, Pipes, and Injectables, to configure dependencies that they need.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Understanding Dependency Injection", + "url": "https://angular.dev/guide/di/dependency-injection", + "type": "article" + }, + { + "title": "DI in Action", + "url": "https://angular.dev/guide/di/di-in-action", + "type": "article" + }, + { + "title": "Explore top posts about Dependency Injection", + "url": "https://app.daily.dev/tags/dependency-injection?ref=roadmapsh", + "type": "article" + } + ] + }, + "Q36LQds8k_cSjijvXyWOM": { + "title": "Forms", + "description": "Forms are used to handle user inputs in many applications. 
It enables users from entering sensitive information to performing several data entry tasks.\n\nAngular provides two approaches to handle user inputs trough forms: reactive and template-driven forms.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Forms in Angular", + "url": "https://angular.dev/guide/forms", + "type": "article" + }, + { + "title": "Angular Forms Tutorial", + "url": "https://www.youtube.com/watch?v=-bGgjgx3fGs", + "type": "video" + }, + { + "title": "Building Forms in Angular Apps", + "url": "https://www.youtube.com/watch?v=hAaoPOx_oIw", + "type": "video" + } + ] + }, + "1d3Y4HVnqom8UOok-7EEf": { + "title": "Reactive Forms", + "description": "Reactive Forms in angular are those which used to handle the inputs coming from the user. We can define controls by using classes such as FormGroup and FormControl.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Reactive forms - Angular", + "url": "https://angular.dev/guide/forms/reactive-forms", + "type": "article" + }, + { + "title": "Angular Reactive Forms", + "url": "https://www.javatpoint.com/angular-reactive-forms", + "type": "article" + }, + { + "title": "How To Use Reactive Forms in Angular", + "url": "https://www.digitalocean.com/community/tutorials/angular-reactive-forms-introduction", + "type": "article" + }, + { + "title": "Explore top posts about General Programming", + "url": "https://app.daily.dev/tags/general-programming?ref=roadmapsh", + "type": "article" + }, + { + "title": "Reactive Form in Angular", + "url": "https://www.youtube.com/watch?v=8k4ctDmVn7w", + "type": "video" + } + ] + }, + "XC_K1Wahl2ySqOXoym4YU": { + "title": "Typed Forms", + "description": "", + "links": [] + }, + "uDx4lPavwsJFBMzdQ70CS": { + "title": "Template-driven Forms", + "description": "A Template driven form is the simplest form we can build in Angular. 
It is mainly used for creating simple form applications.\n\nIt uses two-way data-binding (ngModel) to create and handle the form components.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Building a template-driven form", + "url": "https://angular.dev/guide/forms/template-driven-forms", + "type": "article" + }, + { + "title": "Template-Driven Forms", + "url": "https://codecraft.tv/courses/angular/forms/template-driven/", + "type": "article" + }, + { + "title": "Template driven form", + "url": "https://www.youtube.com/watch?v=whr14XxB8-M", + "type": "video" + }, + { + "title": "Template driven form Validations", + "url": "https://www.youtube.com/watch?v=cVd4ZCIXprs", + "type": "video" + } + ] + }, + "CpufN6DAOj5UNab9vnH0k": { + "title": "Dynamic Forms", + "description": "", + "links": [] + }, + "kxRtLsB3y_th8j-HjmJgK": { + "title": "Custom Validators", + "description": "", + "links": [] + }, + "m5dgKgUR3ZqI9sBAzToev": { + "title": "Control Value Accessor", + "description": "", + "links": [] + }, + "8UY0HAvjY7bdbFpt-MM1u": { + "title": "HTTP Client", + "description": "", + "links": [] + }, + "AKPhbg10xXjccO7UBh5eJ": { + "title": "Setting Up the Client", + "description": "", + "links": [] + }, + "HjGAv3aV-p4ijYJ8XYIw3": { + "title": "Making Requests", + "description": "", + "links": [] + }, + "xG7iSVOGcbxJbNv3xbNfc": { + "title": "Writing Interceptors", + "description": "", + "links": [] + }, + "lfp7PIjwITU5gBITQdirD": { + "title": "RxJS Basics", + "description": "Reactive Extensions for JavaScript, or RxJS, is a reactive library used to implement reactive programming to deal with async implementation, callbacks, and event-based programs.\n\nThe reactive paradigm can be used in many different languages through the use of reactive libraries. These libraries are downloaded APIs that provide functionalities for reactive tools like observers and operators. 
It can be used in your browser or with Node.js.", + "links": [] + }, + "krXA6ua7E3m4IIpFkgQZe": { + "title": "Observable Pattern", + "description": "The observer pattern is a software design pattern in which an object, named the subject, maintains a list of its dependents, called observers, and notifies them automatically of any state changes, usually by calling one of their methods.\n\nAngular uses the Observer pattern which simply means — Observable objects are registered, and other objects observe (in Angular using the subscribe method) them and take action when the observable object is acted on in some way.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Angular and Observable", + "url": "https://medium.com/fuzzycloud/angular-and-observable-4bf890b2a282", + "type": "article" + } + ] + }, + "b06Y5YrqBbHhWkK6Ws_1c": { + "title": "Observable Lifecycle", + "description": "An observable is a function that acts as a wrapper for a data stream. They support to pass messages inside your application. An observable is useless until an observer subscribes to it. An observer is an object which consumes the data emitted by the observable. An observer keeps receiving data values from the observable until the observable is completed, or the observer unsubscribes from the observable. Otherwise observers can receive data values from the observable continuously and asynchronously. 
So we can perform various operations such as updating the user interface, or passing the JSON response.\n\nThere are 4 stages for a life cycle of an observable.\n\n* Creation\n* Subscription\n* Execution\n* Destruction\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Understanding Observable LifeCycle", + "url": "https://medium.com/analytics-vidhya/understanding-rxjs-observables-ad5b34d9607f", + "type": "article" + } + ] + }, + "e1ZmmxPZuogCNgtbPPWmd": { + "title": "RxJS vs Promises", + "description": "In a nutshell, the main differences between the Promise and the Observable are as follows:\n\n* The Promise is eager, whereas the Observable is lazy,\n* The Promise is always asynchronous, while the Observable can be either asynchronous or synchronous,\n* The Promise can provide a single value, whereas the Observable is a stream of values (from 0 to multiple values), you can apply RxJS operators to the Observable to get a new tailored stream.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Why RxJS? RxJS vs Promises", + "url": "https://javascript.plainenglish.io/why-rxjs-rxjs-vs-promises-b28962771d68", + "type": "article" + }, + { + "title": "Explore top posts about RxJS", + "url": "https://app.daily.dev/tags/rxjs?ref=roadmapsh", + "type": "article" + } + ] + }, + "ihsjIcF0tkhjs56458teE": { + "title": "Operators", + "description": "RxJS is mostly useful for its operators, even though the Observable is the foundation. Operators are the essential pieces that allow complex asynchronous code to be easily composed in a declarative manner.\n\nOperators are functions. There are two kinds of operators:\n\n**Pipeable Operators** are the kind that can be piped to Observables using the syntax observableInstance.pipe(operator()). These include, filter(...), and mergeMap(...). When called, they do not change the existing Observable instance. 
Instead, they return a new Observable, whose subscription logic is based on the first Observable.\n\nA Pipeable Operator is essentially a pure function which takes one Observable as input and generates another Observable as output. Subscribing to the output Observable will also subscribe to the input Observable.\n\n**Creation Operators** are the other kind of operator, which can be called as standalone functions to create a new Observable. For example: of(1, 2, 3) creates an observable that will emit 1, 2, and 3, one right after another. Creation operators will be discussed in more detail in a later section.\n\nPiping\n------\n\nPipeable operators are functions, so they could be used like ordinary functions: op()(obs) — but in practice, there tend to be many of them convolved together, and quickly become unreadable: op4()(op3()(op2()(op1()(obs)))). For that reason, Observables have a method called .pipe() that accomplishes the same thing while being much easier to read:\n\n obs.pipe(op1(), op2(), op3(), op4());\n \n\nCreation Operators\n------------------\n\n**What are creation operators?** Distinct from pipeable operators, creation operators are functions that can be used to create an Observable with some common predefined behavior or by joining other Observables.\n\nA typical example of a creation operator would be the interval function. 
It takes a number (not an Observable) as input argument, and produces an Observable as output:\n\n import { interval } from 'rxjs';\n \n const observable = interval(1000 /* number of milliseconds */);\n \n\nVisit the following resources to learn more:", + "links": [ + { + "title": "List of creation operators", + "url": "https://rxjs.dev/guide/operators#creation-operators-list", + "type": "article" + }, + { + "title": "Full RxJS Operators Documentation", + "url": "https://rxjs.dev/guide/operators", + "type": "article" + } + ] + }, + "nxUbl0eu3LsSL-Z8X6nP5": { + "title": "Filtering", + "description": "RxJS provides a variety of filtering operators that you can use to filter and transform the data in a stream. You can use these operators in combination with other RxJS operators to create powerful and efficient data processing pipelines.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Understanding RxJS Operators", + "url": "https://rxjs.dev/api/operators", + "type": "article" + } + ] + }, + "bJbbayFQ9WSJT9-qy0H5l": { + "title": "Rate Limiting", + "description": "Rate limiting in RxJS refers to the practice of restricting the rate at which events or data can be emitted from an observable. This can be useful in situations where the rate of incoming data is higher than the rate at which it can be processed, or where there are limits on the number of requests that can be made to a server. There are a few different operators in RxJS that can be used for rate limiting, such as throttleTime and sampleTime. These operators can be used to limit the rate of emissions from an observable by discarding emissions that occur too frequently. 
Another operator is auditTime it emits the last value from the source Observable during periodic time windows.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "throttleTime", + "url": "https://rxjs.dev/api/operators/throttleTime", + "type": "article" + }, + { + "title": "sampleTime", + "url": "https://rxjs.dev/api/operators/sampleTime", + "type": "article" + }, + { + "title": "auditTime", + "url": "https://rxjs.dev/api/operators/auditTime", + "type": "article" + }, + { + "title": "Blogs and tutorials on RxJS", + "url": "https://blog.angular-university.io/functional-reactive-programming-for-angular-2-developers-rxjs-and-observables/", + "type": "article" + } + ] + }, + "kdMJHljMzGA3oRlh8Zvos": { + "title": "Transformation", + "description": "In RxJS, \"transformation\" refers to the process of modifying or manipulating the data emitted by an Observable. There are a variety of methods available in RxJS that can be used to transform the data emitted by an Observable, including:\n\n* map: applies a function to each item emitted by the Observable and emits the resulting value\n* mergeMap: applies a function to each item emitted by the Observable, and then merges the resulting Observables into a single Observable\n* switchMap: applies a function to each item emitted by the Observable, and then switches to the latest resulting Observable\n* concatMap: applies a function to each item emitted by the Observable, and then concatenates the resulting Observables into a single Observable\n* exhaustMap: applies a function to each item emitted by the Observable, but ignores subsequent emissions until the current Observable completes\n\nThese are just a few examples of the many methods available in RxJS for transforming the data emitted by an Observable. 
Each method has its own specific use case, and the best method to use will depend on the requirements of your application.\n\nHere are the official documentation links for the RxJS transformation methods:\n\nYou can find more information and examples on these methods in the official RxJS documentation. Additionally, you can find more operators on [https://rxjs.dev/api/operators](https://rxjs.dev/api/operators) and you can also find more information on the library as a whole on [https://rxjs.dev/](https://rxjs.dev/)", + "links": [ + { + "title": "map", + "url": "https://rxjs.dev/api/operators/map", + "type": "article" + }, + { + "title": "mergeMap", + "url": "https://rxjs.dev/api/operators/mergeMap", + "type": "article" + }, + { + "title": "switchMap", + "url": "https://rxjs.dev/api/operators/switchMap", + "type": "article" + }, + { + "title": "concatMap", + "url": "https://rxjs.dev/api/operators/concatMap", + "type": "article" + }, + { + "title": "exhaustMap", + "url": "https://rxjs.dev/api/operators/exhaustMap", + "type": "article" + }, + { + "title": "switchMap vs mergeMap vs concatMap vs exhaustMap practical guide", + "url": "https://youtu.be/40pC5wHowWw", + "type": "video" + } + ] + }, + "IgUHqfVhiGpwxT9tY8O88": { + "title": "Combination", + "description": "Combination operators in RxJS are used to combine multiple observables into a single observable. 
There are several types of combination operators, including:\n\n* Merge: merges multiple observables into a single observable that emits items from each source observable in a sequence.\n \n* Concat: concatenates multiple observables into a single observable that emits the items from each source observable in sequence, one after the other.\n \n* Zip: combines the items from multiple observables into a single observable by combining the items from each observable at a corresponding index.\n \n* CombineLatest: combines the latest values from multiple observables into a single observable by emitting an item whenever any of the source observables emit an item.\n \n* WithLatestFrom: combines the latest value from one observable with the latest values from multiple other observables.\n \n* ForkJoin: combines the items from multiple observables into a single observable by emitting an item only after all of the source observables have emitted an item.\n \n\nFurther documentation can be found in the official RxJS documentation:\n\n* Merge: [https://rxjs.dev/api/operators/merge](https://rxjs.dev/api/operators/merge)\n \n* Concat: [https://rxjs.dev/api/operators/concat](https://rxjs.dev/api/operators/concat)\n \n* Zip: [https://rxjs.dev/api/operators/zip](https://rxjs.dev/api/operators/zip)\n \n* CombineLatest: [https://rxjs.dev/api/operators/combineLatest](https://rxjs.dev/api/operators/combineLatest)\n \n* WithLatestFrom: [https://rxjs.dev/api/operators/withLatestFrom](https://rxjs.dev/api/operators/withLatestFrom)\n \n* ForkJoin: [https://rxjs.dev/api/index/function/forkJoin](https://rxjs.dev/api/index/function/forkJoin)", + "links": [] + }, + "u1TG8i145o0RKhOR_5epf": { + "title": "Signals", + "description": "", + "links": [] + }, + "KAdtebWvgvMifIwd52yc4": { + "title": "RxJS Interop", + "description": "", + "links": [] + }, + "LcJyAfv9hjyUNXUVyPRP4": { + "title": "Inputs as Signals", + "description": "", + "links": [] + }, + "9HS9C3yq9EUcUy0ZUZk_H": { + "title": "Queries 
as Signals", + "description": "", + "links": [] + }, + "IeU6ClS_yp6BYKdkQOJVf": { + "title": "Model Inputs", + "description": "", + "links": [] + }, + "Mqe_s-nwBqAL6X7OGRHEN": { + "title": "State Management", + "description": "Application state management is the process of maintaining knowledge of an application's inputs across multiple related data flows that form a complete business transaction -- or a session -- to understand the condition of the app at any given moment. In computer science, an input is information put into the program by the user and state refers to the condition of an application according to its stored inputs -- saved as variables or constants. State can also be described as the collection of preserved information that forms a complete session.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is State Management?", + "url": "https://www.techtarget.com/searchapparchitecture/definition/state-management", + "type": "article" + }, + { + "title": "Angular state management made simple with NgRx", + "url": "https://blog.logrocket.com/angular-state-management-made-simple-with-ngrx/", + "type": "article" + }, + { + "title": "Angular State Management with NgRx", + "url": "https://www.syncfusion.com/blogs/post/angular-state-management-with-ngrx.aspx", + "type": "article" + } + ] + }, + "N9ZCPgFnFIUv4jMv1w5qK": { + "title": "NGXS", + "description": "Ngxs is a state management pattern for the Angular framework. It acts as a single source of truth for our application. Ngxs is very simple and easily implementable. It reduce lots of boilerplate code . It is a replacement for Ngrx. In Ngrx we are creating state, action, reducer, and effects but in Ngxs, we are creating only state and actions instead of all of this. Like Ngrx, Ngxs is also asynchronous and when we dispatch any action we can get a response back.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is NGXS ? 
- Ngxs.io ", + "url": "https://www.ngxs.io/", + "type": "article" + }, + { + "title": "Details about NGXS - Medium ", + "url": "https://medium.com/@knoldus/introduction-to-ngxs-state-management-pattern-library-for-angular-ec76f681ceba", + "type": "article" + }, + { + "title": "Practise of NGXS", + "url": "https://www.youtube.com/watch?v=SGj11j4hxmg", + "type": "video" + } + ] + }, + "ir94IdkF1tVAA8ZTD9r0N": { + "title": "NgRx", + "description": "NgRx is a framework for building reactive applications in Angular. NgRx simplifies managing application state by enforcing unidirectional data flow and providing tools like NgRx Store, NgRx Effects, NgRx Router Store, NgRx Signals, NgRx Entity, and NgRx Operators.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is NGRX?", + "url": "https://ngrx.io/docs", + "type": "article" + }, + { + "title": "Angular NgRx Store and Effects Crash Course", + "url": "https://blog.angular-university.io/angular-ngrx-store-and-effects-crash-course/", + "type": "article" + }, + { + "title": "Angular state management with NgRx", + "url": "https://www.youtube.com/watch?v=a3_GW3RBqn0", + "type": "video" + }, + { + "title": "Angular NgRx Signal Store Crash Course (For NgRx Beginners)", + "url": "https://www.youtube.com/watch?v=HqxY0JPlh54", + "type": "video" + }, + { + "title": "NgRx Best Practices", + "url": "https://youtu.be/yYiO-kjmLAc?si=7J_JkOdbyocfb5m_", + "type": "video" + }, + { + "title": "Angular Course with NgRx - Building Angular Project From Scratch", + "url": "https://www.youtube.com/watch?v=vcfZ0EQpYTA", + "type": "video" + } + ] + }, + "rgPUcSKxG9DvXicLfC2Ay": { + "title": "Elf", + "description": "", + "links": [] + }, + "m4WBnx_9h01Jl6Q1sxi4Y": { + "title": "Zones", + "description": "Zone.js is a signaling mechanism that Angular uses to detect when an application state might have changed. It captures asynchronous operations like setTimeout, network requests, and event listeners. 
Angular schedules change detection based on signals from Zone.js.", + "links": [ + { + "title": "Resolving zone pollution", + "url": "https://angular.dev/best-practices/zone-pollution", + "type": "article" + }, + { + "title": "Angular without ZoneJS (Zoneless)", + "url": "https://angular.dev/guide/experimental/zoneless", + "type": "article" + }, + { + "title": "NgZone - API", + "url": "https://angular.dev/api/core/NgZone", + "type": "article" + } + ] + }, + "1x5pT607aKE-S-NCWB810": { + "title": "Zoneless Applications", + "description": "", + "links": [] + }, + "EbJib-XfZFF9bpCtL3aBs": { + "title": "Developer Tools", + "description": "", + "links": [] + }, + "4YSk6I63Ew--zoXC3xmrC": { + "title": "Angular CLI", + "description": "The Angular CLI is a command-line interface tool that you use to initialize, develop, scaffold, and maintain Angular applications directly from a command shell. we can install angular latest CLI using the following command\n\n`npm install -g @angular/cli`\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "CLI Reference", + "url": "https://angular.dev/cli", + "type": "article" + }, + { + "title": "The Angular CLI", + "url": "https://angular.dev/tools/cli", + "type": "article" + }, + { + "title": "Explore top posts about Angular", + "url": "https://app.daily.dev/tags/angular?ref=roadmapsh", + "type": "article" + }, + { + "title": "Angular CLI - setup", + "url": "https://www.youtube.com/watch?v=mZnzX3J5XKI", + "type": "video" + } + ] + }, + "FVH0lnbIZ2m5EfF2EJ2DW": { + "title": "Local Setup", + "description": "", + "links": [] + }, + "1fVi9AK6aLjt5QgAFbnGX": { + "title": "Deployment", + "description": "", + "links": [] + }, + "yhNGhduk__ow8VTLc6inZ": { + "title": "End-to-End Testing", + "description": "", + "links": [] + }, + "Uvr0pRk_fOzwRwqn0dQ6N": { + "title": "Schematics", + "description": "A schematic is a template-based code generator that supports complex logic. 
It is a set of instructions for transforming a software project by generating or modifying code.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Generating code using schematics", + "url": "https://angular.dev/tools/cli/schematics", + "type": "article" + }, + { + "title": "Angular Blog", + "url": "https://blog.angular.io/schematics-an-introduction-dc1dfbc2a2b2?gi=ad9571373944", + "type": "article" + } + ] + }, + "Ax-s_xw3FO3Ocv-AnLbQD": { + "title": "Build Environments", + "description": "", + "links": [] + }, + "TeWEy9I-hU6SH02Sy2S2S": { + "title": "CLI Builders", + "description": "", + "links": [] + }, + "MwtM1UAIfj4FJ-Y4CKDsP": { + "title": "AoT Compilation", + "description": "Angular applications require a compilation process before they can run in a browser. The Angular ahead-of-time (AOT) compiler converts your Angular HTML and TypeScript code into efficient JavaScript code during the build phase before the browser downloads and runs that code. Compiling your application during the build process provides a faster rendering in the browser.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Angular Compiler Output", + "url": "https://github.com/JeanMeche/angular-compiler-output", + "type": "opensource" + }, + { + "title": "Ahead-of-time (AOT) compilation", + "url": "https://angular.dev/tools/cli/aot-compiler", + "type": "article" + }, + { + "title": "Understanding Angular's ahead of time compilation", + "url": "https://blog.nashtechglobal.com/understanding-angulars-ahead-of-time-aot-compilation/", + "type": "article" + } + ] + }, + "T3MmS3bvMMgCUbOk3ktU7": { + "title": "DevTools", + "description": "", + "links": [] + }, + "ql7SyxrRmjpiXJ9hQeWPq": { + "title": "Language Service", + "description": "The Angular Language Service provides code editors with a way to get completions, errors, hints, and navigation inside Angular templates (external and in-line). 
Anytime you open an Angular application for the first time, an installation prompt will occur.\n\nVisit the following links to learn more:", + "links": [ + { + "title": "VS Code NG Language Service", + "url": "https://github.com/angular/vscode-ng-language-service", + "type": "opensource" + }, + { + "title": "Language Service Docs", + "url": "https://angular.dev/tools/language-service", + "type": "article" + } + ] + }, + "cl89U8atD6gw5rMGUm4Ix": { + "title": "Libraries", + "description": "Use the Angular CLI and the npm package manager to build and publish your library as an npm package.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Angular Website", + "url": "https://angular.dev/tools/libraries/creating-libraries", + "type": "article" + } + ] + }, + "YHV5oFwLwphXf1wJTDZuG": { + "title": "Using Libraries", + "description": "", + "links": [] + }, + "A1mYMg7cbcj6p_VkDf-Tz": { + "title": "Creating Libraries", + "description": "", + "links": [] + }, + "jfHaS8TqE4tcAo59K8Nkn": { + "title": "SSR", + "description": "A normal Angular application executes in the browser, rendering pages in the DOM in response to user actions. Angular Universal executes on the server, generating static application pages that later get bootstrapped on the client. 
This means that the application generally renders more quickly, giving users a chance to view the application layout before it becomes fully interactive.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Server-side rendering", + "url": "https://angular.dev/guide/ssr", + "type": "article" + }, + { + "title": "Rendering on the Web", + "url": "https://web.dev/rendering-on-the-web/", + "type": "article" + }, + { + "title": "Explore top posts about Angular", + "url": "https://app.daily.dev/tags/angular?ref=roadmapsh", + "type": "article" + } + ] + }, + "b-0yQ74zHtAxI9aRLBohc": { + "title": "SSG", + "description": "SSG (Static Site Generator) helps in building the full HTML website during the process of building and serving that HTML page. This method helps to generate the HTML website on the client side before it is served on the server side. Therefore, whenever a user requests an HTML page, firstly the HTML page will be rendered and secondly, the Angular app will be rendered. SSG can be used only if your website is static or its content doesn't change frequently.", + "links": [] + }, + "kauQofxCmpktXPcnzid17": { + "title": "AnalogJS", + "description": "AnalogJS is a full-stack meta-framework powered by Vite and Nitro for Angular. Analog supports both Server-Side Rendering (SSR) and Static Site Generation (SSG). 
Analog uses file-based routing and supports API (server) routes.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Analog", + "url": "https://analogjs.org/", + "type": "article" + }, + { + "title": "Robin Goetz - AnalogJS - The Vite powered Angular meta-framework | NG Belgrade Conf 2024", + "url": "https://www.youtube.com/watch?v=BSgpvP4eAGk", + "type": "video" + }, + { + "title": "Full-stack Angular (SSR, file-based routing, + more) with AnalogJS", + "url": "https://www.youtube.com/watch?v=VSCXOTCJpiI", + "type": "video" + }, + { + "title": "Is AnalogJS good enough for my blog site?", + "url": "https://www.youtube.com/watch?v=xTzEDQULo6s", + "type": "video" + } + ] + }, + "mm6c7GLQEwoQdAHdAYzGh": { + "title": "Security", + "description": "", + "links": [] + }, + "umUX4Hxk7srHlFR_Un-u7": { + "title": "Cross-site Scripting", + "description": "", + "links": [] + }, + "cgI9oeUHufA-ky_W1zENe": { + "title": "Sanitization", + "description": "", + "links": [] + }, + "XoYSuv1salCCHoI1cJkxv": { + "title": "Trusting Safe Values", + "description": "", + "links": [] + }, + "5h7U0spwEUhB-hbjSlaeB": { + "title": "Enforce Trusted Types", + "description": "", + "links": [] + }, + "xH3RHPhsaqD9zIMms5OmX": { + "title": "HTTP Vulnerabilities", + "description": "", + "links": [] + }, + "Z1DZBbFI4oU6-KQg3wqMm": { + "title": "Cross-site Request Forgery", + "description": "", + "links": [] + }, + "m2aw8vb4rz4IjshpoMyNx": { + "title": "HttpClient CSRF", + "description": "", + "links": [] + }, + "ni00edsphJd7uBLCn7-Vw": { + "title": "XSRF protection", + "description": "", + "links": [] + }, + "zd7YJGlcMFNFbsKUiW_XC": { + "title": "Cross-site Script Inclusion", + "description": "", + "links": [] + }, + "VNG9DdXlS6R1OJ6Lrn4Lt": { + "title": "Accessibility", + "description": "", + "links": [] + }, + "0FvH7KPs9ag02QkD1HEJ-": { + "title": "Attributes", + "description": "", + "links": [] + }, + "CZ1YRyai8Ds-ry4A8jVbr": { + "title": "UI Components", + "description": 
"", + "links": [] + }, + "0s-QhN5aZh2F3tLJFKEyR": { + "title": "Containers", + "description": "", + "links": [] + }, + "8i_JD1P4gIhY1rdldwLC2": { + "title": "Routing", + "description": "", + "links": [] + }, + "5-RCB8AiDbkdIFYNXKWge": { + "title": "Link Identification", + "description": "", + "links": [] + }, + "STEHxJpwBZxFdQl0zUKxo": { + "title": "Performance", + "description": "Angular’s performance is enhanced through key strategies such as Deferrable Views for improved initial load times, Image Optimization techniques to reduce loading overhead, and mitigation of Zone Pollution to streamline change detection. Additionally, addressing Slow Computations enhances runtime efficiency, while Hydration techniques support faster, more interactive server-side rendered applications.\n\nLearn more from the following resources:", + "links": [ + { + "title": "From Good to Great: Optimizing Angular Performance", + "url": "https://www.youtube.com/watch?v=tMxrY7IL-Ac", + "type": "video" + } + ] + }, + "CYjsXIOWtP5DJmYS-qR-s": { + "title": "Deferrable Views", + "description": "", + "links": [] + }, + "1WIKjn3nxYDMIhBL17aYQ": { + "title": "Image Optimization", + "description": "", + "links": [] + }, + "pRSR5PEbkJXAJ1LPyK-EE": { + "title": "Zone Pollution", + "description": "", + "links": [] + }, + "yxUtSBzJPRcS-IuPsyp-W": { + "title": "Slow Computations", + "description": "", + "links": [] + }, + "NY_MfBNgNmloiRGcIvfJ1": { + "title": "Hydration", + "description": "", + "links": [] + }, + "lLa-OnHV6GzkNFZu29BIT": { + "title": "Testing", + "description": "In any software development process, testing the application plays a vital role. If bugs and crashes are not figured out and solved, they can defame the development company as well as hurt the clients too. But Angular’s architecture comes with built-in testability features. As soon as you create a new project with Angular CLI, two essential testing tools are installed. They are: Jasmine and Karma. 
Jasmine is the testing library which structures individual tests into specifications (“specs”) and suites. And Karma is the test runner, which enables the different browsers to run the tests mentioned by Jasmine and the browsers will finally report the test results back.", + "links": [] + }, + "HU1eTYB321C93qh_U7ioF": { + "title": "Testing Services", + "description": "", + "links": [] + }, + "rH13NBFG02hnn5eABSNCY": { + "title": "Testing Pipes", + "description": "An Angular Pipe is a special function that is called from a Component template. Its purpose is to transform a value: You pass a value to the Pipe, the Pipe computes a new value and returns it.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Testing pipes", + "url": "https://angular.dev/guide/testing/pipes", + "type": "article" + }, + { + "title": "Testing pipes examples", + "url": "https://testing-angular.com/testing-pipes/", + "type": "article" + }, + { + "title": "Explore top posts about Testing", + "url": "https://app.daily.dev/tags/testing?ref=roadmapsh", + "type": "article" + } + ] + }, + "4xt0m5jkUqB4Z-krcFBuL": { + "title": "Testing Requests", + "description": "", + "links": [] + }, + "TGRZBizDy83JKg_MhnRdX": { + "title": "Services with Dependencies", + "description": "In an Angular application, Services are responsible for fetching, storing and processing data. Services are singletons, meaning there is only one instance of a Service during runtime. 
They are fit for central data storage, HTTP and WebSocket communication as well as data validation.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Angular Website", + "url": "https://angular.dev/guide/testing/services", + "type": "article" + }, + { + "title": "Testing-Angular.com", + "url": "https://testing-angular.com/testing-services/", + "type": "article" + }, + { + "title": "Explore top posts about Testing", + "url": "https://app.daily.dev/tags/testing?ref=roadmapsh", + "type": "article" + } + ] + }, + "cXVy1lx2XqY_j8gxz-y60": { + "title": "Component Bindings", + "description": "Angular processes all data bindings once for each JavaScript event cycle, from the root of the application component tree through all child components. Data binding plays an important role in communication between a template and its component, and is also important for communication between parent and child components.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Angular Website", + "url": "https://angular.dev/guide/components", + "type": "article" + }, + { + "title": "Explore top posts about Testing", + "url": "https://app.daily.dev/tags/testing?ref=roadmapsh", + "type": "article" + } + ] + }, + "SGqb5k6OmWit8PA6ZT3js": { + "title": "Testing Directives", + "description": "Directives are classes that add new behavior or modify the existing behavior to the elements in the template. 
Basically directives are used to manipulate the DOM, for example adding/removing the element from the DOM or changing the appearance of the DOM elements.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Angular Website", + "url": "https://angular.dev/guide/testing/attribute-directives", + "type": "article" + }, + { + "title": "testing-angular Website", + "url": "https://testing-angular.com/testing-directives/", + "type": "article" + }, + { + "title": "Explore top posts about Testing", + "url": "https://app.daily.dev/tags/testing?ref=roadmapsh", + "type": "article" + } + ] + }, + "f5v74Uw54LsB4FgdN6eCd": { + "title": "Debugging Tests", + "description": "", + "links": [] + }, + "0dYWO_Zvh9J5_6cRjRjvI": { + "title": "Component Templates", + "description": "With a component template, you can save and reuse component processes and properties and create components from them; template-based components inherit the template's properties and process.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Component testing scenarios", + "url": "https://angular.dev/guide/testing/components-scenarios", + "type": "article" + }, + { + "title": "Explore top posts about Testing", + "url": "https://app.daily.dev/tags/testing?ref=roadmapsh", + "type": "article" + } + ] + }, + "bqA2bxPcZrqQ-6QE-YDK1": { + "title": "Code Coverage", + "description": "", + "links": [] + }, + "Xxyx3uzy5TpNhgR1IysMN": { + "title": "Internationalization", + "description": "", + "links": [] + }, + "W8OwpEw00xn0GxidlJjdc": { + "title": "Localize Package", + "description": "", + "links": [] + }, + "dVKl3Z2Rnf6IB064v19Mi": { + "title": "Locales by ID", + "description": "", + "links": [] + }, + "jL5amGV1BAX_V5cyTIH7d": { + "title": "Translation Files", + "description": "", + "links": [] + }, + "9ISvaaJ815_cr_KW9vQhT": { + "title": "Multiple Locales", + "description": "", + "links": [] + }, + "rYJq59Q0YdfK6n3x740Em": { + "title": "Animation", + "description": "", 
+ "links": [] + }, + "Iv2d4sgODqMPzA9gH6RAw": { + "title": "Transitions & Triggers", + "description": "", + "links": [] + }, + "Unjknmb4b2LY-nUVvvF7_": { + "title": "Complex Sequences", + "description": "", + "links": [] + }, + "M1CU2Yq6dLp4yOuGV0fhF": { + "title": "Reusable Animations", + "description": "", + "links": [] + }, + "x91jWP81oCTeVEwzX8FbK": { + "title": "Route Transitions", + "description": "", + "links": [] + } +} \ No newline at end of file diff --git a/public/roadmap-content/api-design.json b/public/roadmap-content/api-design.json new file mode 100644 index 000000000..038b9435d --- /dev/null +++ b/public/roadmap-content/api-design.json @@ -0,0 +1,1485 @@ +{ + "duKkpzPjUU_-8kyJGHqRX": { + "title": "Learn the Basics", + "description": "Application Programming Interfaces (APIs) are an integral part of modern development, allowing software applications to communicate and use functions from other software applications or services. API design, therefore, becomes a key part of any software development process. Furthermore, the basics of API design encompass understanding the principles of what an API is, how it works, and the various types of APIs, such as REST, SOAP, and GraphQL. This also includes understanding the standards and best practices in API design to ensure the development of powerful, user-friendly, and secure APIs. The foundation of API Design lies in this knowledge, setting the stage for more complex API designing and development.", + "links": [] + }, + "r8M3quACGO2piu0u_R4hO": { + "title": "What are APIs", + "description": "APIs, or Application Programming Interfaces, provide a manner in which software applications communicate with each other. They abstract the complexity of applications to allow developers to use only the essentials of the software they are working with. They define the methods and data formats an application should use in order to perform tasks, like sending, retrieving, or modifying data. 
Understanding APIs is integral to mastering modern software development, primarily because they allow applications to exchange data and functionality with ease, thus enabling integration and convergence of technological services. Therefore, a solid understanding of what APIs are forms the basic cornerstone of API design.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is an API?", + "url": "https://aws.amazon.com/what-is/api/", + "type": "article" + }, + { + "title": "What is an API?", + "url": "https://www.youtube.com/watch?v=s7wmiS2mSXY", + "type": "video" + } + ] + }, + "2HdKzAIQi15pr3YHHrbPp": { + "title": "HTTP", + "description": "HTTP, or Hypertext Transfer Protocol, is a fundamental piece of any API design. It is the protocol used for transmitting hypermedia data on the web, such as HTML webpages or JSON from a web API. Understanding HTTP is crucial in API design as it provides the structure for how requests and responses should be constructed and handled. It dictates how endpoints are defined, how data should be transferred, and what status codes should be used to convey specific scenarios. 
A solid grounding in HTTP principles allows for more robust, efficient and secure API designs.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Everything you need to know about HTTP", + "url": "https://cs.fyi/guide/http-in-depth", + "type": "article" + }, + { + "title": "What is HTTP?", + "url": "https://www.cloudflare.com/en-gb/learning/ddos/glossary/hypertext-transfer-protocol-http/", + "type": "article" + }, + { + "title": "An overview of HTTP", + "url": "https://developer.mozilla.org/en-US/docs/Web/HTTP/Overview", + "type": "article" + }, + { + "title": "HTTP/3 From A To Z: Core Concepts", + "url": "https://www.smashingmagazine.com/2021/08/http3-core-concepts-part1/", + "type": "article" + }, + { + "title": "HTTP Crash Course & Exploration", + "url": "https://www.youtube.com/watch?v=iYM2zFP3Zn0", + "type": "video" + } + ] + }, + "ACALE93mL4gnX5ThRIdRp": { + "title": "HTTP Versions", + "description": "HTTP or Hypertext Transfer Protocol is pivotal in the world of API design. HTTP versions specify how data should be packaged and transported, as well as how web servers and browsers should respond to commands. Understanding different HTTP versions and their features is essential for API designers as it directly impacts how well an API can communicate with other software and systems. From HTTP/1.0, the initial version of HTTP to HTTP/2 and the latest version HTTP/3, each version brings in improvements in speed, data transmission capabilities, and security. Selecting an appropriate HTTP version is crucial for API efficiency and performance.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Evolution of HTTP", + "url": "https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Evolution_of_HTTP", + "type": "article" + }, + { + "title": "HTTP: 1.0 vs. 1.1 vs 2.0 vs. 
3.0", + "url": "https://www.baeldung.com/cs/http-versions", + "type": "article" + } + ] + }, + "rADHM-6NAxEjzmgiHefDX": { + "title": "HTTP Methods", + "description": "HTTP (Hypertext Transfer Protocol) Methods play a significant role in API design. They define the type of request a client can make to a server, providing the framework for interaction between client and server. Understanding HTTP methods is paramount to creating a robust and effective API. Some of the common HTTP methods used in API design include GET, POST, PUT, DELETE, and PATCH. Each of these methods signifies a different type of request, allowing for various interactions with your API endpoints. This in turn creates a more dynamic, functional, and user-friendly API.\n\nLearn more from the following resources:", + "links": [ + { + "title": "HTTP request methods", + "url": "https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods", + "type": "article" + }, + { + "title": "HTTP Request Methods - W3Schools", + "url": "https://www.w3schools.com/tags/ref_httpmethods.asp", + "type": "article" + }, + { + "title": "What are HTTP Methods?", + "url": "https://blog.postman.com/what-are-http-methods/", + "type": "article" + } + ] + }, + "7szYyzLifKsepNU0c2KnN": { + "title": "HTTP Status Codes", + "description": "HTTP Status Codes are an essential part of API Design, providing important information about the result of a request made to a server. They are 3-digit numbers where the first digit defines the class of response, while the last two digits do not have any categorization value. For instance, '200' stands for a successful HTTP request, while '404' signifies that a requested resource could not be found on the server. 
Efficient use of these codes can enhance API's robustness, making it more understandable and easier to debug.\n\nLearn more from the following resources:", + "links": [ + { + "title": "HTTP Status Codes", + "url": "https://developer.mozilla.org/en-US/docs/Web/HTTP/Status", + "type": "article" + }, + { + "title": "What are HTTP status codes?", + "url": "https://umbraco.com/knowledge-base/http-status-codes/", + "type": "article" + }, + { + "title": "List of HTTP status codes", + "url": "https://en.wikipedia.org/wiki/List_of_HTTP_status_codes", + "type": "article" + }, + { + "title": "HTTP Status Codes explained in 5 minutes", + "url": "https://www.youtube.com/watch?v=qmpUfWN7hh4", + "type": "video" + } + ] + }, + "rE-0yibRH6B2UBKp351cf": { + "title": "HTTP Headers", + "description": "HTTP Headers play a crucial role in API Design as they provide essential information between the client and server regarding the data to be exchanged. Headers are part of the HTTP request and response message, with types including Standard, Non-standard, Common or Uncommon headers. They can define parameters such as content type, authentication, response status, cookies, and more. Understanding and effectively utilizing HTTP Headers is key to designing robust and secure APIs. 
A well-defined set of headers ensures successful data exchange, handles errors gracefully, and improves overall communication between the client and server.\n\nLearn more from the following resources:", + "links": [ + { + "title": "HTTP Headers", + "url": "https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers", + "type": "article" + }, + { + "title": "What are HTTP headers?", + "url": "https://blog.postman.com/what-are-http-headers/", + "type": "article" + }, + { + "title": "What are HTTP Headers & Understand different types of HTTP headers", + "url": "https://requestly.com/blog/what-are-http-headers-understand-different-types-of-http-headers/", + "type": "article" + } + ] + }, + "P-rGIk50Bg7nFmWieAW07": { + "title": "URL, Query & Path Parameters", + "description": "When designing APIs, an integral part involves dealing with uniform resource locators (URLs), query parameters, and path parameters. These components play crucial parts in how the API sends and retrieves data. The URL forms the basis of the API given that it identifies the resource on the server. Query parameters are used to filter specific results, sorting or showing specific data fields. On the other hand, Path parameters serve as placeholders for variable data that will be input into the URL, allowing us to customize the data response. 
Understanding the usage of URL, query, and path parameters is of utmost importance for creating efficient, scalable and user-friendly APIs.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Understanding Path Variables and Query Parameters in HTTP Requests", + "url": "https://medium.com/@averydcs/understanding-path-variables-and-query-parameters-in-http-requests-232248b71a8", + "type": "article" + }, + { + "title": "Describing parameters", + "url": "https://swagger.io/docs/specification/describing-parameters/", + "type": "article" + }, + { + "title": "Path parameters", + "url": "https://help.iot-x.com/api/how-to-use-the-api/parameters/path-parameters", + "type": "article" + } + ] + }, + "UFuX8wcxZQ7dvaQF_2Yp8": { + "title": "Cookies", + "description": "Cookies play an instrumental role in the field of API (Application Programming Interface) design. Essentially, cookies are small bits of data stored on a user's browser that enables stateful HTTP sessions, by storing pertinent information between server communications. In API design, cookies are especially useful when authentication is required. Cookies can store session tokens, thereby allowing users to stay logged in across multiple sessions or different web pages. Understanding cookies and how they function is vital in API design for sustaining user sessions, providing enhanced user experience, and securing user information.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What Are API Cookies? 
How to Send it?", + "url": "https://apidog.com/articles/what-are-api-cookies/", + "type": "article" + }, + { + "title": "Cookies - Mozilla", + "url": "https://developer.mozilla.org/en-US/docs/Mozilla/Add-ons/WebExtensions/API/cookies", + "type": "article" + } + ] + }, + "TX_hg7EobNJhmWKsMCaT1": { + "title": "Content Negotiation", + "description": "In the context of API design, Content Negotiation refers to the process where the client and the server communicate about the data representation which is acceptable for both of them. It allows clients to indicate the preferred response format, such as JSON, XML, or HTML. This mechanism leads to flexible and adaptable APIs, enhancing their usability. Understanding and efficiently utilizing content negotiation is an integral part of mastering API design basics.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Content Negotiation", + "url": "https://developer.mozilla.org/en-US/docs/Web/HTTP/Content_negotiation", + "type": "article" + }, + { + "title": "Content Negotiation in practice", + "url": "https://softwaremill.com/content-negotiation-in-practice/", + "type": "article" + } + ] + }, + "GRlsBogOlOwuqhMMPyHN3": { + "title": "CORS", + "description": "Cross-Origin Resource Sharing (CORS) is a critical concept in API Design. It is a mechanism that uses HTTP headers to tell browsers to give a web application running at one origin, access to selected resources from a different origin. By default, web browsers prohibit web pages from making requests to a different domain than the one the web page came from. CORS is the guideline that lets you configure a set of rules on the server to define which types of cross-domain requests are allowed, providing much-needed flexibility without compromising security. 
Understanding CORS is crucial in designing APIs that ensure safe and effective inter-domain communication.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Cross-Origin Resource Sharing (CORS)", + "url": "https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS", + "type": "article" + }, + { + "title": "What is CORS?", + "url": "https://aws.amazon.com/what-is/cross-origin-resource-sharing/", + "type": "article" + }, + { + "title": "CORS in 100 seconds", + "url": "https://www.youtube.com/watch?v=4KHiSt0oLJ0", + "type": "video" + } + ] + }, + "KG3wO86F8Of27fU7QRcsn": { + "title": "Understand TCP / IP", + "description": "When designing APIs, an essential building block is the understanding of TCP/IP. TCP/IP, standing for Transmission Control Protocol/Internet Protocol, is the suite of communications protocols used to connect hosts on the Internet. It provides ordered, error-checked delivery of streams of bytes from a program on one computer to another program on another computer. If you want to understand how APIs communicate over networks, knowing the fundamental working of TCP/IP is indispensable. Fully appreciating this topic will strengthen your grasp on API design and help you make more informed decisions when creating APIs.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is Transmission Control Protocol TCP/IP?", + "url": "https://www.fortinet.com/resources/cyberglossary/tcp-ip", + "type": "article" + }, + { + "title": "What is TCP/IP?", + "url": "https://www.cloudflare.com/en-gb/learning/ddos/glossary/tcp-ip/", + "type": "article" + }, + { + "title": "what is TCP/IP and OSI?", + "url": "https://www.youtube.com/watch?v=CRdL1PcherM", + "type": "video" + } + ] + }, + "v4nJYD9yiIEUviLPhVTCD": { + "title": "Basics of DNS", + "description": "When discussing the foundational elements of API Design, the Basics of DNS (Domain Name System) can't be overlooked. 
DNS plays a fundamental role in the way APIs function, acting as the internet's equivalent of a phone book, it interprets human-friendly hostnames into IP addresses that APIs need for communication. Understanding this complex system is essential as it allows for better comprehension of the navigation and messaging flow in API Design. For API developers, knowledge about DNS can significantly aid in troubleshooting connectivity issues, ensuring secure connections, and optimizing API architecture with more efficient calls.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is DNS?", + "url": "https://www.cloudflare.com/en-gb/learning/dns/what-is-dns/", + "type": "article" + }, + { + "title": "Introduction to DNS", + "url": "https://aws.amazon.com/route53/what-is-dns/", + "type": "article" + }, + { + "title": "DNS explained in 100 seconds", + "url": "https://www.youtube.com/watch?v=UVR9lhUGAyU", + "type": "video" + } + ] + }, + "o8i093VQv-T5Qf1yGqU0R": { + "title": "Different API Styles", + "description": "Application Programming Interface (API) design isn't a one-size-fits-all endeavor. APIs can be structured in various styles, each with its own unique characteristics, advantages, and use cases. Early identification of the appropriate API style is crucial in ensuring a functional, efficient and seamless end-user experience. Commonly used API styles include REST, SOAP, GraphQL, and gRPC. Understanding these diverse API styles would help in making better design choices, fostering efficient overall system architecture, and promoting an intuitive and easy-to-use application.", + "links": [] + }, + "BvwdASMvuNQ9DNgzdSZ4o": { + "title": "RESTful APIs", + "description": "RESTful APIs, or Representational State Transfer APIs, are a set of conventions for designing networked applications. They utilize HTTP methods to read, update and delete data. 
They offer a simple and standardized way to build web services that can be easily consumed by different clients. The key principles of a RESTful API include stateless client-server communication, cacheable data, and a uniform interface, making the API easy to understand, flexible, and scalable. Moreover, it relies heavily on the use of resources and their representations, making it a popular choice in API design due to its performance, scalability, simplicity, and reliability.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is REST?", + "url": "https://restfulapi.net/", + "type": "article" + }, + { + "title": "What is a RESTful API?", + "url": "https://aws.amazon.com/what-is/restful-api/", + "type": "article" + } + ] + }, + "TVR-SkErlOHbDKLBGfxep": { + "title": "Simple JSON APIs", + "description": "Simple JSON (JavaScript Object Notation) APIs are a popular form of API or \"Application Programming Interface\" which utilise JSON to exchange data between servers and web applications. This method has gained prominence mainly for its simplicity, light weight, and easy readability. In the context of API design, a well-structured JSON API allows developers to efficiently interact with the backend and retrieve only the data they need in a consistent and comprehensible manner. From reducing redundant data to enabling quick parsing, Simple JSON APIs provide numerous benefits to improve the overall performance of applications. 
Designing a good JSON API requires careful planning, sound knowledge of HTTP methods, endpoints, error handling mechanisms, and most importantly, a clear understanding of the application's data requirements.\n\nLearn more from the following resources:", + "links": [ + { + "title": "A specification for building JSON APIs", + "url": "https://github.com/json-api/json-api", + "type": "opensource" + }, + { + "title": "JSON API: Explained in 4 minutes (+ EXAMPLES)", + "url": "https://www.youtube.com/watch?v=N-4prIh7t38", + "type": "video" + } + ] + }, + "Wwd-0PjrtViMFWxRGaQey": { + "title": "gRPC APIs", + "description": "gRPC is a platform agnostic serialization protocol that is used to communicate between services. Designed by Google in 2015, it is a modern alternative to REST APIs. It is a binary protocol that uses HTTP/2 as a transport layer. It is a high performance, open source, general-purpose RPC framework that puts mobile and HTTP/2 first.\n\nIt's main use case is for communication between two different languages within the same application. 
You can use Python to communicate with Go, or Java to communicate with C#.\n\ngRPC uses the protocol buffer language to define the structure of the data that is transferred.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "gRPC Website", + "url": "https://grpc.io/", + "type": "article" + }, + { + "title": "gRPC Introduction", + "url": "https://grpc.io/docs/what-is-grpc/introduction/", + "type": "article" + }, + { + "title": "gRPC Core Concepts", + "url": "https://grpc.io/docs/what-is-grpc/core-concepts/", + "type": "article" + }, + { + "title": "Explore top posts about gRPC", + "url": "https://app.daily.dev/tags/grpc?ref=roadmapsh", + "type": "article" + }, + { + "title": "Stephane Maarek - gRPC Introduction", + "url": "https://youtu.be/XRXTsQwyZSU", + "type": "video" + } + ] + }, + "MKVcPM2EzAr2_Ieyp9Fu3": { + "title": "GraphQL APIs", + "description": "GraphQL is an open-source data query and manipulation language for APIs, and a runtime for executing those queries with your existing data. Unlike REST, where you have predefined data return structures for each endpoint, GraphQL APIs are designed around a type system and enable the client application to precisely specify what data it needs from the server. This gives a lot of flexibility and efficiency, leading to fewer round trips to the server and significantly enhancing the performance of the client application. 
Whether you are building a small project or an enterprise-scale application, understanding and implementing GraphQL APIs can result in cleaner, more manageable code.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Public GraphQL APIs", + "url": "https://github.com/graphql-kit/graphql-apis", + "type": "opensource" + }, + { + "title": "GraphQL Website", + "url": "https://graphql.org/", + "type": "article" + }, + { + "title": "GraphQL explained in 100 seconds", + "url": "https://www.youtube.com/watch?v=eIQh02xuVw4", + "type": "video" + } + ] + }, + "awdoiCHz7Yc3kYac_iy-a": { + "title": "Building JSON / RESTful APIs", + "description": "Building JSON/RESTful APIs involves designing and implementing APIs that adhere to the architectural constraints of Representational State Transfer (REST). These APIs use JSON (JavaScript Object Notation) as a format for information interchange, due to its lightweight, easy-to-understand, and universally accepted nature. A well-designed RESTful API, utilizing JSON, is key in developing applications that are scalable, maintainable, and easily integrated with other systems. This design approach enables the resources on a server to be accessed and manipulated using standard HTTP protocols, facilitating communication between different services and systems. 
Furthermore, it enables client-server interactions to be stateless, meaning each request from a client must contain all the information needed by the server to understand and process the request.\n\nLearn more from the following resources:", + "links": [ + { + "title": "A specification for building APIs in JSON", + "url": "https://jsonapi.org/", + "type": "article" + }, + { + "title": "How to make a REST API", + "url": "https://www.integrate.io/blog/how-to-make-a-rest-api/", + "type": "article" + }, + { + "title": "What is a REST API?", + "url": "https://www.youtube.com/watch?v=lsMQRaeKNDk&t=170s", + "type": "video" + } + ] + }, + "9WI_z34jIFXwoUQuChyRU": { + "title": "REST Principles", + "description": "REST (Representational State Transfer) is an important architectural style used in API design. It defines a set of rules and conventions through which systems communicate over a network. Key characteristics of REST include statelessness, client-server communication, cacheability, and a uniform interface. Understanding and applying these principles properly can help in designing robust, scalable, and high-performance APIs. REST principles revolve around resources and their manipulation to achieve desired outcomes. By following these principles, developers can ensure that their API design is in line with web standards, thus improving interoperability across different systems.\n\nLearn more from the following resources:", + "links": [ + { + "title": "REST API Principles | A Comprehensive Overview", + "url": "https://blog.dreamfactory.com/rest-apis-an-overview-of-basic-principles", + "type": "article" + }, + { + "title": "REST principles", + "url": "https://ninenines.eu/docs/en/cowboy/2.12/guide/rest_principles/", + "type": "article" + } + ] + }, + "b3qRTLwCC_9uDoPGrd9Bu": { + "title": "URI Design", + "description": "URI (Uniform Resource Identifier) is a string of characters used to identify a name or a resource on the Internet. 
Designing URIs carefully is a crucial part of creating a smooth API interface that is easy to understand, remember and use. Good URI design ensures that related resources are grouped together in a logical manner and can greatly impact the usability and maintainability of an API. It involves crafting standardised, intuitive HTTP paths that take advantage of the hierarchical nature of URLs to provide a better structure to the API. This hierarchy can then be used to expand the API over time without breaking existing clients' functionality.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Guidelines for URI design", + "url": "https://css-tricks.com/guidelines-for-uri-design/", + "type": "article" + }, + { + "title": "Designing URIs", + "url": "https://www.oreilly.com/library/view/restful-web-services/9780596809140/ch04.html", + "type": "article" + } + ] + }, + "itILK2SXvLvAjk1Kul7EK": { + "title": "Versioning Strategies", + "description": "API Versioning is a critical component of API Design and Management. As the APIs evolve over time to meet the new business requirements and functionality enhancements, it is crucial to manage the changes in a way that doesn't break the existing client applications. This calls for effective versioning strategies in API design. There are different versioning strategies like URI versioning, Request Header versioning, and Media Type versioning which are adopted based on the ease of implementation, client compatibility, and accessibility. 
Understanding each strategy and its pros and cons can lead to better API Design and maintainability.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is API versioning?", + "url": "https://www.postman.com/api-platform/api-versioning/", + "type": "article" + }, + { + "title": "4 API versioning best practices", + "url": "https://kodekloud.com/blog/api-versioning-best-practices/", + "type": "article" + }, + { + "title": "Versioning your APIs", + "url": "https://www.youtube.com/watch?v=Np_Jr6AvCOc", + "type": "video" + } + ] + }, + "pgJDzP3pJjhjr5wTRtPJO": { + "title": "Pagination", + "description": "Pagination is a crucial aspect of API design, providing a systematic approach to handling large amounts of data in a manageable way. Instead of returning all data in a single response, which can be overwhelming and inefficient, APIs implement pagination to deliver this data in smaller, more convenient parcels. This allows client applications to fetch data incrementally and only as needed, greatly enhancing performance and usability. The design and implementation of pagination can vary, with different strategies such as limit-offset, cursor-based, or time-based pagination, each with its own set of advantages and limitations. 
An effective API design should carefully consider pagination style, striving for a balance between ease of use, efficiency, and scalability.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Everything you need to know about API pagination", + "url": "https://nordicapis.com/everything-you-need-to-know-about-api-pagination/", + "type": "article" + }, + { + "title": "Pagination in the REST API - Atlassian", + "url": "https://developer.atlassian.com/server/confluence/pagination-in-the-rest-api/", + "type": "article" + }, + { + "title": "Unlock the power of API pagination", + "url": "https://dev.to/pragativerma18/unlocking-the-power-of-api-pagination-best-practices-and-strategies-4b49", + "type": "article" + } + ] + }, + "O7wjldZ3yTA2s_F-UnJw_": { + "title": "Rate Limiting", + "description": "Rate Limiting is a critical aspect of API Design that dictates the number of API calls a client can make within a specified timeframe. This helps in managing resource allocation, preventing abuse of the API, and maintaining the overall health of the API system. Proper rate limiting measures should be in place to ensure the API's stability, thereby delivering a consistent and reliable service to all consumers. It works primarily by setting a limit on the frequency of client requests, thereby preventing individual users from overloading the system. It is crucial to design and implement rate limiting carefully for maintaining API availability and performance.", + "links": [] + }, + "20KEgZH6cu_UokqWpV-9I": { + "title": "Idempotency", + "description": "Idempotency in API design refers to the concept where multiple identical requests have the same effect as a single request. This means that no matter how many times a client sends the same request to the server, the server's state stays the same after the first request. 
Designing APIs to be idempotent is essential for reliability, as it allows retries without side-effects, reduces complexity in distributed systems, and provides better user experience in unstable network conditions. Understanding idempotency concepts can increase the robustness and fault tolerance of your APIs. It is usually applicable to `PUT`, `DELETE`, and sometimes `POST` methods in RESTful APIs.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is idempotency?", + "url": "https://blog.dreamfactory.com/what-is-idempotency", + "type": "article" + }, + { + "title": "Idempotent REST API", + "url": "https://restfulapi.net/idempotent-rest-apis/", + "type": "article" + } + ] + }, + "LByD1vhzunhY1uY1YGZHP": { + "title": "HATEOAS", + "description": "Hypertext As The Engine Of Application State (HATEOAS) is a key concept in the design of RESTful APIs (Application Programming Interfaces). It implies that the API delivers data as well as information about available interactions. By utilizing hypermedia, it contributes to the self-descriptiveness and discoverability of the API. When correctly implemented, clients only need generic knowledge about hypermedia, not specific API semantics, which can significantly simplify client implementations and make APIs more flexible to changes. 
The principle of HATEOAS can enforce a more structured, standardized approach to API design and development.\n\nLearn more from the following resources:", + "links": [ + { + "title": "HATEOAS Driven REST APIs", + "url": "https://restfulapi.net/hateoas/", + "type": "article" + }, + { + "title": "HATEOAS", + "url": "https://htmx.org/essays/hateoas/", + "type": "article" + }, + { + "title": "What Happened To HATEOAS in RESTful API?", + "url": "https://www.youtube.com/watch?v=HNTSrytKCoQ", + "type": "video" + } + ] + }, + "zXxEiM5HeOn7W-Vue0tQf": { + "title": "Handling CRUD Operations", + "description": "When designing APIs, one needs to account for various types of interactions with data - these typically revolve around the CRUD operations; Create, Read, Update, and Delete. Whether the API is designed for a banking app or a social media platform, the need to create new data, read or retrieve existing data, update or modify that data, and delete unnecessary data is universal.\n\nTherefore, mastering CRUD operations in API design is a fundamental skill. 
Effective handling of CRUD operations facilitates seamless interaction between the front-end and back-end systems, and ensures proper data management, thereby improving user experience.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Introduction to Building a CRUD API with Node.js and Express", + "url": "https://www.split.io/blog/introduction-to-building-a-crud-api-with-node-js-and-express/", + "type": "article" + }, + { + "title": "An expert's guide to CRUD APIs", + "url": "https://www.forestadmin.com/blog/an-experts-guide-to-crud-apis-designing-a-robust-one/", + "type": "article" + }, + { + "title": "Rethinking CRUD For REST API Designs - Palantir", + "url": "https://blog.palantir.com/rethinking-crud-for-rest-api-designs-a2a8287dc2af", + "type": "article" + } + ] + }, + "8tELdagrOaGCf3nMVs8t3": { + "title": "Error Handling", + "description": "Error Handling is a crucial aspect of API design that ensures the stability, usability, and reliability of the API in production. APIs are designed to help systems communicate with each other. However, there can be instances where these systems might encounter exceptions or errors. The process of predicting, catching, and managing these error occurrences is what we refer to as 'Error Handling'. In the context of API Design, it involves defining and implementing specific strategies to detect, manage and inform consumers of any exception or error that occurs while executing requests. 
Configuring this appropriately provides a more robust and seamless communication experience, enabling developers to debug and rectify issues more efficiently.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Best practices for API error handling", + "url": "https://blog.postman.com/best-practices-for-api-error-handling/", + "type": "article" + }, + { + "title": "Best Practices for REST API Error Handling", + "url": "https://www.baeldung.com/rest-api-error-handling-best-practices", + "type": "article" + }, + { + "title": "Handling HTTP API Errors with Problem Details", + "url": "https://www.youtube.com/watch?v=uvTT_0hqhyY", + "type": "video" + } + ] + }, + "5CxU3inGcSHp-TDg3BQiY": { + "title": "RFC 7807 - Problem Details for APIs", + "description": "The practice of API Design includes a significant focus on handling errors effectively and transparently. Among the widespread standards being adopted, the RFC 7807 or Problem Details for HTTP APIs plays a crucial role. This specification provides a standardized format for sending problem or error details from an HTTP API so developers engaging with the API do not need to parse non-standard error messages and can anticipate the structure of potential errors. In essence, RFC 7807 improves the usability and comprehension of your API, providing a better developer experience and encouraging the efficient use of your API. 
Implementing it paves the way to robust and accountable systems, where issues can be traced, identified, and solved more conveniently.\n\nLearn more from the following resources:", + "links": [ + { + "title": "RFC 7807 - Problem Details for HTTP APIs", + "url": "https://datatracker.ietf.org/doc/html/rfc7807", + "type": "article" + }, + { + "title": "RFC 9457 - Problem Details for HTTP APIs", + "url": "https://www.rfc-editor.org/rfc/rfc9457.html", + "type": "article" + } + ] + }, + "qAolZHf_jp8hCdtqHZwC8": { + "title": "HTTP Caching", + "description": "HTTP caching is a key aspect of API design which involves storing copies of responses to HTTP requests to speed up future requests. When an API receives the same request multiple times, instead of processing each request separately, it can use a previously stored response, thereby improving performance and efficiency. The cache is governed by headers on the HTTP requests and responses. Understanding and implementing HTTP caching in API design can drastically reduce latency, network traffic and improve the speed of an API.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Why HTTP Caching matters for APIs", + "url": "https://thenewstack.io/why-http-caching-matters-for-apis/", + "type": "article" + }, + { + "title": "Caching REST API Response", + "url": "https://restfulapi.net/caching/", + "type": "article" + } + ] + }, + "cQnQ9v3mH27MGNwetz3JW": { + "title": "Authentication Methods", + "description": "Application Programming Interfaces (APIs) are critical components in software development that allow different software systems to communicate and share functionality. To ensure secure communication, it's essential to authenticate the parties involved in the API transactions. The authentication process confirms the identity of the API user. There are numerous authentication methods available when designing an API, each with its own pros and cons. 
This includes Basic Authentication, API Key Authentication, OAuth, and JWT among others. Understanding these different methods and their best use cases is fundamental to designing secure and effective APIs.", + "links": [] + }, + "0FzHERK5AeYL5wv1FBJbH": { + "title": "Basic Auth", + "description": "Basic Auth, short for Basic Authentication, is a simple method often used in API design for handling user authentication. In this method, client credentials, consisting of a username and password pair, are passed to the API server in a field in the HTTP header. The server then verifies these credentials before granting access to protected resources. Although Basic Auth is straightforward to implement, it is less secure compared to more advanced methods since it involves transmitting credentials in an encoded, but not encrypted, format. It is often used in cases where simplicity is paramount, or high security levels are not required.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Basic Auth Generation Header", + "url": "https://www.debugbear.com/basic-auth-header-generator", + "type": "article" + }, + { + "title": "Basic Authentication - Swagger.io", + "url": "https://swagger.io/docs/specification/authentication/basic-authentication/", + "type": "article" + }, + { + "title": "Basic Authentication - Twilio", + "url": "https://www.twilio.com/docs/glossary/what-is-basic-authentication", + "type": "article" + } + ] + }, + "QTH7sy9uQZWl6ieBz7erY": { + "title": "Token Based Auth", + "description": "Token-based authentication is a crucial aspect of API design. It involves providing the user with a token that validates their identity after they have successfully logged in. Once the token is obtained, users can use it to access resources and services provided by the API. This token is usually passed in the headers of subsequent HTTP requests done by the client. 
One key advantage of token-based auth is that tokens can be created and checked by the server without storing them persistently, which can help to scale applications more easily. This authentication method enhances the security and scalability of web applications and it is mainly used in modern API strategies, including RESTful APIs.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What Is Token-Based Authentication?", + "url": "https://www.okta.com/uk/identity-101/what-is-token-based-authentication/", + "type": "article" + }, + { + "title": "Session vs Token Authentication in 100 Seconds", + "url": "https://www.youtube.com/watch?v=UBUNrFtufWo", + "type": "video" + }, + { + "title": "Token based auth", + "url": "https://www.youtube.com/watch?v=woNZJMSNbuo", + "type": "video" + } + ] + }, + "tWg68AHLIr1gIZA1za3jp": { + "title": "JWT ", + "description": "JSON Web Tokens, or JWT, are a popular and secure method of transferring information between two parties in the domain of API design. As a compact, URL-safe means of representing claims to be transferred between two parties, they play a vital role in security and authorization in modern APIs. By encoding these claims, the information can be verified and trusted with a digital signature - ensuring that the API end-points can handle requests in a secure and reliable way. 
JWT is a relatively lightweight and scalable method that brings improved authentication and information exchange processes in API design.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Introduction to JSON Web Tokens", + "url": "https://jwt.io/introduction", + "type": "article" + }, + { + "title": "JSON Web Tokens", + "url": "https://auth0.com/docs/secure/tokens/json-web-tokens", + "type": "article" + }, + { + "title": "Why is JWT popular?", + "url": "https://www.youtube.com/watch?v=P2CPd9ynFLg", + "type": "video" + } + ] + }, + "TLuNtQ6HKYQXmglyVk8-t": { + "title": "OAuth 2.0", + "description": "OAuth 2.0 is an authorization framework that allows applications to obtain limited access to user accounts on an HTTP service, such as Facebook, GitHub, DigitalOcean, and others. It works by delegating user authentication to the service that hosts the user account and authorizing third-party applications to access the user account. OAuth 2.0 defines four roles: resource owner, client, resource server and authorization server. With regards to API design, OAuth 2.0 can be used to protect API endpoints by ensuring that the client applications having valid access tokens can only interact with the API. It provides detailed workflow processes and a set of protocols for the client application to get authorization to access resources.\n\nLearn more from the following resources:", + "links": [ + { + "title": "OAuth Website", + "url": "https://oauth.net/2/", + "type": "article" + }, + { + "title": "What is OAuth 2.0?", + "url": "https://auth0.com/intro-to-iam/what-is-oauth-2", + "type": "article" + }, + { + "title": "OAuth 2 Explained In Simple Terms", + "url": "https://www.youtube.com/watch?v=ZV5yTm4pT8g", + "type": "video" + } + ] + }, + "eQWoy4CpYP3TJL2bbhPB_": { + "title": "Session Based Auth", + "description": "Application Programming Interfaces (APIs) are critical for building software applications. 
Among several key considerations during API design, one is deciding how to implement authentication and security. Session Based Authentication is one popular way to apply security in API design.\n\nThis method revolves around the server creating a session for the user after they successfully log in, associating it with a session identifier. This Session ID is then stored client-side within a cookie. On subsequent requests, the server validates the Session ID before processing the API call. The server will destroy the session after the user logs out, thereby invalidating the Session ID.\n\nUnderstanding Session Based Authentication is crucial for secure API design, especially in scenarios where security is a top priority or in legacy systems where this method is prevalent.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Session Based Authentication - Roadmap.sh", + "url": "https://roadmap.sh/guides/session-based-authentication", + "type": "article" + }, + { + "title": "Session vs Token Authentication", + "url": "https://www.authgear.com/post/session-vs-token-authentication", + "type": "article" + }, + { + "title": "Session Based Authentication - Roadmap.sh", + "url": "https://www.youtube.com/watch?v=gKkBEOq_shs", + "type": "video" + } + ] + }, + "nHbn8_sMY7J8o6ckbD-ER": { + "title": "Authorization Methods", + "description": "In API design, authorization methods play a crucial role in ensuring the security and integrity of data transactions. They are the mechanisms through which an API identifies and validates a user, system, or application before granting them access to specific resources. These methods include Basic Authentication, OAuth, Token-based authentication, JSON Web Tokens (JWT), and API Key based, among others. So, understanding these methods enhances the ability to design APIs that effectively protect resources while allowing necessary access. 
Each method has its own pros and cons, usage scenarios and security features that make them more suitable for certain situations rather than others.", + "links": [] + }, + "wFsbmMi5Ey9UyDADdbdPW": { + "title": "Role Based Access Control (RBAC)", + "description": "Role-Based Access Control (RBAC) is a method of managing authorization in API design that assigns system access to users based on their role within an organization. RBAC is crucial in controlling which endpoints a user can call, and what operations they are allowed to execute. In the context of API design, RBAC ensures appropriate levels of access for different types of users to guarantee data security and integrity. It simplifies the process of security administration by assigning privileges based on a user's job function, rather than on an individual basis.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Role-Based Access Control", + "url": "https://auth0.com/docs/manage-users/access-control/rbac", + "type": "article" + }, + { + "title": "What is role-based access control (RBAC)?", + "url": "https://www.redhat.com/en/topics/security/what-is-role-based-access-control", + "type": "article" + }, + { + "title": "Role-based access control (RBAC) vs. Attribute-based access control (ABAC)", + "url": "https://www.youtube.com/watch?v=rvZ35YW4t5k", + "type": "video" + } + ] + }, + "dZTe_kxIUQsc9N3w920aR": { + "title": "Attribute Based Access Control (ABAC)", + "description": "Attribute Based Access Control (ABAC) is a flexible and powerful authorization method in the realm of API Design. Distinct from Role-Based Access Control (RBAC), which relies on predefined roles and permissions, ABAC uses attributes to build policies and make decisions. These attributes can be associated with the user, the action they want to perform, targeted resources, or the environment. With ABAC, finer-grained access control can be achieved, thereby improving the security and efficiency of APIs. 
This approach is widely used in complex and dynamic environments where access control requirements can be multifaceted and deeply context-dependent.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is Attribute Based Access Control?", + "url": "https://www.okta.com/uk/blog/2020/09/attribute-based-access-control-abac/", + "type": "article" + }, + { + "title": "Attribute Based Access Control", + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/introduction_attribute-based-access-control.html", + "type": "article" + } + ] + }, + "tzUJwXu_scwQHnPPT0oY-": { + "title": "API Keys & Management", + "description": "API keys and management is an integral part of API design. An API key is a unique identifier used to authenticate a user, developer, or calling program to an API. This ensures security and control over API endpoints, as only those with a valid API key can make requests. API Management, on the other hand, refers to the practices and tools that enable an organization to govern and monitor its API usage. It involves all the aspects of managing APIs including design, deployment, documentation, security, versioning, and analytics. Both elements play crucial roles in securing and organizing API access for efficient and controlled data sharing and communication.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is API key management?", + "url": "https://www.akeyless.io/secrets-management-glossary/api-key-management/", + "type": "article" + }, + { + "title": "API Key Management | Definition and Best Practices", + "url": "https://infisical.com/blog/api-key-management", + "type": "article" + } + ] + }, + "5R9yKfN1vItuv__HgCwP7": { + "title": "API Documentation Tools", + "description": "API Documentation Tools are instrumental in conveying the intricacies of API design to both technical developers and non-technical stakeholders. 
These tools help in creating comprehensive, easy-to-understand, and searchable documentation encompassing all the elements of an API such as its functions, classes, return types, arguments, and more. Thorough documentation is central in API design as it fosters seamless adoption, effective implementation, and efficient troubleshooting of APIs. Various tools exist including Swagger, DapperDox, and ReDoc, each with unique functionalities to suit different API documentation requirements.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Swagger's Official Website", + "url": "https://swagger.io/", + "type": "website" + }, + { + "title": "DapperDox's Official Website", + "url": "http://dapperdox.io/", + "type": "website" + }, + { + "title": "ReDoc Documentation", + "url": "https://github.com/Redocly/redoc", + "type": "website" + } + ] + }, + "5RY7AlfRQydjxWK65Z4cv": { + "title": "Swagger / Open API", + "description": "Swagger, also known as OpenAPI (not to be confused with OpenAI), is a set of tools specifically used for designing, building, and documenting RESTful Web services. API developers heavily rely on it due to its incredible feature for designing APIs with a clear and easy-to-understand approach. By utilizing the OpenAPI Specification (OAS), developers can accurately define a RESTful API that can easily be used across various programming languages. 
This powerful universal language is a key component for effective and efficient API design.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Swagger Website", + "url": "https://swagger.io/", + "type": "article" + }, + { + "title": "What is Swagger?", + "url": "https://blog.hubspot.com/website/what-is-swagger", + "type": "article" + }, + { + "title": "OpenAPI Initiative", + "url": "https://www.openapis.org/", + "type": "article" + } + ] + }, + "KQAus72RGqx5f-3-YeJby": { + "title": "Postman", + "description": "Postman is a popular tool in web development for designing, testing, and managing APIs. As a collaborative platform, it simplifies each step of the API lifecycle and streamlines collaboration across teams. In context of API design, it can be employed to design and mock APIs, automate testing, and observe responses in a user-friendly interface. API endpoints can be organized into collections also in Postman for a well-structured and organized API design process. Ultimately, its user-friendly interface and comprehensive features position Postman as an indispensable tool in the realm of API design.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Postman Website", + "url": "https://www.postman.com/", + "type": "article" + }, + { + "title": "Postman Api Testing Tutorial for beginners", + "url": "https://www.youtube.com/watch?v=MFxk5BZulVU", + "type": "video" + } + ] + }, + "LxWHkhlikUaMT2G8YmVDQ": { + "title": "Readme.com", + "description": "[Readme.com](http://Readme.com) is an invaluable tool in the realm of API Design, renowned for providing a collaborative platform for creating beautiful, dynamic and intuitive documentation. It's a tool which aids developers in outlining clear, comprehensive documentation for their API interfaces. The API documentation created with [Readme.com](http://Readme.com) is not just about the presentation of information, but enhances the reader's understanding by making it interactive. 
This interactive approach encourages practical learning and offers insights into how the API will behave under different circumstances. With [Readme.com](http://Readme.com), developers can create a user-focused documentation environment that streamlines the learning process and makes their APIs easier to consume and implement.\n\nLearn more from the following resources:", + "links": [ + { + "title": "readmeio", + "url": "https://github.com/readmeio", + "type": "opensource" + }, + { + "title": "readme.com", + "url": "https://readme.com", + "type": "article" + } + ] + }, + "OpS2NX1lPTOtfjV1wKtC4": { + "title": "Stoplight", + "description": "Stoplight is an advanced tool that offers a comprehensive platform for technical teams to handle all aspects of API design. Leveraging Stoplight, teams can design, document and develop APIs in a more collaborative and streamlined manner. It uses an OpenAPI specification and allows users to design APIs visually, making API development easier. With its ability to auto-generate API documentation, performing API mock testing, and providing API management features, Stoplight plays a crucial role in adopting a design-first approach in API development. By using Stoplight, APIs can be designed to be easy-to-use, scalable, and robust from the outset, which ultimately improves the overall development process and quality of the APIs.\n\nLearn more from the following resources:", + "links": [ + { + "title": "/stoplightio", + "url": "https://github.com/stoplightio", + "type": "opensource" + }, + { + "title": "Stoplight Website", + "url": "https://stoplight.io/", + "type": "article" + } + ] + }, + "qIJ6dUppjAjOTA8eQbp0n": { + "title": "API Security", + "description": "API Security refers to the practices and products that are used to secure application programming interfaces (APIs). In the context of design, it is an essential component that helps ensure that a deployed API achieves its objectives in a safe and secure manner. 
This includes safeguarding the data, preventing unauthorized access, and protecting the system that hosts the API. API security encompasses the strategies, procedures and technology used to protect APIs from malicious attacks or unauthorized access while guaranteeing optimum performance, availability, and data privacy.\n\nHave a look at the following resources to understand API security and vulnerabilities further:", + "links": [ + { + "title": "OWASP Project API Security", + "url": "https://owasp.org/API-Security/editions/2023/en/0x00-toc/", + "type": "article" + }, + { + "title": "Explore top posts about Security", + "url": "https://app.daily.dev/tags/security?ref=roadmapsh", + "type": "article" + } + ] + }, + "G70wvcOM1Isrx5ZBvS2xP": { + "title": "Common Vulnerabilities", + "description": "API design is a critical component of modern software development, enabling various applications to communicate and share data. However, as the use of APIs expands, so does the potential for security vulnerabilities. Understanding common vulnerabilities in API design is crucial to protecting sensitive data and maintaining a secure system. These vulnerabilities might arise due to lack of proper validation, weak authentication mechanisms, insecure endpoint configurations among others.\n\nLearn more from the following resources:", + "links": [ + { + "title": "API Vulnerability Overview", + "url": "https://apimike.com/api-vulnerabilities", + "type": "article" + }, + { + "title": "Top API Vulnerabilities and 6 Ways to Mitigate Them", + "url": "https://brightsec.com/blog/top-api-vulnerabilities-and-6-ways-to-mitigate-them/", + "type": "article" + } + ] + }, + "q1yaf-RbHIQsOqfzjn4k4": { + "title": "Best Practices", + "description": "API design has rapidly emerged as a vital component of software development. When designing an API, it is crucial to follow best practices to ensure optimization, scalability, and efficiency. 
The best practices in API design revolve around principles such as simplicity, consistency, security, and proper documentation among others. These practices not only smoothen the development process but also make the API more user-friendly, stable, and easily maintainable. Thus, following the best practices in API design is not an option but rather a must for developers and organizations looking to create APIs that last longer and perform better.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Best practices for REST API design", + "url": "https://stackoverflow.blog/2020/03/02/best-practices-for-rest-api-design/", + "type": "article" + }, + { + "title": "Best practices in API design", + "url": "https://swagger.io/resources/articles/best-practices-in-api-design/", + "type": "article" + } + ] + }, + "d9ZXdU73jiCdeNHQv1_DH": { + "title": "API Performance", + "description": "When we talk about API design, one crucial aspect that demands our attention is API Performance. API Performance refers to the efficiency and speed at which a developed API can execute tasks and communicate with other programs or software components. This fundamental aspect can directly impact the responsiveness of an application, determining how quickly data can be exchanged, processed, and presented to the end-user. Improving the API performance often resolves problems related to the user experience and enhances the overall performance of the application that the API is integrated with. 
API performance, therefore, plays a pivotal role both in facilitating optimized interactions between systems and in determining the overall success of the digital products that rely on such interfaces.\n\nLearn more from the following resources:", + "links": [ + { + "title": "10 Tips for Improving API Performance", + "url": "https://nordicapis.com/10-tips-for-improving-api-performance/", + "type": "article" + }, + { + "title": "Top 7 Ways to 10x Your API Performance", + "url": "https://www.youtube.com/watch?v=zvWKqUiovAM", + "type": "video" + } + ] + }, + "nQpczZUcn-TvrfT80dv0Q": { + "title": "Performance Metrics", + "description": "API Design performance metrics play a critical role in ensuring APIs are efficient, effective, and absolutely fit for their intended purpose. The performance of an API can profoundly impact the user experience and overall system performance. Therefore, it is crucial to define and monitor a set of performance metrics. These may include response times, throughput, error rates, and others that measure system health and resource utilization. By prioritizing these metrics in the context of API Design, developers can create APIs that not only meet functional requirements but also deliver desired performance levels.\n\nLearn more from the following resources:", + "links": [ + { + "title": "API Performance Monitoring", + "url": "https://www.catchpoint.com/api-monitoring-tools/api-performance-monitoring", + "type": "article" + }, + { + "title": "How does API monitoring improve API performance?", + "url": "https://tyk.io/blog/api-product-metrics-what-you-need-to-know/", + "type": "article" + } + ] + }, + "PrvRCR4HCdGar0vcUbG_a": { + "title": "Caching Strategies", + "description": "Caching in API design serves as a technique that allows you to store copies of data temporarily in places where you can access it more readily. 
By obtaining this data from high-speed storage rather than slower storage sources, you can help improve the overall speed and performance of the API. Multiple strategies such as HTTP caching, database caching, application caching, and CDN caching can be implemented, each with its own sets of advantages and considerations. Understanding different caching strategies in the context of API design is crucial for designing efficient, high-performing APIs.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Caching Strategies for APIs", + "url": "https://medium.com/@satyendra.jaiswal/caching-strategies-for-apis-improving-performance-and-reducing-load-1d4bd2df2b44", + "type": "article" + }, + { + "title": "Using caching strategies to improve API performance", + "url": "https://www.lonti.com/blog/using-caching-strategies-to-improve-api-performance", + "type": "article" + }, + { + "title": "Cache Systems Every Developer Should Know", + "url": "https://www.youtube.com/watch?v=dGAgxozNWFE", + "type": "video" + } + ] + }, + "p5wsniYnOS7cbHd92RxGk": { + "title": "Load Balancing", + "description": "Load Balancing plays a crucial role in the domain of API Design. It primarily revolves around evenly and efficiently distributing network traffic across a group of backend servers, also known as a server farm or server pool. When it comes to API design, implementing load balancing algorithms is of immense importance to ensure that no single server bears too much demand. This allows for high availability and reliability by rerouting the traffic in case of server failure, effectively enhancing application performance and contributing to a positive user experience. 
Therefore, it's a vital tactic in ensuring the scalability and robustness of system architectures which heavily rely on API interactions.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is load balancing?", + "url": "https://www.cloudflare.com/en-gb/learning/performance/what-is-load-balancing/", + "type": "article" + }, + { + "title": "API Gateway vs Load Balancer: Which is Right for Your Application?", + "url": "https://konghq.com/blog/engineering/api-gateway-vs-load-balancer", + "type": "article" + }, + { + "title": "What is a load balancer?", + "url": "https://www.youtube.com/watch?v=sCR3SAVdyCc", + "type": "video" + } + ] + }, + "tPVtRV818D8zAAuNbqPNa": { + "title": "Rate Limiting / Throttling", + "description": "Rate Limiting, often referred to as Throttling, is a fundamental aspect of API Design aimed at controlling the number of requests a client can make to an API within a specified timeframe. This technique ensures fair usage, enhances security, prevents server overload, and allows an even distribution of resources. It also minimizes the risks associated with abusive behaviors or DDoS attacks. Effective rate limiting strategy involves defining the limits based on the API's capacity and clients' reasonable needs, with flexibility to tweak these limits when necessary. Understanding rate limiting and its significance is crucial for building resilient, secure, and scalable API platforms.\n\nLearn more from the following resources:", + "links": [ + { + "title": "API Management 101: Rate Limiting", + "url": "https://tyk.io/learning-center/api-rate-limiting/", + "type": "article" + }, + { + "title": "API Rate Limiting vs. Throttling", + "url": "https://blog.stoplight.io/best-practices-api-rate-limiting-vs-throttling", + "type": "article" + }, + { + "title": "What is Rate Limiting / API Throttling? 
| System Design Concepts", + "url": "https://www.youtube.com/watch?v=9CIjoWPwAhU", + "type": "video" + } + ] + }, + "-qdwBg7HvwlbLy3IKCRij": { + "title": "Profiling and Monitoring", + "description": "Profiling and monitoring are critical aspects of API design and implementation. Profiling, in this context, refers to the process of analyzing the behavior of your API in order to understand various performance metrics including response times, request rates, error rates, and the overall health and functionality of your API. On the other hand, monitoring is the ongoing process of checking the status of your API to ensure it's functioning as expected while also providing an early warning system for potential issues and improvements. Together, profiling and monitoring your API can lead to a more reliable, efficient, and high-performing service.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Monitor health and performance of your APIs", + "url": "https://learning.postman.com/docs/monitoring-your-api/intro-monitors/", + "type": "article" + }, + { + "title": "API profiling at Pinterest", + "url": "https://medium.com/pinterest-engineering/api-profiling-at-pinterest-6fa9333b4961", + "type": "article" + } + ] + }, + "DQcAV59vr1-ZRnMfbLXpu": { + "title": "Performance Testing", + "description": "Performance Testing in API design refers to the practice of evaluating and ensuring that an API operates reliably and efficiently under varying workloads. Properly conducted performance testing can verify an API's speed, response time, reliability, and scalability. As an integral aspect of API design, it checks if API's are effectively meeting expectations for system functionality and demonstrates the potential areas of optimization. 
Performance testing is essential in maintaining high standards of user experience by preventing unexpected failures, and optimizing API consumer satisfaction.\n\nLearn more from the following resources:", + "links": [ + { + "title": "API Performance Testing: A Step-by-Step Guide", + "url": "https://testsigma.com/blog/api-performance-testing/", + "type": "article" + }, + { + "title": "Simulate user traffic to test your API performance", + "url": "https://learning.postman.com/docs/collections/performance-testing/testing-api-performance/", + "type": "article" + } + ] + }, + "R3aRhqCslwhegMfHtxg5z": { + "title": "API Integration Patterns", + "description": "API Integration Patterns, in the context of API Design, refers to the common paradigms and approaches used to enable communication between services. These patterns dictate how different APIs interact and exchange data, allowing software applications to work cohesively. They play a vital role in application development by providing a standard method for connecting diverse software components. By understanding and implementing these patterns, developers can design more robust, scalable, and interoperable APIs.\n\nLearn more from the following resources:", + "links": [ + { + "title": "API Integration Patterns - Dzone", + "url": "https://dzone.com/refcardz/api-integration-patterns", + "type": "article" + }, + { + "title": "API Integration Patterns", + "url": "https://uk.devoteam.com/expert-view/api-integration-patterns/", + "type": "article" + } + ] + }, + "--mmTKhG58_elbUqyn90G": { + "title": "Synchronous vs Asynchronous APIs", + "description": "When designing APIs, one critical decision is whether to create a synchronous or asynchronous API. Synchronous APIs are those that hold a connection open and wait for a response before moving on, hence operating in a sequential manner. 
This can lead to efficient, simple-to-understand coding but can pose performance issues when dealing with long tasks since the caller has to wait until the process finishes.\n\nOn the other hand, Asynchronous APIs do not wait for a response before moving on to the next task, allowing multiple operations to be executed simultaneously. This can result in improved performance and responsiveness especially in applications that need to handle multiple requests concurrently. However, coding for asynchronous APIs can be complex due to issues such as race conditions and callbacks. Understanding the differences between these two types of API design is crucial for creating efficient and effective APIs.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Asynchronous APIs — Everything You Need to Know", + "url": "https://blog.hubspot.com/website/asynchronous-api", + "type": "article" + }, + { + "title": "The Differences Between Synchronous and Asynchronous APIs", + "url": "https://nordicapis.com/the-differences-between-synchronous-and-asynchronous-apis/", + "type": "article" + }, + { + "title": "Understanding Asynchronous APIs", + "url": "https://blog.postman.com/understanding-asynchronous-apis/", + "type": "article" + } + ] + }, + "oMfOBkVsgiLvFLicOUdx6": { + "title": "Event Driven Architecture", + "description": "Event-driven architecture (EDA) is a software design concept that revolves around the production, interpretation, and consumption of events. With regards to API design, EDA grants systems the flexibility to decentralize analytics, microservices, and operations, thus promoting real-time information sharing and reaction. Event-driven APIs prioritize asynchronous communication, allowing applications to stay responsive even when tackling heavy data loads. 
For an effective API, adhering to EDA provides data reliability, maturity with a scalable structure, and efficient real-time data processing capabilities.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Event-driven architecture style", + "url": "https://learn.microsoft.com/en-us/azure/architecture/guide/architecture-styles/event-driven", + "type": "article" + }, + { + "title": "Event-driven architecture", + "url": "https://aws.amazon.com/event-driven-architecture/", + "type": "article" + }, + { + "title": "Event-Driven Architecture: Explained in 7 Minutes!", + "url": "https://www.youtube.com/watch?v=gOuAqRaDdHA", + "type": "video" + } + ] + }, + "MJeUD4fOHaJu1oxk4uQ-x": { + "title": "API Gateways", + "description": "API Gateways act as the main point of entry in a microservices architecture, often responsible for request routing, composition, and protocol translation. They play a significant role in API design by providing a shared layer to handle non-business tasks. This not only simplifies how consumers interact with the backend services but also helps in maintaining the security, enforcing policies, and providing analytics over the API usage. When designing APIs, understanding and implementing efficient API Gateways is an invaluable skill, as these gateways form a crucial component of any well-structured, scalable API architecture.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What does an API Gateway do?", + "url": "https://www.redhat.com/en/topics/api/what-does-an-api-gateway-do", + "type": "article" + }, + { + "title": "What are API Gateways?", + "url": "https://www.ibm.com/blog/api-gateway/", + "type": "article" + } + ] + }, + "PPeBbooE121zrgNwpVTiA": { + "title": "Microservices Architecture", + "description": "When it comes to API Design, Microservices Architecture plays a crucial role. 
It represents a unique method of developing software systems that focuses on building single-function modules with well-defined interfaces. Each microservice runs a unique process and communicates through a well-defined, lightweight mechanism (often HTTP resources API) to serve a specific business goal. This architecture allows rapid, reliable, and scalable deployment of large, complex applications. It facilitates the organization of the development team around independently deployable units, thereby enhancing productivity and speed. When designing an API, it's essential to adapt this model to get a flexible and scalable construction.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is Microservices Architecture?", + "url": "https://cloud.google.com/learn/what-is-microservices-architecture", + "type": "article" + }, + { + "title": "Microservice Architecture Style", + "url": "https://learn.microsoft.com/en-us/azure/architecture/guide/architecture-styles/microservices", + "type": "article" + }, + { + "title": "Microservices explained in 5 minutes", + "url": "https://www.youtube.com/watch?v=lL_j7ilk7rc", + "type": "video" + } + ] + }, + "IkPZel5zxXWIx90Qx7fZI": { + "title": "Messaging Queues", + "description": "Messaging Queues play a fundamental role in API design, particularly in creating robust, decoupled, and efficient systems. These queues act like a buffer, storing messages or data sent from a sender (producer), allowing a receiver (consumer) to retrieve and process them at its own pace. In the context of API design, this concept enables developers to handle high-volume data processing requirements, providing an asynchronous communication protocol between multiple services. 
The benefits of messaging queues in API design include better system scalability, fault tolerance, and increased overall system resiliency.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is a message queue?", + "url": "https://aws.amazon.com/message-queue/", + "type": "article" + }, + { + "title": "REST API message queues explained", + "url": "https://www.youtube.com/watch?v=2idPgA6IN_Q", + "type": "video" + } + ] + }, + "75NVxS0iwoQXxOHCkWQxH": { + "title": "Webhooks vs Polling", + "description": "When it comes to managing server communication and data exchange in API design, two commonly used methods are webhooks and polling. These two strategies handle updates and data synchronization in different ways. Polling is a technique where the client repeatedly makes a request to the server to check for updates. In this case, it's the client determining the rate of information exchange. Conversely, webhooks operate on a 'push' mechanism. The server sends updates to the client as they happen, providing real-time, efficient data synchronization. Determining which method to use often depends on the specifics of the API design requirement including the frequency of data changes, server load, and application's real-time need.\n\nLearn more from the following resources:", + "links": [ + { + "title": "When to Use Webhooks, WebSocket, Pub/Sub, and Polling", + "url": "https://hookdeck.com/webhooks/guides/when-to-use-webhooks", + "type": "article" + }, + { + "title": "Polling vs webhooks: when to use one over the other", + "url": "https://www.merge.dev/blog/webhooks-vs-polling", + "type": "article" + } + ] + }, + "X68HXAAV-nKo-V4Fu1o72": { + "title": "Batch Processing", + "description": "Batch Processing refers to the method of handling bulk data requests in API design. Here, multiple API requests are packed and processed as a single group or 'batch'. 
Instead of making numerous individual API calls, a user can make one batch request with numerous operations. This approach can increase performance and efficiency by reducing the overhead of establishing and closing multiple connections. The concept of 'batch processing' in API design is particularly useful in data-intensive applications or systems where the need for processing high volumes of data is prevalent.\n\nLearn more from the following resources:", + "links": [ + { + "title": "API design guidance: bulk vs batch import", + "url": "https://tyk.io/blog/api-design-guidance-bulk-and-batch-import/", + "type": "article" + }, + { + "title": "Stream vs Batch processing explained with examples", + "url": "https://www.youtube.com/watch?v=1xgBQTF24mU", + "type": "video" + } + ] + }, + "H22jAI2W5QLL-b1rq-c56": { + "title": "Rabbit MQ", + "description": "RabbitMQ is an open-source message-broker software/system that plays a crucial role in API design, specifically in facilitating effective and efficient inter-process communication. It implements the Advanced Message Queuing Protocol (AMQP) to enable secure and reliable data transmission in various formats such as text, binary, or serialized objects.\n\nIn API design, RabbitMQ comes in handy in decoupling application processes for scalability and robustness, whilst ensuring that data delivery occurs safely and seamlessly. It introduces queuing as a way of handling multiple users or service calls at once hence enhancing responsiveness and performance of APIs. 
Its queue system elegantly digests API request loads, allowing services to evenly process data while preventing overloading.\n\nLearn more from the following resources:", + "links": [ + { + "title": "RabbitMQ Website", + "url": "https://www.rabbitmq.com/", + "type": "article" + }, + { + "title": "Intro to RabbitMQ", + "url": "https://www.youtube.com/watch?v=bfVddTJNiAw", + "type": "video" + } + ] + }, + "boYX1QcJullypfX4sevdy": { + "title": "Kafka", + "description": "Apache Kafka is a real-time, fault-tolerant, and highly reliable messaging system that's integral to API design. It's primarily used to build real-time data streaming applications and microservices due to its inherent ability to handle high volume data and multi-subscriber support. In the context of API design, Kafka provides a robust messaging queue system that enables cloud-based platforms and services to communicate seamlessly with each other in a real-time environment. Moreover, the API designers use Kafka APIs such as Producer API, Consumer API, Streams API, and Connect API which enable the transmission and manipulation of messages within the Kafka ecosystem.\n\nLearn more from the following resources:", + "links": [ + { + "title": "apache/kafka", + "url": "https://github.com/apache/kafka", + "type": "opensource" + }, + { + "title": "Kafka Website", + "url": "https://kafka.apache.org/", + "type": "article" + }, + { + "title": "Kafka in 100 seconds", + "url": "https://www.youtube.com/watch?v=uvb00oaa3k8", + "type": "video" + } + ] + }, + "Wpk4TvxcZOJgAoXjrOsZF": { + "title": "API Testing", + "description": "API Testing refers to the process of checking the functionality, reliability, performance, and security of Application Programming Interfaces (APIs). It plays a crucial role in API design as it ensures that the APIs work correctly and as expected. This kind of testing does not require a user interface and mainly focuses on the business logic layer of the software architecture. 
API Testing is integral to guarantee that the data communication and responses between different software systems are error-free and streamlined.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is API testing?", + "url": "https://www.postman.com/api-platform/api-testing/", + "type": "article" + }, + { + "title": "API Testing : What It is, How to Test & Best Practices", + "url": "https://testsigma.com/guides/api-testing/", + "type": "article" + } + ] + }, + "JvmW78cDm84GNhq8VEYZp": { + "title": "Unit Testing", + "description": "Unit Testing, in the context of API design, refers to the process of testing the individual components or functions of an API independently to ensure that each part is working correctly. It is typically performed at the development stage. The chief goal of Unit Testing is to isolate each component and validate its correct operation, thereby increasing confidence in the stability of the API as a whole. It lays a solid foundation for integration testing and ultimately leads to reliable, bug-free APIs. Whether your API is RESTful, SOAP, or GraphQL, unit testing is a critical step in API design and development.\n\nLearn more from the following resources:", + "links": [ + { + "title": "How to write unit tests for your REST API", + "url": "https://medium.com/@oyetoketoby80/how-to-write-unit-test-for-your-rest-api-f8f71376273f", + "type": "article" + }, + { + "title": "Unit test a REST API", + "url": "https://www.testim.io/blog/unit-test-rest-api/", + "type": "article" + } + ] + }, + "qZELS5vw2feS7QfyD7spX": { + "title": "Integration Testing", + "description": "Integration testing is a critical aspect of API design. It is a level of software testing where individual units or components are combined and tested as a group. The main purpose of integration testing in API design is to expose faults and discrepancies in the interaction between integrated units. 
This testing approach ensures that the different parts of an API work together seamlessly, to deliver the necessary functionality and performance. It helps detect issues related to the network, database, and performance, which unit tests cannot uncover. Thus, this level of testing is instrumental in validating the reliability, efficiency, and functionality of an API's integrated components.\n\nLearn more from the following resources:", + "links": [ + { + "title": "How to run API integration tests", + "url": "https://www.merge.dev/blog/api-integration-testing", + "type": "article" + }, + { + "title": "Integration testing template", + "url": "https://www.postman.com/templates/fe506090-ca91-4340-bea9-82d2c3d2bb9a/Integration-testing/", + "type": "article" + } + ] + }, + "6lm3wy9WTAERTqXCn6pFt": { + "title": "Functional Testing", + "description": "Functional testing in the context of API design involves validating the endpoints and key-value pairs of an API. It ensures the server response works as expected and assesses the functionality of the API -- whether it is performing all the intended functions correctly. Various approaches like testing request-response pairs, error codes, and data accuracy are used. Functional testing can provide invaluable insights into how well an API meets the specified requirements and whether it is ready for integration into applications.\n\nLearn more from the following resources:", + "links": [ + { + "title": "API Functional Testing – Why Is It Important And How to Test", + "url": "https://testsigma.com/blog/api-functional-testing/", + "type": "article" + }, + { + "title": "What Is API Functional Testing?", + "url": "https://www.youtube.com/watch?v=CvJHDKMWofk", + "type": "video" + } + ] + }, + "7JNEx_cbqnAx3esvwZMOd": { + "title": "Load Testing", + "description": "Load testing is a crucial aspect of API design that ensures reliability, efficiency and performance under varying loads. 
It primarily focuses on identifying the maximum capacity of the API in terms of the volume of requests it can handle and its subsequent behavior when this threshold is reached or overloaded. By simulating varying degrees of user load, developers can identify and rectify bottlenecks or breakdown points in the system, hence enhancing overall API resilience.\n\nLearn more from the following resources:", + "links": [ + { + "title": "API load testing - a beginners guide", + "url": "https://grafana.com/blog/2024/01/30/api-load-testing/", + "type": "article" + }, + { + "title": "Test your API’s performance by simulating real-world traffic", + "url": "https://blog.postman.com/postman-api-performance-testing/", + "type": "article" + }, + { + "title": "Load testing your API's", + "url": "https://www.youtube.com/watch?v=a5hWE4hMOoY", + "type": "video" + } + ] + }, + "bEVCT5QGY3uw0kIfAELKh": { + "title": "Mocking APIs", + "description": "API mocking is a crucial aspect of API design and testing. It involves simulating the behaviors of real APIs to test various aspects of the system without the need of the real API being readily available. During the stages of development and testing, the API might be undefined or changes in the API can be expected, hence mocking comes into the picture. In such cases, it helps software developers and testers to isolate the system and work independently, enhancing the control over the input and output of the test. The focus here ranges from testing the API for functionality, reliability, performance, to security. Therefore, understanding and implementing effective API mocking strategies can significantly streamline the API design and development process.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is API Mocking? 
Definition, Guide, and Best Practices", + "url": "https://katalon.com/resources-center/blog/what-is-api-mocking", + "type": "article" + }, + { + "title": "What is API Mocking?", + "url": "https://blog.postman.com/what-is-api-mocking/", + "type": "article" + }, + { + "title": "How to Mock RESTFUL APIs - The Easy way!", + "url": "https://www.youtube.com/watch?v=tJRN5WBF5Wc", + "type": "video" + } + ] + }, + "NqeBglhzukVMMEF9p2CXc": { + "title": "Contract Testing", + "description": "Contract Testing is a critical aspect of maintaining a robust and reliable API infrastructure. In the realm of API design, Contract Testing refers to the method of ensuring that APIs work as anticipated and that changes to them do not break their intended functionality. This approach validates the interaction between two different systems, typically consumer and provider (API), ensuring they comply with their agreed-upon contract. By defining clear and concise contracts for our APIs, developers can avoid common deployment issues and enhance system integration processes.\n\nLearn more from the following resources:", + "links": [ + { + "title": "A complete guide to Contract Testing", + "url": "https://testsigma.com/blog/api-contract-testing/", + "type": "article" + }, + { + "title": "Get started with API Contract Testing", + "url": "https://saucelabs.com/resources/blog/getting-started-with-api-contract-testing", + "type": "article" + }, + { + "title": "Contract Testing", + "url": "https://www.postman.com/templates/42247877-8529-429d-acba-4de20c3b5b3b/Contract-testing/", + "type": "article" + } + ] + }, + "XD1vDtrRQFbLyKJaD1AlA": { + "title": "Error Handling / Retries", + "description": "When creating effective API designs, addressing Error Handling and Retries forms an essential facet. 
This is primarily due to the fact that APIs aren't always error-free and instances of network hiccups or input inaccuracies from users can occur. Without robust error handling, such occurrences can easily lead to catastrophic application failure or unsatisfactory user experiences.\n\nIn this context, error handling can refer to validating inputs, managing exceptions, and returning appropriate error message or status codes to the user. Meanwhile, the concept of retries comes into play to ensure maximum request success amidst transient failures. Through correctly implemented retries, an API can repeatedly attempt to execute a request until it is successful, thus ensuring seamless operation. The criteria and mechanisms of retries, including the count, delay, and conditions for retries, are crucial aspects to solidify during the API design.\n\nLearn more from the following resources:", + "links": [ + { + "title": "How To Improve Your Backend By Adding Retries to Your API Calls", + "url": "https://hackernoon.com/how-to-improve-your-backend-by-adding-retries-to-your-api-calls-83r3udx", + "type": "article" + }, + { + "title": "How to make resilient web applications with retries", + "url": "https://www.youtube.com/watch?v=Gly94hp3Eec", + "type": "video" + } + ] + }, + "JE12g5cqnwmgeTle14Vxw": { + "title": "Real-time APIs", + "description": "In the realm of API Design, Real-time APIs hold significant value as they provide immediate access to the latest data as soon as they become available. Rather than relying on periodic polling for updates, Real-time APIs maintain an open connection between the client and the server which allows immediate, bi-directional data flow. These APIs are commonly used in applications which require real-time information such as live chat programs, financial trading platforms or online multiplayer games. 
Designing such APIs requires careful consideration of factors such as connection management, data consistency, and efficient handling of high volume data streams.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What are Realtime APIs?", + "url": "https://www.pubnub.com/guides/realtime-api/", + "type": "article" + }, + { + "title": "What are realtime APIs and when to use them?", + "url": "https://ably.com/topic/what-is-a-realtime-api", + "type": "article" + } + ] + }, + "UQ8N7gcVpRLAYXgUNHBt5": { + "title": "Web Sockets", + "description": "Web Sockets provide a long-lived connection between a client and a server over which messages can be sent bi-directionally, in real-time. They play a crucial role in creating real-time APIs by offering a faster and more efficient communication method over the traditional HTTP. In the context of API Design, Web Sockets are used for developing APIs that require real-time data transfer, such as chat applications, live sports updates, and real-time analytics. This paradigm shift from traditional HTTP-based API design to Web Socket-based API design helps create APIs that are more responsive, dynamic, and efficient in handling real-time data.\n\nLearn more from the following resources:", + "links": [ + { + "title": "The WebSocket API (WebSockets)", + "url": "https://developer.mozilla.org/en-US/docs/Web/API/WebSockets_API", + "type": "article" + }, + { + "title": "What are websockets?", + "url": "https://www.pubnub.com/guides/websockets/", + "type": "article" + }, + { + "title": "How web sockets work", + "url": "https://www.youtube.com/watch?v=pnj3Jbho5Ck", + "type": "video" + } + ] + }, + "iNsXTtcIHsI_i-mCfjGYn": { + "title": "Server Sent Events", + "description": "Server-Sent Events (SSE) represent an explicit concept in the design of Real-time APIs. 
Unlike traditional approaches where a client sends a request and awaits a response from the server, SSE enables a server to push data to clients whenever a particular event takes place. SSE is especially fundamental in API design when it comes to the development of applications where real-time data is essential, such as live news updates, real-time gaming, or live-streaming services. Designing APIs with the SSE approach ensures a more dynamic and responsive user experience.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Using server-sent events", + "url": "https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events", + "type": "article" + }, + { + "title": "Server-Sent Events | Postman Level Up", + "url": "https://www.youtube.com/watch?v=KrE044J8jEQ", + "type": "video" + } + ] + }, + "yvdfoly5WHHTq2Puss355": { + "title": "Standards and Compliance", + "description": "When designing APIs, it's crucial to consider the concept of standards and compliance. Standards represent the set of rules and best practices that guide developers to create well-structured and easily maintainable APIs. They can range from the proper structure of the endpoints, the standardization of error responses, to naming conventions, and the usage of HTTP verbs.\n\nCompliance on the other hand, emphasizes on meeting protocol requirements or standards such as REST or SOAP. Furthermore, operating within regulated industries can also necessitate certain compliance measures like GDPR, HIPAA and others. 
Compliance in API Design ensures interoperability and safety of data transmission between systems.\n\nIn essence, Standards and Compliance in API Design contributes towards building more secure, robust, and efficient APIs that are user-friendly and universally understandable.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is API compliance?", + "url": "https://tyk.io/learning-center/api-compliance/", + "type": "article" + }, + { + "title": "What is API compliance and why is it important?", + "url": "https://www.traceable.ai/blog-post/achieve-api-compliance", + "type": "article" + }, + { + "title": "REST API standards", + "url": "https://www.integrate.io/blog/rest-api-standards/", + "type": "article" + } + ] + }, + "vZxdswGLHCPi5GSuXEcHJ": { + "title": "GDPR", + "description": "The General Data Protection Regulation (GDPR) is an essential standard in API Design that addresses the storage, transfer, and processing of personal data of individuals within the European Union. With regards to API Design, considerations must be given on how APIs handle, process, and secure the data to conform with GDPR's demands on data privacy and security. This includes requirements for explicit consent, right to erasure, data portability, and privacy by design. Non-compliance with these standards not only leads to hefty fines but may also erode trust from users and clients. 
As such, understanding the impact and integration of GDPR within API design is pivotal for organizations handling EU residents' data.\n\nLearn more from the following resources:", + "links": [ + { + "title": "GDPR", + "url": "https://gdpr-info.eu/", + "type": "article" + }, + { + "title": "What is GDPR Compliance in Web Application and API Security?", + "url": "https://probely.com/blog/what-is-gdpr-compliance-in-web-application-and-api-security/", + "type": "article" + } + ] + }, + "At5exN7ZAx2IzY3cTCzHm": { + "title": "API Lifecycle Management", + "description": "API Lifecycle Management is a crucial aspect in API design that oversees the process of creating, managing, and retiring APIs. This involves various stages from initial planning, designing, testing, deployment, to eventual retirement of the API. Proper lifecycle management ensures that an API meets the requirements, is reliable, and that it evolves with the needs of end users and developers. Moreover, it helps in maintaining the security, performance, and accessibility of the API throughout its lifetime. This comprehensive approach enables organizations to make the most of their APIs, mitigate issues, and facilitate successful digital transformation.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is the API lifecycle?", + "url": "https://www.postman.com/api-platform/api-lifecycle/", + "type": "article" + }, + { + "title": "What is API lifecycle management?", + "url": "https://swagger.io/blog/api-strategy/what-is-api-lifecycle-management/", + "type": "article" + }, + { + "title": "Day in the lifecycle of an API", + "url": "https://www.youtube.com/watch?v=VxY_cz0VQXE", + "type": "video" + } + ] + }, + "a-_iIE7UdoXzD00fD9MxN": { + "title": "CCPA", + "description": "The California Consumer Privacy Act (CCPA) is a pivotal state statute meant to enhance privacy rights and consumer protection for individuals within California, United States. 
API Design greatly impacts compliance with CCPA, as improper management and exposure of user data can potentially violate this law. Crucially, designing APIs means considering data privacy, security, and user consent from the very foundation. Programmatically, CCPA compliance may involve structuring APIs to respond to user demands such as data access, data deletion, and opt-out requests. It imposes a significant responsibility on API developers to enforce user control over data and maintain rigorous standards of data protection.\n\nLearn more from the following resources:", + "links": [ + { + "title": "California Consumer Privacy Act (CCPA)", + "url": "https://oag.ca.gov/privacy/ccpa", + "type": "article" + }, + { + "title": "What is the CCPA?", + "url": "https://www.cloudflare.com/en-gb/learning/privacy/what-is-the-ccpa/", + "type": "article" + } + ] + }, + "J0enF8UTVzY3H4n3pbPIF": { + "title": "PCI DSS", + "description": "Payment Card Industry Data Security Standard (PCI DSS) is a widely accepted set of policies and procedures intended to optimize the security of credit, debit and cash card transactions and protect cardholders against misuse of their personal information. In terms of API Design, building APIs in compliance with PCI DSS is crucial when processing, storing or transmitting credit card information. 
By adhering to these standards, not only can developers ensure safe and secure API endpoints but also build trust among users by safeguarding their sensitive financial data.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is PCI DSS and how to comply?", + "url": "https://www.itgovernance.co.uk/pci_dss", + "type": "article" + }, + { + "title": "Payment Card Industry Data Security Standard", + "url": "https://en.wikipedia.org/wiki/Payment_Card_Industry_Data_Security_Standard", + "type": "article" + } + ] + }, + "W4WwTmgZGnWmiYsB0ezml": { + "title": "HIPAA", + "description": "HIPAA (Health Insurance Portability and Accountability Act) is a critical standard when it comes to API design in the healthcare industry. In essence, it provides the mandate for protecting sensitive patient data. Any organization dealing with protected health information (PHI) must ensure all required physical, network, and process security measures are in place. In the context of API design, HIPAA compliance means structuring endpoints, data transmission, and storage methods that align with these crucial safeguards. This encompasses encryption, access controls, audit controls, and integrity controls. Hence, understanding HIPAA is fundamental for API designers working in the healthcare domain.\n\nLearn more from the following resources:", + "links": [ + { + "title": "HIPAA", + "url": "https://www.hhs.gov/hipaa/index.html", + "type": "article" + }, + { + "title": "The 11 MOST Common HIPAA Violations", + "url": "https://www.youtube.com/watch?v=sN-zLAqYoTo", + "type": "video" + } + ] + }, + "mXCKtLUvwVJkHrpHzOecq": { + "title": "PII", + "description": "Personal Identifiable Information (PII) under Standards and Compliance is a crucial aspect of API Design. It refers to the secure handling and transmission of personal data such as names, addresses, and credit card numbers, which APIs often deal with. 
In this context, APIs must be built under strict compliance with standards such as GDPR, HIPAA or PCI DSS, which regulate the protection of personal data. These standards ensure that personal data is not misused and that user privacy is respected. Any violations can lead to hefty fines and damage to the company's reputation. Understanding PII and designing APIs in accordance with applicable regulations is vital for a robust, secure, and compliant API design.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Personally Identifiable Information (PII): Definition, Types, and Examples", + "url": "https://www.investopedia.com/terms/p/personally-identifiable-information-pii.asp", + "type": "article" + }, + { + "title": "What is Personally Identifiable Information?", + "url": "https://www.ibm.com/topics/pii", + "type": "article" + } + ] + } +} \ No newline at end of file diff --git a/public/roadmap-content/backend.json b/public/roadmap-content/backend.json new file mode 100644 index 000000000..539359877 --- /dev/null +++ b/public/roadmap-content/backend.json @@ -0,0 +1,3039 @@ +{ + "gKTSe9yQFVbPVlLzWB0hC": { + "title": "Search Engines", + "description": "", + "links": [] + }, + "9Fpoor-Os_9lvrwu5Zjh-": { + "title": "Design and Development Principles", + "description": "In this section, we'll discuss some essential design and development principles to follow while building the backend of any application. These principles will ensure that the backend is efficient, scalable, and maintainable.\n\n1\\. Separation of Concerns (SoC)\n--------------------------------\n\nSeparation of Concerns is a fundamental principle that states that different functionalities of a system should be as independent as possible. This approach improves maintainability and scalability by allowing developers to work on separate components without affecting each other. 
Divide your backend into clear modules and layers, such as data storage, business logic, and network communication.\n\n2\\. Reusability\n---------------\n\nReusability is the ability to use components, functions, or modules in multiple places without duplicating code. While designing the backend, look for opportunities where you can reuse existing code. Use techniques like creating utility functions, abstract classes, and interfaces to promote reusability and reduce redundancy.\n\n3\\. Keep It Simple and Stupid (KISS)\n------------------------------------\n\nKISS principle states that the simpler the system, the easier it is to understand, maintain, and extend. When designing the backend, try to keep the architecture and code as simple as possible. Use clear naming conventions and modular structures, and avoid over-engineering and unnecessary complexity.\n\n4\\. Don't Repeat Yourself (DRY)\n-------------------------------\n\nDo not duplicate code or functionality across your backend. Duplication can lead to inconsistency and maintainability issues. Instead, focus on creating reusable components, functions or modules, which can be shared across different parts of the backend.\n\n5\\. Scalability\n---------------\n\nA scalable system is one that can efficiently handle an increasing number of users, requests, or data. Design the backend with scalability in mind, considering factors such as data storage, caching, load balancing, and horizontal scaling (adding more instances of the backend server).\n\n6\\. Security\n------------\n\nSecurity is a major concern when developing any application. Always follow best practices to prevent security flaws, such as protecting sensitive data, using secure communication protocols (e.g., HTTPS), implementing authentication and authorization mechanisms, and sanitizing user inputs.\n\n7\\. Testing\n-----------\n\nTesting is crucial for ensuring the reliability and stability of the backend. 
Implement a comprehensive testing strategy, including unit, integration, and performance tests. Use automated testing tools and set up continuous integration (CI) and continuous deployment (CD) pipelines to streamline the testing and deployment process.\n\n8\\. Documentation\n-----------------\n\nProper documentation helps developers understand and maintain the backend codebase. Write clear and concise documentation for your code, explaining the purpose, functionality, and how to use it. Additionally, use comments and appropriate naming conventions to make the code itself more readable and self-explanatory.\n\nBy following these design and development principles, you'll be well on your way to creating an efficient, secure, and maintainable backend for your applications.", + "links": [] + }, + "EwvLPSI6AlZ4TnNIJTZA4": { + "title": "Learn about APIs", + "description": "API is the acronym for Application Programming Interface, which is a software intermediary that allows two applications to talk to each other.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is an API?", + "url": "https://aws.amazon.com/what-is/api/", + "type": "article" + }, + { + "title": "What is an API?", + "url": "https://www.youtube.com/watch?v=s7wmiS2mSXY", + "type": "video" + } + ] + }, + "SiYUdtYMDImRPmV2_XPkH": { + "title": "Internet", + "description": "The Internet is a global network of computers connected to each other which communicate through a standardized set of protocols.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "How does the Internet Work?", + "url": "https://cs.fyi/guide/how-does-internet-work", + "type": "article" + }, + { + "title": "The Internet Explained", + "url": "https://www.vox.com/2014/6/16/18076282/the-internet", + "type": "article" + }, + { + "title": "How Does the Internet Work?", + "url": "http://web.stanford.edu/class/msande91si/www-spr04/readings/week1/InternetWhitepaper.htm", + "type": "article" 
+ }, + { + "title": "Introduction to Internet", + "url": "/guides/what-is-internet", + "type": "article" + }, + { + "title": "How does the Internet work?", + "url": "https://www.youtube.com/watch?v=x3c1ih2NJEg", + "type": "video" + }, + { + "title": "How the Internet Works in 5 Minutes", + "url": "https://www.youtube.com/watch?v=7_LPdttKXPc", + "type": "video" + }, + { + "title": "Computer Network | Google IT Support Certificate", + "url": "https://www.youtube.com/watch?v=Z_hU2zm4_S8", + "type": "video" + } + ] + }, + "CWwh2abwqx4hAxpAGvhIx": { + "title": "Rust", + "description": "Rust is a modern systems programming language focusing on safety, speed, and concurrency. It accomplishes these goals by being memory safe without using garbage collection.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "The Rust Programming Language - online book", + "url": "https://doc.rust-lang.org/book/", + "type": "article" + }, + { + "title": "Rust by Example - collection of runnable examples", + "url": "https://doc.rust-lang.org/stable/rust-by-example/index.html", + "type": "article" + }, + { + "title": "Rust vs. Go: Why They’re Better Together", + "url": "https://thenewstack.io/rust-vs-go-why-theyre-better-together/", + "type": "article" + }, + { + "title": "Rust by the Numbers: The Rust Programming Language in 2021", + "url": "https://thenewstack.io/rust-by-the-numbers-the-rust-programming-language-in-2021/", + "type": "article" + }, + { + "title": "Explore top posts about Rust", + "url": "https://app.daily.dev/tags/rust?ref=roadmapsh", + "type": "article" + } + ] + }, + "l9Wrq_Ad9-Ju4NIB0m5Ha": { + "title": "PHP", + "description": "PHP is a general purpose scripting language often used for making dynamic and interactive Web pages. It was originally created by Danish-Canadian programmer Rasmus Lerdorf in 1994. The PHP reference implementation is now produced by The PHP Group and supported by PHP Foundation. 
PHP supports procedural and object-oriented styles of programming with some elements of functional programming as well.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "PHP Website", + "url": "https://php.net/", + "type": "article" + }, + { + "title": "Learn PHP - W3Schools", + "url": "https://www.w3schools.com/php/", + "type": "article" + }, + { + "title": "PHP - The Right Way", + "url": "https://phptherightway.com/", + "type": "article" + }, + { + "title": "Explore top posts about PHP", + "url": "https://app.daily.dev/tags/php?ref=roadmapsh", + "type": "article" + }, + { + "title": "PHP for Beginners", + "url": "https://www.youtube.com/watch?v=U2lQWR6uIuo&list=PL3VM-unCzF8ipG50KDjnzhugceoSG3RTC", + "type": "video" + }, + { + "title": "PHP For Absolute Beginners", + "url": "https://www.youtube.com/watch?v=2eebptXfEvw", + "type": "video" + }, + { + "title": "Full PHP 8 Tutorial - Learn PHP The Right Way In 2022", + "url": "https://www.youtube.com/watch?v=sVbEyFZKgqk&list=PLr3d3QYzkw2xabQRUpcZ_IBk9W50M9pe-", + "type": "video" + } + ] + }, + "BdXbcz4-ar3XOX0wIKzBp": { + "title": "Go", + "description": "Go is an open source programming language supported by Google. 
Go can be used to write cloud services, CLI tools, used for API development, and much more.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Visit Dedicated Go Roadmap", + "url": "/golang", + "type": "article" + }, + { + "title": "A Tour of Go – Go Basics", + "url": "https://go.dev/tour/welcome/1", + "type": "article" + }, + { + "title": "Go Reference Documentation", + "url": "https://go.dev/doc/", + "type": "article" + }, + { + "title": "Go by Example - annotated example programs", + "url": "https://gobyexample.com/", + "type": "article" + }, + { + "title": "W3Schools Go Tutorial ", + "url": "https://www.w3schools.com/go/", + "type": "article" + }, + { + "title": "Making a RESTful JSON API in Go", + "url": "https://thenewstack.io/make-a-restful-json-api-go/", + "type": "article" + }, + { + "title": "Go, the Programming Language of the Cloud", + "url": "https://thenewstack.io/go-the-programming-language-of-the-cloud/", + "type": "article" + }, + { + "title": "Explore top posts about Golang", + "url": "https://app.daily.dev/tags/golang?ref=roadmapsh", + "type": "article" + }, + { + "title": "Go Class by Matt", + "url": "https://www.youtube.com/playlist?list=PLoILbKo9rG3skRCj37Kn5Zj803hhiuRK6", + "type": "video" + } + ] + }, + "8-lO-v6jCYYoklEJXULxN": { + "title": "JavaScript", + "description": "Apart from being used in the browser, JavaScript is also used in backend e.g. using [Node.js](https://nodejs.org/) or [Deno](https://deno.land/) for writing server-side code in JavaScript.\n\nIf you pick up JavaScript for the Backend, my personal recommendation would be to learn [JavaScript](/javascript) and then go with [Node.js](/nodejs) as it is the most popular and widely used option. 
Also, I would recommend learning TypeScript later on as you continue with your backend development Journey; it's a superset of JavaScript and is used in many projects.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "You Dont Know JS Yet (book series) ", + "url": "https://github.com/getify/You-Dont-Know-JS", + "type": "opensource" + }, + { + "title": "Visit Dedicated JavaScript Roadmap", + "url": "/javascript", + "type": "article" + }, + { + "title": "W3Schools – JavaScript Tutorial", + "url": "https://www.w3schools.com/js/", + "type": "article" + }, + { + "title": "The Modern JavaScript Tutorial", + "url": "https://javascript.info/", + "type": "article" + }, + { + "title": "Eloquent Javascript - Book", + "url": "https://eloquentjavascript.net/", + "type": "article" + }, + { + "title": "Visit Dedicated Node.js Roadmap", + "url": "/nodejs", + "type": "article" + }, + { + "title": "Official JavaScript Documentation", + "url": "https://www.javascript.com/", + "type": "article" + }, + { + "title": "Explore top posts about JavaScript", + "url": "https://app.daily.dev/tags/javascript?ref=roadmapsh", + "type": "article" + }, + { + "title": "JavaScript Crash Course for Beginners", + "url": "https://youtu.be/hdI2bqOjy3c", + "type": "video" + }, + { + "title": "Node.js Crash Course", + "url": "https://www.youtube.com/watch?v=fBNz5xF-Kx4", + "type": "video" + }, + { + "title": "Node.js Tutorial for Beginners", + "url": "https://www.youtube.com/watch?v=TlB_eWDSMt4", + "type": "video" + } + ] + }, + "ANeSwxJDJyQ-49pO2-CCI": { + "title": "Java", + "description": "Java is general-purpose language, primarily used for Internet-based applications. 
It was created in 1995 by James Gosling at Sun Microsystems and is one of the most popular options for backend developers.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Visit Dedicated Java Roadmap", + "url": "/java", + "type": "article" + }, + { + "title": "Java Website", + "url": "https://www.java.com/", + "type": "article" + }, + { + "title": "W3 Schools Tutorials", + "url": "https://www.w3schools.com/java/", + "type": "article" + }, + { + "title": "Explore top posts about Java", + "url": "https://app.daily.dev/tags/java?ref=roadmapsh", + "type": "article" + }, + { + "title": "Java Crash Course", + "url": "https://www.youtube.com/watch?v=eIrMbAQSU34", + "type": "video" + }, + { + "title": "Complete Java course", + "url": "https://www.youtube.com/watch?v=xk4_1vDrzzo", + "type": "video" + } + ] + }, + "J_sVHsD72Yzyqb9KCIvAY": { + "title": "Python", + "description": "Python is a well known programming language which is both a strongly typed and a dynamically typed language. 
Being an interpreted language, code is executed as soon as it is written and the Python syntax allows for writing code in functional, procedural or object-oriented programmatic ways.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Visit Dedicated Python Roadmap", + "url": "/python", + "type": "article" + }, + { + "title": "Python Website", + "url": "https://www.python.org/", + "type": "article" + }, + { + "title": "Python Getting Started", + "url": "https://www.python.org/about/gettingstarted/", + "type": "article" + }, + { + "title": "Automate the Boring Stuff", + "url": "https://automatetheboringstuff.com/", + "type": "article" + }, + { + "title": "Python principles - Python basics", + "url": "https://pythonprinciples.com/", + "type": "article" + }, + { + "title": "W3Schools - Python Tutorial ", + "url": "https://www.w3schools.com/python/", + "type": "article" + }, + { + "title": "Python Crash Course", + "url": "https://ehmatthes.github.io/pcc/", + "type": "article" + }, + { + "title": "An Introduction to Python for Non-Programmers", + "url": "https://thenewstack.io/an-introduction-to-python-for-non-programmers/", + "type": "article" + }, + { + "title": "Getting Started with Python and InfluxDB", + "url": "https://thenewstack.io/getting-started-with-python-and-influxdb/", + "type": "article" + }, + { + "title": "Explore top posts about Python", + "url": "https://app.daily.dev/tags/python?ref=roadmapsh", + "type": "article" + }, + { + "title": "Python for Beginners - Learn Python in 1 Hour", + "url": "https://www.youtube.com/watch?v=kqtD5dpn9C8&ab_channel=ProgrammingwithMosh", + "type": "video" + } + ] + }, + "rImbMHLLfJwjf3l25vBkc": { + "title": "C#", + "description": "C# (pronounced \"C sharp\") is a general purpose programming language made by Microsoft. 
It is used to perform different tasks and can be used to create web apps, games, mobile apps, etc.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "C# Learning Path", + "url": "https://docs.microsoft.com/en-us/learn/paths/csharp-first-steps/?WT.mc_id=dotnet-35129-website", + "type": "article" + }, + { + "title": "C# on W3 schools", + "url": "https://www.w3schools.com/cs/index.php", + "type": "article" + }, + { + "title": "Introduction to C#", + "url": "https://docs.microsoft.com/en-us/shows/CSharp-101/?WT.mc_id=Educationalcsharp-c9-scottha", + "type": "article" + }, + { + "title": "Explore top posts about C#", + "url": "https://app.daily.dev/tags/c#?ref=roadmapsh", + "type": "article" + }, + { + "title": "C# tutorials", + "url": "https://www.youtube.com/watch?v=gfkTfcpWqAY&list=PLTjRvDozrdlz3_FPXwb6lX_HoGXa09Yef", + "type": "video" + } + ] + }, + "SlH0Rl07yURDko2nDPfFy": { + "title": "Ruby", + "description": "Ruby is a high-level, interpreted programming language that blends Perl, Smalltalk, Eiffel, Ada, and Lisp. Ruby focuses on simplicity and productivity along with a syntax that reads and writes naturally. 
Ruby supports procedural, object-oriented and functional programming and is dynamically typed.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Ruby Website", + "url": "https://www.ruby-lang.org/en/", + "type": "article" + }, + { + "title": "Learn Ruby in 20 minutes", + "url": "https://www.ruby-lang.org/en/documentation/quickstart/", + "type": "article" + }, + { + "title": "Ruby, An Introduction to a Programmer’s Best Friend", + "url": "https://thenewstack.io/ruby-a-programmers-best-friend/", + "type": "article" + }, + { + "title": "Explore top posts about Ruby", + "url": "https://app.daily.dev/tags/ruby?ref=roadmapsh", + "type": "article" + }, + { + "title": "Ruby Comprehensive courses", + "url": "https://www.youtube.com/playlist?list=PL_EzhIKp343lBMH4UuklrMRL_WkilGoXe", + "type": "video" + } + ] + }, + "2f0ZO6GJElfZ2Eis28Hzg": { + "title": "Pick a Language", + "description": "Even if you’re a beginner the least you would have known is that Web Development is majorly classified into two facets: Frontend Development and Backend Development. And obviously, they both have their respective set of tools and technologies. For instance, when we talk about Frontend Development, there always comes 3 names first and foremost – HTML, CSS, and JavaScript.\n\nIn the same way, when it comes to Backend Web Development – we primarily require a backend (or you can say server-side) programming language to make the website function along with various other tools & technologies such as databases, frameworks, web servers, etc.\n\nPick a language from the given list and make sure to learn its quirks, core details about its runtime e.g. 
concurrency, memory model etc.\n\n[@article@ Top Languages for job ads](https://www.tiobe.com/tiobe-index/)", + "links": [] + }, + "_I1E__wCIVrhjMk6IMieE": { + "title": "Git", + "description": "[Git](https://git-scm.com/) is a free and open source distributed version control system designed to handle everything from small to very large projects with speed and efficiency.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Introduction to Git", + "url": "https://learn.microsoft.com/en-us/training/modules/intro-to-git/", + "type": "article" + }, + { + "title": "Learn Git with Tutorials, News and Tips - Atlassian", + "url": "https://www.atlassian.com/git", + "type": "article" + }, + { + "title": "Git Cheat Sheet", + "url": "https://cs.fyi/guide/git-cheatsheet", + "type": "article" + }, + { + "title": "Learn Git Branching", + "url": "https://learngitbranching.js.org/", + "type": "article" + }, + { + "title": "Git Tutorial", + "url": "https://www.w3schools.com/git/", + "type": "article" + }, + { + "title": "Explore top posts about Git", + "url": "https://app.daily.dev/tags/git?ref=roadmapsh", + "type": "article" + }, + { + "title": "Git & GitHub Crash Course For Beginners", + "url": "https://www.youtube.com/watch?v=SWYqp7iY_Tc", + "type": "video" + } + ] + }, + "ezdqQW9wTUw93F6kjOzku": { + "title": "Version Control Systems", + "description": "Version control/source control systems allow developers to track and control changes to code over time. These services often include the ability to make atomic revisions to code, branch/fork off of specific points, and to compare versions of code. 
They are useful in determining the who, what, when, and why code changes were made.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Git", + "url": "https://git-scm.com/", + "type": "article" + }, + { + "title": "What is Version Control?", + "url": "https://www.atlassian.com/git/tutorials/what-is-version-control", + "type": "article" + } + ] + }, + "ptD8EVqwFUYr4W5A_tABY": { + "title": "GitHub", + "description": "GitHub is a provider of Internet hosting for software development and version control using Git. It offers the distributed version control and source code management functionality of Git, plus its own features.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "GitHub Website", + "url": "https://github.com", + "type": "opensource" + }, + { + "title": "GitHub Documentation", + "url": "https://docs.github.com/en/get-started/quickstart", + "type": "article" + }, + { + "title": "How to Use Git in a Professional Dev Team", + "url": "https://ooloo.io/project/github-flow", + "type": "article" + }, + { + "title": "Learn Git Branching", + "url": "https://learngitbranching.js.org/?locale=en_us", + "type": "article" + }, + { + "title": "Explore top posts about GitHub", + "url": "https://app.daily.dev/tags/github?ref=roadmapsh", + "type": "article" + }, + { + "title": "What is GitHub?", + "url": "https://www.youtube.com/watch?v=w3jLJU7DT5E", + "type": "video" + }, + { + "title": "Git vs. 
GitHub: Whats the difference?", + "url": "https://www.youtube.com/watch?v=wpISo9TNjfU", + "type": "video" + }, + { + "title": "Git and GitHub for Beginners", + "url": "https://www.youtube.com/watch?v=RGOj5yH7evk", + "type": "video" + }, + { + "title": "Git and GitHub - CS50 Beyond 2019", + "url": "https://www.youtube.com/watch?v=eulnSXkhE7I", + "type": "video" + } + ] + }, + "Ry_5Y-BK7HrkIc6X0JG1m": { + "title": "Bitbucket", + "description": "Bitbucket is a Git based hosting and source code repository service that is Atlassian's alternative to other products like GitHub, GitLab etc\n\nBitbucket offers hosting options via Bitbucket Cloud (Atlassian's servers), Bitbucket Server (customer's on-premise) or Bitbucket Data Centre (number of servers in customers on-premise or cloud environment)\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Bitbucket Website", + "url": "https://bitbucket.org/product", + "type": "article" + }, + { + "title": "Getting started with Bitbucket", + "url": "https://bitbucket.org/product/guides/basics/bitbucket-interface", + "type": "article" + }, + { + "title": "Using Git with Bitbucket Cloud", + "url": "https://www.atlassian.com/git/tutorials/learn-git-with-bitbucket-cloud", + "type": "article" + }, + { + "title": "A brief overview of Bitbucket", + "url": "https://bitbucket.org/product/guides/getting-started/overview#a-brief-overview-of-bitbucket", + "type": "article" + }, + { + "title": "Explore top posts about Bitbucket", + "url": "https://app.daily.dev/tags/bitbucket?ref=roadmapsh", + "type": "article" + }, + { + "title": "Bitbucket tutorial | How to use Bitbucket Cloud", + "url": "https://www.youtube.com/watch?v=M44nEyd_5To", + "type": "video" + }, + { + "title": "Bitbucket Tutorial | Bitbucket for Beginners", + "url": "https://www.youtube.com/watch?v=i5T-DB8tb4A", + "type": "video" + } + ] + }, + "Wcp-VDdFHipwa7hNAp1z_": { + "title": "GitLab", + "description": "GitLab is a provider of internet hosting for 
software development and version control using Git. It offers the distributed version control and source code management functionality of Git, plus its own features.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "GitLab Website", + "url": "https://gitlab.com/", + "type": "opensource" + }, + { + "title": "GitLab Documentation", + "url": "https://docs.gitlab.com/", + "type": "article" + }, + { + "title": "Explore top posts about GitLab", + "url": "https://app.daily.dev/tags/gitlab?ref=roadmapsh", + "type": "article" + } + ] + }, + "NvUcSDWBhzJZ31nzT4UlE": { + "title": "Repo Hosting Services", + "description": "When working on a team, you often need a remote place to put your code so others can access it, create their own branches, and create or review pull requests. These services often include issue tracking, code review, and continuous integration features. A few popular choices are GitHub, GitLab, BitBucket, and AWS CodeCommit.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "GitHub", + "url": "https://github.com/features/", + "type": "opensource" + }, + { + "title": "GitLab", + "url": "https://about.gitlab.com/", + "type": "article" + }, + { + "title": "BitBucket", + "url": "https://bitbucket.org/product/guides/getting-started/overview", + "type": "article" + }, + { + "title": "How to choose the best source code repository", + "url": "https://blockandcapital.com/en/choose-code-repository/", + "type": "article" + } + ] + }, + "FihTrMO56kj9jT8O_pO2T": { + "title": "PostgreSQL", + "description": "PostgreSQL, also known as Postgres, is a free and open-source relational database management system emphasizing extensibility and SQL compliance.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Visit Dedicated PostgreSQL DBA Roadmap", + "url": "/postgresql-dba", + "type": "article" + }, + { + "title": "Official Website", + "url": "https://www.postgresql.org/", + "type": "article" + 
}, + { + "title": "Learn PostgreSQL - Full Tutorial for Beginners", + "url": "https://www.postgresqltutorial.com/", + "type": "article" + }, + { + "title": "Explore top posts about PostgreSQL", + "url": "https://app.daily.dev/tags/postgresql?ref=roadmapsh", + "type": "article" + }, + { + "title": "Learn PostgreSQL Tutorial - Full Course for Beginners", + "url": "https://www.youtube.com/watch?v=qw--VYLpxG4", + "type": "video" + }, + { + "title": "Postgres tutorial for Beginners", + "url": "https://www.youtube.com/watch?v=eMIxuk0nOkU", + "type": "video" + } + ] + }, + "dEsTje8kfHwWjCI3zcgLC": { + "title": "MS SQL", + "description": "MS SQL (or Microsoft SQL Server) is the Microsoft developed relational database management system (RDBMS). MS SQL uses the T-SQL (Transact-SQL) query language to interact with the relational databases. There are many different versions and editions available of MS SQL\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "MS SQL website", + "url": "https://www.microsoft.com/en-ca/sql-server/", + "type": "article" + }, + { + "title": "Tutorials for SQL Server", + "url": "https://docs.microsoft.com/en-us/sql/sql-server/tutorials-for-sql-server-2016?view=sql-server-ver15", + "type": "article" + }, + { + "title": "SQL Server tutorial for beginners", + "url": "https://www.youtube.com/watch?v=-EPMOaV7h_Q", + "type": "video" + } + ] + }, + "VPxOdjJtKAqmM5V0LR5OC": { + "title": "MySQL", + "description": "MySQL is an incredibly popular open source relational database management system (RDBMS). MySQL can be used as a stand-alone client or in conjunction with other services to provide database connectivity. 
The **M** in LAMP stack stands for MySQL; that alone should provide an idea of its prevalence.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "MySQL website", + "url": "https://www.mysql.com/", + "type": "article" + }, + { + "title": "W3Schools - MySQL tutorial ", + "url": "https://www.w3schools.com/mySQl/default.asp", + "type": "article" + }, + { + "title": "MySQL for Developers", + "url": "https://planetscale.com/courses/mysql-for-developers/introduction/course-introduction", + "type": "article" + }, + { + "title": "MySQL Tutorial", + "url": "https://www.mysqltutorial.org/", + "type": "article" + }, + { + "title": "Explore top posts about MySQL", + "url": "https://app.daily.dev/tags/mysql?ref=roadmapsh", + "type": "article" + }, + { + "title": "MySQL tutorial for beginners", + "url": "https://www.youtube.com/watch?v=7S_tz1z_5bA", + "type": "video" + } + ] + }, + "h1SAjQltHtztSt8QmRgab": { + "title": "Oracle", + "description": "Oracle Database Server or sometimes called Oracle RDBMS or even simply Oracle is a world leading relational database management system produced by Oracle Corporation.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Official Website", + "url": "https://www.oracle.com/database/", + "type": "article" + }, + { + "title": "Official Docs", + "url": "https://docs.oracle.com/en/database/index.html", + "type": "article" + }, + { + "title": "Explore top posts about Oracle", + "url": "https://app.daily.dev/tags/oracle?ref=roadmapsh", + "type": "article" + }, + { + "title": "Oracle SQL Tutorial for Beginners", + "url": "https://www.youtube.com/watch?v=ObbNGhcxXJA", + "type": "video" + } + ] + }, + "tD3i-8gBpMKCHB-ITyDiU": { + "title": "MariaDB", + "description": "MariaDB server is a community developed fork of MySQL server. 
Started by core members of the original MySQL team, MariaDB actively works with outside developers to deliver the most featureful, stable, and sanely licensed open SQL server in the industry. MariaDB was created with the intention of being a more versatile, drop-in replacement version of MySQL\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "MariaDB website", + "url": "https://mariadb.org/", + "type": "article" + }, + { + "title": "MariaDB vs MySQL", + "url": "https://www.guru99.com/mariadb-vs-mysql.html", + "type": "article" + }, + { + "title": "W3Schools - MariaDB tutorial ", + "url": "https://www.w3schools.blog/mariadb-tutorial", + "type": "article" + }, + { + "title": "Explore top posts about Infrastructure", + "url": "https://app.daily.dev/tags/infrastructure?ref=roadmapsh", + "type": "article" + }, + { + "title": "MariaDB Tutorial For Beginners in One Hour", + "url": "https://www.youtube.com/watch?v=_AMj02sANpI", + "type": "video" + } + ] + }, + "r45b461NxLN6wBODJ5CNP": { + "title": "Relational Databases", + "description": "A relational database is **a type of database that stores and provides access to data points that are related to one another**. Relational databases store data in a series of tables. Interconnections between the tables are specified as foreign keys. 
A foreign key is a unique reference from one row in a relational table to another row in a table, which can be the same table but is most commonly a different table.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Databases and SQL", + "url": "https://www.edx.org/course/databases-5-sql", + "type": "course" + }, + { + "title": "Relational Databases", + "url": "https://www.ibm.com/cloud/learn/relational-databases", + "type": "article" + }, + { + "title": "51 Years of Relational Databases", + "url": "https://learnsql.com/blog/codd-article-databases/", + "type": "article" + }, + { + "title": "Intro To Relational Databases", + "url": "https://www.udacity.com/course/intro-to-relational-databases--ud197", + "type": "article" + }, + { + "title": "Explore top posts about Backend Development", + "url": "https://app.daily.dev/tags/backend?ref=roadmapsh", + "type": "article" + }, + { + "title": "What is Relational Database", + "url": "https://youtu.be/OqjJjpjDRLc", + "type": "video" + } + ] + }, + "F8frGuv1dunOdcVJ_IiGs": { + "title": "NoSQL Databases", + "description": "NoSQL databases offer data storage and retrieval that is modelled differently to \"traditional\" relational databases. NoSQL databases typically focus more on horizontal scaling, eventual consistency, speed and flexibility and is used commonly for big data and real-time streaming applications. NoSQL is often described as a BASE system (**B**asically **A**vailable, **S**oft state, **E**ventual consistency) as opposed to SQL/relational which typically focus on ACID (Atomicity, Consistency, Isolation, Durability). 
Common NoSQL data structures include key-value pair, wide column, graph and document.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "NoSQL Explained", + "url": "https://www.mongodb.com/nosql-explained", + "type": "article" + }, + { + "title": "Explore top posts about NoSQL", + "url": "https://app.daily.dev/tags/nosql?ref=roadmapsh", + "type": "article" + }, + { + "title": "How do NoSQL Databases work", + "url": "https://www.youtube.com/watch?v=0buKQHokLK8", + "type": "video" + }, + { + "title": "SQL vs NoSQL Explained", + "url": "https://www.youtube.com/watch?v=ruz-vK8IesE", + "type": "video" + } + ] + }, + "Z7jp_Juj5PffSxV7UZcBb": { + "title": "ORMs", + "description": "Object-Relational Mapping (ORM) is a technique that lets you query and manipulate data from a database using an object-oriented paradigm. When talking about ORM, most people are referring to a library that implements the Object-Relational Mapping technique, hence the phrase \"an ORM\".\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Object Relational Mapping - Wikipedia", + "url": "https://en.wikipedia.org/wiki/Object%E2%80%93relational_mapping", + "type": "article" + }, + { + "title": "What is an ORM and how should I use it?", + "url": "https://stackoverflow.com/questions/1279613/what-is-an-orm-how-does-it-work-and-how-should-i-use-one", + "type": "article" + }, + { + "title": "What is an ORM, how does it work, and how should I use one?", + "url": "https://stackoverflow.com/a/1279678", + "type": "article" + }, + { + "title": "Explore top posts about Backend Development", + "url": "https://app.daily.dev/tags/backend?ref=roadmapsh", + "type": "article" + } + ] + }, + "Ge2SnKBrQQrU-oGLz6TmT": { + "title": "Normalization", + "description": "Database normalization is the process of structuring a relational database in accordance with a series of so-called normal forms in order to reduce data redundancy and improve data integrity. 
It was first proposed by Edgar F. Codd as part of his relational model.\n\nNormalization entails organizing the columns (attributes) and tables (relations) of a database to ensure that their dependencies are properly enforced by database integrity constraints. It is accomplished by applying some formal rules either by a process of synthesis (creating a new database design) or decomposition (improving an existing database design).\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is Normalization in DBMS (SQL)? 1NF, 2NF, 3NF, BCNF Database with Example", + "url": "https://www.guru99.com/database-normalization.html", + "type": "article" + }, + { + "title": "Database normalization", + "url": "https://en.wikipedia.org/wiki/Database_normalization", + "type": "article" + }, + { + "title": "Explore top posts about Database", + "url": "https://app.daily.dev/tags/database?ref=roadmapsh", + "type": "article" + }, + { + "title": "Basic Concept of Database Normalization", + "url": "https://www.youtube.com/watch?v=xoTyrdT9SZI", + "type": "video" + } + ] + }, + "qSAdfaGUfn8mtmDjHJi3z": { + "title": "ACID", + "description": "ACID are the four properties of relational database systems that help in making sure that we are able to perform the transactions in a reliable manner. 
It's an acronym which refers to the presence of four properties: atomicity, consistency, isolation and durability\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is ACID Compliant Database?", + "url": "https://retool.com/blog/whats-an-acid-compliant-database/", + "type": "article" + }, + { + "title": "What is ACID Compliance?: Atomicity, Consistency, Isolation", + "url": "https://fauna.com/blog/what-is-acid-compliance-atomicity-consistency-isolation", + "type": "article" + }, + { + "title": "ACID Explained: Atomic, Consistent, Isolated & Durable", + "url": "https://www.youtube.com/watch?v=yaQ5YMWkxq4", + "type": "video" + } + ] + }, + "GwApfL4Yx-b5Y8dB9Vy__": { + "title": "Failure Modes", + "description": "There are several different failure modes that can occur in a database, including:\n\n* Read contention: This occurs when multiple clients or processes are trying to read data from the same location in the database at the same time, which can lead to delays or errors.\n* Write contention: This occurs when multiple clients or processes are trying to write data to the same location in the database at the same time, which can lead to delays or errors.\n* Thundering herd: This occurs when a large number of clients or processes try to access the same resource simultaneously, which can lead to resource exhaustion and reduced performance.\n* Cascade: This occurs when a failure in one part of the database system causes a chain reaction that leads to failures in other parts of the system.\n* Deadlock: This occurs when two or more transactions are waiting for each other to release a lock on a resource, leading to a standstill.\n* Corruption: This occurs when data in the database becomes corrupted, which can lead to errors or unexpected results when reading or writing to the database.\n* Hardware failure: This occurs when hardware components, such as disk drives or memory, fail, which can lead to data loss or corruption.\n* Software 
failure: This occurs when software components, such as the database management system or application, fail, which can lead to errors or unexpected results.\n* Network failure: This occurs when the network connection between the database and the client is lost, which can lead to errors or timeouts when trying to access the database.\n* Denial of service (DoS) attack: This occurs when a malicious actor attempts to overwhelm the database with requests, leading to resource exhaustion and reduced performance.", + "links": [] + }, + "rq_y_OBMD9AH_4aoecvAi": { + "title": "Transactions", + "description": "In short, a database transaction is a sequence of multiple operations performed on a database, and all served as a single logical unit of work — taking place wholly or not at all. In other words, there's never a case where only half of the operations are performed and the results saved.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What are Transactions?", + "url": "https://fauna.com/blog/database-transaction", + "type": "article" + } + ] + }, + "SYXJhanu0lFmGj2m2XXhS": { + "title": "Profiling Perfor.", + "description": "There are several ways to profile the performance of a database:\n\n* Monitor system performance: You can use tools like the Windows Task Manager or the Unix/Linux top command to monitor the performance of your database server. These tools allow you to see the overall CPU, memory, and disk usage of the system, which can help identify any resource bottlenecks.\n* Use database-specific tools: Most database management systems (DBMSs) have their own tools for monitoring performance. For example, Microsoft SQL Server has the SQL Server Management Studio (SSMS) and the sys.dm\\_os\\_wait\\_stats dynamic management view, while Oracle has the Oracle Enterprise Manager and the v$waitstat view. 
These tools allow you to see specific performance metrics, such as the amount of time spent waiting on locks or the number of physical reads and writes.\n* Use third-party tools: There are also several third-party tools that can help you profile the performance of a database. Some examples include SolarWinds Database Performance Analyzer, Quest Software Foglight, and Redgate SQL Monitor. These tools often provide more in-depth performance analysis and can help you identify specific issues or bottlenecks.\n* Analyze slow queries: If you have specific queries that are running slowly, you can use tools like EXPLAIN PLAN or SHOW PLAN in MySQL or SQL Server to see the execution plan for the query and identify any potential issues. You can also use tools like the MySQL slow query log or the SQL Server Profiler to capture slow queries and analyze them further.\n* Monitor application performance: If you are experiencing performance issues with a specific application that is using the database, you can use tools like Application Insights or New Relic to monitor the performance of the application and identify any issues that may be related to the database.\n\nHave a look at the documentation for the database that you are using.", + "links": [] + }, + "bQnOAu863hsHdyNMNyJop": { + "title": "N+1 Problem", + "description": "The N+1 query problem happens when your code executes N additional query statements to fetch the same data that could have been retrieved when executing the primary query.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "In Detail Explanation of N+1 Problem", + "url": "https://medium.com/doctolib/understanding-and-fixing-n-1-query-30623109fe89", + "type": "article" + }, + { + "title": "What is the N+1 Problem", + "url": "https://planetscale.com/blog/what-is-n-1-query-problem-and-how-to-solve-it", + "type": "article" + }, + { + "title": "Solving N+1 Problem: For Java Backend Developers", + "url": 
"https://dev.to/jackynote/solving-the-notorious-n1-problem-optimizing-database-queries-for-java-backend-developers-2o0p", + "type": "article" + } + ] + }, + "LJt27onEOeIBomiEMTyKd": { + "title": "More about Databases", + "description": "A database is a collection of useful data of one or more related organizations structured in a way to make data an asset to the organization. A database management system is a software designed to assist in maintaining and extracting large collections of data in a timely fashion.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Oracle: What is a Database?", + "url": "https://www.oracle.com/database/what-is-database/", + "type": "article" + }, + { + "title": "Prisma.io: What are Databases?", + "url": "https://www.prisma.io/dataguide/intro/what-are-databases", + "type": "article" + }, + { + "title": "Explore top posts about Backend Development", + "url": "https://app.daily.dev/tags/backend?ref=roadmapsh", + "type": "article" + } + ] + }, + "y-xkHFE9YzhNIX3EiWspL": { + "title": "Database Indexes", + "description": "An index is a data structure that you build and assign on top of an existing table that basically looks through your table and tries to analyze and summarize so that it can create shortcuts.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Explore top posts about Database", + "url": "https://app.daily.dev/tags/database?ref=roadmapsh", + "type": "article" + }, + { + "title": "Database Indexing Explained", + "url": "https://www.youtube.com/watch?v=-qNSXK7s7_w", + "type": "video" + } + ] + }, + "zWstl08R4uzqve4BdYurp": { + "title": "Sharding Strategies", + "description": "Sharding strategy is a technique to split a large dataset into smaller chunks (logical shard) in which we distribute these chunks in different machines/database nodes in order to distribute the traffic load. It’s a good mechanism to improve the scalability of an application. 
Many databases support sharding, but not all.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Wikipedia - Sharding in Database Architectures", + "url": "https://en.wikipedia.org/wiki/Shard_(database_architecture)", + "type": "article" + }, + { + "title": "How sharding a database can make it faster", + "url": "https://stackoverflow.blog/2022/03/14/how-sharding-a-database-can-make-it-faster/", + "type": "article" + }, + { + "title": "Explore top posts about Backend Development", + "url": "https://app.daily.dev/tags/backend?ref=roadmapsh", + "type": "article" + } + ] + }, + "wrl7HHWXOaxoKVlNZxZ6d": { + "title": "Data Replication", + "description": "Data replication is the process by which data residing on a physical/virtual server(s) or cloud instance (primary instance) is continuously replicated or copied to a secondary server(s) or cloud instance (standby instance). Organizations replicate data to support high availability, backup, and/or disaster recovery.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is Data Replication?", + "url": "https://youtu.be/fUrKt-AQYtE", + "type": "video" + } + ] + }, + "LAdKDJ4LcMaDWqslMvE8X": { + "title": "CAP Theorem", + "description": "CAP is an acronym that stands for Consistency, Availability and Partition Tolerance. According to CAP theorem, any distributed system can only guarantee two of the three properties at any point of time. 
You can't guarantee all three properties at once.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is CAP Theorem?", + "url": "https://www.bmc.com/blogs/cap-theorem/", + "type": "article" + }, + { + "title": "CAP Theorem - Wikipedia", + "url": "https://en.wikipedia.org/wiki/CAP_theorem", + "type": "article" + }, + { + "title": "An Illustrated Proof of the CAP Theorem", + "url": "https://mwhittaker.github.io/blog/an_illustrated_proof_of_the_cap_theorem/", + "type": "article" + }, + { + "title": "CAP Theorem and its applications in NoSQL Databases", + "url": "https://www.ibm.com/uk-en/cloud/learn/cap-theorem", + "type": "article" + }, + { + "title": "What is CAP Theorem?", + "url": "https://www.youtube.com/watch?v=_RbsFXWRZ10", + "type": "video" + } + ] + }, + "95d9itpUZ4s9roZN8kG9x": { + "title": "Scaling Databases", + "description": "Scaling databases is the process of adapting them to handle more data and users efficiently. It's achieved by either upgrading existing hardware (vertical scaling) or adding more servers (horizontal scaling). Techniques like sharding and replication are key. 
This ensures databases continue to be a robust asset as they grow.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "MongoDB: Database Scaling Basics", + "url": "https://www.mongodb.com/basics/scaling", + "type": "article" + }, + { + "title": "Explore top posts about Backend Development", + "url": "https://app.daily.dev/tags/backend?ref=roadmapsh", + "type": "article" + } + ] + }, + "dLY0KafPstajCcSbslC4M": { + "title": "HATEOAS", + "description": "HATEOAS is an acronym for **H**ypermedia **A**s **T**he **E**ngine **O**f **A**pplication **S**tate, it's the concept that when sending information over a RESTful API the document received should contain everything the client needs in order to parse and use the data i.e they don't have to contact any other endpoint not explicitly mentioned within the Document.", + "links": [ + { + "title": "What is HATEOAS and why is it important for my REST API?", + "url": "https://restcookbook.com/Basics/hateoas/", + "type": "article" + } + ] + }, + "sNceS4MpSIjRkWhNDmrFg": { + "title": "JSON APIs", + "description": "JSON or JavaScript Object Notation is an encoding scheme that is designed to eliminate the need for an ad-hoc code for each application to communicate with servers that communicate in a defined way. 
JSON API module exposes an implementation for data stores and data structures, such as entity types, bundles, and fields.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Official Website", + "url": "https://jsonapi.org/", + "type": "article" + }, + { + "title": "Official Docs", + "url": "https://jsonapi.org/implementations/", + "type": "article" + }, + { + "title": "JSON API: Explained in 4 minutes ", + "url": "https://www.youtube.com/watch?v=N-4prIh7t38", + "type": "video" + } + ] + }, + "9cD5ag1L0GqHx4_zxc5JX": { + "title": "Open API Specs", + "description": "The OpenAPI Specification (OAS) defines a standard, language-agnostic interface to RESTful APIs which allows both humans and computers to discover and understand the capabilities of the service without access to source code, documentation, or through network traffic inspection. When properly defined, a consumer can understand and interact with the remote service with a minimal amount of implementation logic.\n\nAn OpenAPI definition can then be used by documentation generation tools to display the API, code generation tools to generate servers and clients in various programming languages, testing tools, and many other use cases.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "OpenAPI Specification Website", + "url": "https://swagger.io/specification/", + "type": "article" + }, + { + "title": "Open API Live Editor", + "url": "https://swagger.io/tools/swagger-editor/", + "type": "article" + }, + { + "title": "Official training guide", + "url": "https://swagger.io/docs/specification/about/", + "type": "article" + }, + { + "title": "OpenAPI 3.0: How to Design and Document APIs with the Latest OpenAPI Specification 3.0", + "url": "https://www.youtube.com/watch?v=6kwmW_p_Tig", + "type": "video" + } + ] + }, + "sSNf93azjuyMzQqIHE0Rh": { + "title": "SOAP", + "description": "Simple Object Access Protocol (SOAP) is a message protocol for exchanging 
information between systems and applications. When it comes to application programming interfaces (APIs), a SOAP API is developed in a more structured and formalized way. SOAP messages can be carried over a variety of lower-level protocols, including the web-related Hypertext Transfer Protocol (HTTP).\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "w3school SOAP explanation", + "url": "https://www.w3schools.com/xml/xml_soap.asp", + "type": "article" + } + ] + }, + "J-TOE2lT4At1mSdNoxPS1": { + "title": "gRPC", + "description": "gRPC is a high-performance, open source universal RPC framework\n\nRPC stands for Remote Procedure Call, there's an ongoing debate on what the g stands for. RPC is a protocol that allows a program to execute a procedure of another program located on another computer. The great advantage is that the developer doesn’t need to code the details of the remote interaction. The remote procedure is called like any other function. But the client and the server can be coded in different languages.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "gRPC Website", + "url": "https://grpc.io/", + "type": "article" + }, + { + "title": "gRPC Docs", + "url": "https://grpc.io/docs/", + "type": "article" + }, + { + "title": "What Is GRPC?", + "url": "https://www.wallarm.com/what/the-concept-of-grpc", + "type": "article" + }, + { + "title": "Explore top posts about gRPC", + "url": "https://app.daily.dev/tags/grpc?ref=roadmapsh", + "type": "article" + }, + { + "title": "What Is GRPC?", + "url": "https://www.youtube.com/watch?v=hVrwuMnCtok", + "type": "video" + } + ] + }, + "lfNFDZZNdrB0lbEaMtU71": { + "title": "REST", + "description": "REST, or REpresentational State Transfer, is an architectural style for providing standards between computer systems on the web, making it easier for systems to communicate with each other.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "REST 
Fundamental", + "url": "https://dev.to/cassiocappellari/fundamentals-of-rest-api-2nag", + "type": "article" + }, + { + "title": "What is a REST API?", + "url": "https://www.redhat.com/en/topics/api/what-is-a-rest-api", + "type": "article" + }, + { + "title": "Roy Fieldings dissertation chapter, Representational State Transfer (REST)", + "url": "https://www.ics.uci.edu/~fielding/pubs/dissertation/rest_arch_style.htm", + "type": "article" + }, + { + "title": "Learn REST: A RESTful Tutorial", + "url": "https://restapitutorial.com/", + "type": "article" + }, + { + "title": "Explore top posts about REST API", + "url": "https://app.daily.dev/tags/rest-api?ref=roadmapsh", + "type": "article" + } + ] + }, + "zp3bq38tMnutT2N0tktOW": { + "title": "GraphQL", + "description": "GraphQL is a query language and runtime system for APIs (application programming interfaces). It is designed to provide a flexible and efficient way for clients to request data from servers, and it is often used as an alternative to REST (representational state transfer) APIs.\n\nOne of the main features of GraphQL is its ability to specify exactly the data that is needed, rather than receiving a fixed set of data from an endpoint. This allows clients to request only the data that they need, and it reduces the amount of data that needs to be transferred over the network.\n\nGraphQL also provides a way to define the structure of the data that is returned from the server, allowing clients to request data in a predictable and flexible way. 
This makes it easier to build and maintain client applications that depend on data from the server.\n\nGraphQL is widely used in modern web and mobile applications, and it is supported by a large and active developer community.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "GraphQL Official Website", + "url": "https://graphql.org/", + "type": "article" + }, + { + "title": "Explore top posts about GraphQL", + "url": "https://app.daily.dev/tags/graphql?ref=roadmapsh", + "type": "article" + } + ] + }, + "KWTbEVX_WxS8jmSaAX3Fe": { + "title": "Client Side", + "description": "Client-side caching is the storage of network data to a local cache for future re-use. After an application fetches network data, it stores that resource in a local cache. Once a resource has been cached, the browser uses the cache on future requests for that resource to boost performance.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Everything you need to know about HTTP Caching", + "url": "https://www.youtube.com/watch?v=HiBDZgTNpXY", + "type": "video" + } + ] + }, + "Nq2BO53bHJdFT1rGZPjYx": { + "title": "CDN", + "description": "A Content Delivery Network (CDN) service aims to provide high availability and performance improvements of websites. This is achieved with fast delivery of website assets and content typically via geographically closer endpoints to the client requests. Traditional commercial CDNs (Amazon CloudFront, Akamai, CloudFlare and Fastly) provide servers across the globe which can be used for this purpose. Serving assets and contents via a CDN reduces bandwidth on website hosting, provides an extra layer of caching to reduce potential outages and can improve website security as well\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "CloudFlare - What is a CDN? 
| How do CDNs work?", + "url": "https://www.cloudflare.com/en-ca/learning/cdn/what-is-a-cdn/", + "type": "article" + }, + { + "title": "Wikipedia - Content Delivery Network", + "url": "https://en.wikipedia.org/wiki/Content_delivery_network", + "type": "article" + }, + { + "title": "What is Cloud CDN?", + "url": "https://www.youtube.com/watch?v=841kyd_mfH0", + "type": "video" + }, + { + "title": "What is a Content Delivery Network (CDN)?", + "url": "https://www.youtube.com/watch?v=Bsq5cKkS33I", + "type": "video" + } + ] + }, + "z1-eP4sV75GBEIdM4NvL9": { + "title": "Server Side", + "description": "Server-side caching temporarily stores web files and data on the origin server to reuse later.\n\nWhen the user first requests for the webpage, the website goes under the normal process of retrieving data from the server and generates or constructs the webpage of the website. After the request has happened and the response has been sent back, the server copies the webpage and stores it as a cache.\n\nNext time the user revisits the website, it loads the already saved or cached copy of the webpage, thus making it faster.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Server-side caching and Client-side caching", + "url": "https://www.codingninjas.com/codestudio/library/server-side-caching-and-client-side-caching", + "type": "article" + }, + { + "title": "Caching strategies", + "url": "https://medium.com/@genchilu/cache-strategy-in-backend-d0baaacd2d79", + "type": "article" + }, + { + "title": "Local vs distributed", + "url": "https://redis.io/glossary/distributed-caching/", + "type": "article" + }, + { + "title": "Example - Hibernate caching", + "url": "https://medium.com/@himani.prasad016/caching-in-hibernate-3ad4f479fcc0", + "type": "article" + }, + { + "title": "Explore top posts about Web Development", + "url": "https://app.daily.dev/tags/webdev?ref=roadmapsh", + "type": "article" + } + ] + }, + "ELj8af7Mi38kUbaPJfCUR": { + "title": 
"Caching", + "description": "Caching is a technique of storing frequently used data or results of complex computations in a local memory, for a certain period. So, next time, when the client requests the same information, instead of retrieving the information from the database, it will give the information from the local memory. The main advantage of caching is that it improves performance by reducing the processing burden.\n\nNB! Caching is a complicated topic that has obvious benefits but can lead to pitfalls like stale data, cache invalidation, distributed caching etc", + "links": [] + }, + "RBrIP5KbVQ2F0ly7kMfTo": { + "title": "Web Security", + "description": "Web security refers to the protective measures taken by the developers to protect the web applications from threats that could affect the business.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "OWASP Web Application Security Testing Checklist", + "url": "https://github.com/0xRadi/OWASP-Web-Checklist", + "type": "opensource" + }, + { + "title": "Why HTTPS Matters", + "url": "https://developers.google.com/web/fundamentals/security/encrypt-in-transit/why-https", + "type": "article" + }, + { + "title": "Wikipedia - OWASP", + "url": "https://en.wikipedia.org/wiki/OWASP", + "type": "article" + }, + { + "title": "OWASP Top 10 Security Risks", + "url": "https://sucuri.net/guides/owasp-top-10-security-vulnerabilities-2021/", + "type": "article" + }, + { + "title": "OWASP Cheatsheets", + "url": "https://cheatsheetseries.owasp.org/cheatsheets/AJAX_Security_Cheat_Sheet.html", + "type": "article" + }, + { + "title": "Content Security Policy (CSP)", + "url": "https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP", + "type": "article" + }, + { + "title": "Explore top posts about Security", + "url": "https://app.daily.dev/tags/security?ref=roadmapsh", + "type": "article" + } + ] + }, + "381Kw1IMRv7CJp-Uf--qd": { + "title": "Integration Testing", + "description": "Integration testing is a 
broad category of tests where multiple software modules are **integrated** and tested as a group. It is meant to test the **interaction** between multiple services, resources, or modules. For example, an API's interaction with a backend service, or a service with a database.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Integration Testing", + "url": "https://www.guru99.com/integration-testing.html", + "type": "article" + }, + { + "title": "How to Integrate and Test Your Tech Stack", + "url": "https://thenewstack.io/how-to-integrate-and-test-your-tech-stack/", + "type": "article" + }, + { + "title": "Explore top posts about Testing", + "url": "https://app.daily.dev/tags/testing?ref=roadmapsh", + "type": "article" + }, + { + "title": "What is Integration Testing?", + "url": "https://youtu.be/QYCaaNz8emY", + "type": "video" + } + ] + }, + "NAGisfq2CgeK3SsuRjnMw": { + "title": "Functional Testing", + "description": "Functional testing is where software is tested to ensure functional requirements are met. Usually, it is a form of black box testing in which the tester has no understanding of the source code; testing is performed by providing input and comparing expected/actual output. It contrasts with non-functional testing, which includes performance, load, scalability, and penetration testing.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is Functional Testing?", + "url": "https://www.guru99.com/functional-testing.html", + "type": "article" + }, + { + "title": "Explore top posts about Testing", + "url": "https://app.daily.dev/tags/testing?ref=roadmapsh", + "type": "article" + }, + { + "title": "Functional Testing vs Non-Functional Testing", + "url": "https://youtu.be/j_79AXkG4PY", + "type": "video" + } + ] + }, + "3OYm6b9f6WOrKi4KTOZYK": { + "title": "Unit Testing", + "description": "Unit testing is where individual **units** (modules, functions/methods, routines, etc.) 
of software are tested to ensure their correctness. This low-level testing ensures smaller components are functionally sound while taking the burden off of higher-level tests. Generally, a developer writes these tests during the development process and they are run as automated tests.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Unit Testing Tutorial", + "url": "https://www.guru99.com/unit-testing-guide.html", + "type": "article" + }, + { + "title": "Explore top posts about Testing", + "url": "https://app.daily.dev/tags/testing?ref=roadmapsh", + "type": "article" + }, + { + "title": "What is Unit Testing?", + "url": "https://youtu.be/3kzHmaeozDI", + "type": "video" + } + ] + }, + "STQQbPa7PE3gbjMdL6P-t": { + "title": "Testing", + "description": "A key to building software that meets requirements without defects is testing. Software testing helps developers know they are building the right software. When tests are run as part of the development process (often with continuous integration tools), they build confidence and prevent regressions in the code.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is Software Testing?", + "url": "https://www.guru99.com/software-testing-introduction-importance.html", + "type": "article" + }, + { + "title": "Testing Pyramid", + "url": "https://www.browserstack.com/guide/testing-pyramid-for-test-automation", + "type": "article" + }, + { + "title": "Explore top posts about Testing", + "url": "https://app.daily.dev/tags/testing?ref=roadmapsh", + "type": "article" + } + ] + }, + "mGfD7HfuP184lFkXZzGjG": { + "title": "CI / CD", + "description": "CI/CD (Continuous Integration/Continuous Deployment) is the practice of automating building, testing, and deployment of applications with the main goal of detecting issues early, and provide quicker releases to the production environment.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is 
CI/CD?", + "url": "https://about.gitlab.com/topics/ci-cd/", + "type": "article" + }, + { + "title": "A Primer: Continuous Integration and Continuous Delivery (CI/CD)", + "url": "https://thenewstack.io/a-primer-continuous-integration-and-continuous-delivery-ci-cd/", + "type": "article" + }, + { + "title": "3 Ways to Use Automation in CI/CD Pipelines", + "url": "https://thenewstack.io/3-ways-to-use-automation-in-ci-cd-pipelines/", + "type": "article" + }, + { + "title": "Articles about CI/CD", + "url": "https://thenewstack.io/category/ci-cd/", + "type": "article" + }, + { + "title": "Explore top posts about CI/CD", + "url": "https://app.daily.dev/tags/cicd?ref=roadmapsh", + "type": "article" + }, + { + "title": "DevOps CI/CD Explained in 100 Seconds by Fireship", + "url": "https://www.youtube.com/watch?v=scEDHsr3APg", + "type": "video" + }, + { + "title": "Automate your Workflows with GitHub Actions", + "url": "https://www.youtube.com/watch?v=nyKZTKQS_EQ", + "type": "video" + } + ] + }, + "6XIWO0MoE-ySl4qh_ihXa": { + "title": "GOF Design Patterns", + "description": "The Gang of Four (GoF) design patterns are a set of design patterns for object-oriented software development that were first described in the book \"Design Patterns: Elements of Reusable Object-Oriented Software\" by Erich Gamma, Richard Helm, Ralph Johnson, and John Vlissides (also known as the Gang of Four).\n\nThe GoF design patterns are divided into three categories: Creational, Structural and Behavioral.\n\n* Creational Patterns\n* Structural Patterns\n* Behavioral Patterns\n\nLearn more from the following links:", + "links": [ + { + "title": "Gangs of Four (GoF) Design Patterns", + "url": "https://www.digitalocean.com/community/tutorials/gangs-of-four-gof-design-patterns", + "type": "article" + }, + { + "title": "Tutorial - Builder Pattern (Gang of Four Design Patterns Series)", + "url": "https://www.youtube.com/watch?v=_sa2WlAFWQos", + "type": "video" + } + ] + }, + "u8IRw5PuXGUcmxA0YYXgx": { + 
"title": "CQRS", + "description": "CQRS, or command query responsibility segregation, defines an architectural pattern where the main focus is to separate the approach of reading and writing operations for a data store. CQRS can also be used along with Event Sourcing pattern in order to persist application state as an ordered sequence of events, making it possible to restore data to any point in time.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "CQRS Pattern", + "url": "https://docs.microsoft.com/en-us/azure/architecture/patterns/cqrs", + "type": "article" + } + ] + }, + "BvHi5obg0L1JDZFKBzx9t": { + "title": "Domain Driven Design", + "description": "Domain-driven design (DDD) is a software design approach focusing on modeling software to match a domain according to input from that domain's experts.\n\nIn terms of object-oriented programming, it means that the structure and language of software code (class names, class methods, class variables) should match the business domain. 
For example, if a software processes loan applications, it might have classes like LoanApplication and Customer, and methods such as AcceptOffer and Withdraw.\n\nDDD connects the implementation to an evolving model and it is predicated on the following goals:\n\n* Placing the project's primary focus on the core domain and domain logic;\n* Basing complex designs on a model of the domain;\n* Initiating a creative collaboration between technical and domain experts to iteratively refine a conceptual model that addresses particular domain problems.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Domain-Driven Design", + "url": "https://redis.com/glossary/domain-driven-design-ddd/", + "type": "article" + }, + { + "title": "Domain-Driven Design: Tackling Complexity in the Heart of Software", + "url": "https://www.amazon.com/Domain-Driven-Design-Tackling-Complexity-Software/dp/0321125215", + "type": "article" + }, + { + "title": "Explore top posts about Domain-Driven Design", + "url": "https://app.daily.dev/tags/domain-driven-design?ref=roadmapsh", + "type": "article" + } + ] + }, + "wqE-mkxvehOzOv8UyE39p": { + "title": "Event Sourcing", + "description": "Event sourcing is a design pattern in which the state of a system is represented as a sequence of events that have occurred over time. In an event-sourced system, changes to the state of the system are recorded as events and stored in an event store. The current state of the system is derived by replaying the events from the event store.\n\nOne of the main benefits of event sourcing is that it provides a clear and auditable history of all the changes that have occurred in the system. This can be useful for debugging and for tracking the evolution of the system over time.\n\nEvent sourcing is often used in conjunction with other patterns, such as Command Query Responsibility Segregation (CQRS) and domain-driven design, to build scalable and responsive systems with complex business logic. 
It is also useful for building systems that need to support undo/redo functionality or that need to integrate with external systems.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Event Sourcing - Martin Fowler", + "url": "https://martinfowler.com/eaaDev/EventSourcing.html", + "type": "article" + }, + { + "title": "Explore top posts about Architecture", + "url": "https://app.daily.dev/tags/architecture?ref=roadmapsh", + "type": "article" + } + ] + }, + "I-PUAE2AzbEaUkW9vMaUM": { + "title": "Test Driven Development", + "description": "Test driven development (TDD) is the process of writing tests for software's requirements which will fail until the software is developed to meet those requirements. Once those tests pass, then the cycle repeats to refactor code or develop another feature/requirement. In theory, this ensures that software is written to meet requirements in the simplest form, and avoids code defects.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is Test Driven Development (TDD)?", + "url": "https://www.guru99.com/test-driven-development.html", + "type": "article" + }, + { + "title": "Test-driven development", + "url": "https://www.ibm.com/garage/method/practices/code/practice_test_driven_development/", + "type": "article" + }, + { + "title": "Explore top posts about TDD", + "url": "https://app.daily.dev/tags/tdd?ref=roadmapsh", + "type": "article" + }, + { + "title": "Agile in Practice: Test Driven Development", + "url": "https://youtu.be/uGaNkTahrIw", + "type": "video" + } + ] + }, + "Ke522R-4k6TDeiDRyZbbU": { + "title": "Monolithic Apps", + "description": "Monolithic architecture is a pattern in which an application handles requests, executes business logic, interacts with the database, and creates the HTML for the front end. In simpler terms, this one application does many things. 
Its inner components are highly coupled and deployed as one unit.\n\nIt is recommended to build simple applications as a monolith for faster development cycle. Also suitable for Proof-of-Concept(PoC) projects.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Pattern: Monolithic Architecture", + "url": "https://microservices.io/patterns/monolithic.html", + "type": "article" + }, + { + "title": "Monolithic Architecture - Advantages & Disadvantages", + "url": "https://datamify.medium.com/monolithic-architecture-advantages-and-disadvantages-e71a603eec89", + "type": "article" + } + ] + }, + "nkmIv3dNwre4yrULMgTh3": { + "title": "Serverless", + "description": "Serverless is an architecture in which a developer builds and runs applications without provisioning or managing servers. With cloud computing/serverless, servers exist but are managed by the cloud provider. Resources are used as they are needed, on demand and often using auto scaling.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Serverless", + "url": "https://www.ibm.com/cloud/learn/serverless", + "type": "article" + }, + { + "title": "AWS Services", + "url": "https://aws.amazon.com/serverless/", + "type": "article" + }, + { + "title": "Explore top posts about Serverless", + "url": "https://app.daily.dev/tags/serverless?ref=roadmapsh", + "type": "article" + }, + { + "title": "Serverless Computing in 100 Seconds", + "url": "https://www.youtube.com/watch?v=W_VV2Fx32_Y&ab_channel=Fireship", + "type": "video" + } + ] + }, + "K55h3aqOGe6-hgVhiFisT": { + "title": "Microservices", + "description": "Microservice architecture is a pattern in which highly cohesive, loosely coupled services are separately developed, maintained, and deployed. 
Each component handles an individual function, and when combined, the application handles an overall business function.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Pattern: Microservice Architecture", + "url": "https://microservices.io/patterns/microservices.html", + "type": "article" + }, + { + "title": "What is Microservices?", + "url": "https://smartbear.com/solutions/microservices/", + "type": "article" + }, + { + "title": "Microservices 101", + "url": "https://thenewstack.io/microservices-101/", + "type": "article" + }, + { + "title": "Primer: Microservices Explained", + "url": "https://thenewstack.io/primer-microservices-explained/", + "type": "article" + }, + { + "title": "Articles about Microservices", + "url": "https://thenewstack.io/category/microservices/", + "type": "article" + }, + { + "title": "Explore top posts about Microservices", + "url": "https://app.daily.dev/tags/microservices?ref=roadmapsh", + "type": "article" + } + ] + }, + "n14b7sfTOwsjKTpFC9EZ2": { + "title": "Service Mesh", + "description": "A service mesh is an architectural pattern for enhancing communication, security, and management between microservices in a distributed network. It employs a collection of intelligent proxies to manage service-to-service communication, ensuring high availability, efficient load balancing, and robust service discovery. Additionally, a service mesh offers advanced features like observability for monitoring network behavior, and various traffic management capabilities.\n\nIn a typical service mesh setup, each microservice is paired with a proxy. This proxy, often deployed using a sidecar pattern, is responsible not only for handling communication to and from its associated microservice but also for implementing various network functionalities. 
These functionalities include load balancing, intelligent routing, and ensuring secure data transfer.\n\nThe sidecar pattern, integral to service meshes, involves deploying the proxy as a sidecar container alongside the main microservice container, especially in Kubernetes environments. This design allows the service mesh to function independently from the microservices themselves, simplifying management and updates.\n\nPopular service mesh implementations include Istio and Linkerd, which offer robust solutions tailored to modern, cloud-based application architectures.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is a Service Mesh (AWS blog)?", + "url": "https://aws.amazon.com/what-is/service-mesh/", + "type": "article" + }, + { + "title": "What is a Service Mesh (RedHat blog)?", + "url": "https://www.redhat.com/en/topics/microservices/what-is-a-service-mesh", + "type": "article" + }, + { + "title": "Explore top posts about Service Mesh", + "url": "https://app.daily.dev/tags/service-mesh?ref=roadmapsh", + "type": "article" + }, + { + "title": "Microservices pain points and how service mesh can help solve those issues", + "url": "https://www.youtube.com/watch?v=QiXK0B9FhO0", + "type": "video" + } + ] + }, + "tObmzWpjsJtK4GWhx6pwB": { + "title": "SOA", + "description": "SOA, or service-oriented architecture, defines a way to make software components reusable via service interfaces. 
These interfaces utilize common communication standards in such a way that they can be rapidly incorporated into new applications without having to perform deep integration each time.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is SOA?", + "url": "https://aws.amazon.com/what-is/service-oriented-architecture/", + "type": "article" + }, + { + "title": "Reference Architecture Foundation for Service Oriented Architecture", + "url": "http://docs.oasis-open.org/soa-rm/soa-ra/v1.0/soa-ra.html", + "type": "article" + }, + { + "title": "Explore top posts about Architecture", + "url": "https://app.daily.dev/tags/architecture?ref=roadmapsh", + "type": "article" + } + ] + }, + "8DmabQJXlrT__COZrDVTV": { + "title": "Twelve Factor Apps", + "description": "The Twelve-Factor App is a methodology for building scalable and maintainable software-as-a-service (SaaS) applications. It is based on a set of best practices that were identified by the authors of the methodology as being essential for building modern, cloud-native applications.\n\nThe Twelve-Factor App methodology consists of the following principles:\n\n* Codebase: There should be a single codebase for the application, with multiple deployments.\n* Dependencies: The application should explicitly declare and isolate its dependencies.\n* Config: The application should store configuration in the environment.\n* Backing services: The application should treat backing services as attached resources.\n* Build, release, run: The application should be built, released, and run as an isolated unit.\n* Processes: The application should be executed as one or more stateless processes.\n* Port binding: The application should expose its services through port binding.\n* Concurrency: The application should scale out by adding more processes, not by adding threads.\n* Disposability: The application should be designed to start and stop quickly.\n* Dev/prod parity: The development, staging, and 
production environments should be as similar as possible.\n* Logs: The application should treat logs as event streams.\n* Admin processes: The application should run admin/maintenance tasks as one-off processes.\n\nThe Twelve-Factor App methodology is widely adopted by developers of SaaS applications, and it is seen as a best practice for building cloud-native applications that are scalable, maintainable, and easy to deploy.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "The Twelve-Factor App", + "url": "https://12factor.net/", + "type": "article" + } + ] + }, + "tHiUpG9LN35E5RaHddMv5": { + "title": "Architectural Patterns", + "description": "An architectural pattern is a general, reusable solution to a commonly occurring problem in software architecture within a given context. The architectural patterns address various issues in software engineering, such as computer hardware performance limitations, high availability and minimization of a business risk.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "14 Architectural Patterns to know", + "url": "https://www.redhat.com/architect/14-software-architecture-patterns", + "type": "article" + }, + { + "title": "Architectural Patterns in a nutshell", + "url": "https://towardsdatascience.com/10-common-software-architectural-patterns-in-a-nutshell-a0b47a1e9013", + "type": "article" + } + ] + }, + "GPFRMcY1DEtRgnaZwJ3vW": { + "title": "RabbitMQ", + "description": "With tens of thousands of users, RabbitMQ is one of the most popular open-source message brokers. RabbitMQ is lightweight and easy to deploy on-premises and in the cloud. It supports multiple messaging protocols. 
RabbitMQ can be deployed in distributed and federated configurations to meet high-scale, high-availability requirements.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "RabbitMQ Tutorials", + "url": "https://www.rabbitmq.com/getstarted.html", + "type": "article" + }, + { + "title": "Explore top posts about RabbitMQ", + "url": "https://app.daily.dev/tags/rabbitmq?ref=roadmapsh", + "type": "article" + }, + { + "title": "RabbitMQ Tutorial - Message Queues and Distributed Systems", + "url": "https://www.youtube.com/watch?v=nFxjaVmFj5E", + "type": "video" + } + ] + }, + "VoYSis1F1ZfTxMlQlXQKB": { + "title": "Kafka", + "description": "Apache Kafka is an open-source distributed event streaming platform used by thousands of companies for high-performance data pipelines, streaming analytics, data integration, and mission-critical applications.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Apache Kafka quickstart", + "url": "https://kafka.apache.org/quickstart", + "type": "article" + }, + { + "title": "Explore top posts about Kafka", + "url": "https://app.daily.dev/tags/kafka?ref=roadmapsh", + "type": "article" + }, + { + "title": "Apache Kafka Fundamentals", + "url": "https://www.youtube.com/watch?v=B5j3uNBH8X4", + "type": "video" + } + ] + }, + "nJ5FpFgGCRaALcWmAKBKT": { + "title": "Message Brokers", + "description": "Message brokers are an inter-application communication technology to help build a common integration mechanism to support cloud-native, microservices-based, serverless, and hybrid cloud architectures. 
Two of the most famous message brokers are `RabbitMQ` and `Apache Kafka`\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Introduction to Message Brokers", + "url": "https://www.youtube.com/watch?v=57Qr9tk6Uxc", + "type": "video" + } + ] + }, + "31ZlpfIPr9-5vYZqvjUeL": { + "title": "LXC", + "description": "LXC is an abbreviation used for Linux Containers which is an operating system that is used for running multiple Linux systems virtually on a controlled host via a single Linux kernel. LXC is a userspace interface for the Linux kernel containment features. Through a powerful API and simple tools, it lets Linux users easily create and manage system or application containers.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "LXC Documentation", + "url": "https://linuxcontainers.org/lxc/documentation/", + "type": "article" + }, + { + "title": "What is LXC?", + "url": "https://linuxcontainers.org/lxc/introduction/", + "type": "article" + }, + { + "title": "Linux Container (LXC) Introduction", + "url": "https://youtu.be/_KnmRdK69qM", + "type": "video" + }, + { + "title": "Getting started with LXC containers", + "url": "https://youtu.be/CWmkSj_B-wo", + "type": "video" + } + ] + }, + "SGVwJme-jT_pbOTvems0v": { + "title": "Containerization vs Virtualization", + "description": "Containers and virtual machines are the two most popular approaches to setting up a software infrastructure for your organization.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Containerization vs. 
Virtualization: Everything you need to know", + "url": "https://middleware.io/blog/containerization-vs-virtualization/", + "type": "article" + }, + { + "title": "Explore top posts about Containers", + "url": "https://app.daily.dev/tags/containers?ref=roadmapsh", + "type": "article" + }, + { + "title": "Containerization or Virtualization - The Differences ", + "url": "https://www.youtube.com/watch?v=1WnDHitznGY", + "type": "video" + } + ] + }, + "sVuIdAe08IWJVqAt4z-ag": { + "title": "WebSockets", + "description": "Web sockets are defined as a two-way communication between the servers and the clients, which mean both the parties, communicate and exchange data at the same time. This protocol defines a full duplex communication from the ground up. Web sockets take a step forward in bringing desktop rich functionalities to the web browsers.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Introduction to WebSockets", + "url": "https://www.tutorialspoint.com/websockets/index.htm", + "type": "article" + }, + { + "title": "Socket.io Library Bidirectional and low-latency communication for every platform", + "url": "https://socket.io/", + "type": "article" + }, + { + "title": "A Beginners Guide to WebSockets", + "url": "https://www.youtube.com/watch?v=8ARodQ4Wlf4", + "type": "video" + } + ] + }, + "RUSdlokJUcEYbCvq5FJBJ": { + "title": "Server Sent Events", + "description": "Server-Sent Events (SSE) is a technology that allows a web server to push data to a client in real-time. It uses an HTTP connection to send a stream of data from the server to the client, and the client can listen for these events and take action when they are received.\n\nSSE is useful for applications that require real-time updates, such as chat systems, stock tickers, and social media feeds. 
It is a simple and efficient way to establish a long-lived connection between a client and a server, and it is supported by most modern web browsers.\n\nTo use SSE, the client must create an EventSource object and specify the URL of the server-side script that will send the events. The server can then send events by writing them to the response stream with the proper formatting.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Server-Sent Events - MDN", + "url": "https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events", + "type": "article" + } + ] + }, + "z5AdThp9ByulmM9uekgm-": { + "title": "Nginx", + "description": "NGINX is a powerful web server and uses a non-threaded, event-driven architecture that enables it to outperform Apache if configured correctly. It can also do other important things, such as load balancing, HTTP caching, or be used as a reverse proxy.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Official Website", + "url": "https://nginx.org/", + "type": "article" + }, + { + "title": "Explore top posts about Nginx", + "url": "https://app.daily.dev/tags/nginx?ref=roadmapsh", + "type": "article" + }, + { + "title": "NGINX Explained in 100 Seconds", + "url": "https://www.youtube.com/watch?v=JKxlsvZXG7c", + "type": "video" + } + ] + }, + "Op-PSPNoyj6Ss9CS09AXh": { + "title": "Caddy", + "description": "The Caddy web server is an extensible, cross-platform, open-source web server written in Go. 
It has some really nice features like automatic SSL/HTTPs and a really easy configuration file.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Official Website", + "url": "https://caddyserver.com/", + "type": "article" + }, + { + "title": "Getting started with Caddy the HTTPS Web Server from scratch", + "url": "https://www.youtube.com/watch?v=t4naLFSlBpQ", + "type": "video" + } + ] + }, + "jjjonHTHHo-NiAf6p9xPv": { + "title": "Apache", + "description": "Apache is a free, open-source HTTP server, available on many operating systems, but mainly used on Linux distributions. It is one of the most popular options for web developers, as it accounts for over 30% of all the websites, as estimated by W3Techs.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Apache Server Website", + "url": "https://httpd.apache.org/", + "type": "article" + }, + { + "title": "Explore top posts about Apache", + "url": "https://app.daily.dev/tags/apache?ref=roadmapsh", + "type": "article" + }, + { + "title": "What is Apache Web Server?", + "url": "https://www.youtube.com/watch?v=kaaenHXO4t4", + "type": "video" + } + ] + }, + "0NJDgfe6eMa7qPUOI6Eya": { + "title": "MS IIS", + "description": "Internet Information Services (IIS) for Windows® Server is a flexible, secure and manageable Web server for hosting anything on the Web.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Official Website", + "url": "https://www.iis.net/", + "type": "article" + }, + { + "title": "Explore top posts about .NET", + "url": "https://app.daily.dev/tags/.net?ref=roadmapsh", + "type": "article" + }, + { + "title": "Learn Windows Web Server IIS", + "url": "https://www.youtube.com/watch?v=1VdxPWwtISA", + "type": "video" + } + ] + }, + "fekyMpEnaGqjh1Cu4Nyc4": { + "title": "Web Servers", + "description": "Web servers can be either hardware or software, or perhaps a combination of the two.\n\n### Hardware Side:\n\nA hardware web 
server is a computer that houses web server software and the files that make up a website (for example, HTML documents, images, CSS stylesheets, and JavaScript files). A web server establishes a connection to the Internet and facilitates the physical data exchange with other web-connected devices.\n\n### Software side:\n\nA software web server has a number of software components that regulate how hosted files are accessed by online users. This is at the very least an HTTP server. Software that knows and understands HTTP and URLs (web addresses) is known as an HTTP server (the protocol your browser uses to view webpages). The content of these hosted websites is sent to the end user's device through an HTTP server, which may be accessed via the domain names of the websites it holds.\n\nBasically, an HTTP request is made by a browser anytime it wants a file that is stored on a web server. The relevant (hardware) web server receives the request, which is then accepted by the appropriate (software) HTTP server, which then locates the requested content and returns it to the browser over HTTP. (If the server cannot locate the requested page, it responds with a 404 error.)\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is a Web Server ", + "url": "https://developer.mozilla.org/en-US/docs/Learn/Common_questions/What_is_a_web_server", + "type": "article" + }, + { + "title": "Web Server Concepts and Examples", + "url": "https://youtu.be/9J1nJOivdyw", + "type": "video" + } + ] + }, + "SHmbcMRsc3SygEDksJQBD": { + "title": "Building For Scale", + "description": "Speaking in general terms, scalability is the ability of a system to handle a growing amount of work by adding resources to it.\n\nA software that was conceived with a scalable architecture in mind, is a system that will support higher workloads without any fundamental changes to it, but don't be fooled, this isn't magic. 
You'll only get so far with smart thinking without adding more sources to it.\n\nFor a system to be scalable, there are certain things you must pay attention to, like:\n\n* Coupling\n* Observability\n* Evolvability\n* Infrastructure\n\nWhen you think about the infrastructure of a scalable system, you have two main ways of building it: using on-premises resources or leveraging all the tools a cloud provider can give you.\n\nThe main difference between on-premises and cloud resources will be FLEXIBILITY, on cloud providers you don't really need to plan ahead, you can upgrade your infrastructure with a couple of clicks, while with on-premises resources you will need a certain level of planning.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Scalable Architecture: A Definition and How-To Guide", + "url": "https://www.sentinelone.com/blog/scalable-architecture/", + "type": "article" + }, + { + "title": "Scaling Distributed Systems - Software Architecture Introduction", + "url": "https://www.youtube.com/watch?v=gxfERVP18-g", + "type": "video" + } + ] + }, + "g8GjkJAhvnSxXTZks0V1g": { + "title": "Redis", + "description": "Redis is an open source (BSD licensed), in-memory **data structure store** used as a database, cache, message broker, and streaming engine. Redis provides data structures such as [strings](https://redis.io/topics/data-types-intro#strings), [hashes](https://redis.io/topics/data-types-intro#hashes), [lists](https://redis.io/topics/data-types-intro#lists), [sets](https://redis.io/topics/data-types-intro#sets), [sorted sets](https://redis.io/topics/data-types-intro#sorted-sets) with range queries, [bitmaps](https://redis.io/topics/data-types-intro#bitmaps), [hyperloglogs](https://redis.io/topics/data-types-intro#hyperloglogs), [geospatial indexes](https://redis.io/commands/geoadd), and [streams](https://redis.io/topics/streams-intro). 
Redis has built-in [replication](https://redis.io/topics/replication), [Lua scripting](https://redis.io/commands/eval), [LRU eviction](https://redis.io/topics/lru-cache), [transactions](https://redis.io/topics/transactions), and different levels of [on-disk persistence](https://redis.io/topics/persistence), and provides high availability via [Redis Sentinel](https://redis.io/topics/sentinel) and automatic partitioning with [Redis Cluster](https://redis.io/topics/cluster-tutorial).\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Redis Website", + "url": "https://redis.io/", + "type": "article" + }, + { + "title": "Explore top posts about Redis", + "url": "https://app.daily.dev/tags/redis?ref=roadmapsh", + "type": "article" + }, + { + "title": "Redis in 100 Seconds", + "url": "https://www.youtube.com/watch?v=G1rOthIU-uo", + "type": "video" + } + ] + }, + "xPvVwGQw28uMeLYIWn8yn": { + "title": "Memcached", + "description": "Memcached (pronounced variously mem-cash-dee or mem-cashed) is a general-purpose distributed memory-caching system. It is often used to speed up dynamic database-driven websites by caching data and objects in RAM to reduce the number of times an external data source (such as a database or API) must be read. Memcached is free and open-source software, licensed under the Revised BSD license. Memcached runs on Unix-like operating systems (Linux and macOS) and on Microsoft Windows. It depends on the `libevent` library.\n\nMemcached's APIs provide a very large hash table distributed across multiple machines. When the table is full, subsequent inserts cause older data to be purged in the least recently used (LRU) order. Applications using Memcached typically layer requests and additions into RAM before falling back on a slower backing store, such as a database.\n\nMemcached has no internal mechanism to track misses which may happen. 
However, some third-party utilities provide this functionality.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Memcached, From Official Github", + "url": "https://github.com/memcached/memcached#readme", + "type": "opensource" + }, + { + "title": "Memcached, From Wikipedia", + "url": "https://en.wikipedia.org/wiki/Memcached", + "type": "article" + }, + { + "title": "Memcached Tutorial", + "url": "https://www.tutorialspoint.com/memcached/index.htm", + "type": "article" + } + ] + }, + "28U6q_X-NTYf7OSKHjoWH": { + "title": "MongoDB", + "description": "MongoDB is a source-available cross-platform document-oriented database program. Classified as a NoSQL database program, MongoDB uses JSON-like documents with optional schemas. MongoDB is developed by MongoDB Inc. and licensed under the Server Side Public License (SSPL).\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Visit Dedicated MongoDB Roadmap", + "url": "/mongodb", + "type": "article" + }, + { + "title": "MongoDB Website", + "url": "https://www.mongodb.com/", + "type": "article" + }, + { + "title": "MongoDB Documentation", + "url": "https://docs.mongodb.com/", + "type": "article" + }, + { + "title": "MongoDB Online Sandbox", + "url": "https://mongoplayground.net/", + "type": "article" + }, + { + "title": "Learning Path for MongoDB Developers", + "url": "https://learn.mongodb.com/catalog", + "type": "article" + }, + { + "title": "Dynamo DB Docs", + "url": "https://docs.aws.amazon.com/dynamodb/index.html", + "type": "article" + }, + { + "title": "Official Developers Guide", + "url": "https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Introduction.html", + "type": "article" + } + ] + }, + "qOlNzZ7U8LhIGukb67n7U": { + "title": "CouchDB", + "description": "Apache CouchDB is an open-source document-oriented NoSQL database. It uses JSON to store data, JavaScript as its query language using MapReduce, and HTTP for an API. 
Unlike a relational database, a CouchDB database does not store data and relationships in tables. Instead, each database is a collection of independent documents. Each document maintains its own data and self-contained schema.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "CouchDB Website", + "url": "https://couchdb.apache.org/", + "type": "article" + }, + { + "title": "CouchDB Documentation", + "url": "https://docs.couchdb.org/", + "type": "article" + }, + { + "title": "The big NoSQL databases comparison", + "url": "https://kkovacs.eu/cassandra-vs-mongodb-vs-couchdb-vs-redis/", + "type": "article" + }, + { + "title": "pouchdb - a JavaScript database inspired by CouchDB", + "url": "https://pouchdb.com/", + "type": "article" + }, + { + "title": "Explore top posts about CouchDB", + "url": "https://app.daily.dev/tags/couchdb?ref=roadmapsh", + "type": "article" + } + ] + }, + "BTNJfWemFKEeNeTyENXui": { + "title": "Neo4j", + "description": "A graph database stores nodes and relationships instead of tables, or documents. Data is stored just like you might sketch ideas on a whiteboard. Your data is stored without restricting it to a pre-defined model, allowing a very flexible way of thinking about and using it.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is a Graph Database?", + "url": "https://neo4j.com/developer/graph-database/", + "type": "article" + }, + { + "title": "Explore top posts about Backend Development", + "url": "https://app.daily.dev/tags/backend?ref=roadmapsh", + "type": "article" + } + ] + }, + "G9AI_i3MkUE1BsO3_-PH7": { + "title": "Graceful Degradation", + "description": "Graceful degradation is a design principle that states that a system should be designed to continue functioning, even if some of its components or features are not available. 
In the context of web development, graceful degradation refers to the ability of a web page or application to continue functioning, even if the user's browser or device does not support certain features or technologies.\n\nGraceful degradation is often used as an alternative to progressive enhancement, a design principle that states that a system should be designed to take advantage of advanced features and technologies if they are available.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is Graceful Degradation & Why Does it Matter?", + "url": "https://blog.hubspot.com/website/graceful-degradation", + "type": "article" + }, + { + "title": "Four Considerations When Designing Systems For Graceful Degradation", + "url": "https://newrelic.com/blog/best-practices/design-software-for-graceful-degradation", + "type": "article" + }, + { + "title": "The Art of Graceful Degradation", + "url": "https://farfetchtechblog.com/en/blog/post/the-art-of-failure-ii-graceful-degradation/", + "type": "article" + } + ] + }, + "qAu-Y4KI2Z_y-EqiG86cR": { + "title": "Throttling", + "description": "Throttling is a design pattern that is used to limit the rate at which a system or component can be used. It is commonly used in cloud computing environments to prevent overuse of resources, such as compute power, network bandwidth, or storage capacity.\n\nThere are several ways to implement throttling in a cloud environment:\n\n* Rate limiting: This involves setting a maximum number of requests that can be made to a system or component within a specified time period.\n* Resource allocation: This involves allocating a fixed amount of resources to a system or component, and then limiting the use of those resources if they are exceeded.\n* Token bucket: This involves using a \"bucket\" of tokens to represent the available resources, and then allowing a certain number of tokens to be \"consumed\" by each request. 
When the bucket is empty, additional requests are denied until more tokens become available.\n\nThrottling is an important aspect of cloud design, as it helps to ensure that resources are used efficiently and that the system remains stable and available. It is often used in conjunction with other design patterns, such as auto-scaling and load balancing, to provide a scalable and resilient cloud environment.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Throttling - AWS Well-Architected Framework", + "url": "https://docs.aws.amazon.com/wellarchitected/2022-03-31/framework/rel_mitigate_interaction_failure_throttle_requests.html", + "type": "article" + } + ] + }, + "JansCqGDyXecQkD1K7E7e": { + "title": "Backpressure", + "description": "Backpressure is a design pattern that is used to manage the flow of data through a system, particularly in situations where the rate of data production exceeds the rate of data consumption. It is commonly used in cloud computing environments to prevent overloading of resources and to ensure that data is processed in a timely and efficient manner.\n\nThere are several ways to implement backpressure in a cloud environment:\n\n* Buffering: This involves storing incoming data in a buffer until it can be processed, allowing the system to continue receiving data even if it is temporarily unable to process it.\n* Batching: This involves grouping incoming data into batches and processing the batches in sequence, rather than processing each piece of data individually.\n* Flow control: This involves using mechanisms such as flow control signals or windowing to regulate the rate at which data is transmitted between systems.\n\nBackpressure is an important aspect of cloud design, as it helps to ensure that data is processed efficiently and that the system remains stable and available. 
It is often used in conjunction with other design patterns, such as auto-scaling and load balancing, to provide a scalable and resilient cloud environment.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Awesome Architecture: Backpressure", + "url": "https://awesome-architecture.com/back-pressure/", + "type": "article" + } + ] + }, + "HoQdX7a4SnkFRU4RPQ-D5": { + "title": "Loadshifting", + "description": "Load shifting is a design pattern that is used to manage the workload of a system by shifting the load to different components or resources at different times. It is commonly used in cloud computing environments to balance the workload of a system and to optimize the use of resources.\n\nThere are several ways to implement load shifting in a cloud environment:\n\n* Scheduling: This involves scheduling the execution of tasks or workloads to occur at specific times or intervals.\n* Load balancing: This involves distributing the workload of a system across multiple resources, such as servers or containers, to ensure that the workload is balanced and that resources are used efficiently.\n* Auto-scaling: This involves automatically adjusting the number of resources that are available to a system based on the workload, allowing the system to scale up or down as needed.\n\nLoad shifting is an important aspect of cloud design, as it helps to ensure that resources are used efficiently and that the system remains stable and available. It is often used in conjunction with other design patterns, such as throttling and backpressure, to provide a scalable and resilient cloud environment.", + "links": [] + }, + "spkiQTPvXY4qrhhVUkoPV": { + "title": "Circuit Breaker", + "description": "The circuit breaker design pattern is a way to protect a system from failures or excessive load by temporarily stopping certain operations if the system is deemed to be in a failed or overloaded state. 
It is commonly used in cloud computing environments to prevent cascading failures and to improve the resilience and availability of a system.\n\nA circuit breaker consists of three states: closed, open, and half-open. In the closed state, the circuit breaker allows operations to proceed as normal. If the system encounters a failure or becomes overloaded, the circuit breaker moves to the open state, and all subsequent operations are immediately stopped. After a specified period of time, the circuit breaker moves to the half-open state, and a small number of operations are allowed to proceed. If these operations are successful, the circuit breaker moves back to the closed state; if they fail, the circuit breaker moves back to the open state.\n\nThe circuit breaker design pattern is useful for protecting a system from failures or excessive load by providing a way to temporarily stop certain operations and allow the system to recover. It is often used in conjunction with other design patterns, such as retries and fallbacks, to provide a more robust and resilient cloud environment.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Circuit Breaker - AWS Well-Architected Framework", + "url": "https://docs.aws.amazon.com/wellarchitected/latest/reliability-pillar/rel_mitigate_interaction_failure_graceful_degradation.html", + "type": "article" + }, + { + "title": "Circuit Breaker - Complete Guide", + "url": "https://mateus4k.github.io/posts/circuit-breakers/", + "type": "article" + } + ] + }, + "f7iWBkC0X7yyCoP_YubVd": { + "title": "Migration Strategies", + "description": "Learn how to run database migrations effectively. Especially zero downtime multi-phase schema migrations. 
Rather than make all changes at once, do smaller incremental changes to allow old code, and new code to work with the database at the same time, before removing old code, and finally removing the parts of the database schema which is no longer used.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Databases as a Challenge for Continuous Delivery", + "url": "https://phauer.com/2015/databases-challenge-continuous-delivery/", + "type": "article" + } + ] + }, + "osQlGGy38xMcKLtgZtWaZ": { + "title": "Types of Scaling", + "description": "Horizontal scaling is a change in the **number** of a resource. For example, increasing the number of virtual machines processing messages in a queue. Vertical scaling is a change in the **size/power** of a resource. For example, increasing the memory or disk space available to a machine. Scaling can be applied to databases, cloud resources, and other areas of computing.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Horizontal vs Vertical Scaling", + "url": "https://touchstonesecurity.com/horizontal-vs-vertical-scaling-what-you-need-to-know/", + "type": "article" + }, + { + "title": "System Design Basics: Horizontal vs. Vertical Scaling", + "url": "https://youtu.be/xpDnVSmNFX0", + "type": "video" + }, + { + "title": "System Design 101", + "url": "https://www.youtube.com/watch?v=Y-Gl4HEyeUQ", + "type": "video" + } + ] + }, + "4X-sbqpP0NDhM99bKdqIa": { + "title": "Instrumentation", + "description": "Instrumentation refers to the measure of a product's performance, in order to diagnose errors and to write trace information. Instrumentation can be of two types: source instrumentation and binary instrumentation.\n\nBackend monitoring allows the user to view the performance of infrastructure i.e. the components that run a web application. 
These include the HTTP server, middleware, database, third-party API services, and more.\n\nTelemetry is the process of continuously collecting data from different components of the application. This data helps engineering teams to troubleshoot issues across services and identify the root causes. In other words, telemetry data powers observability for your distributed applications.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is Instrumentation?", + "url": "https://en.wikipedia.org/wiki/Instrumentation_(computer_programming)", + "type": "article" + }, + { + "title": "What is Monitoring?", + "url": "https://www.yottaa.com/performance-monitoring-backend-vs-front-end-solutions/", + "type": "article" + }, + { + "title": "What is Telemetry?", + "url": "https://www.sumologic.com/insight/what-is-telemetry/", + "type": "article" + }, + { + "title": "Explore top posts about Monitoring", + "url": "https://app.daily.dev/tags/monitoring?ref=roadmapsh", + "type": "article" + } + ] + }, + "QvMEEsXh0-rzn5hDGcmEv": { + "title": "Monitoring", + "description": "Distributed systems are hard to build, deploy and maintain. They consist of multiple components which communicate with each other. In parallel to that, users use the system, resulting in multiple requests. Making sense of this noise is important to understand:\n\n* how the system behaves\n* is it broken\n* is it fast enough\n* what can be improved\n\nA product can integrate with existing monitoring products (APM - application performance management). They can show a detailed view of each request - its user, time, components involved, state(error or OK) etc.\n\nWe can build dashboards with custom events or metrics according to our needs. 
Automatic alert rules can be configured on top of these events/metrics.\n\nA few popular tools are Grafana, Sentry, Mixpanel, NewRelic etc", + "links": [ + { + "title": "Observability vs Monitoring?", + "url": "https://www.dynatrace.com/news/blog/observability-vs-monitoring/", + "type": "article" + }, + { + "title": "What is APM?", + "url": "https://www.sumologic.com/blog/the-role-of-apm-and-distributed-tracing-in-observability/", + "type": "article" + }, + { + "title": "Top monitoring tools 2024", + "url": "https://thectoclub.com/tools/best-application-monitoring-software/", + "type": "article" + }, + { + "title": "Caching strategies", + "url": "https://medium.com/@genchilu/cache-strategy-in-backend-d0baaacd2d79", + "type": "article" + } + ] + }, + "neVRtPjIHP_VG7lHwfah0": { + "title": "Telemetry", + "description": "", + "links": [] + }, + "jWwA6yX4Zjx-r_KpDaD3c": { + "title": "MD5", + "description": "MD5 (Message-Digest Algorithm 5) is a hash function that is currently advised not to be used due to its extensive vulnerabilities. It is still used as a checksum to verify data integrity.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Wikipedia - MD5", + "url": "https://en.wikipedia.org/wiki/MD5", + "type": "article" + }, + { + "title": "What is MD5?", + "url": "https://www.techtarget.com/searchsecurity/definition/MD5", + "type": "article" + }, + { + "title": "Why is MD5 not safe?", + "url": "https://infosecscout.com/why-md5-is-not-safe/", + "type": "article" + } + ] + }, + "JVN38r5jENoteia3YeIQ3": { + "title": "SHA", + "description": "SHA (Secure Hash Algorithms) is a family of cryptographic hash functions created by the NIST (National Institute of Standards and Technology). The family includes:\n\n* SHA-0: Published in 1993, this is the first algorithm in the family. 
Shortly after its release, it was discontinued for an undisclosed significant flaw.\n* SHA-1: Created to replace SHA-0 and which resembles MD5, this algorithm has been considered insecure since 2010.\n* SHA-2: This isn't an algorithm, but a set of them, with SHA-256 and SHA-512 being the most popular. SHA-2 is still secure and widely used.\n* SHA-3: Born in a competition, this is the newest member of the family. SHA-3 is very secure and doesn't carry the same design flaws as its brethren.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Wikipedia - SHA-1", + "url": "https://en.wikipedia.org/wiki/SHA-1", + "type": "article" + }, + { + "title": "Wikipedia - SHA-2", + "url": "https://en.wikipedia.org/wiki/SHA-2", + "type": "article" + }, + { + "title": "Wikipedia - SHA-3", + "url": "https://en.wikipedia.org/wiki/SHA-3", + "type": "article" + } + ] + }, + "kGTALrvCpxyVCXHRmkI7s": { + "title": "scrypt", + "description": "Scrypt (pronounced \"ess crypt\") is a password hashing function (like bcrypt). It is designed to use a lot of hardware, which makes brute-force attacks more difficult. Scrypt is mainly used as a proof-of-work algorithm for cryptocurrencies.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Wikipedia - Scrypt", + "url": "https://en.wikipedia.org/wiki/Scrypt", + "type": "article" + } + ] + }, + "dlG1bVkDmjI3PEGpkm1xH": { + "title": "bcrypt", + "description": "bcrypt is a password hashing function, that has been proven reliable and secure since its release in 1999. 
It has been implemented into most commonly-used programming languages.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "bcrypts npm package", + "url": "https://www.npmjs.com/package/bcrypt", + "type": "article" + }, + { + "title": "Understanding bcrypt", + "url": "https://auth0.com/blog/hashing-in-action-understanding-bcrypt/", + "type": "article" + }, + { + "title": "bcrypt explained", + "url": "https://www.youtube.com/watch?v=O6cmuiTBZVs", + "type": "video" + } + ] + }, + "x-WBJjBd8u93ym5gtxGsR": { + "title": "HTTPS", + "description": "HTTPS is a secure way to send data between a web server and a browser.\n\nA communication through HTTPS starts with the handshake phase during which the server and the client agree on how to encrypt the communication, in particular they choose an encryption algorithm and a secret key. After the handshake all the communication between the server and the client will be encrypted using the agreed upon algorithm and key.\n\nThe handshake phase uses a particular kind of cryptography, called asymmetric cryptography, to communicate securely even though client and server have not yet agreed on a secret key. 
After the handshake phase the HTTPS communication is encrypted with symmetric cryptography, which is much more efficient but requires client and server to both have knowledge of the secret key.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is HTTPS?", + "url": "https://www.cloudflare.com/en-gb/learning/ssl/what-is-https/", + "type": "article" + }, + { + "title": "Why HTTPS Matters", + "url": "https://developers.google.com/web/fundamentals/security/encrypt-in-transit/why-https", + "type": "article" + }, + { + "title": "Enabling HTTPS on Your Servers", + "url": "https://web.dev/articles/enable-https", + "type": "article" + }, + { + "title": "How HTTPS works (comic)", + "url": "https://howhttps.works/", + "type": "article" + }, + { + "title": "HTTPS explained with carrier pigeons", + "url": "https://baida.dev/articles/https-explained-with-carrier-pigeons", + "type": "article" + }, + { + "title": "SSL, TLS, HTTP, HTTPS Explained", + "url": "https://www.youtube.com/watch?v=hExRDVZHhig", + "type": "video" + }, + { + "title": "HTTPS — Stories from the field", + "url": "https://www.youtube.com/watch?v=GoXgl9r0Kjk", + "type": "video" + } + ] + }, + "AAgciyxuDvS2B_c6FRMvT": { + "title": "OWASP Risks", + "description": "OWASP or Open Web Application Security Project is an online community that produces freely-available articles, methodologies, documentation, tools, and technologies in the field of web application security.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "OWASP Application Security Verification Standard", + "url": "https://github.com/OWASP/ASVS", + "type": "opensource" + }, + { + "title": "Wikipedia - OWASP", + "url": "https://en.wikipedia.org/wiki/OWASP", + "type": "article" + }, + { + "title": "OWASP Top 10 Security Risks", + "url": "https://cheatsheetseries.owasp.org/IndexTopTen.html", + "type": "article" + }, + { + "title": "OWASP Cheatsheets", + "url": 
"https://cheatsheetseries.owasp.org/cheatsheets/AJAX_Security_Cheat_Sheet.html", + "type": "article" + } + ] + }, + "0v3OsaghJEGHeXX0c5kqn": { + "title": "SSL/TLS", + "description": "Secure Sockets Layer (SSL) and Transport Layer Security (TLS) are cryptographic protocols used to provide security in internet communications. These protocols encrypt the data that is transmitted over the web, so anyone who tries to intercept packets will not be able to interpret the data. One difference that is important to know is that SSL is now deprecated due to security flaws, and most modern web browsers no longer support it. But TLS is still secure and widely supported, so preferably use TLS.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Wikipedia - SSL/TLS", + "url": "https://en.wikipedia.org/wiki/Transport_Layer_Security", + "type": "article" + }, + { + "title": "Cloudflare - What is SSL?", + "url": "https://www.cloudflare.com/learning/ssl/what-is-ssl/", + "type": "article" + }, + { + "title": "SSL, TLS, HTTPS Explained", + "url": "https://www.youtube.com/watch?v=j9QmMEWmcfo", + "type": "video" + } + ] + }, + "LU6WUbkWKbPM1rb2_gEqa": { + "title": "CORS", + "description": "Cross-Origin Resource Sharing (CORS) is an HTTP-header based mechanism that allows a server to indicate any origins (domain, scheme, or port) other than its own from which a browser should permit loading resources.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Cross-Origin Resource Sharing (CORS)", + "url": "https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS", + "type": "article" + }, + { + "title": "Understanding CORS", + "url": "https://rbika.com/blog/understanding-cors", + "type": "article" + }, + { + "title": "CORS in 100 Seconds", + "url": "https://www.youtube.com/watch?v=4KHiSt0oLJ0", + "type": "video" + }, + { + "title": "CORS in 6 minutes", + "url": "https://www.youtube.com/watch?v=PNtFSVU-YTI", + "type": "video" + } + ] + }, + 
"TZ0BWOENPv6pQm8qYB8Ow": { + "title": "Server Security", + "description": "Learn about the security of your server and how to secure it. Here are some of the topics off the top of my head:\n\n* Use a firewall: One of the most effective ways to secure a server is to use a firewall to block all unnecessary incoming traffic. You can use iptables on Linux systems or a hardware firewall to do this.\n* Close unnecessary ports: Make sure to close any ports that are not needed for your server to function properly. This will reduce the attack surface of your server and make it more difficult for attackers to gain access.\n* Use strong passwords: Use long, complex passwords for all of your accounts, and consider using a password manager to store them securely.\n* Keep your system up to date: Make sure to keep your operating system and software up to date with the latest security patches. This will help to prevent vulnerabilities from being exploited by attackers.\n* Use SSL/TLS for communication: Use Secure Sockets Layer (SSL) or Transport Layer Security (TLS) to encrypt communication between your server and client devices. 
This will help to protect against man-in-the-middle attacks and other types of cyber threats.\n* Use an intrusion detection system (IDS): An IDS monitors network traffic and alerts you to any suspicious activity, which can help you to identify and respond to potential threats in a timely manner.\n* Enable two-factor authentication: Two-factor authentication adds an extra layer of security to your accounts by requiring a second form of authentication, such as a code sent to your phone, in addition to your password.\n\nAlso learn about OpenSSL and creating your own PKI as well as managing certs, renewals, and mutual client auth with x509 certs", + "links": [] + }, + "HgQBde1zLUFtlwB66PR6_": { + "title": "CSP", + "description": "Content Security Policy is a computer security standard introduced to prevent cross-site scripting, clickjacking and other code injection attacks resulting from execution of malicious content in the trusted web page context.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "MDN — Content Security Policy (CSP)", + "url": "https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP", + "type": "article" + }, + { + "title": "Google Devs — Content Security Policy (CSP)", + "url": "https://developers.google.com/web/fundamentals/security/csp", + "type": "article" + }, + { + "title": "Explore top posts about Security", + "url": "https://app.daily.dev/tags/security?ref=roadmapsh", + "type": "article" + } + ] + }, + "yCnn-NfSxIybUQ2iTuUGq": { + "title": "How does the internet work?", + "description": "The Internet is a global network of computers connected to each other which communicate through a standardized set of protocols.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "How does the Internet Work?", + "url": "https://cs.fyi/guide/how-does-internet-work", + "type": "article" + }, + { + "title": "The Internet Explained", + "url": "https://www.vox.com/2014/6/16/18076282/the-internet", + "type": 
"article" + }, + { + "title": "How Does the Internet Work?", + "url": "http://web.stanford.edu/class/msande91si/www-spr04/readings/week1/InternetWhitepaper.htm", + "type": "article" + }, + { + "title": "Introduction to Internet", + "url": "/guides/what-is-internet", + "type": "article" + }, + { + "title": "How does the Internet work?", + "url": "https://www.youtube.com/watch?v=x3c1ih2NJEg", + "type": "video" + }, + { + "title": "How the Internet Works in 5 Minutes", + "url": "https://www.youtube.com/watch?v=7_LPdttKXPc", + "type": "video" + }, + { + "title": "How does the internet work? (Full Course)", + "url": "https://www.youtube.com/watch?v=zN8YNNHcaZc", + "type": "video" + } + ] + }, + "R12sArWVpbIs_PHxBqVaR": { + "title": "What is HTTP?", + "description": "HTTP is the `TCP/IP` based application layer communication protocol which standardizes how the client and server communicate with each other. It defines how the content is requested and transmitted across the internet.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Everything you need to know about HTTP", + "url": "https://cs.fyi/guide/http-in-depth", + "type": "article" + }, + { + "title": "What is HTTP?", + "url": "https://www.cloudflare.com/en-gb/learning/ddos/glossary/hypertext-transfer-protocol-http/", + "type": "article" + }, + { + "title": "An overview of HTTP", + "url": "https://developer.mozilla.org/en-US/docs/Web/HTTP/Overview", + "type": "article" + }, + { + "title": "HTTP/3 From A To Z: Core Concepts", + "url": "https://www.smashingmagazine.com/2021/08/http3-core-concepts-part1/", + "type": "article" + }, + { + "title": "Full HTTP Networking Course", + "url": "https://www.youtube.com/watch?v=2JYT5f2isg4", + "type": "video" + }, + { + "title": "HTTP/1 to HTTP/2 to HTTP/3", + "url": "https://www.youtube.com/watch?v=a-sBfyiXysI", + "type": "video" + }, + { + "title": "HTTP Crash Course & Exploration", + "url": "https://www.youtube.com/watch?v=iYM2zFP3Zn0", + "type": 
"video" + }, + { + "title": "SSL, TLS, HTTPS Explained", + "url": "https://www.youtube.com/watch?v=j9QmMEWmcfo", + "type": "video" + } + ] + }, + "ZhSuu2VArnzPDp6dPQQSC": { + "title": "What is Domain Name?", + "description": "A domain name is a unique, easy-to-remember address used to access websites, such as ‘[google.com](http://google.com)’, and ‘[facebook.com](http://facebook.com)’. Users can connect to websites using domain names thanks to the DNS system.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is a Domain Name?", + "url": "https://developer.mozilla.org/en-US/docs/Learn/Common_questions/What_is_a_domain_name", + "type": "article" + }, + { + "title": "What is a Domain Name? | Domain name vs. URL", + "url": "https://www.cloudflare.com/en-gb/learning/dns/glossary/what-is-a-domain-name/", + "type": "article" + }, + { + "title": "A Beginners Guide to How Domain Names Work", + "url": "https://www.youtube.com/watch?v=Y4cRx19nhJk", + "type": "video" + } + ] + }, + "aqMaEY8gkKMikiqleV5EP": { + "title": "What is hosting?", + "description": "Web hosting is an online service that allows you to publish your website files onto the internet. So, anyone who has access to the internet has access to your website.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is the difference between webpage, website, web server, and search engine?", + "url": "https://developer.mozilla.org/en-US/docs/Learn/Common_questions/Web_mechanics/Pages_sites_servers_and_search_engines", + "type": "article" + }, + { + "title": "What is a web server?", + "url": "https://developer.mozilla.org/en-US/docs/Learn/Common_questions/Web_mechanics/What_is_a_web_server", + "type": "article" + }, + { + "title": "What Is Web Hosting? 
Explained", + "url": "https://www.youtube.com/watch?v=htbY9-yggB0", + "type": "video" + }, + { + "title": "Different Types of Web Hosting Explained", + "url": "https://www.youtube.com/watch?v=AXVZYzw8geg", + "type": "video" + }, + { + "title": "Where to Host a Fullstack Project on a Budget", + "url": "https://www.youtube.com/watch?v=Kx_1NYYJS7Q", + "type": "video" + } + ] + }, + "hkxw9jPGYphmjhTjw8766": { + "title": "DNS and how it works?", + "description": "The Domain Name System (DNS) is the phonebook of the Internet. Humans access information online through domain names, like [nytimes.com](http://nytimes.com) or [espn.com](http://espn.com). Web browsers interact through Internet Protocol (IP) addresses. DNS translates domain names to IP addresses so browsers can load Internet resources.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is DNS?", + "url": "https://www.cloudflare.com/en-gb/learning/dns/what-is-dns/", + "type": "article" + }, + { + "title": "How DNS works (comic)", + "url": "https://howdns.works/", + "type": "article" + }, + { + "title": "Understanding Domain names", + "url": "https://developer.mozilla.org/en-US/docs/Glossary/DNS/", + "type": "article" + }, + { + "title": "Explore top posts about DNS", + "url": "https://app.daily.dev/tags/dns?ref=roadmapsh", + "type": "article" + }, + { + "title": "DNS and How does it Work?", + "url": "https://www.youtube.com/watch?v=Wj0od2ag5sk", + "type": "video" + }, + { + "title": "DNS Records", + "url": "https://www.youtube.com/watch?v=7lxgpKh_fRY", + "type": "video" + }, + { + "title": "Complete DNS mini-series", + "url": "https://www.youtube.com/watch?v=zEmUuNFBgN8&list=PLTk5ZYSbd9MhMmOiPhfRJNW7bhxHo4q-K", + "type": "video" + } + ] + }, + "P82WFaTPgQEPNp5IIuZ1Y": { + "title": "Browsers and how they work?", + "description": "A web browser is a software application that enables a user to access and display web pages or other online content through its graphical user 
interface.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "How Browsers Work", + "url": "https://www.html5rocks.com/en/tutorials/internals/howbrowserswork/", + "type": "article" + }, + { + "title": "Role of Rendering Engine in Browsers", + "url": "https://www.browserstack.com/guide/browser-rendering-engine", + "type": "article" + }, + { + "title": "Populating the Page: How Browsers Work", + "url": "https://developer.mozilla.org/en-US/docs/Web/Performance/How_browsers_work", + "type": "article" + }, + { + "title": "Explore top posts about Browsers", + "url": "https://app.daily.dev/tags/browsers?ref=roadmapsh", + "type": "article" + } + ] + }, + "PY9G7KQy8bF6eIdr1ydHf": { + "title": "Authentication", + "description": "The API authentication process validates the identity of the client attempting to make a connection by using an authentication protocol. The protocol sends the credentials from the remote client requesting the connection to the remote access server in either plain text or encrypted form. 
The server then knows whether it can grant access to that remote client or not.\n\nHere is the list of common ways of authentication:\n\n* JWT Authentication\n* Token based Authentication\n* Session based Authentication\n* Basic Authentication\n* OAuth - Open Authorization\n* SSO - Single Sign On\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "User Authentication: Understanding the Basics & Top Tips", + "url": "https://swoopnow.com/user-authentication/", + "type": "article" + }, + { + "title": "An overview about authentication methods", + "url": "https://betterprogramming.pub/how-do-you-authenticate-mate-f2b70904cc3a", + "type": "article" + }, + { + "title": "SSO - Single Sign On", + "url": "https://roadmap.sh/guides/sso", + "type": "article" + }, + { + "title": "OAuth - Open Authorization", + "url": "https://roadmap.sh/guides/oauth", + "type": "article" + }, + { + "title": "JWT Authentication", + "url": "https://roadmap.sh/guides/jwt-authentication", + "type": "article" + }, + { + "title": "Token Based Authentication", + "url": "https://roadmap.sh/guides/token-authentication", + "type": "article" + }, + { + "title": "Session Based Authentication", + "url": "https://roadmap.sh/guides/session-authentication", + "type": "article" + }, + { + "title": "Basic Authentication", + "url": "https://roadmap.sh/guides/basic-authentication", + "type": "article" + }, + { + "title": "Explore top posts about Authentication", + "url": "https://app.daily.dev/tags/authentication?ref=roadmapsh", + "type": "article" + } + ] + }, + "UxS_mzVUjLigEwKrXnEeB": { + "title": "JWT", + "description": "JWT stands for JSON Web Token is a token-based encryption open standard/methodology that is used to transfer information securely as a JSON object. Clients and Servers use JWT to securely share information, with the JWT containing encoded JSON objects and claims. 
JWT tokens are designed to be compact, safe to use within URLs, and ideal for SSO contexts.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "jwt.io Website", + "url": "https://jwt.io/", + "type": "article" + }, + { + "title": "Introduction to JSON Web Tokens", + "url": "https://jwt.io/introduction", + "type": "article" + }, + { + "title": "What is JWT?", + "url": "https://www.akana.com/blog/what-is-jwt", + "type": "article" + }, + { + "title": "Explore top posts about JWT", + "url": "https://app.daily.dev/tags/jwt?ref=roadmapsh", + "type": "article" + }, + { + "title": "What Is JWT and Why Should You Use JWT", + "url": "https://www.youtube.com/watch?v=7Q17ubqLfaM", + "type": "video" + }, + { + "title": "What is JWT? JSON Web Token Explained", + "url": "https://www.youtube.com/watch?v=926mknSW9Lo", + "type": "video" + } + ] + }, + "yRiJgjjv2s1uV9vgo3n8m": { + "title": "Basic Authentication", + "description": "Given the name \"Basic Authentication\", you should not confuse Basic Authentication with the standard username and password authentication. 
Basic authentication is a part of the HTTP specification, and the details can be [found in the RFC7617](https://www.rfc-editor.org/rfc/rfc7617.html).\n\nBecause it is a part of the HTTP specifications, all the browsers have native support for \"HTTP Basic Authentication\".\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "HTTP Basic Authentication", + "url": "https://roadmap.sh/guides/http-basic-authentication", + "type": "article" + }, + { + "title": "Explore top posts about Authentication", + "url": "https://app.daily.dev/tags/authentication?ref=roadmapsh", + "type": "article" + }, + { + "title": "Illustrated HTTP Basic Authentication", + "url": "https://www.youtube.com/watch?v=mwccHwUn7Gc", + "type": "video" + } + ] + }, + "0rGj7FThLJZouSQUhnqGW": { + "title": "Token Authentication", + "description": "Token-based authentication is a protocol which allows users to verify their identity, and in return receive a unique access token. During the life of the token, users then access the website or app that the token has been issued for, rather than having to re-enter credentials each time they go back to the same webpage, app, or any resource protected with that same token.\n\nAuth tokens work like a stamped ticket. The user retains access as long as the token remains valid. Once the user logs out or quits an app, the token is invalidated.\n\nToken-based authentication is different from traditional password-based or server-based authentication techniques. Tokens offer a second layer of security, and administrators have detailed control over each action and transaction.\n\nBut using tokens requires a bit of coding know-how. 
Most developers pick up the techniques quickly, but there is a learning curve.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What Is Token-Based Authentication?", + "url": "https://www.okta.com/identity-101/what-is-token-based-authentication/", + "type": "article" + }, + { + "title": "Explore top posts about Authentication", + "url": "https://app.daily.dev/tags/authentication?ref=roadmapsh", + "type": "article" + } + ] + }, + "vp-muizdICcmU0gN8zmkS": { + "title": "OAuth", + "description": "OAuth stands for **O**pen **Auth**orization and is an open standard for authorization. It works to authorize devices, APIs, servers and applications using access tokens rather than user credentials, known as \"secure delegated access\".\n\nIn its simplest form, OAuth delegates authentication to services like Facebook, Amazon, Twitter and authorizes third-party applications to access the user account **without** having to enter their login and password.\n\nIt is mostly utilized for REST/APIs and only provides a limited scope of a user's data.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Okta - What the Heck is OAuth", + "url": "https://developer.okta.com/blog/2017/06/21/what-the-heck-is-oauth", + "type": "article" + }, + { + "title": "DigitalOcean - An Introduction to OAuth 2", + "url": "https://www.digitalocean.com/community/tutorials/an-introduction-to-oauth-2", + "type": "article" + }, + { + "title": "Explore top posts about OAuth", + "url": "https://app.daily.dev/tags/oauth?ref=roadmapsh", + "type": "article" + }, + { + "title": "What is OAuth really all about", + "url": "https://www.youtube.com/watch?v=t4-416mg6iU", + "type": "video" + }, + { + "title": "OAuth 2.0: An Overview", + "url": "https://www.youtube.com/watch?v=CPbvxxslDTU", + "type": "video" + } + ] + }, + "ffzsh8_5yRq85trFt9Xhk": { + "title": "Cookie Based Auth", + "description": "Cookies are pieces of data used to identify the user and their 
preferences. The browser returns the cookie to the server every time the page is requested. Specific cookies like HTTP cookies are used to perform cookie-based authentication to maintain the session for each user.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "How does cookie based authentication work?", + "url": "https://stackoverflow.com/questions/17769011/how-does-cookie-based-authentication-work", + "type": "article" + } + ] + }, + "z3EJBpgGm0_Uj3ymhypbX": { + "title": "OpenID", + "description": "OpenID is a protocol that utilizes the authorization and authentication mechanisms of OAuth 2.0 and is now widely adopted by many identity providers on the Internet. It solves the problem of needing to share user's personal info between many different web services (e.g. online shops, discussion forums etc.)\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Official Website", + "url": "https://openid.net/", + "type": "article" + }, + { + "title": "What is OpenID", + "url": "https://openid.net/connect/", + "type": "article" + }, + { + "title": "OAuth vs OpenID", + "url": "https://securew2.com/blog/oauth-vs-openid-which-is-better", + "type": "article" + }, + { + "title": "Explore top posts about Authentication", + "url": "https://app.daily.dev/tags/authentication?ref=roadmapsh", + "type": "article" + }, + { + "title": "An Illustrated Guide to OAuth and OpenID Connect", + "url": "https://www.youtube.com/watch?v=t18YB3xDfXI", + "type": "video" + }, + { + "title": "OAuth 2.0 and OpenID Connect (in plain English)", + "url": "https://www.youtube.com/watch?v=996OiexHze0", + "type": "video" + } + ] + }, + "UCHtaePVxS-0kpqlYxbfC": { + "title": "SAML", + "description": "Security Assertion Markup Language (SAML)\n-----------------------------------------\n\n**SAML** stands for Security Assertion Markup Language.
It is an XML-based standard for exchanging authentication and authorization data between parties, particularly between an identity provider (IdP) and a service provider (SP). In a SAML-based system, a user requests access to a protected resource. The service provider asks the identity provider to authenticate the user and assert whether they are granted access to the resource.\n\n### Benefits of SAML\n\nSome advantages of using SAML include:\n\n* Single Sign-On (SSO): Users can log in once at the IdP and access multiple service providers without needing to authenticate again.\n* Improved security: Passwords and user credentials are not required to be stored and managed by the service provider, reducing the potential vectors for attack.\n* Increased efficiency: As users no longer need to maintain multiple sets of credentials, managing access becomes easier for both the user and the system administrators.\n* Interoperability: SAML enables a wide range of applications to work together, regardless of the underlying technology or platform.\n\n### SAML Components\n\nThree main components are involved in the SAML architecture:\n\n1. **Identity Provider (IdP)**: The entity that manages users' identities and authenticates them by providing security tokens, also called assertions.\n2. **Service Provider (SP)**: The entity that provides a service (such as a web application or API) and relies on the identity provider to authenticate users and grant/deny access to the resources.\n3. **User/Principal**: The end user seeking access to the service provided by the service provider.\n\n### SAML Workflow\n\nThe SAML authentication process consists of the following steps:\n\n1. The user requests access to a protected resource from the service provider.\n2. If the user is not already authenticated, the service provider generates and sends a SAML authentication request to the identity provider.\n3. 
The identity provider authenticates the user (using, e.g., a username and password, multi-factor authentication, or another method).\n4. The identity provider constructs a SAML response, which includes details about the user and asserts whether the user is authorized to access the requested resource.\n5. The SAML response is sent back to the service provider, typically via the user's web browser or API client.\n6. The service provider processes the SAML response, extracts the necessary information, and grants or denies access to the user based on the identity provider's assertion.\n\nWith SAML, you can streamline user authentication and authorization across various applications and systems, providing a better user experience and improving your overall backend security.", + "links": [] + }, + "NulaE1isWqn-feYHg4YQT": { + "title": "Elasticsearch", + "description": "Elasticsearch at its core is a document-oriented search engine. It is a document based database that lets you INSERT, DELETE, RETRIEVE and even perform analytics on the saved records. But Elasticsearch is unlike any other general purpose database you have worked with in the past. It's essentially a search engine and offers an arsenal of features you can use to retrieve the data stored in it, as per your search criteria.
And that too, at lightning speeds.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Elasticsearch Website", + "url": "https://www.elastic.co/elasticsearch/", + "type": "article" + }, + { + "title": "Elasticsearch Documentation", + "url": "https://www.elastic.co/guide/index.html", + "type": "article" + }, + { + "title": "Explore top posts about ELK", + "url": "https://app.daily.dev/tags/elk?ref=roadmapsh", + "type": "article" + } + ] + }, + "iN_1EuIwCx_7lRBw1Io4U": { + "title": "Solr", + "description": "Solr is highly reliable, scalable and fault tolerant, providing distributed indexing, replication and load-balanced querying, automated failover and recovery, centralized configuration and more. Solr powers the search and navigation features of many of the world's largest internet sites.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Official Website", + "url": "https://solr.apache.org/", + "type": "article" + }, + { + "title": "Official Documentation", + "url": "https://solr.apache.org/resources.html#documentation", + "type": "article" + } + ] + }, + "5XGvep2qoti31bsyqNzrU": { + "title": "Real-Time Data", + "description": "There are many ways to get real time data from the backend. Some of them are:", + "links": [] + }, + "osvajAJlwGI3XnX0fE-kA": { + "title": "Long Polling", + "description": "Long polling is a technique where the client polls the server for new data. However, if the server does not have any data available for the client, instead of sending an empty response, the server holds the request and waits for some specified period of time for new data to be available. If new data becomes available during that time, the server immediately sends a response to the client, completing the open request. If no new data becomes available and the timeout period specified by the client expires, the server sends a response indicating that fact. 
The client will then immediately re-request data from the server, creating a new request-response cycle.", + "links": [] + }, + "Tt7yr-ChHncJG0Ge1f0Xk": { + "title": "Short Polling", + "description": "Short polling is a technique where the client repeatedly polls the server for new data. This is the most common approach to polling. It's simple to implement and understand, but it's not the most efficient way of doing things.", + "links": [] + }, + "M0iaSSdVPWaCUpyTG50Vf": { + "title": "Redis", + "description": "A key-value database (KV database) is a type of database that stores data as a collection of key-value pairs. In a KV database, each piece of data is identified by a unique key, and the value is the data associated with that key.\n\nKV databases are designed for fast and efficient storage and retrieval of data, and they are often used in applications that require high performance and low latency. They are particularly well-suited for storing large amounts of unstructured data, such as log data and user profiles.\n\nSome popular KV databases include Redis, Memcached, and LevelDB. 
These databases are often used in combination with other types of databases, such as relational databases or document databases, to provide a complete and scalable data storage solution.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Key-Value Databases - Wikipedia", + "url": "https://en.wikipedia.org/wiki/Key-value_database", + "type": "article" + }, + { + "title": "Explore top posts about Backend Development", + "url": "https://app.daily.dev/tags/backend?ref=roadmapsh", + "type": "article" + } + ] + }, + "dwfEHInbX2eFiafM-nRMX": { + "title": "DynamoDB", + "description": "", + "links": [] + }, + "RyJFLLGieJ8Xjt-DlIayM": { + "title": "Firebase", + "description": "A real-time database is broadly defined as a data store designed to collect, process, and/or enrich an incoming series of data points (i.e., a data stream) in real time, typically immediately after the data is created.\n\n[Firebase](https://firebase.google.com/) [RethinkDB](https://rethinkdb.com/)", + "links": [] + }, + "5T0ljwlHL0545ICCeehcQ": { + "title": "RethinkDB", + "description": "", + "links": [] + }, + "kdulE3Z_BdbtRmq6T2KmR": { + "title": "SQLite", + "description": "SQLite is a relational database management system that is embedded into the end program. It is self-contained, serverless, zero-configuration, transactional SQL database engine.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "SQLite website", + "url": "https://www.sqlite.org/index.html", + "type": "article" + }, + { + "title": "SQLite Tutorial", + "url": "https://www.sqlitetutorial.net/", + "type": "article" + }, + { + "title": "Explore top posts about SQLite", + "url": "https://app.daily.dev/tags/sqlite?ref=roadmapsh", + "type": "article" + } + ] + }, + "XbM4TDImSH-56NsITjyHK": { + "title": "Influx DB", + "description": "InfluxDB\n--------\n\nInfluxDB was built from the ground up to be a purpose-built time series database; i.e., it was not repurposed to be time series. 
Time was built-in from the beginning. InfluxDB is part of a comprehensive platform that supports the collection, storage, monitoring, visualization and alerting of time series data. It’s much more than just a time series database.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "InfluxDB Website", + "url": "https://www.influxdata.com/", + "type": "article" + }, + { + "title": "Time series database", + "url": "https://www.influxdata.com/time-series-database/", + "type": "article" + }, + { + "title": "Explore top posts about Backend Development", + "url": "https://app.daily.dev/tags/backend?ref=roadmapsh", + "type": "article" + } + ] + }, + "WiAK70I0z-_bzbWNwiHUd": { + "title": "TimeScale", + "description": "", + "links": [] + }, + "gT6-z2vhdIQDzmR2K1g1U": { + "title": "Cassandra", + "description": "A **wide-column database** (sometimes referred to as a column database) is similar to a relational database. It stores data in tables, rows and columns. However, in contrast to relational databases, each row can have its own format of the columns. Column databases can be seen as a two-dimensional key-value database.
One such database system is **Apache Cassandra**.\n\n**Warning:** [note that a \"columnar database\" and a \"column database\" are two different terms!](https://en.wikipedia.org/wiki/Wide-column_store#Wide-column_stores_versus_columnar_databases)\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Apache Cassandra", + "url": "https://cassandra.apache.org/_/index.html", + "type": "article" + }, + { + "title": "Explore top posts about Backend Development", + "url": "https://app.daily.dev/tags/backend?ref=roadmapsh", + "type": "article" + }, + { + "title": "Apache Cassandra - Course for Beginners", + "url": "https://www.youtube.com/watch?v=J-cSy5MeMOA", + "type": "video" + } + ] + }, + "QZwTLOvjUTaSb_9deuxsR": { + "title": "Base", + "description": "", + "links": [] + }, + "5xy66yQrz1P1w7n6PcAFq": { + "title": "AWS Neptune", + "description": "", + "links": [] + }, + "Z01E67D6KjrShvQCHjGR7": { + "title": "Observability", + "description": "In software development, observability is the measure of how well we can understand a system from the work it does, and how to make it better.\n\nSo what makes a system \"observable\"?
It is its ability of producing and collecting metrics, logs and traces in order for us to understand what happens under the hood and identify issues and bottlenecks faster.\n\nYou can of course implement all those features by yourself, but there are a lot of softwares out there that can help you with it like Datadog, Sentry and CloudWatch.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "DataDog Docs", + "url": "https://docs.datadoghq.com/", + "type": "article" + }, + { + "title": "AWS CloudWatch Docs", + "url": "https://aws.amazon.com/cloudwatch/getting-started/", + "type": "article" + }, + { + "title": "Sentry Docs", + "url": "https://docs.sentry.io/", + "type": "article" + }, + { + "title": "Observability and Instrumentation: What They Are and Why They Matter", + "url": "https://newrelic.com/blog/best-practices/observability-instrumentation", + "type": "article" + }, + { + "title": "Explore top posts about Observability", + "url": "https://app.daily.dev/tags/observability?ref=roadmapsh", + "type": "article" + }, + { + "title": "AWS re:Invent 2017: Improving Microservice and Serverless Observability with Monitor", + "url": "https://www.youtube.com/watch?v=Wx0SHRb2xcI", + "type": "video" + } + ] + } +} \ No newline at end of file diff --git a/public/roadmap-content/blockchain.json b/public/roadmap-content/blockchain.json new file mode 100644 index 000000000..890f6cb55 --- /dev/null +++ b/public/roadmap-content/blockchain.json @@ -0,0 +1,2538 @@ +{ + "MvpHHpbS-EksUfuOKILOq": { + "title": "Basic Blockchain Knowledge", + "description": "A blockchain is a decentralized, distributed, and oftentimes public, digital ledger consisting of records called blocks that is used to record transactions across many computers so that any involved block cannot be altered retroactively, without the alteration of all subsequent blocks.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Introduction to Blockchain", + "url": 
"https://chain.link/education-hub/blockchain", + "type": "article" + }, + { + "title": "Blockchain Explained", + "url": "https://www.investopedia.com/terms/b/blockchain.asp", + "type": "article" + }, + { + "title": "An Elementary and Slightly Distilled Introduction to Blockchain", + "url": "https://markpetherbridge.co.uk/blog/an-elementary-and-slightly-distilled-introduction-to-blockchain/", + "type": "article" + }, + { + "title": "Explore top posts about Blockchain", + "url": "https://app.daily.dev/tags/blockchain?ref=roadmapsh", + "type": "article" + }, + { + "title": "How does a blockchain work?", + "url": "https://youtu.be/SSo_EIwHSd4", + "type": "video" + }, + { + "title": "What Is a Blockchain? | Blockchain Basics for Developers", + "url": "https://youtu.be/4ff9esY_4aU", + "type": "video" + } + ] + }, + "Atv-4Q7edtvfySs_XhgEq": { + "title": "Blockchain Structure", + "description": "The blockchain gets its name from its underlying structure. The blockchain is organized as a series of “blocks” that are “chained” together.\n\nUnderstanding blockchain security requires understanding how the blockchain is put together. 
This requires knowing what the blocks and chains of blockchain are and why they are designed the way that they are.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Blockchain Basics | Coursera", + "url": "https://www.coursera.org/lecture/blockchain-basics/blockchain-structure-5rj9Z", + "type": "course" + }, + { + "title": "Blockchain Architecture Basics: Components, Structure, Benefits & Creation", + "url": "https://mlsdev.com/blog/156-how-to-build-your-own-blockchain-architecture", + "type": "article" + }, + { + "title": "Blockchain Architecture 101: Components, Structure, and Benefits", + "url": "https://komodoplatform.com/en/academy/blockchain-architecture-101/", + "type": "article" + }, + { + "title": "Blockchain structure", + "url": "https://resources.infosecinstitute.com/topic/blockchain-structure/", + "type": "article" + }, + { + "title": "Explore top posts about Blockchain", + "url": "https://app.daily.dev/tags/blockchain?ref=roadmapsh", + "type": "article" + } + ] + }, + "9z0Fqn1qqN8eo6s7_kwcb": { + "title": "Basic Blockchain Operations", + "description": "Operations in a decentralized networks are the responsibility of the peer participants and their respective computational nodes. 
These are specific for each type of blockchain.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Blockchain Basics | Coursera", + "url": "https://www.coursera.org/lecture/blockchain-basics/basic-operations-OxILB", + "type": "course" + }, + { + "title": "Blockchain Basics: Structure, Operations, and the Bitcoin Blockchain", + "url": "https://www.mlq.ai/blockchain-basics/", + "type": "article" + }, + { + "title": "Bitcoin blockchain transactions | Bitcoin Developer", + "url": "https://developer.bitcoin.org/reference/transactions.html", + "type": "article" + }, + { + "title": "Ethereum blockchain transactions | ethereum.org", + "url": "https://ethereum.org/en/developers/docs/transactions/", + "type": "article" + }, + { + "title": "Explore top posts about Blockchain", + "url": "https://app.daily.dev/tags/blockchain?ref=roadmapsh", + "type": "article" + }, + { + "title": "How Bitcoin blockchain actually work (Video)", + "url": "https://www.youtube.com/watch?v=bBC-nXj3Ng4", + "type": "video" + } + ] + }, + "WD2JH4X4tEE4J0W0XFQ_4": { + "title": "Applications and Uses", + "description": "Blockchain applications go far beyond cryptocurrency and bitcoin. 
With its ability to create more transparency and fairness while also saving businesses time and money, the technology is impacting a variety of sectors in ways that range from how contracts are enforced to making government work more efficiently.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Blockchain Use Cases and Applications by Industry", + "url": "https://consensys.net/blockchain-use-cases/", + "type": "article" + }, + { + "title": "Top 10 Real-World Applications Of Blockchain Technology", + "url": "https://www.blockchain-council.org/blockchain/top-10-real-world-applications-of-blockchain-technology/", + "type": "article" + }, + { + "title": "Ethereum blockchain transactions | ethereum.org", + "url": "https://ethereum.org/en/developers/docs/transactions/", + "type": "article" + }, + { + "title": "E34 Blockchain Applications and Real-World Use Cases Disrupting the Status Quo", + "url": "https://builtin.com/blockchain/blockchain-applications", + "type": "article" + } + ] + }, + "edO8iEehsZtYavlsEKhOy": { + "title": "What is Blockchain", + "description": "A blockchain is a decentralized, distributed, and oftentimes public, digital ledger consisting of records called blocks that is used to record transactions across many computers so that any involved block cannot be altered retroactively, without the alteration of all subsequent blocks.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Blockchain Explained", + "url": "https://www.investopedia.com/terms/b/blockchain.asp", + "type": "article" + }, + { + "title": "What is decentralization?", + "url": "https://aws.amazon.com/blockchain/decentralization-in-blockchain/", + "type": "article" + }, + { + "title": "Explore top posts about Blockchain", + "url": "https://app.daily.dev/tags/blockchain?ref=roadmapsh", + "type": "article" + }, + { + "title": "How does a blockchain work?", + "url": "https://youtu.be/SSo_EIwHSd4", + "type": "video" + }, + { + "title": 
"What Is a Blockchain? | Blockchain Basics for Developers", + "url": "https://youtu.be/4ff9esY_4aU", + "type": "video" + } + ] + }, + "H9jvIlxX6P-C_cgPfZop4": { + "title": "Decentralization", + "description": "In blockchain, decentralization refers to the transfer of control and decision-making from a centralized entity (individual, organization, or group thereof) to a distributed network. Decentralized networks strive to reduce the level of trust that participants must place in one another, and deter their ability to exert authority or control over one another in ways that degrade the functionality of the network.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is decentralization?", + "url": "https://aws.amazon.com/blockchain/decentralization-in-blockchain/", + "type": "article" + }, + { + "title": "What is Decentralization in Blockchain?", + "url": "https://www.blockchain-council.org/blockchain/what-is-decentralization-in-blockchain/", + "type": "article" + }, + { + "title": "Explore top posts about Decentralization", + "url": "https://app.daily.dev/tags/decentralization?ref=roadmapsh", + "type": "article" + } + ] + }, + "Nc9AH6L7EqeQxh0m6Hddz": { + "title": "Why it matters?", + "description": "", + "links": [] + }, + "bA4V_9AbV3uQi3qrtLWk0": { + "title": "General Blockchain Knowledge", + "description": "Visit the following resources to learn more:", + "links": [ + { + "title": "The Complete Course On Understanding Blockchain Technology", + "url": "https://www.udemy.com/course/understanding-blockchain-technology/", + "type": "course" + }, + { + "title": "Explore top posts about Blockchain", + "url": "https://app.daily.dev/tags/blockchain?ref=roadmapsh", + "type": "article" + }, + { + "title": "Blockchain Technology Explained", + "url": "https://youtu.be/qOVAbKKSH10", + "type": "video" + } + ] + }, + "B7niNXMOTbHn_1ixKQ8ri": { + "title": "Mining and Incentive Models", + "description": "Mining is the process of adding 
transaction details to the Blockchain, like sender address, hash value, etc. The Blockchain contains all the history of the transactions that have taken place in the past for record purposes and it is stored in such a manner that, it can’t be manipulated.\n\nAn Incentive is basically a reward given to a Blockchain Miner for speeding up the transactions and making correct decisions while processing the complete transaction securely.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Ethereum Consensus Mechanisms", + "url": "https://ethereum.org/en/developers/docs/consensus-mechanisms/", + "type": "article" + }, + { + "title": "Solana Staking Rewards", + "url": "https://docs.solana.com/implemented-proposals/staking-rewards", + "type": "article" + } + ] + }, + "E9HR_voxQ2a2tvWUuva_p": { + "title": "Decentralization vs Trust", + "description": "Blockchains, cryptocurrency, smart contracts, and oracles have emerged as new technologies for coordinating social and economic activities in a more secure, transparent, and accessible manner. 
Most importantly, these technologies are revealing the power of cryptographic guarantees—what we often call cryptographic truth—in restoring users’ trust in everyday interactions.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What Crypto Is Really About", + "url": "https://blog.chain.link/what-crypto-is-really-about/", + "type": "article" + }, + { + "title": "Ethereum Consensus Mechanisms", + "url": "https://ethereum.org/en/developers/docs/consensus-mechanisms/", + "type": "article" + }, + { + "title": "Explore top posts about Decentralization", + "url": "https://app.daily.dev/tags/decentralization?ref=roadmapsh", + "type": "article" + }, + { + "title": "The Superiority of Cryptographic Truth", + "url": "https://youtu.be/AEtBPbmIRKQ", + "type": "video" + } + ] + }, + "L08v-78UsEhcg-mZtMVuk": { + "title": "Blockchain Forking", + "description": "A fork happens whenever a community makes a change to the blockchain’s protocol, or basic set of rules.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Blockchain Fork", + "url": "https://en.wikipedia.org/wiki/Fork_(blockchain)", + "type": "article" + }, + { + "title": "What is a fork?", + "url": "https://www.coinbase.com/learn/crypto-basics/what-is-a-fork", + "type": "article" + }, + { + "title": "What Is a Hard Fork?", + "url": "https://www.investopedia.com/terms/h/hard-fork.asp", + "type": "article" + }, + { + "title": "Explore top posts about Blockchain", + "url": "https://app.daily.dev/tags/blockchain?ref=roadmapsh", + "type": "article" + } + ] + }, + "s1QqQc0We5yQaNF3Ogt4k": { + "title": "Cryptocurrencies", + "description": "A cryptocurrency, crypto-currency, or crypto is a digital currency designed to work as a medium of exchange through a blockchain, which is not reliant on any central authority, such as a government or bank, to uphold or maintain it.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What Is Cryptocurrency?", 
+ "url": "https://www.investopedia.com/terms/c/cryptocurrency.asp", + "type": "article" + }, + { + "title": "Cryptocurrency: What It Is and How It Works", + "url": "https://www.nerdwallet.com/article/investing/cryptocurrency", + "type": "article" + }, + { + "title": "Explore top posts about Crypto", + "url": "https://app.daily.dev/tags/crypto?ref=roadmapsh", + "type": "article" + }, + { + "title": "How Cryptocurrency actually works.", + "url": "https://youtu.be/rYQgy8QDEBI", + "type": "video" + } + ] + }, + "FSThY0R1OAZCIL98W3AMj": { + "title": "Cryptowallets", + "description": "A cryptocurrency wallet is an application that functions as a wallet for your cryptocurrency.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is a Cryptocurrency Wallet?", + "url": "https://www.investopedia.com/terms/b/bitcoin-wallet.asp", + "type": "article" + }, + { + "title": "What is a Crypto Wallet? A Beginner’s Guide", + "url": "https://crypto.com/university/crypto-wallets", + "type": "article" + } + ] + }, + "e_I-4Q6_qIW09Hcn-pgKm": { + "title": "Cryptography", + "description": "Cryptography, or cryptology, is the practice and study of techniques for secure communication in the presence of adversarial behavior.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Cryptography", + "url": "https://en.wikipedia.org/wiki/Cryptography", + "type": "article" + }, + { + "title": "What is Cryptography", + "url": "https://www.synopsys.com/glossary/what-is-cryptography.html", + "type": "article" + }, + { + "title": "Explore top posts about Cryptography", + "url": "https://app.daily.dev/tags/cryptography?ref=roadmapsh", + "type": "article" + }, + { + "title": "Asymmetric Encryption - Simply explained", + "url": "https://youtu.be/AQDCe585Lnc", + "type": "video" + }, + { + "title": "What is Cryptography?", + "url": "https://www.youtube.com/watch?v=6_Cxj5WKpIw", + "type": "video" + } + ] + }, + "_BkpK9qgp9up8nXNH7q4m": { + "title": 
"Consensus Protocols", + "description": "Consensus for blockchain is a procedure in which the peers of a Blockchain network reach agreement about the present state of the data in the network. Through this, consensus algorithms establish reliability and trust in the Blockchain network.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Consensus Mechanisms in Blockchain: A Beginner’s Guide", + "url": "https://crypto.com/university/consensus-mechanisms-explained", + "type": "article" + }, + { + "title": "Consensus Mechanisms", + "url": "https://ethereum.org/en/developers/docs/consensus-mechanisms/", + "type": "article" + }, + { + "title": "What Is a Consensus Mechanism?", + "url": "https://www.coindesk.com/learn/what-is-a-consensus-mechanism/", + "type": "article" + } + ] + }, + "nNPa6jKRUaitmHwBip_LE": { + "title": "Blockchain Interoperability", + "description": "The concept of “blockchain interoperability” refers to the ability of different blockchain networks to exchange and leverage data between one another and to move unique types of digital assets between the networks’ respective blockchains.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Cross-Chain Interoperability: What it Means for Blockchain", + "url": "https://www.gemini.com/cryptopedia/why-is-interoperability-important-for-blockchain", + "type": "article" + }, + { + "title": "Blockchain Interoperability : Why Is Cross Chain Technology Important?", + "url": "https://101blockchains.com/blockchain-interoperability/", + "type": "article" + }, + { + "title": "Blockchain Interoperability – Understanding Cross-Chain Technology", + "url": "https://www.blockchain-council.org/blockchain/blockchain-interoperability/", + "type": "article" + }, + { + "title": "Explore top posts about Blockchain", + "url": "https://app.daily.dev/tags/blockchain?ref=roadmapsh", + "type": "article" + } + ] + }, + "JYHK95Xr0R1MVCda1Epl6": { + "title": "Solana", + "description": 
"Solana is a public blockchain platform with smart contract functionality. Its native cryptocurrency is SOL.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is Solana, and how does it work?", + "url": "https://cointelegraph.com/news/what-is-solana-and-how-does-it-work", + "type": "article" + }, + { + "title": "Beginners Guide To Solana", + "url": "https://solana.com/news/getting-started-with-solana-development", + "type": "article" + }, + { + "title": "Solana Introduction", + "url": "https://docs.solana.com/introduction", + "type": "article" + }, + { + "title": "Solana Whitepaper", + "url": "https://solana.com/solana-whitepaper.pdf", + "type": "article" + }, + { + "title": "Solana Architecture", + "url": "https://docs.solana.com/cluster/overview", + "type": "article" + }, + { + "title": "Start Building Solana!", + "url": "https://beta.solpg.io/?utm_source=solana.com", + "type": "article" + }, + { + "title": "Explore top posts about Solana", + "url": "https://app.daily.dev/tags/solana?ref=roadmapsh", + "type": "article" + } + ] + }, + "ug4FB4RXItHU0ADnisvve": { + "title": "TON", + "description": "TON is a fully decentralized layer-1 blockchain designed by Telegram to onboard billions of users. 
It boasts ultra-fast transactions, tiny fees, easy-to-use apps, and is environmentally friendly.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "TON Telegram integration highlights synergy of blockchain community", + "url": "https://cointelegraph.com/news/ton-telegram-integration-highlights-synergy-of-blockchain-community", + "type": "article" + }, + { + "title": "Start building on The Open Network", + "url": "https://ton.org/dev", + "type": "article" + }, + { + "title": "TON Introduction", + "url": "https://ton.org/docs/learn/introduction", + "type": "article" + }, + { + "title": "Blockchain analysis", + "url": "https://ton.org/analysis", + "type": "article" + } + ] + }, + "tSJyp46rkJcOtDqVpJX1s": { + "title": "EVM-Based", + "description": "The Ethereum Virtual Machine (EVM) is a dedicated software virtual stack that executes smart contract bytecode and is integrated into each Ethereum node. Simply said, EVM is a software framework that allows developers to construct Ethereum-based decentralized applications (DApps). 
All Ethereum accounts and smart contracts are stored on this virtual computer.\n\nMany blockchains have forked the Ethereum blockchain and added functionality on top; these blockchains are referred to as EVM-based blockchains.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is Ethereum Virtual Machine?", + "url": "https://moralis.io/evm-explained-what-is-ethereum-virtual-machine/", + "type": "article" + }, + { + "title": "Explore top posts about EVM", + "url": "https://app.daily.dev/tags/evm?ref=roadmapsh", + "type": "article" + }, + { + "title": "Understanding the Ethereum Virtual Machine (EVM): Concepts and Architecture", + "url": "https://www.youtube.com/watch?v=kCswGz9naZg", + "type": "video" + } + ] + }, + "EQgb4LqXnyq3gOX7Fb85s": { + "title": "Blockchains", + "description": "Blockchain systems vary considerably in their design, particularly with regard to the consensus mechanisms used to perform the essential task of verifying network data.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Types of Blockchains: PoW, PoS, and Private", + "url": "https://www.gemini.com/cryptopedia/blockchain-types-pow-pos-private", + "type": "article" + }, + { + "title": "Explore top posts about Blockchain", + "url": "https://app.daily.dev/tags/blockchain?ref=roadmapsh", + "type": "article" + } + ] + }, + "JLXIbP-y8C2YktIk3R12m": { + "title": "Ethereum", + "description": "Ethereum is a programmable blockchain platform with the capacity to support smart contracts, dapps (decentralized apps), and other DeFi projects.
The Ethereum native token is the Ether (ETH), and it’s used to fuel operations on the blockchain.\n\nThe Ethereum platform launched in 2015, and it’s now the second largest form of crypto next to Bitcoin (BTC).\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Ethereum whitepaper", + "url": "https://ethereum.org/en/whitepaper/", + "type": "article" + }, + { + "title": "Intro to Ethereum", + "url": "https://ethereum.org/en/developers/docs/intro-to-ethereum/", + "type": "article" + }, + { + "title": "A gentle introduction to Ethereum", + "url": "https://bitsonblocks.net/2016/10/02/gentle-introduction-ethereum/", + "type": "article" + }, + { + "title": "Explore top posts about Ethereum", + "url": "https://app.daily.dev/tags/ethereum?ref=roadmapsh", + "type": "article" + } + ] + }, + "JNilHFQnnVDOz-Gz6eNo5": { + "title": "Polygon", + "description": "Polygon, formerly known as the Matic Network, is a protocol that allows anyone to create and exchange value, powered by zero-knowledge technology. 
Polygon provides multiple solutions including:
While the platform solves problems surrounding transaction fees and speed, it also means that the Gnosis chain is less decentralized, as it is somewhat reliant on the Ethereum chain.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Gnosis whitepaper", + "url": "https://blockchainlab.com/pdf/gnosis_whitepaper.pdf", + "type": "article" + }, + { + "title": "Gnosis overview", + "url": "https://developers.gnosischain.com/#gnosis-chain", + "type": "article" + } + ] + }, + "PkRAYBZQAUAHxWEeCCX4U": { + "title": "Huobi Eco Chain", + "description": "Huobi's ECO Chain (also known as HECO) is a public blockchain that provides developers with a low-cost onchain environment for running decentralized apps (dApps) of smart contracts and storing digital assets.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Huobi Eco Chain whitepaper", + "url": "https://www.hecochain.com/developer.133bd45.pdf", + "type": "article" + }, + { + "title": "Introduction to HECO Chain", + "url": "https://docs.hecochain.com/#/", + "type": "article" + } + ] + }, + "txQ9U1wcnZkQVh6B49krk": { + "title": "Avalanche", + "description": "Avalanche describes itself as an “open, programmable smart contracts platform for decentralized applications.” What does that mean? 
Like many other decentralized protocols, Avalanche has its own token called AVAX, which is used to pay transaction fees and can be staked to secure the network.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Avalanche whitepaper", + "url": "https://assets.website-files.com/5d80307810123f5ffbb34d6e/6008d7bbf8b10d1eb01e7e16_Avalanche%20Platform%20Whitepaper.pdf", + "type": "article" + }, + { + "title": "Avalanche official website", + "url": "https://www.avax.network/", + "type": "article" + } + ] + }, + "trcGwYcFW5LQUUrAcbUf_": { + "title": "Fantom", + "description": "Fantom is a decentralized, open-source smart contract platform that supports decentralized applications (dApps) and digital assets. It's one of many blockchain networks built as a faster, more efficient alternative to Ethereum, it uses the proof-of-stake consensus mechanism.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Fantom whitepaper", + "url": "https://arxiv.org/pdf/1810.10360.pdf", + "type": "article" + }, + { + "title": "Fantom overview", + "url": "https://docs.fantom.foundation/", + "type": "article" + } + ] + }, + "VVbvueVMJKLUoJYhbJB1z": { + "title": "Moonbeam / Moonriver", + "description": "Moonbeam is a Polkadot network parachain that promises cross-chain interoperability between the Ethereum and Polkadot . More specifically, Moonbeam is a smart contract platform that enables developers to move dApps between the two networks without having to rewrite code or redeploy infrastructure.\n\nMoonriver is an incentivized testnet. It enables developers to create, test, and adjust their protocols prior to launching on Moonbeam. 
Moonbeam is the mainnet of the ecosystem.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "About Moonbream", + "url": "https://docs.moonbeam.network/learn/platform/networks/moonbeam/", + "type": "article" + }, + { + "title": "Moonbeam Vision", + "url": "https://docs.moonbeam.network/learn/platform/vision/", + "type": "article" + } + ] + }, + "YC385OLECWjpZjVeWKksO": { + "title": "Everscale", + "description": "Everscale is a layer-1 PoS blockchain network of the 5th generation. It is one of the most technologically advanced blockchain networks, and that is not a marketing exaggeration. Everscale incorporates all the blockchain innovations and concepts of recent years. Its versatility helps it develop as a decentralized hub for many blockchains and resource-demanding applications such as GameFi, DeFi, micro-transactions, real-time bidding, etc.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Everscale site", + "url": "https://everscale.network", + "type": "article" + }, + { + "title": "Everscale Whitepaper", + "url": "https://everscale.network/docs/everscale-whitepaper.pdf", + "type": "article" + }, + { + "title": "Documentation", + "url": "https://docs.everscale.network/", + "type": "article" + }, + { + "title": "Guide", + "url": "https://everscale.guide/", + "type": "article" + } + ] + }, + "5MGtl00EEZdSnJdrNYPJ7": { + "title": "Gosh", + "description": "Gosh is a development platform that is purpose-built for securing the software supply chain and extracting the value locked in projects. It is the first blockchain-based platform for software development, which allows developers and businesses to create products in a familiar, straightforward, and safe way.\n\nOn Gosh, every operation, commit, and transaction is trustless, traceable, and transparent. 
This means that developers can build composable, censorship-resistant repositories, and monetize their open source projects by turning them into a DAO.\n\nGosh is built on cryptography, decentralization, and consensus, which means that repositories have no owner and are managed in a decentralized way. Developers can use Gosh like they use Git and turn any Gosh repository into a DAO and configure it to suit their needs. They can also fund their DAO and use DeFi applications to incentivize code security.\n\nWith Gosh, builds are no longer at risk. From source code on Gosh to Docker container, developers can be sure that their build is safe. Mission-critical applications can also write their scripts as formally verified smart contracts to get rid of holes in the CI/CD process.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Gosh site", + "url": "https://gosh.sh/", + "type": "article" + }, + { + "title": "Gosh Documentation", + "url": "https://docs.gosh.sh/", + "type": "article" + }, + { + "title": "Gosh Web", + "url": "https://app.gosh.sh/", + "type": "article" + }, + { + "title": "Docker Extension", + "url": "https://docs.gosh.sh/working-with-gosh/docker-extension/", + "type": "article" + }, + { + "title": "Git Remote Helper", + "url": "https://docs.gosh.sh/working-with-gosh/git-remote-helper/", + "type": "article" + } + ] + }, + "4tAyunbYVwlbzybuVq7fr": { + "title": "TON", + "description": "TON is a fully decentralized layer-1 blockchain designed by Telegram to onboard billions of users. 
It boasts ultra-fast transactions, tiny fees, easy-to-use apps, and is environmentally friendly.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "TON Telegram integration highlights synergy of blockchain community", + "url": "https://cointelegraph.com/news/ton-telegram-integration-highlights-synergy-of-blockchain-community", + "type": "article" + }, + { + "title": "Start building on The Open Network", + "url": "https://ton.org/dev", + "type": "article" + }, + { + "title": "TON Introduction", + "url": "https://ton.org/docs/learn/introduction", + "type": "article" + }, + { + "title": "Blockchain analysis", + "url": "https://ton.org/analysis", + "type": "article" + } + ] + }, + "3HCpgWWPIkhK3gPRJuJQf": { + "title": "Venom", + "description": "The Venom Foundation is the first crypto foundation licensed in UAE's ADGM and is set to launch its blockchain platform soon. The platform uses asynchronous blockchain technology of dynamical sharding, which enables boundless scalability, higher security guarantees with decentralization, and manages the gross data transaction flows without faltering by increasing fees and transaction times. The foundation aims to develop and support a self-sufficient blockchain ecosystem with non-custodial wallet options, transparent transaction histories, interchain transactions, staking on validator nodes, and a native decentralized exchange, among others.\n\nThe MENA region entrepreneurs are considered pioneers in global crypto trend adoption, and the foundation's customizable approach is well-suited to bridging different dimensions of market participants. The platform has a panel of industry leaders and seasoned investors, and the project is generating attention in MENA due to its transactional management possibilities, higher security, and inbound governmental database projects. 
The foundation will work with ecosystem participants to offer new products such as NFT marketplace, derivative exchange, fiat-backed stablecoin, and others to come with the potential to become a bridge towards wide adoption of CBDC in the UAE, other MENA countries and globally.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Venom site", + "url": "https://venom.foundation", + "type": "article" + }, + { + "title": "Venom whitepaper", + "url": "https://venom.foundation/Venom_Whitepaper.pdf", + "type": "article" + }, + { + "title": "Venom Documentation", + "url": "https://docs.venom.foundation/", + "type": "article" + }, + { + "title": "Explore Grants", + "url": "https://venom.foundation/#explore_grants", + "type": "article" + } + ] + }, + "miBEG3x_foKYxwfX4Tr4f": { + "title": "TVM-Based", + "description": "", + "links": [] + }, + "i_Dw3kUZ7qKPG-tk-sFPf": { + "title": "L2 Blockchains", + "description": "Layer-2 refers to a network or technology that operates on top of an underlying blockchain protocol to improve its scalability and efficiency.\n\nThis category of scaling solutions entails shifting a portion of Ethereum's transactional burden to an adjacent system architecture, which then handles the brunt of the network’s processing and only subsequently reports back to Ethereum to finalize its results.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Layer-1 and Layer-2 Blockchain Scaling Solutions", + "url": "https://www.gemini.com/cryptopedia/blockchain-layer-2-network-layer-1-network", + "type": "article" + }, + { + "title": "Layer 2 - Binance Academy", + "url": "https://academy.binance.com/en/glossary/layer-2", + "type": "article" + }, + { + "title": "Develop a ZK-powered Layer 2 with the Polygon CDK open-source framework", + "url": "https://wiki.polygon.technology/docs/cdk/", + "type": "article" + }, + { + "title": "Explore top posts about Blockchain", + "url": 
"https://app.daily.dev/tags/blockchain?ref=roadmapsh", + "type": "article" + } + ] + }, + "ixTIn2Uhs-i5-UPt9jKAa": { + "title": "Arbitrum", + "description": "Arbitrum aims to reduce transaction fees and congestion by moving as much computation and data storage off of Ethereum's main blockchain (layer 1) as it can. Storing data off of Ethereum's blockchain is known as Layer 2 scaling solutions.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Arbitrum whitepaper", + "url": "https://www.usenix.org/system/files/conference/usenixsecurity18/sec18-kalodner.pdf", + "type": "article" + }, + { + "title": "Inside Arbitrum", + "url": "https://developer.offchainlabs.com/docs/Inside_Arbitrum", + "type": "article" + } + ] + }, + "Ib9STGxQa8yeoB-GFeGDE": { + "title": "Moonbeam / Moonriver", + "description": "Moonbeam is a Polkadot network parachain that promises cross-chain interoperability between the Ethereum and Polkadot . More specifically, Moonbeam is a smart contract platform that enables developers to move dApps between the two networks without having to rewrite code or redeploy infrastructure.\n\nMoonriver is an incentivized testnet. It enables developers to create, test, and adjust their protocols prior to launching on Moonbeam. Moonbeam is the mainnet of the ecosystem.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "About Moonbeam", + "url": "https://docs.moonbeam.network/learn/platform/networks/moonbeam/", + "type": "article" + }, + { + "title": "Moonbeam Vision", + "url": "https://docs.moonbeam.network/learn/platform/vision/", + "type": "article" + } + ] + }, + "hlcavpstLnXkJcjccQUL8": { + "title": "TVM-Based", + "description": "TVM-based blockchain is a type of blockchain that uses the Telegram Open Network Virtual Machine (TVM) for executing smart contracts. 
This allows for fast and efficient execution of smart contracts and enables developers to create decentralized applications.\n\nBoC stands for Bag of Cells, and it refers to the data structure used in the TVM-based blockchain to store all the information related to a smart contract. This includes the code of the contract, its state, and other relevant data. The Bag of Cells is a highly efficient data structure that allows for fast and secure storage of smart contract data.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Original specification", + "url": "https://ton.org/tvm.pdf", + "type": "article" + }, + { + "title": "Everscale VM specification", + "url": "https://docs.everscale.network/tvm.pdf", + "type": "article" + } + ] + }, + "sK8G-41D3EfYNSLFJ3XYf": { + "title": "Oracles", + "description": "A blockchain oracle is a third-party service that connects smart contracts with the outside world, primarily to feed information in from the world, but also the reverse. 
Information from the world encapsulates multiple sources so that decentralized knowledge is obtained.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Blockchain Oracle", + "url": "https://en.wikipedia.org/wiki/Blockchain_oracle", + "type": "article" + }, + { + "title": "What Is a Blockchain Oracle?", + "url": "https://chain.link/education/blockchain-oracles", + "type": "article" + }, + { + "title": "Explore top posts about Blockchain", + "url": "https://app.daily.dev/tags/blockchain?ref=roadmapsh", + "type": "article" + } + ] + }, + "JbgBwG6KmeTdyle9U6WAv": { + "title": "Hybrid Smart Contracts", + "description": "Hybrid smart contracts combine code running on the blockchain (on-chain) with data and computation from outside the blockchain (off-chain) provided by Decentralized Oracle Networks.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Hybrid Smart Contracts Explained", + "url": "https://blog.chain.link/hybrid-smart-contracts-explained/", + "type": "article" + }, + { + "title": "A complete guide to understand hybrid smart contracts", + "url": "https://www.leewayhertz.com/hybrid-smart-contracts/", + "type": "article" + } + ] + }, + "omQOhNfMO09pBc7oy76Wo": { + "title": "Chainlink", + "description": "Chainlink is a decentralized network of oracles that enables smart contracts to securely interact with real-world data and services that exist outside of blockchain networks.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What Is Chainlink? 
A Beginner’s Guide", + "url": "https://blog.chain.link/what-is-chainlink/", + "type": "article" + }, + { + "title": "What Is Chainlink in 5 Minutes", + "url": "https://www.gemini.com/cryptopedia/what-is-chainlink-and-how-does-it-work", + "type": "article" + } + ] + }, + "CzfsAQIk3zIsDaDAorG9K": { + "title": "Oracle Networks", + "description": "By leveraging many different data sources, and implementing an oracle system that isn’t controlled by a single entity, decentralized oracle networks provide an increased level of security and fairness to smart contracts.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Decentralized Oracle Networks", + "url": "https://medium.com/coinmonks/decentralized-oracle-networks-9fead28f5fe5", + "type": "article" + }, + { + "title": "A Beginner’s Guide To The Evolution Of Decentralized Oracle Networks", + "url": "https://chainlinktoday.com/a-beginners-guide-to-the-evolution-of-decentralized-oracle-networks/", + "type": "article" + }, + { + "title": "Understanding Blockchain Oracle", + "url": "https://chain.link/education/blockchain-oracles", + "type": "article" + }, + { + "title": "Explore top posts about Oracle", + "url": "https://app.daily.dev/tags/oracle?ref=roadmapsh", + "type": "article" + } + ] + }, + "BV6lEwCAKaYxSPWD0LV_d": { + "title": "Smart Contracts", + "description": "A smart contract is a computer program or a transaction protocol that is intended to automatically execute, control or document legally relevant events and actions according to the terms of a contract or an agreement.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What Are Smart Contracts and How Do They Work?", + "url": "https://chain.link/education/smart-contracts", + "type": "article" + }, + { + "title": "Explore top posts about Smart Contracts", + "url": "https://app.daily.dev/tags/smart-contracts?ref=roadmapsh", + "type": "article" + }, + { + "title": "Smart contracts - Simply Explained", + 
"url": "https://youtu.be/ZE2HxTmxfrI", + "type": "video" + } + ] + }, + "chaIKoE1uE8rpZLkDSfV-": { + "title": "Solidity", + "description": "Solidity is an object-oriented programming language created specifically by Ethereum Network team for constructing smart contracts on various blockchain platforms, most notably, Ethereum.\n\n* It's used to create smart contracts that implements business logic and generate a chain of transaction records in the blockchain system.\n* It acts as a tool for creating machine-level code and compiling it on the Ethereum Vitural Machine (EVM).\n\nLike any other programming languages, Solidity also has variables, functions, classes, arithmetic operations, string manipulation, and many more.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Solidity Programming Language", + "url": "https://soliditylang.org/", + "type": "article" + }, + { + "title": "Solidity Tutorial", + "url": "https://www.tutorialspoint.com/solidity/index.htm", + "type": "article" + }, + { + "title": "Explore top posts about Solidity", + "url": "https://app.daily.dev/tags/solidity?ref=roadmapsh", + "type": "article" + }, + { + "title": "Solidity Course by FreeCodeCamp", + "url": "https://www.youtube.com/watch?v=ipwxYa-F1uY", + "type": "video" + }, + { + "title": "Solidity Course by Dapp University", + "url": "https://www.youtube.com/watch?v=EhPeHeoKF88", + "type": "video" + }, + { + "title": "Learn Blockchain, Solidity, and Full Stack Web3 Development", + "url": "https://youtu.be/gyMwXuJrbJQ", + "type": "video" + } + ] + }, + "jgHa_LeCac0pl6dSADizF": { + "title": "Vyper", + "description": "Vyper is a contract-oriented, pythonic programming language that targets the Ethereum Virtual Machine (EVM).\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Vyper Programming Language", + "url": "https://vyper.readthedocs.io/en/stable/", + "type": "article" + }, + { + "title": "Learn Vyper in Y Minutes", + "url": 
"https://learnxinyminutes.com/docs/vyper/", + "type": "article" + } + ] + }, + "Larbhjzi-MnPQKH1Pzn2R": { + "title": "Rust", + "description": "Rust is a multi-paradigm, general-purpose programming language. Rust emphasizes performance, type safety, and concurrency. It is popular on smart contract chains Solana and Polkadot.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Rust Programming Language", + "url": "https://www.rust-lang.org/", + "type": "article" + }, + { + "title": "How to write and deploy a smart contract in Rust", + "url": "https://docs.near.org/tutorials/nfts/introduction", + "type": "article" + }, + { + "title": "Explore top posts about Rust", + "url": "https://app.daily.dev/tags/rust?ref=roadmapsh", + "type": "article" + } + ] + }, + "IXs4nUzy_A5vBjI_44kaT": { + "title": "Unit Tests", + "description": "Unit testing involves testing individual components in a smart contract for correctness. A unit test is simple, quick to run, and provides a clear idea of what went wrong if the test fails.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Smart Contracts Unit Testing", + "url": "https://ethereum.org/en/developers/docs/smart-contracts/testing/#unit-testing", + "type": "article" + }, + { + "title": "Tips for Unit Testing Ethereum Smart Contracts in Solidity", + "url": "https://betterprogramming.pub/a-few-tips-for-unit-testing-ethereum-smart-contract-in-solidity-d804062068fb", + "type": "article" + }, + { + "title": "Explore top posts about Testing", + "url": "https://app.daily.dev/tags/testing?ref=roadmapsh", + "type": "article" + } + ] + }, + "QNfnbUGkT3N-pj5epnHcM": { + "title": "Integration Tests", + "description": "Integration tests validate interactions between multiple components. 
For smart contract testing this can mean interactions between different components of a single contract, or across multiple contracts.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Explore top posts about Testing", + "url": "https://app.daily.dev/tags/testing?ref=roadmapsh", + "type": "article" + }, + { + "title": "Unit tests vs integration tests | Smart contract testing course", + "url": "https://youtu.be/GxnX9k8i0zM", + "type": "video" + } + ] + }, + "4V-dj9x9hSAAGCxpBWsbE": { + "title": "Code Coverage", + "description": "Code coverage is a metric that can help you understand how much of your source is tested. It's a very useful metric that can help you assess the quality of your test suite.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Testing Smart Contracts", + "url": "https://ethereum.org/en/developers/docs/smart-contracts/testing/", + "type": "article" + }, + { + "title": "Smart Contract Code Coverage In Hardhat", + "url": "https://medium.com/coinmonks/smart-contract-code-coverage-in-hardhat-d4a5ff6c9ba6", + "type": "article" + }, + { + "title": "Explore top posts about General Programming", + "url": "https://app.daily.dev/tags/general-programming?ref=roadmapsh", + "type": "article" + } + ] + }, + "7goaYnedUlfgfl5qApoO2": { + "title": "Deployment", + "description": "Unlike other software, smart contracts don’t run on a local computer or a remote server: they live on the blockchain. 
Thus, interacting with them is different from more traditional applications.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Deploying Smart Contracts", + "url": "https://ethereum.org/en/developers/docs/smart-contracts/deploying/", + "type": "article" + }, + { + "title": "Deploying and interacting with smart contracts", + "url": "https://docs.openzeppelin.com/learn/deploying-and-interacting", + "type": "article" + }, + { + "title": "Explore top posts about CI/CD", + "url": "https://app.daily.dev/tags/cicd?ref=roadmapsh", + "type": "article" + } + ] + }, + "H3jNM_0sJrB7ZbYzrVhF7": { + "title": "Monitoring", + "description": "Monitoring smart contracts allow their authors to view its activity and interactions based on generated transactions and events, allowing verification of the contract's intended purpose and functionality.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Monitoring Smart Contracts", + "url": "https://consensys.github.io/smart-contract-best-practices/development-recommendations/solidity-specific/event-monitoring/", + "type": "article" + }, + { + "title": "Explore top posts about Monitoring", + "url": "https://app.daily.dev/tags/monitoring?ref=roadmapsh", + "type": "article" + } + ] + }, + "OotJnHgm622NQJc2WRI7c": { + "title": "Upgrades", + "description": "Smart contracts are immutable by default. Once they are created there is no way to alter them, effectively acting as an unbreakable contract among participants. However, for some scenarios, it is desirable to be able to modify them.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Upgrading Ethereum contracts", + "url": "https://ethereum.org/en/developers/docs/smart-contracts/upgrading/", + "type": "article" + }, + { + "title": "Upgrading smart contracts", + "url": "https://docs.openzeppelin.com/learn/upgrading-smart-contracts", + "type": "article" + }, + { + "title": "What are Upgradable Smart Contracts? 
Full Guide", + "url": "https://moralis.io/what-are-upgradable-smart-contracts-full-guide/", + "type": "article" + }, + { + "title": "Upgrading your Smart Contracts | A Tutorial & Introduction", + "url": "https://youtu.be/bdXJmWajZRY", + "type": "video" + } + ] + }, + "bjUuL7WALETzgFxL6-ivU": { + "title": "ERC Tokens", + "description": "An ‘Ethereum Request for Comments’ (ERC) is a document that programmers use to write smart contracts on Ethereum Blockchain. They describe rules in these documents that Ethereum-based tokens must comply with.\n\nWhile there are several Ethereum standards. These ERC Ethereum standards are the most well-known and popular: ERC-20, ERC-721, ERC-1155, and ERC-777.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What are Ethereum request for comments (ERC) Standards", + "url": "https://dev.to/envoy_/ks-what-are-ethereum-request-for-comments-erc-standards-5f80", + "type": "article" + }, + { + "title": "ERC-20 Token Standard", + "url": "https://ethereum.org/en/developers/docs/standards/tokens/erc-20/", + "type": "article" + }, + { + "title": "ERC-721 Token Standard (NFTs)", + "url": "https://decrypt.co/resources/erc-721-ethereum-nft-token-standard", + "type": "article" + }, + { + "title": "ERC-1155 Token Standard (Multi-Token)", + "url": "https://decrypt.co/resources/what-is-erc-1155-ethereums-flexible-token-standard", + "type": "article" + } + ] + }, + "SM8Wt3iNM_nncLj69KCuy": { + "title": "Crypto Wallets", + "description": "A cryptocurrency wallet is a device, physical medium, program, or service which stores the public and/or private keys for cryptocurrency transactions. 
In addition to this basic function of storing the keys, a cryptocurrency wallet more often also offers the functionality of encrypting and/or signing information.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is a crypto wallet?", + "url": "https://www.coinbase.com/learn/crypto-basics/what-is-a-crypto-wallet", + "type": "article" + }, + { + "title": "What is a Crypto Wallet? A Beginner’s Guide", + "url": "https://crypto.com/university/crypto-wallets", + "type": "article" + }, + { + "title": "Explore top posts about Crypto", + "url": "https://app.daily.dev/tags/crypto?ref=roadmapsh", + "type": "article" + } + ] + }, + "lXukWXEatsF87EWFSYyOO": { + "title": "IDEs", + "description": "An integrated development environment is a software application that provides comprehensive facilities to computer programmers for software development. An IDE normally consists of at least a source code editor, build automation tools and a debugger.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Integrated Development Environments", + "url": "https://ethereum.org/en/developers/docs/ides/", + "type": "article" + }, + { + "title": "Remix - Ethereum IDE & community", + "url": "https://remix-project.org/", + "type": "article" + }, + { + "title": "Explore top posts about DevTools", + "url": "https://app.daily.dev/tags/devtools?ref=roadmapsh", + "type": "article" + } + ] + }, + "S68IUKs0k_FFHEH97xxs7": { + "title": "Crypto Faucets", + "description": "A crypto faucet lets users earn small crypto rewards by completing simple tasks. The metaphor is based on how even one drop of water from a leaky faucet could eventually fill up a cup. 
There are various kinds of crypto faucets, including bitcoin (BTC), Ethereum (ETH), and BNB faucets.\n\nFaucets are common in development environments where developers obtain testnet crypto in order develop and test their application prior to mainnet deployment.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What Is A Crypto Faucet?", + "url": "https://academy.binance.com/en/articles/what-is-a-crypto-faucet", + "type": "article" + }, + { + "title": "What are crypto faucets and how do they work?", + "url": "https://cointelegraph.com/news/what-are-crypto-faucets-and-how-do-they-work", + "type": "article" + }, + { + "title": "Explore top posts about Crypto", + "url": "https://app.daily.dev/tags/crypto?ref=roadmapsh", + "type": "article" + } + ] + }, + "tvk1Wh04BcFbAAwYWMx27": { + "title": "Centralized Storage", + "description": "Decentralized storage is where data is stored on a decentralized network across multiple locations by users or groups who are incentivized to join, store, and keep data accessible. The servers used are hosted by people, rather than a single company. Anyone is free to join, they are kept honest due to smart contracts, and they are incentivized to participate via tokens.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What Is Decentralized Storage?", + "url": "https://medium.com/@ppio/what-is-decentralized-storage-9c4b761942e2", + "type": "article" + }, + { + "title": "Decentralized Storage", + "url": "https://ethereum.org/en/developers/docs/storage/", + "type": "article" + }, + { + "title": "Explore top posts about Decentralized", + "url": "https://app.daily.dev/tags/decentralized?ref=roadmapsh", + "type": "article" + } + ] + }, + "KRtEN0845lV5e85SOi6oZ": { + "title": "Smart Contract Frameworks", + "description": "Building a full-fledged dapp requires different pieces of technology. 
Software frameworks include many of the needed features or provide easy plugin systems to pick the tools you desire.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "dApp Development Frameworks", + "url": "https://ethereum.org/en/developers/docs/frameworks/", + "type": "article" + }, + { + "title": "A Definitive List of Ethereum Developer Tools - Frameworks", + "url": "https://media.consensys.net/an-definitive-list-of-ethereum-developer-tools-2159ce865974#frameworks", + "type": "article" + }, + { + "title": "Top 10 Smart Contract Developer Tools You Need for 2022", + "url": "https://medium.com/better-programming/top-10-smart-contract-developer-tools-you-need-for-2022-b763f5df689a", + "type": "article" + } + ] + }, + "l110d4NqTQt9lfEoxqXMX": { + "title": "Hardhat", + "description": "Hardhat is an Ethereum development environment. It allows users to compile contracts and run them on a development network. Get Solidity stack traces, console.log and more.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Hardhat Overview", + "url": "https://hardhat.org/hardhat-runner/docs/getting-started#overview", + "type": "article" + }, + { + "title": "Explore top posts about Hardhat", + "url": "https://app.daily.dev/tags/hardhat?ref=roadmapsh", + "type": "article" + }, + { + "title": "Build and Deploy Smart Contracts using Hardhat", + "url": "https://youtu.be/GBc3lBrXEBo", + "type": "video" + } + ] + }, + "Q64AbQlvYPiqJl8BtoJj9": { + "title": "Brownie", + "description": "Brownie is a Python-based development and testing framework for smart contracts targeting the Ethereum Virtual Machine.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Brownie Overview", + "url": "https://eth-brownie.readthedocs.io/", + "type": "article" + }, + { + "title": "Python and Blockchain: Deploy Smart Contracts using Brownie", + "url": "https://youtu.be/QfFO22lwSw4", + "type": "video" + } + ] + }, + 
"Vl9XHtc22HnqaCnF9yJv9": { + "title": "Truffle", + "description": "A development environment, testing framework, and asset pipeline for blockchains using the Ethereum Virtual Machine (EVM), aiming to make life as a developer easier.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Truffle Overview", + "url": "https://trufflesuite.com/docs/truffle/", + "type": "article" + }, + { + "title": "Truffle Tutorial for Beginners | Compile, Test & Deploy Smart contracts to any EVM Blockchain", + "url": "https://youtu.be/62f757RVEvU", + "type": "video" + } + ] + }, + "F1EUAxODBJ3GEoh7cqM-K": { + "title": "Foundry", + "description": "Foundry is a smart contract development toolchain. Foundry manages your dependencies, compiles your project, runs tests, deploys, and lets you interact with the chain from the command-line and via Solidity scripts.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Foundry Overview", + "url": "https://book.getfoundry.sh/", + "type": "article" + }, + { + "title": "Intro to Foundry", + "url": "https://youtu.be/fNMfMxGxeag", + "type": "video" + } + ] + }, + "snQA5_4H2dDkT1pENgaYD": { + "title": "Security", + "description": "Smart contracts are extremely flexible, capable of both holding large quantities of tokens (often in excess of $1B) and running immutable logic based on previously deployed smart contract code. 
While this has created a vibrant and creative ecosystem of trustless, interconnected smart contracts, it is also the perfect ecosystem to attract attackers looking to profit by exploiting vulnerabilities\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Smart Contract Security", + "url": "https://ethereum.org/en/developers/docs/smart-contracts/security/", + "type": "article" + }, + { + "title": "Ethereum Smart Contract Security Recommendations", + "url": "https://consensys.net/blog/developers/ethereum-smart-contract-security-recommendations/", + "type": "article" + }, + { + "title": "Explore top posts about Blockchain", + "url": "https://app.daily.dev/tags/blockchain?ref=roadmapsh", + "type": "article" + } + ] + }, + "UOF7Ep97i1l3Own6YEWlq": { + "title": "Practices", + "description": "Smart contract programming requires a different engineering mindset. The cost of failure can be high, and change can be difficult.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Ethereum Smart Contract Security Best Practices", + "url": "https://consensys.github.io/smart-contract-best-practices/", + "type": "article" + }, + { + "title": "Smart Contract Security and Auditing 101", + "url": "https://youtu.be/0aJfCug1zTM", + "type": "video" + } + ] + }, + "wypJdjTW4jHm9FCqv7Lhb": { + "title": "Fuzz Testing & Static Analysis", + "description": "Fuzzing or fuzz testing is an automated software testing technique that involves providing invalid, unexpected, or random data as inputs to a smart contract.\n\nStatic analysis is the analysis of smart contracts performed without executing them.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Getting Started with Smart Contract Fuzzing", + "url": "https://www.immunebytes.com/blog/getting-started-with-smart-contract-fuzzing/", + "type": "article" + }, + { + "title": "Solidity smart contract Static Code Analysis", + "url": 
"https://lightrains.com/blogs/solidity-static-analysis-tools/#static-code-analysis", + "type": "article" + }, + { + "title": "Explore top posts about Testing", + "url": "https://app.daily.dev/tags/testing?ref=roadmapsh", + "type": "article" + }, + { + "title": "Smart contract Fuzzing", + "url": "https://youtu.be/LRyyNzrqgOc", + "type": "video" + } + ] + }, + "GxD-KybtmkwT3wqDzIfHp": { + "title": "Common Threat Vectors", + "description": "Smart contract audits enable developers to provide a thorough analysis of smart contract sets. The main goal of a smart contract audit is to detect and eliminate vulnerabilities, starting with the most common threat vectors.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Smart Contract Attack Vectors", + "url": "https://github.com/kadenzipfel/smart-contract-attack-vectors", + "type": "opensource" + }, + { + "title": "Solidity Security: Comprehensive list of known attack vectors and common anti-patterns", + "url": "https://blog.sigmaprime.io/solidity-security.html", + "type": "article" + }, + { + "title": "Blockchain Attack Vectors: Vulnerabilities of the Most Secure Technology", + "url": "https://www.apriorit.com/dev-blog/578-blockchain-attack-vectors", + "type": "article" + } + ] + }, + "f60P5RNNdgCbrhDDzkY25": { + "title": "Source of Randomness Attacks", + "description": "The security of cryptographic systems depends on some secret data that is known to authorized persons but unknown and unpredictable to others. To achieve this unpredictability, some randomization is typically employed. Modern cryptographic protocols often require frequent generation of random quantities. 
Cryptographic attacks that subvert or exploit weaknesses in this process are known as randomness attacks.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Smart Contract Randomness or ReplicatedLogic Attack", + "url": "https://blog.finxter.com/randomness-or-replicatedlogic-attack-on-smart-contracts/", + "type": "article" + }, + { + "title": "Explore top posts about Security", + "url": "https://app.daily.dev/tags/security?ref=roadmapsh", + "type": "article" + } + ] + }, + "n3pipnNb76aaQeUwrDLk_": { + "title": "Tools", + "description": "Blockchain and smart contract technology is fairly new, therefore, you should expect constant changes in the security landscape, as new bugs and security risks are discovered, and new best practices are developed. Keeping track of this constantly moving landscape proves difficult, so using tools to aid this mission is important. The cost of failing to properly secure smart contracts can be high, and because change can be difficult, we must make use of these tools.", + "links": [] + }, + "YA3-7EZBRW-T-8HuVI7lk": { + "title": "Slither", + "description": "Slither is a Solidity static analysis framework written in Python 3. It runs a suite of vulnerability detectors, prints visual information about contract details, and provides an API to easily write custom analyses. 
Slither enables developers to find vulnerabilities, enhance their code comprehension, and quickly prototype custom analyses.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Slither, the Solidity source analyzer", + "url": "https://github.com/crytic/slither/blob/master/README.md", + "type": "opensource" + } + ] + }, + "twR3UdzUNSztjpwbAUT4F": { + "title": "Manticore", + "description": "Manticore is a symbolic execution tool for analysis of smart contracts and binaries.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Manticore Docs", + "url": "https://manticore.readthedocs.io/", + "type": "article" + } + ] + }, + "XIgczUc3yKo6kw-_3gskC": { + "title": "MythX", + "description": "MythX is a comprehensive smart contract security analysis tool developed by Consensys. It allows users to detect security vulnerabilities in Ethereum smart contracts throughout the development life cycle as well as analyze Solidity dapps for security holes and known smart contract vulnerabilities.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "MythX Official Site", + "url": "https://mythx.io/", + "type": "article" + }, + { + "title": "MythX Documentation", + "url": "https://docs.mythx.io/", + "type": "article" + } + ] + }, + "U4H62lVac8wIgxNJ3N3ga": { + "title": "Echidna", + "description": "Echidna is a Haskell program designed for fuzzing/property-based testing of Ethereum smart contracts. 
It uses sophisticated grammar-based fuzzing campaigns based on a contract ABI to falsify user-defined predicates or Solidity assertions.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Echidna: A Fast Smart Contract Fuzzer", + "url": "https://github.com/crytic/echidna/blob/master/README.md", + "type": "opensource" + } + ] + }, + "fbESHQGYqxKRi-5DW8TY3": { + "title": "Management Platforms", + "description": "Managing smart contracts in a production environment (mainnet) can prove difficult as users must keep track of different versions, blockchains, deployments, etc. Using a tool for this process eliminates a lot of the risk that comes with manual tracking.", + "links": [] + }, + "qox-x_q-Q7aWcNFWD7RkT": { + "title": "OpenZeppelin", + "description": "OpenZeppelin Contracts helps you minimize risk by using battle-tested libraries of smart contracts for Ethereum and other blockchains. It includes the most used implementations of ERC standards.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "OpenZeppelin Contracts", + "url": "https://docs.openzeppelin.com/contracts/", + "type": "article" + } + ] + }, + "Fs9rcEh_f9fJ2tF-bkAUE": { + "title": "Version Control Systems", + "description": "Version control/source control systems allow developers to track and control changes to code over time. These services often include the ability to make atomic revisions to code, branch/fork off of specific points, and to compare versions of code. 
They are useful in determining the who, what, when, and why code changes were made.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Git", + "url": "https://git-scm.com/", + "type": "article" + }, + { + "title": "Mercurial", + "url": "https://www.mercurial-scm.org/", + "type": "article" + }, + { + "title": "What is Version Control?", + "url": "https://www.atlassian.com/git/tutorials/what-is-version-control", + "type": "article" + } + ] + }, + "gpS5CckcQZX3TMFQ2jtIL": { + "title": "Git", + "description": "[Git](https://git-scm.com/) is a free and open source distributed version control system designed to handle everything from small to very large projects with speed and efficiency.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Learn Git with Tutorials, News and Tips - Atlassian", + "url": "https://www.atlassian.com/git", + "type": "article" + }, + { + "title": "Git Cheat Sheet", + "url": "https://cs.fyi/guide/git-cheatsheet", + "type": "article" + }, + { + "title": "Explore top posts about Git", + "url": "https://app.daily.dev/tags/git?ref=roadmapsh", + "type": "article" + }, + { + "title": "Git & GitHub Crash Course For Beginners", + "url": "https://www.youtube.com/watch?v=SWYqp7iY_Tc", + "type": "video" + } + ] + }, + "oSK3MRQD_4j1gGDORN7RO": { + "title": "Repo Hosting Services", + "description": "When working on a team, you often need a remote place to put your code so others can access it, create their own branches, and create or review pull requests. These services often include issue tracking, code review, and continuous integration features. 
A few popular choices are GitHub, GitLab, BitBucket, and AWS CodeCommit.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "GitHub", + "url": "https://github.com/features/", + "type": "opensource" + }, + { + "title": "GitLab", + "url": "https://about.gitlab.com/", + "type": "article" + }, + { + "title": "BitBucket", + "url": "https://bitbucket.org/product/guides/getting-started/overview", + "type": "article" + }, + { + "title": "How to choose the best source code repository", + "url": "https://bitbucket.org/product/code-repository", + "type": "article" + } + ] + }, + "GOgeaQoRvqg-7mAfL_A8t": { + "title": "GitHub", + "description": "GitHub is a provider of Internet hosting for software development and version control using Git. It offers the distributed version control and source code management functionality of Git, plus its own features.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "GitHub Website", + "url": "https://github.com", + "type": "opensource" + }, + { + "title": "GitHub Documentation", + "url": "https://docs.github.com/en/get-started/quickstart", + "type": "article" + }, + { + "title": "How to Use Git in a Professional Dev Team", + "url": "https://ooloo.io/project/github-flow", + "type": "article" + }, + { + "title": "Explore top posts about GitHub", + "url": "https://app.daily.dev/tags/github?ref=roadmapsh", + "type": "article" + }, + { + "title": "What is GitHub?", + "url": "https://www.youtube.com/watch?v=w3jLJU7DT5E", + "type": "video" + }, + { + "title": "Git vs. 
GitHub: Whats the difference?", + "url": "https://www.youtube.com/watch?v=wpISo9TNjfU", + "type": "video" + }, + { + "title": "Git and GitHub for Beginners", + "url": "https://www.youtube.com/watch?v=RGOj5yH7evk", + "type": "video" + }, + { + "title": "Git and GitHub - CS50 Beyond 2019", + "url": "https://www.youtube.com/watch?v=eulnSXkhE7I", + "type": "video" + } + ] + }, + "CWqwv4asouS-dssAwIdxv": { + "title": "GitLab", + "description": "GitLab is a provider of internet hosting for software development and version control using Git. It offers the distributed version control and source code management functionality of Git, plus its own features.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "GitLab Website", + "url": "https://gitlab.com/", + "type": "opensource" + }, + { + "title": "GitLab Documentation", + "url": "https://docs.gitlab.com/", + "type": "article" + }, + { + "title": "Explore top posts about GitLab", + "url": "https://app.daily.dev/tags/gitlab?ref=roadmapsh", + "type": "article" + } + ] + }, + "TMPB62h9LGIA0pMmjfUun": { + "title": "Bitbucket", + "description": "Bitbucket is a Git based hosting and source code repository service that is Atlassian's alternative to other products like GitHub, GitLab etc\n\nBitbucket offers hosting options via Bitbucket Cloud (Atlassian's servers), Bitbucket Server (customer's on-premise) or Bitbucket Data Centre (number of servers in customers on-premise or cloud environment)\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Bitbucket Website", + "url": "https://bitbucket.org/product", + "type": "article" + }, + { + "title": "A brief overview of Bitbucket", + "url": "https://bitbucket.org/product/guides/getting-started/overview#a-brief-overview-of-bitbucket", + "type": "article" + }, + { + "title": "Getting started with Bitbucket", + "url": "https://bitbucket.org/product/guides/basics/bitbucket-interface", + "type": "article" + }, + { + "title": "Using Git 
with Bitbucket Cloud", + "url": "https://www.atlassian.com/git/tutorials/learn-git-with-bitbucket-cloud", + "type": "article" + }, + { + "title": "Explore top posts about Bitbucket", + "url": "https://app.daily.dev/tags/bitbucket?ref=roadmapsh", + "type": "article" + }, + { + "title": "Bitbucket tutorial | How to use Bitbucket Cloud", + "url": "https://www.youtube.com/watch?v=M44nEyd_5To", + "type": "video" + }, + { + "title": "Bitbucket Tutorial | Bitbucket for Beginners", + "url": "https://www.youtube.com/watch?v=i5T-DB8tb4A", + "type": "video" + } + ] + }, + "SXXvFtf_7Rx64cHSEWxMS": { + "title": "dApps - Decentralized Applications", + "description": "A decentralized application (dApp) is an application that can operate autonomously, through the use of smart contracts that run on a blockchain. Like traditional applications, dApps provide some function or utility to its users.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Introduction to dApps", + "url": "https://ethereum.org/en/developers/docs/dapps/", + "type": "article" + }, + { + "title": "What Is a Dapp? Decentralized Apps Explained", + "url": "https://www.coindesk.com/learn/what-is-a-dapp-decentralized-apps-explained/", + "type": "article" + } + ] + }, + "aATSuiqPG-yctr3ChEBa_": { + "title": "Applicability", + "description": "dApps can be used for just about anything that requires two or more parties to agree on something. When the appropriate conditions are met, the smart contract will execute automatically. An important differentiation is that these transactions are no longer based on trust but they are rather based on cryptographically-backed smart contracts.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What Is a dApp? 
A Guide to Decentralized Applications", + "url": "https://www.sofi.com/learn/content/what-is-a-dapp/", + "type": "article" + }, + { + "title": "Blockchain Use Cases and Applications by Industry", + "url": "https://consensys.net/blockchain-use-cases/", + "type": "article" + }, + { + "title": "The real-world use cases for blockchain technology", + "url": "https://roboticsandautomationnews.com/2022/05/20/the-real-world-use-cases-for-blockchain-technology/", + "type": "article" + } + ] + }, + "1AhombZUkZN6Ra5fysSpg": { + "title": "DeFi", + "description": "Decentralized finance offers financial instruments without relying on intermediaries such as brokerages, exchanges, or banks by using smart contracts on a blockchain.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Decentralized Finance (DeFi) Definition", + "url": "https://www.investopedia.com/decentralized-finance-defi-5113835", + "type": "article" + }, + { + "title": "What is DeFi?", + "url": "https://www.coinbase.com/learn/crypto-basics/what-is-defi", + "type": "article" + }, + { + "title": "Explore top posts about DeFi", + "url": "https://app.daily.dev/tags/defi?ref=roadmapsh", + "type": "article" + }, + { + "title": "What is DeFi? (Decentralized Finance Animated)", + "url": "https://www.youtube.com/watch?v=17QRFlml4pA", + "type": "video" + } + ] + }, + "RsoOgixZlyQU6h7nIaY9J": { + "title": "DAOs", + "description": "A decentralized autonomous organization (DAO) is an emerging form of legal structure. With no central governing body, every member within a DAO typically shares a common goal and attempts to act in the best interest of the entity. 
Popularized through cryptocurrency enthusiasts and blockchain technology, DAOs are used to make decisions in a bottoms-up management approach.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What Is A DAO And How Do They Work?", + "url": "https://consensys.net/blog/blockchain-explained/what-is-a-dao-and-how-do-they-work/", + "type": "article" + }, + { + "title": "Decentralized Autonomous Organization (DAO)", + "url": "https://www.investopedia.com/tech/what-dao/", + "type": "article" + } + ] + }, + "e4OHLOfa_AqEShpMQe6Dx": { + "title": "NFTs", + "description": "A non-fungible token (NFT) is a financial security consisting of digital data stored in a blockchain, a form of distributed ledger. The ownership of an NFT is recorded in the blockchain, and can be transferred by the owner, allowing NFTs to be sold and traded.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Non-Fungible Token (NFT)", + "url": "https://www.investopedia.com/non-fungible-tokens-nft-5115211", + "type": "article" + }, + { + "title": "NFTs, explained", + "url": "https://www.theverge.com/22310188/nft-explainer-what-is-blockchain-crypto-art-faq", + "type": "article" + }, + { + "title": "Explore top posts about NFT", + "url": "https://app.daily.dev/tags/nft?ref=roadmapsh", + "type": "article" + }, + { + "title": "NFT Explained In 5 Minutes | What Is NFT? 
- Non Fungible Token", + "url": "https://youtu.be/NNQLJcJEzv0", + "type": "video" + } + ] + }, + "vTGSDThkDDHvCanNlgP07": { + "title": "Payments", + "description": "Blockchain technology has the ability to eliminate all the tolls exacted by centralized organization when transferring payments.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "How does blockchain impact global payments and remittances?", + "url": "https://consensys.net/blockchain-use-cases/finance/#payments", + "type": "article" + }, + { + "title": "Smart Contract Use Cases - Payments", + "url": "https://blog.chain.link/smart-contract-use-cases/#external-payments", + "type": "article" + } + ] + }, + "uMXcKCUoUN8-Hq_IOBRCp": { + "title": "Insurance", + "description": "Blockchain technology has the ability to automate claims functions by verifying real-world data through the use of an oracle. It also automates payments between parties for claims and thus lower administrative costs for insurance companies.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Smart Contract Use Cases - Insurance", + "url": "https://blog.chain.link/smart-contract-use-cases/#insurance", + "type": "article" + }, + { + "title": "Top 7 Use Cases of Blockchain in the Insurance Industry", + "url": "https://imaginovation.net/blog/blockchain-insurance-industry-examples/", + "type": "article" + } + ] + }, + "dDPhMNPpl3rZh3EgXy13P": { + "title": "Node as a Service", + "description": "Running your own blockchain node can be challenging, especially when getting started or while scaling fast. 
There are a number of services that run optimized node infrastructures for you, so you can focus on developing your application or product instead.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Blockchain Node Providers and How They Work", + "url": "https://www.infoq.com/articles/blockchain-as-a-service-get-block/", + "type": "article" + }, + { + "title": "Node as a Service - Ethereum", + "url": "https://ethereum.org/en/developers/docs/nodes-and-clients/nodes-as-a-service/", + "type": "article" + } + ] + }, + "lOoubzXNILBk18jGsc-JX": { + "title": "Alchemy", + "description": "Alchemy is a developer platform that empowers companies to build scalable and reliable decentralized applications without the hassle of managing blockchain infrastructure in-house.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Alchemy official site", + "url": "https://www.alchemy.com/", + "type": "article" + } + ] + }, + "hJmNuEMHaMSM0QQlABFRS": { + "title": "Infura", + "description": "Infura provides the tools and infrastructure that allow developers to easily take their blockchain application from testing to scaled deployment - with simple, reliable access to Ethereum and IPFS.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Infura official site", + "url": "https://infura.io/", + "type": "article" + } + ] + }, + "pJhR9OQo8YFQmAZXQbikJ": { + "title": "Moralis", + "description": "Moralis provides a single workflow for building high performance dapps. 
Fully compatible with your favorite web3 tools and services.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Moralis official site", + "url": "https://moralis.io/", + "type": "article" + }, + { + "title": "Explore top posts about Moralis", + "url": "https://app.daily.dev/tags/moralis?ref=roadmapsh", + "type": "article" + } + ] + }, + "FGih8w9We52PHpBnnGO6H": { + "title": "Quicknode", + "description": "QuickNode is a Web3 developer platform used to build and scale blockchain applications.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Quicknode official site", + "url": "https://www.quicknode.com/", + "type": "article" + } + ] + }, + "NK02dunI3i6C6z7krENCC": { + "title": "Supporting Languages", + "description": "While the bulk of the logic in blockchain applications is handled by smart contracts, all the surrounding services that support those smart contracts (frontend, monitoring, etc.) are often written in other languages.", + "links": [] + }, + "fF06XiQV4CPEJnt_ESOvv": { + "title": "JavaScript", + "description": "JavaScript, often abbreviated JS, is a programming language that is one of the core technologies of the World Wide Web, alongside HTML and CSS. It lets us add interactivity to pages e.g. you might have seen sliders, alerts, click interactions, and popups etc on different websites -- all of that is built using JavaScript. 
Apart from being used in the browser, it is also used in other non-browser environments as well such as Node.js for writing server-side code in JavaScript, Electron for writing desktop applications, React Native for mobile applications and so on.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "You Dont Know JS Yet (book series) ", + "url": "https://github.com/getify/You-Dont-Know-JS", + "type": "opensource" + }, + { + "title": "W3Schools – JavaScript Tutorial", + "url": "https://www.w3schools.com/js/", + "type": "article" + }, + { + "title": "The Modern JavaScript Tutorial", + "url": "https://javascript.info/", + "type": "article" + }, + { + "title": "Eloquent Javascript - Book", + "url": "https://eloquentjavascript.net/", + "type": "article" + }, + { + "title": "Explore top posts about JavaScript", + "url": "https://app.daily.dev/tags/javascript?ref=roadmapsh", + "type": "article" + }, + { + "title": "JavaScript Crash Course for Beginners", + "url": "https://youtu.be/hdI2bqOjy3c", + "type": "video" + }, + { + "title": "Node.js Crash Course", + "url": "https://www.youtube.com/watch?v=fBNz5xF-Kx4", + "type": "video" + }, + { + "title": "Node.js Tutorial for Beginners", + "url": "https://www.youtube.com/watch?v=TlB_eWDSMt4", + "type": "video" + } + ] + }, + "pVG7bGqVGCeSBlZxRNHJs": { + "title": "Python", + "description": "Python is a well known programming language which is both a strongly typed and a dynamically typed language. 
Being an interpreted language, code is executed as soon as it is written and the Python syntax allows for writing code in functional, procedural or object-oriented programmatic ways.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Visit Dedicated Python Roadmap", + "url": "/python", + "type": "article" + }, + { + "title": "Python Website", + "url": "https://www.python.org/", + "type": "article" + }, + { + "title": "Python Getting Started", + "url": "https://www.python.org/about/gettingstarted/", + "type": "article" + }, + { + "title": "W3Schools - Python Tutorial ", + "url": "https://www.w3schools.com/python/", + "type": "article" + }, + { + "title": "Python Crash Course", + "url": "https://ehmatthes.github.io/pcc/", + "type": "article" + }, + { + "title": "Automate the Boring Stuff", + "url": "https://automatetheboringstuff.com/", + "type": "article" + }, + { + "title": "Explore top posts about Python", + "url": "https://app.daily.dev/tags/python?ref=roadmapsh", + "type": "article" + } + ] + }, + "jxlQ1ibcCv6ZlcEvobZ_G": { + "title": "Go", + "description": "Go is an open source programming language supported by Google. 
Go can be used to write cloud services, CLI tools, used for API development, and much more.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Visit Dedicated Go Roadmap", + "url": "/golang", + "type": "article" + }, + { + "title": "A Tour of Go – Go Basics", + "url": "https://go.dev/tour/welcome/1", + "type": "article" + }, + { + "title": "Go Reference Documentation", + "url": "https://go.dev/doc/", + "type": "article" + }, + { + "title": "Go by Example - annotated example programs", + "url": "https://gobyexample.com/", + "type": "article" + }, + { + "title": "W3Schools Go Tutorial ", + "url": "https://www.w3schools.com/go/", + "type": "article" + }, + { + "title": "Explore top posts about Golang", + "url": "https://app.daily.dev/tags/golang?ref=roadmapsh", + "type": "article" + } + ] + }, + "YR-U2njkUrEMO0JxCU1PH": { + "title": "Frontend Frameworks", + "description": "Web frameworks are designed to write web applications. Frameworks are collections of libraries that aid in the development of a software product or website. Frameworks for web application development are collections of various tools. Frameworks vary in their capabilities and functions, depending on the tasks set. 
They define the structure, establish the rules, and provide the development tools required.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Web3 Frontend – Everything You Need to Learn About Building Dapp Frontends", + "url": "https://moralis.io/web3-frontend-everything-you-need-to-learn-about-building-dapp-frontends/", + "type": "article" + }, + { + "title": "Explore top posts about Frontend Development", + "url": "https://app.daily.dev/tags/frontend?ref=roadmapsh", + "type": "article" + }, + { + "title": "What is the difference between a framework and a library?", + "url": "https://www.youtube.com/watch?v=D_MO9vIRBcA", + "type": "video" + }, + { + "title": "Which JS Framework is best?", + "url": "https://www.youtube.com/watch?v=cuHDQhDhvPE", + "type": "video" + } + ] + }, + "0DUYS40_-BTpk2rLQ1a0e": { + "title": "React", + "description": "React is the most popular front-end JavaScript library for building user interfaces. React can also render on the server using Node and power mobile apps using React Native.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Visit Dedicated React Roadmap", + "url": "/react", + "type": "article" + }, + { + "title": "React Website", + "url": "https://reactjs.org/", + "type": "article" + }, + { + "title": "Official Getting Started", + "url": "https://reactjs.org/tutorial/tutorial.html", + "type": "article" + }, + { + "title": "Explore top posts about React", + "url": "https://app.daily.dev/tags/react?ref=roadmapsh", + "type": "article" + }, + { + "title": "React JS Course for Beginners", + "url": "https://www.youtube.com/watch?v=nTeuhbP7wdE", + "type": "video" + }, + { + "title": "React Course - Beginners Tutorial for React JavaScript Library [2022]", + "url": "https://www.youtube.com/watch?v=bMknfKXIFA8", + "type": "video" + }, + { + "title": "Understanding Reacts UI Rendering Process", + "url": "https://www.youtube.com/watch?v=i793Qm6kv3U", + "type": "video" + } + ] + }, + 
"UY_vAsixTyocvo8zvAF4b": { + "title": "Angular", + "description": "Angular is a component based front-end development framework built on TypeScript which includes a collection of well-integrated libraries that include features like routing, forms management, client-server communication, and more.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Visit Dedicated Angular Roadmap", + "url": "/angular", + "type": "article" + }, + { + "title": "Official - Getting started with Angular", + "url": "https://angular.io/start", + "type": "article" + }, + { + "title": "Explore top posts about Angular", + "url": "https://app.daily.dev/tags/angular?ref=roadmapsh", + "type": "article" + } + ] + }, + "Ke97bMHGfb-8hB_xSwMbk": { + "title": "Vue", + "description": "Vue.js is an open-source JavaScript framework for building user interfaces and single-page applications. It is mainly focused on front end development.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Visit Dedicated Vue Roadmap", + "url": "/vue", + "type": "article" + }, + { + "title": "Vue.js Website", + "url": "https://vuejs.org/", + "type": "article" + }, + { + "title": "Official Getting Started", + "url": "https://vuejs.org/v2/guide/", + "type": "article" + }, + { + "title": "Explore top posts about Vue.js", + "url": "https://app.daily.dev/tags/vuejs?ref=roadmapsh", + "type": "article" + }, + { + "title": "Vue.js Course for Beginners", + "url": "https://www.youtube.com/watch?v=FXpIoQ_rT_c", + "type": "video" + }, + { + "title": "Vue.js Crash Course", + "url": "https://www.youtube.com/watch?v=qZXt1Aom3Cs", + "type": "video" + } + ] + }, + "-7Bq2ktD0nt7of9liuCDL": { + "title": "Testing", + "description": "A key to building software that meets requirements without defects is testing. Software testing helps developers know they are building the right software. 
When tests are run as part of the development process (often with continuous integration tools), they build confidence and prevent regressions in the code.\n\nLike traditional software, testing dApps involves testing the entire stack that makes up the dApp (backend, frontend, db, etc.).\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is Software Testing?", + "url": "https://www.guru99.com/software-testing-introduction-importance.html", + "type": "article" + }, + { + "title": "Testing Pyramid", + "url": "https://www.browserstack.com/guide/testing-pyramid-for-test-automation", + "type": "article" + }, + { + "title": "How to test dApps (decentralized applications)", + "url": "https://rhian-is.medium.com/how-to-test-dapps-decentralized-applications-4662cf61db90", + "type": "article" + }, + { + "title": "Explore top posts about Testing", + "url": "https://app.daily.dev/tags/testing?ref=roadmapsh", + "type": "article" + } + ] + }, + "cW3_Ki9Bx7fYluDLKKKgl": { + "title": "Deployment", + "description": "Deploying a dApp involves deployment of all of its layers, generally through a management framework.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Tutorial for building an Ethereum DApp with Integrated Web3 Monitoring", + "url": "https://www.moesif.com/blog/blockchain/ethereum/Tutorial-for-building-Ethereum-Dapp-with-Integrated-Error-Monitoring/", + "type": "article" + }, + { + "title": "Explore top posts about CI/CD", + "url": "https://app.daily.dev/tags/cicd?ref=roadmapsh", + "type": "article" + }, + { + "title": "Build and Deploy a Modern Web 3.0 Blockchain App", + "url": "https://youtu.be/Wn_Kb3MR_cU", + "type": "video" + } + ] + }, + "XvVpnlYhT_yOsvjAvwZpr": { + "title": "Maintenance", + "description": "dApps can be harder to maintain because the code and data published to the blockchain is harder to modify. 
It’s hard for developers to make updates to their dapps (or the underlying data stored by a dapp) once they are deployed, even if bugs or security risks are identified in an old version.", + "links": [] + }, + "B6GGTUbzEaIz5yu32WrAq": { + "title": "Architecture", + "description": "Unlike Web2 applications, in Web3 there’s no centralized database that stores the application state or user identity, and there’s no centralized web server where the backend logic resides.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "The Architecture of a Web 3.0 application", + "url": "https://www.preethikasireddy.com/post/the-architecture-of-a-web-3-0-application", + "type": "article" + }, + { + "title": "Explore top posts about Architecture", + "url": "https://app.daily.dev/tags/architecture?ref=roadmapsh", + "type": "article" + }, + { + "title": "Blockchain Development: Dapp Architecture", + "url": "https://youtu.be/KBSq8-LnUDI?t=286", + "type": "video" + } + ] + }, + "PBTrg9ivOpw9uNTVisIpx": { + "title": "Security", + "description": "dApps face unique security challenges as they run on immutable blockchains. dApps are harder to maintain, and developers cannot modify or update their codes once deployed. 
Therefore, special consideration must be taken before putting it on the blockchain.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "DAPP Security Standards", + "url": "https://github.com/Dexaran/DAPP-security-standards/blob/master/README.md", + "type": "opensource" + }, + { + "title": "dApp Security Considerations", + "url": "https://livebook.manning.com/book/building-ethereum-dapps/chapter-14/", + "type": "article" + }, + { + "title": "dApp Security:All You Need to Know", + "url": "https://www.immunebytes.com/blog/dapp-security/#Benefits_of_DApps_Security", + "type": "article" + }, + { + "title": "Explore top posts about Security", + "url": "https://app.daily.dev/tags/security?ref=roadmapsh", + "type": "article" + } + ] + }, + "vZiDpX9pEB9gfueSKZiQL": { + "title": "Client Libraries", + "description": "You don't need to write every smart contract in your project from scratch. There are many open source smart contract libraries available that provide reusable building blocks for your project that can save you from having to reinvent the wheel.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Viem library with great TypeScript support", + "url": "https://viem.sh", + "type": "article" + } + ] + }, + "i-ltfXPTCu3WaBo-xaN05": { + "title": "ethers.js", + "description": "The ethers.js library aims to be a complete and compact library for interacting with the Ethereum Blockchain and its ecosystem. 
It was originally designed for use with [ethers.io](http://ethers.io) and has since expanded into a more general-purpose library.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Ethers.js Documentation", + "url": "https://docs.ethers.io/", + "type": "article" + }, + { + "title": "Explore top posts about JavaScript", + "url": "https://app.daily.dev/tags/javascript?ref=roadmapsh", + "type": "article" + } + ] + }, + "jwuMeo9TwaQviGIMO13Jf": { + "title": "web3.js", + "description": "web3.js is a collection of libraries that allow you to interact with a local or remote ethereum node using HTTP, IPC or WebSocket.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "web3.js Documentation", + "url": "https://web3js.readthedocs.io/", + "type": "article" + }, + { + "title": "Explore top posts about Web3", + "url": "https://app.daily.dev/tags/web3?ref=roadmapsh", + "type": "article" + } + ] + }, + "RFgetmTvKvpV2PG6Vfev7": { + "title": "Moralis", + "description": "A library that gives you access to the powerful Moralis Server backend from your JavaScript app.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Moralis SDK", + "url": "https://github.com/MoralisWeb3/Moralis-JS-SDK/blob/main/README.md", + "type": "opensource" + }, + { + "title": "Explore top posts about Moralis", + "url": "https://app.daily.dev/tags/moralis?ref=roadmapsh", + "type": "article" + } + ] + }, + "CoYEwHNNmrQ0i0sSQTcB7": { + "title": "Client Nodes", + "description": "A blockchain is a distributed network of computers (known as nodes) running software that can verify blocks and transaction data. The software application, known as a client, must be run on your computer to turn it into a blockchain node.", + "links": [] + }, + "DBRaXtwvdq2UGE8rVCmI1": { + "title": "Geth", + "description": "Go Ethereum (Geth) is one of the three original implementations (along with C++ and Python) of the Ethereum protocol. 
It is written in Go, fully open source and licensed under the GNU LGPL v3.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Geth Documentation", + "url": "https://geth.ethereum.org/docs/", + "type": "article" + } + ] + }, + "Gnnp5qrFmuSVtaq31rvMX": { + "title": "Besu", + "description": "Besu is an Apache 2.0 licensed, MainNet compatible, Ethereum client written in Java.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Besu Ethereum Client", + "url": "https://github.com/hyperledger/besu", + "type": "opensource" + } + ] + }, + "xtYwg0WAcE8Ea9VgC2RSc": { + "title": "Nethermind", + "description": "Nethermind is a high-performance, highly configurable full Ethereum protocol client built on .NET that runs on Linux, Windows, and macOS, and supports Clique, Aura, Ethash, and Proof-of-Stake consensus algorithms.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Nethermind Documentation", + "url": "https://docs.nethermind.io/nethermind/", + "type": "article" + } + ] + }, + "E4uuJZFZz-M1vlpZmdJTO": { + "title": "Substrate", + "description": "Substrate is a Software Development Kit (SDK) specifically designed to provide you with all of the fundamental components s blockchain requires so you can focus on crafting the logic that makes your chain unique and innovative.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Substrate Documentation", + "url": "https://docs.substrate.io/quick-start/", + "type": "article" + } + ] + }, + "bTdRKEiIUmu1pnp8UbJK9": { + "title": "Building for Scale", + "description": "Due to the limited number of transactions-per-second (TPS) built-in to blockchains, a number of alternative mechanism and technologies have emerged to aid the scaling of blockchain dApps.", + "links": [] + }, + "5T5c3SrFfMZLEKAzxJ-_S": { + "title": "State & Payment Channels", + "description": "State channels refer to the process in which users transact with one 
another directly outside of the blockchain, or ‘off-chain,’ and greatly minimize their use of ‘on-chain’ operations.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "The Basics of State Channels", + "url": "https://education.district0x.io/general-topics/understanding-ethereum/basics-state-channels/", + "type": "article" + }, + { + "title": "State Channels: An Introduction to Off-chain Transactions", + "url": "https://www.talentica.com/blogs/state-channels-an-introduction-to-off-chain-transactions/", + "type": "article" + } + ] + }, + "ti6-LSK52dTCLVdxArp9q": { + "title": "Optimistic Rollups & Fraud Proofs", + "description": "‍Optimistic rollups are a layer 2 (L2) construction that improves throughput and latency on Ethereum’s base layer by moving computation and data storage off-chain. An optimistic rollup processes transactions outside of Ethereum Mainnet, reducing congestion on the base layer and improving scalability.\n\nOptimistic rollups allow anyone to publish blocks without providing proofs of validity. 
However, to ensure the chain remains safe, optimistic rollups specify a time window during which anyone can dispute a state transition.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "How Do Optimistic Rollups Work (The Complete Guide)", + "url": "https://www.alchemy.com/overviews/optimistic-rollups", + "type": "article" + } + ] + }, + "PykoX4j5Q3eJWIpUoczjM": { + "title": " Zk Rollups & Zero Knowledge Proof", + "description": "Zero-knowledge rollups (ZK-rollups) are layer 2 scaling solutions that increase the throughput of a blockchain by moving computation and state-storage off-chain.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Zero-Knowledge Rollups - Ethereum", + "url": "https://ethereum.org/en/developers/docs/scaling/zk-rollups", + "type": "article" + }, + { + "title": "Why and How zk-SNARK Works", + "url": "https://medium.com/@imolfar/why-and-how-zk-snark-works-1-introduction-the-medium-of-a-proof-d946e931160", + "type": "article" + }, + { + "title": "Introduction to zk-SNARKs", + "url": "https://vitalik.eth.limo/general/2021/01/26/snarks.html", + "type": "article" + } + ] + }, + "chmxDwNVOefp98IbjEgNl": { + "title": "Validium", + "description": "Validium is a scaling solution that enforces integrity of transactions using validity proofs like ZK-rollups, but doesn’t store transaction data on the Ethereum Mainnet. While off-chain data availability introduces trade-offs, it can lead to massive improvements in scalability\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Validium - Ethereum", + "url": "https://ethereum.org/en/developers/docs/scaling/validium/", + "type": "article" + } + ] + }, + "dMesiPUPBegYQ7hgeMMJK": { + "title": "Plasma", + "description": "Plasma is a framework that allows the creation of child blockchains that use the main Ethereum chain as a layer of trust and arbitration. 
In Plasma, child chains can be designed to meet the requirements of specific use cases, specifically those that are not currently feasible on Ethereum.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Plasma Chains - Ethereum", + "url": "https://ethereum.org/en/developers/docs/scaling/plasma/", + "type": "article" + } + ] + }, + "lSQA3CfQBugEBhAh2yxro": { + "title": "Sidechains", + "description": "A sidechain is a separate blockchain network that connects to another blockchain – called a parent blockchain or mainnet – via a two-way peg.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Sidechains - Ethereum", + "url": "https://ethereum.org/en/developers/docs/scaling/sidechains/", + "type": "article" + }, + { + "title": "An Introduction to Sidechains", + "url": "https://www.coindesk.com/learn/an-introduction-to-sidechains", + "type": "article" + } + ] + }, + "9uz6LBQwYpOid61LrK5dl": { + "title": "Ethereum 2.0", + "description": "Ethereum 2.0 marks a long-anticipated upgrade to the Ethereum public mainnet. Designed to accelerate Ethereum’s usage and adoption by improving its performance, Ethereum 2.0 implements Proof of Stake.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What Is Ethereum 2.0?", + "url": "https://consensys.net/blog/blockchain-explained/what-is-ethereum-2/", + "type": "article" + }, + { + "title": "What Is Ethereum 2.0? Understanding The Merge", + "url": "https://www.forbes.com/advisor/investing/cryptocurrency/ethereum-2/", + "type": "article" + }, + { + "title": "Explore top posts about Ethereum", + "url": "https://app.daily.dev/tags/ethereum?ref=roadmapsh", + "type": "article" + } + ] + }, + "1bUD9-vFo-tsHiB1a06tO": { + "title": "On-Chain Scaling", + "description": "On-chain scaling refers to any direct modification made to a blockchain, like data sharding and execution sharding in the incoming version of Ethereum 2.0. 
Another type of on-chain scaling would be a sidechain with two-way bridge to Ethereum, like Polygon.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Scaling - Ethereum", + "url": "https://ethereum.org/en/developers/docs/scaling/", + "type": "article" + } + ] + }, + "ecT4W5z8Vq9pXjnuhMdpl": { + "title": "Why it matters?", + "description": "The nature of blockchain allows for trustless systems to be built on top of it. Users don’t rely on a centralized group of people, such as a bank, to make decisions and allow transactions to flow through. Because the system is decentralized, users know that transactions will never be denied for non-custodial reasons.\n\nThis decentralization enables use-cases that were previously impossible, such as parametric insurance, decentralized finance, and decentralized organizations (DAOs), among a few. This allows developers to build products that provide immediate value without having to go through a bureaucratic process of applications, approvals, and general red tape.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Why Blockchain?", + "url": "https://chain.link/education-hub/blockchain", + "type": "article" + }, + { + "title": "What Is The Blockchain And Why Does It Matter?", + "url": "https://www.forbes.com/sites/theyec/2020/05/18/what-is-the-blockchain-and-why-does-it-matter/", + "type": "article" + }, + { + "title": "Web3/Crypto: Why Bother?", + "url": "https://continuations.com/post/671863718643105792/web3crypto-why-bother", + "type": "article" + }, + { + "title": "Why is Blockchain Important and Why Does it Matter", + "url": "https://www.simplilearn.com/tutorials/blockchain-tutorial/why-is-blockchain-important", + "type": "article" + }, + { + "title": "Explore top posts about Blockchain", + "url": "https://app.daily.dev/tags/blockchain?ref=roadmapsh", + "type": "article" + } + ] + }, + "zvUCR0KeigEi9beqFpwny": { + "title": "Storage", + "description": "Unlike a 
centralized server operated by a single company or organization, decentralized storage systems consist of a peer-to-peer network of user-operators who hold a portion of the overall data, creating a resilient file storage sharing system.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Blockchain Storage", + "url": "https://www.techtarget.com/searchstorage/definition/blockchain-storage", + "type": "article" + }, + { + "title": "Decentralized Storage", + "url": "https://ethereum.org/en/developers/docs/storage/", + "type": "article" + }, + { + "title": "How IPFS works", + "url": "https://docs.ipfs.tech/concepts/how-ipfs-works/", + "type": "article" + }, + { + "title": "Explore top posts about Storage", + "url": "https://app.daily.dev/tags/storage?ref=roadmapsh", + "type": "article" + } + ] + } +} \ No newline at end of file diff --git a/public/roadmap-content/computer-science.json b/public/roadmap-content/computer-science.json new file mode 100644 index 000000000..ca1965ce5 --- /dev/null +++ b/public/roadmap-content/computer-science.json @@ -0,0 +1,4202 @@ +{ + "tU4Umtnfu01t9gLlnlK6b": { + "title": "Pick a Language", + "description": "You need to pick a programming language to learn the Computer Science concepts. My personal recommendation would be to pick C++ or C and the reason for that is:\n\n* They allow you to deal with pointers and memory allocation/deallocation, so you feel the data structures and algorithms in your bones. In higher level languages like Python or Java, these are hidden from you. In day to day work, that's terrific, but when you're learning how these low-level data structures are built, it's great to feel close to the metal.\n* You will be able to find a lot of resources for the topics listed in this roadmap using C or C++. 
You can find a lot of resources for Python and Java, but they are not as abundant as C++ and C.\n\nGiven below is the list of resources; pick ones relevant to the language of your choice.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Learn C++ - W3Schools", + "url": "https://www.w3schools.com/cpp/", + "type": "article" + }, + { + "title": "Learn C++ - Tutorials Point", + "url": "https://www.tutorialspoint.com/cplusplus/index.htm", + "type": "article" + }, + { + "title": "Learn C - W3Schools", + "url": "https://www.w3schools.com/c/", + "type": "article" + }, + { + "title": "Learn C - Tutorials Point", + "url": "https://www.tutorialspoint.com/cprogramming/index.htm", + "type": "article" + }, + { + "title": "C++ Programming Course - Beginner to Advanced", + "url": "https://www.youtube.com/watch?v=8jLOx1hD3_o", + "type": "video" + }, + { + "title": "C++ Tutorial for Beginners - Full Course", + "url": "https://www.youtube.com/watch?v=vLnPwxZdW4Y", + "type": "video" + }, + { + "title": "C Programming Tutorial for Beginners", + "url": "https://www.youtube.com/watch?v=KJgsSFOSQv0", + "type": "video" + }, + { + "title": "Learn C Programming with Dr. Chuck", + "url": "https://www.youtube.com/watch?v=j-_s8f5K30I", + "type": "video" + } + ] + }, + "RlKZzs44biQPgxD0tK1qx": { + "title": "Python", + "description": "Python is a well known programming language which is both a strongly typed and a dynamically typed language. 
Being an interpreted language, code is executed as soon as it is written and the Python syntax allows for writing code in functional, procedural or object-oriented programmatic ways.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Visit Dedicated Python Roadmap", + "url": "/python", + "type": "article" + }, + { + "title": "Python Website", + "url": "https://www.python.org/", + "type": "article" + }, + { + "title": "Python Getting Started", + "url": "https://www.python.org/about/gettingstarted/", + "type": "article" + }, + { + "title": "Automate the Boring Stuff", + "url": "https://automatetheboringstuff.com/", + "type": "article" + }, + { + "title": "Python principles - Python basics", + "url": "https://pythonprinciples.com/", + "type": "article" + }, + { + "title": "W3Schools - Python Tutorial ", + "url": "https://www.w3schools.com/python/", + "type": "article" + }, + { + "title": "Python Crash Course", + "url": "https://ehmatthes.github.io/pcc/", + "type": "article" + }, + { + "title": "An Introduction to Python for Non-Programmers", + "url": "https://thenewstack.io/an-introduction-to-python-for-non-programmers/", + "type": "article" + }, + { + "title": "Getting Started with Python and InfluxDB", + "url": "https://thenewstack.io/getting-started-with-python-and-influxdb/", + "type": "article" + }, + { + "title": "Explore top posts about Python", + "url": "https://app.daily.dev/tags/python?ref=roadmapsh", + "type": "article" + } + ] + }, + "mWW88VnkqWgDz02qw5zU-": { + "title": "Go", + "description": "Go is an open source programming language supported by Google. 
Go can be used to write cloud services, CLI tools, used for API development, and much more.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Visit Dedicated Go Roadmap", + "url": "/golang", + "type": "article" + }, + { + "title": "A Tour of Go – Go Basics", + "url": "https://go.dev/tour/welcome/1", + "type": "article" + }, + { + "title": "Go Reference Documentation", + "url": "https://go.dev/doc/", + "type": "article" + }, + { + "title": "Go by Example - annotated example programs", + "url": "https://gobyexample.com/", + "type": "article" + }, + { + "title": "W3Schools Go Tutorial ", + "url": "https://www.w3schools.com/go/", + "type": "article" + }, + { + "title": "Making a RESTful JSON API in Go", + "url": "https://thenewstack.io/make-a-restful-json-api-go/", + "type": "article" + }, + { + "title": "Go, the Programming Language of the Cloud", + "url": "https://thenewstack.io/go-the-programming-language-of-the-cloud/", + "type": "article" + }, + { + "title": "Explore top posts about Golang", + "url": "https://app.daily.dev/tags/golang?ref=roadmapsh", + "type": "article" + } + ] + }, + "jHKCLfLml8oZyj4829gx0": { + "title": "C#", + "description": "C# (pronounced \"C sharp\") is a general purpose programming language made by Microsoft. 
It is used to perform different tasks and can be used to create web apps, games, mobile apps, etc.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "C# Learning Path", + "url": "https://docs.microsoft.com/en-us/learn/paths/csharp-first-steps/?WT.mc_id=dotnet-35129-website", + "type": "article" + }, + { + "title": "C# on W3 schools", + "url": "https://www.w3schools.com/cs/index.php", + "type": "article" + }, + { + "title": "Introduction to C#", + "url": "https://docs.microsoft.com/en-us/shows/CSharp-101/?WT.mc_id=Educationalcsharp-c9-scottha", + "type": "article" + }, + { + "title": "Explore top posts about C# Programming", + "url": "https://app.daily.dev/tags/csharp?ref=roadmapsh", + "type": "article" + }, + { + "title": "C# tutorials", + "url": "https://www.youtube.com/watch?v=gfkTfcpWqAY&list=PLTjRvDozrdlz3_FPXwb6lX_HoGXa09Yef", + "type": "video" + } + ] + }, + "1bDjg-KBDKRmE6f1MWY8l": { + "title": "Rust", + "description": "Rust is a modern systems programming language focusing on safety, speed, and concurrency. It accomplishes these goals by being memory safe without using garbage collection.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "The Rust Programming Language - online book", + "url": "https://doc.rust-lang.org/book/", + "type": "article" + }, + { + "title": "Rust by Example - collection of runnable examples", + "url": "https://doc.rust-lang.org/stable/rust-by-example/index.html", + "type": "article" + }, + { + "title": "Rust vs. 
Go: Why They’re Better Together", + "url": "https://thenewstack.io/rust-vs-go-why-theyre-better-together/", + "type": "article" + }, + { + "title": "Rust by the Numbers: The Rust Programming Language in 2021", + "url": "https://thenewstack.io/rust-by-the-numbers-the-rust-programming-language-in-2021/", + "type": "article" + }, + { + "title": "Explore top posts about Rust", + "url": "https://app.daily.dev/tags/rust?ref=roadmapsh", + "type": "article" + } + ] + }, + "1kQJs-3Aw8Bi7d_Xh67zS": { + "title": "C++", + "description": "C++ is a powerful general-purpose programming language. It can be used to develop operating systems, browsers, games, and so on. C++ supports different ways of programming like procedural, object-oriented, functional, and so on. This makes C++ powerful as well as flexible.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Learn C++", + "url": "https://learncpp.com/", + "type": "article" + }, + { + "title": "Cpp Reference", + "url": "https://en.cppreference.com/", + "type": "article" + }, + { + "title": "CPlusPlus", + "url": "https://cplusplus.com/", + "type": "article" + }, + { + "title": "C++ TutorialsPoint", + "url": "https://www.tutorialspoint.com/cplusplus/index.htm", + "type": "article" + }, + { + "title": "W3Schools C++", + "url": "https://www.w3schools.com/cpp/default.asp", + "type": "article" + }, + { + "title": "C++ Roadmap", + "url": "https://roadmap.sh/cpp", + "type": "article" + }, + { + "title": "Explore top posts about C++ Programming", + "url": "https://app.daily.dev/tags/c++?ref=roadmapsh", + "type": "article" + } + ] + }, + "n4IsklfYJXFIyF1rGWuEa": { + "title": "C", + "description": "C is a general-purpose computer programming language. It was created in the 1970s by Dennis Ritchie, and remains very widely used and influential. 
By design, C's features cleanly reflect the capabilities of the targeted CPUs.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Learn C - W3Schools", + "url": "https://www.w3schools.com/c/", + "type": "article" + }, + { + "title": "Learn C - Tutorials Point", + "url": "https://www.tutorialspoint.com/cprogramming/index.htm", + "type": "article" + }, + { + "title": "Explore top posts about C Programming", + "url": "https://app.daily.dev/tags/c?ref=roadmapsh", + "type": "article" + }, + { + "title": "C Programming Tutorial for Beginners", + "url": "https://www.youtube.com/watch?v=KJgsSFOSQv0", + "type": "video" + }, + { + "title": "Learn C Programming with Dr. Chuck", + "url": "https://www.youtube.com/watch?v=j-_s8f5K30I", + "type": "video" + }, + { + "title": "C Programming Full Course (Bro Code)", + "url": "https://youtu.be/87SH2Cn0s9A", + "type": "video" + } + ] + }, + "1lQSUFrrIGq19nUnM92-I": { + "title": "Java", + "description": "Java is general-purpose language, primarily used for Internet-based applications. It was created in 1995 by James Gosling at Sun Microsystems and is one of the most popular options for backend developers.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Visit Dedicated Java Roadmap", + "url": "/java", + "type": "article" + }, + { + "title": "Java Website", + "url": "https://www.java.com/", + "type": "article" + }, + { + "title": "W3 Schools Tutorials", + "url": "https://www.w3schools.com/java/", + "type": "article" + }, + { + "title": "Explore top posts about Java", + "url": "https://app.daily.dev/tags/java?ref=roadmapsh", + "type": "article" + }, + { + "title": "Java Crash Course", + "url": "https://www.youtube.com/watch?v=eIrMbAQSU34", + "type": "video" + } + ] + }, + "NM7q5REW1sJgMhxJhPpLT": { + "title": "Data Structures", + "description": "As the name indicates itself, a **Data Structure** is a way of organizing the data in the **memory** so that it can be used efficiently. 
Some common data structures are array, linked list, stack, hashtable, queue, tree, heap, and graph.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Data Structures and Algorithms By Google", + "url": "https://techdevguide.withgoogle.com/paths/data-structures-and-algorithms/", + "type": "course" + }, + { + "title": "Data Structures and Algorithms", + "url": "https://www.javatpoint.com/data-structure-tutorial", + "type": "article" + }, + { + "title": "Explore top posts about Data Structures", + "url": "https://app.daily.dev/tags/data-structures?ref=roadmapsh", + "type": "article" + }, + { + "title": "Data Structures Illustrated", + "url": "https://www.youtube.com/watch?v=9rhT3P1MDHk&list=PLkZYeFmDuaN2-KUIv-mvbjfKszIGJ4FaY", + "type": "video" + }, + { + "title": "Data Structures playlist", + "url": "https://youtube.com/playlist?list=PLDV1Zeh2NRsB6SWUrDFW2RmDotAfPbeHu&si=_EEf7x58G6lUcMGG", + "type": "video" + } + ] + }, + "gr8BK6vq4AVwp_aUozZmf": { + "title": "Linked List", + "description": "Arrays store elements in contiguous memory locations, resulting in easily calculable addresses for the elements stored and this allows faster access to an element at a specific index. Linked lists are less rigid in their storage structure and elements are usually not stored in contiguous locations, hence they need to be stored with additional tags giving a reference to the next element. 
This difference in the data storage scheme decides which data structure would be more suitable for a given situation.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Singly Linked Lists", + "url": "https://www.coursera.org/lecture/data-structures/singly-linked-lists-kHhgK", + "type": "course" + }, + { + "title": "Core: Linked Lists vs Arrays", + "url": "https://www.coursera.org/lecture/data-structures-optimizing-performance/core-linked-lists-vs-arrays-rjBs9", + "type": "course" + }, + { + "title": "In the Real World: Linked Lists vs Arrays", + "url": "https://www.coursera.org/lecture/data-structures-optimizing-performance/in-the-real-world-lists-vs-arrays-QUaUd", + "type": "course" + }, + { + "title": "Doubly Linked Lists", + "url": "https://www.coursera.org/lecture/data-structures/doubly-linked-lists-jpGKD", + "type": "course" + }, + { + "title": "CS 61B Lecture 7: Linked Lists I", + "url": "https://archive.org/details/ucberkeley_webcast_htzJdKoEmO0", + "type": "article" + }, + { + "title": "CS 61B Lecture 7: Linked Lists II", + "url": "https://archive.org/details/ucberkeley_webcast_-c4I3gFYe3w", + "type": "article" + }, + { + "title": "Linked List Data Structure | Illustrated Data Structures", + "url": "https://www.youtube.com/watch?v=odW9FU8jPRQ", + "type": "video" + }, + { + "title": "Linked Lists in 4 minutes", + "url": "https://www.youtube.com/watch?v=F8AbOfQwl1c", + "type": "video" + }, + { + "title": "Why you should avoid Linked Lists?", + "url": "https://www.youtube.com/watch?v=YQs6IC-vgmo", + "type": "video" + } + ] + }, + "hJB5gO9tosRlC4UmdSNzl": { + "title": "Stack", + "description": "Stack is a linear collection of items where items are inserted and removed in a particular order. Stack is also called a LIFO Data Structure because it follows the \"Last In First Out\" principle i.e. 
the item that is inserted in the last is the one that is taken out first.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Stack Data Structure", + "url": "https://www.coursera.org/lecture/data-structures/stacks-UdKzQ", + "type": "course" + }, + { + "title": "Stack Data Structure | Illustrated Data Structures", + "url": "https://www.youtube.com/watch?v=I5lq6sCuABE", + "type": "video" + }, + { + "title": "Stack in 3 minutes", + "url": "https://www.youtube.com/watch?v=KcT3aVgrrpU", + "type": "video" + } + ] + }, + "JI990pDX2jjNe6IH_Y_t0": { + "title": "Queue", + "description": "Queue is a linear collection of items where items are inserted and removed in a particular order. The queue is also called a FIFO Data Structure because it follows the \"First In, First Out\" principle i.e., the item that is inserted in the first is the one that is taken out first.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Queues - Coursera", + "url": "https://www.coursera.org/lecture/data-structures/queues-EShpq", + "type": "course" + }, + { + "title": "Circular Buffer - Wikipedia", + "url": "https://en.wikipedia.org/wiki/Circular_buffer", + "type": "article" + }, + { + "title": "Queue Data Structure | Illustrated Data Structures", + "url": "https://www.youtube.com/watch?v=mDCi1lXd9hc", + "type": "video" + }, + { + "title": "Queue in 3 Minutes", + "url": "https://www.youtube.com/watch?v=D6gu-_tmEpQ", + "type": "video" + } + ] + }, + "G2dN2FO0SN_I-5AhO_EUk": { + "title": "Hash Table", + "description": "Hash Table, Map, HashMap, Dictionary or Associative are all the names of the same data structure. 
It is one of the most commonly used data structures.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Hash Table | Illustrated Data Structures", + "url": "https://www.youtube.com/watch?v=jalSiaIi8j4", + "type": "video" + }, + { + "title": "Hash Table in 4 Minutes", + "url": "https://youtu.be/knV86FlSXJ8", + "type": "video" + }, + { + "title": "Hashing with Chaining", + "url": "https://www.youtube.com/watch?v=0M_kIqhwbFo&list=PLUl4u3cNGP61Oq3tWYp6V_F-5jb5L2iHb&index=9", + "type": "video" + }, + { + "title": "Table Doubling, Karp-Rabin", + "url": "https://www.youtube.com/watch?v=BRO7mVIFt08&list=PLUl4u3cNGP61Oq3tWYp6V_F-5jb5L2iHb&index=10", + "type": "video" + }, + { + "title": "Open Addressing, Cryptographic Hashing", + "url": "https://www.youtube.com/watch?v=rvdJDijO2Ro&list=PLUl4u3cNGP61Oq3tWYp6V_F-5jb5L2iHb&index=11", + "type": "video" + }, + { + "title": "PyCon 2010: The Mighty Dictionary", + "url": "https://www.youtube.com/watch?v=C4Kc8xzcA68", + "type": "video" + }, + { + "title": "PyCon 2017: The Dictionary Even Mightier", + "url": "https://www.youtube.com/watch?v=66P5FMkWoVU", + "type": "video" + }, + { + "title": "(Advanced) Randomization: Universal & Perfect Hashing", + "url": "https://www.youtube.com/watch?v=z0lJ2k0sl1g&list=PLUl4u3cNGP6317WaSNfmCvGym2ucw3oGp&index=11", + "type": "video" + }, + { + "title": "(Advanced) Perfect hashing", + "url": "https://www.youtube.com/watch?v=N0COwN14gt0&list=PL2B4EEwhKD-NbwZ4ezj7gyc_3yNrojKM9&index=4", + "type": "video" + } + ] + }, + "TwW6SO2IXqkxJXVjLzdwU": { + "title": "Array", + "description": "Arrays store elements in contiguous memory locations, resulting in easily calculable addresses for the elements stored and this allows faster access to an element at a specific index.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Array Data Structure - Coursera", + "url": "https://www.coursera.org/lecture/data-structures/arrays-OsBSF", + "type": "course" + }, + { 
+ "title": "Dynamic Arrays - Coursera", + "url": "https://www.coursera.org/lecture/data-structures/dynamic-arrays-EwbnV", + "type": "course" + }, + { + "title": "UC Berkeley CS61B - Linear and Multi-Dim Arrays (Start watching from 15m 32s)", + "url": "https://archive.org/details/ucberkeley_webcast_Wp8oiO_CZZE", + "type": "article" + }, + { + "title": "Array Data Structure | Illustrated Data Structures", + "url": "https://www.youtube.com/watch?v=QJNwK2uJyGs", + "type": "video" + }, + { + "title": "Dynamic and Static Arrays", + "url": "https://www.youtube.com/watch?v=PEnFFiQe1pM&list=PLDV1Zeh2NRsB6SWUrDFW2RmDotAfPbeHu&index=6", + "type": "video" + }, + { + "title": "Dynamic Array Code", + "url": "https://www.youtube.com/watch?v=tvw4v7FEF1w&list=PLDV1Zeh2NRsB6SWUrDFW2RmDotAfPbeHu&index=5", + "type": "video" + }, + { + "title": "Jagged Arrays", + "url": "https://www.youtube.com/watch?v=1jtrQqYpt7g", + "type": "video" + } + ] + }, + "_eWqiWUmOj0zUo_Ix3j1O": { + "title": "Tree", + "description": "A tree is non-linear and a hierarchical data structure consisting of a collection of nodes such that each node of the tree stores a value and a list of references to other nodes (the “children”).\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Tree | Illustrated Data Structures", + "url": "https://www.youtube.com/watch?v=S2W3SXGPVyU", + "type": "video" + } + ] + }, + "G9dnegZ6zNvxrCZ3O_b_z": { + "title": "Binary Tree", + "description": "A binary tree is a tree data structure in which each node has at most two children, which are referred to as the left child and the right child.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Explore top posts about Binary Tree", + "url": "https://app.daily.dev/tags/binary-tree?ref=roadmapsh", + "type": "article" + }, + { + "title": "Binary Trees - Part 1", + "url": "https://www.youtube.com/watch?v=76dhtgZt38A&list=PLUl4u3cNGP63EdVPNLG3ToM6LaEUuStEY&index=9", + "type": "video" + }, + 
{ + "title": "Binary Trees - Part 2", + "url": "https://www.youtube.com/watch?v=U1JYwHcFfso&list=PLUl4u3cNGP63EdVPNLG3ToM6LaEUuStEY&index=10", + "type": "video" + } + ] + }, + "et7l85gnxjvzD2tDyHT_T": { + "title": "Binary Search Tree", + "description": "A binary search tree, also called an ordered or sorted binary tree, is a rooted binary tree data structure with the key of each internal node being greater than all the keys in the respective node's left subtree and less than the ones in its right subtree.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Binary Search Trees - Coursera", + "url": "https://www.coursera.org/learn/data-structures/lecture/E7cXP/introduction", + "type": "course" + }, + { + "title": "Explore top posts about General Programming", + "url": "https://app.daily.dev/tags/general-programming?ref=roadmapsh", + "type": "article" + }, + { + "title": "Tree | Illustrated Data Structures", + "url": "https://www.youtube.com/watch?v=S2W3SXGPVyU", + "type": "video" + }, + { + "title": "Binary Search Trees - MIT", + "url": "https://www.youtube.com/watch?v=76dhtgZt38A", + "type": "video" + }, + { + "title": "Binary Search Tree Implementation in C++", + "url": "https://www.youtube.com/watch?v=COZK7NATh4k&list=PL2_aWCzGMAwI3W_JlcBbtYTwiQSsOTa6P&index=29", + "type": "video" + }, + { + "title": "BST implementation - memory allocation in stack and heap", + "url": "https://www.youtube.com/watch?v=hWokyBoo0aI&list=PL2_aWCzGMAwI3W_JlcBbtYTwiQSsOTa6P&index=30", + "type": "video" + }, + { + "title": "Find Min and Max Element in Binary Search Tree", + "url": "https://www.youtube.com/watch?v=Ut90klNN264&list=PL2_aWCzGMAwI3W_JlcBbtYTwiQSsOTa6P&index=31", + "type": "video" + }, + { + "title": "Check if Given Tree is Binary Search Tree or Not", + "url": "https://www.youtube.com/watch?v=yEwSGhSsT0U&list=PL2_aWCzGMAwI3W_JlcBbtYTwiQSsOTa6P&index=36", + "type": "video" + }, + { + "title": "Delete an Element from Binary Search Tree", + "url": 
"https://www.youtube.com/watch?v=gcULXE7ViZw&list=PL2_aWCzGMAwI3W_JlcBbtYTwiQSsOTa6P&index=37", + "type": "video" + }, + { + "title": "Inorder Successor in a binary search tree", + "url": "https://www.youtube.com/watch?v=5cPbNCrdotA&list=PL2_aWCzGMAwI3W_JlcBbtYTwiQSsOTa6P&index=38", + "type": "video" + } + ] + }, + "Q8ZJNeTbc22Q08Mra-McY": { + "title": "Full Binary Tree", + "description": "A full Binary tree is a special type of binary tree in which every parent node/internal node has either two or no children. It is also known as a proper binary tree.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Full Binary Tree", + "url": "https://www.programiz.com/dsa/full-binary-tree", + "type": "article" + } + ] + }, + "Jx1WWCjm8jkfAGtHv15n1": { + "title": "Complete Binary Tree", + "description": "A complete binary tree is a special type of binary tree where all the levels of the tree are filled completely except the lowest level nodes which are filled from as left as possible.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Complete Binary Tree - Programiz", + "url": "https://www.programiz.com/dsa/complete-binary-tree", + "type": "article" + } + ] + }, + "ZaWGLvZyu4sIPn-zwVISN": { + "title": "Balanced Tree", + "description": "A balanced binary tree, also referred to as a height-balanced binary tree, is defined as a binary tree in which the height of the left and right subtree of any node differ by not more than 1.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Balanced Binary Tree", + "url": "https://www.programiz.com/dsa/balanced-binary-tree", + "type": "article" + } + ] + }, + "w4sxmZR1BjX6wlrZmuOlf": { + "title": "Unbalanced Tree", + "description": "An unbalanced binary tree is one that is not balanced.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Balanced Binary Tree", + "url": "https://www.programiz.com/dsa/balanced-binary-tree", + "type": 
"article" + } + ] + }, + "vQm046o6ozcvLoqg9L6eL": { + "title": "Graph", + "description": "Graphs in data structures are non-linear data structures made up of a finite number of nodes or vertices and the edges that connect them. Graphs in data structures are used to address real-world problems in which it represents the problem area as a network like telephone networks, circuit networks, and social networks.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Graph Data Structure", + "url": "https://www.simplilearn.com/tutorials/data-structure-tutorial/graphs-in-data-structure", + "type": "article" + }, + { + "title": "Graph Data Structure | Illustrated Data Structures", + "url": "https://www.youtube.com/watch?v=0sQE8zKhad0", + "type": "video" + }, + { + "title": "CSE373 2020 - Lecture 10 - Graph Data Structures", + "url": "https://www.youtube.com/watch?v=Sjk0xqWWPCc&list=PLOtl7M3yp-DX6ic0HGT0PUX_wiNmkWkXx&index=10", + "type": "video" + }, + { + "title": "CSE373 2020 - Lecture 11 - Graph Traversal", + "url": "https://www.youtube.com/watch?v=ZTwjXj81NVY&list=PLOtl7M3yp-DX6ic0HGT0PUX_wiNmkWkXx&index=11", + "type": "video" + }, + { + "title": "CSE373 2020 - Lecture 12 - Depth First Search", + "url": "https://www.youtube.com/watch?v=KyordYB3BOs&list=PLOtl7M3yp-DX6ic0HGT0PUX_wiNmkWkXx&index=12", + "type": "video" + }, + { + "title": "CSE373 2020 - Lecture 13 - Minimum Spanning Trees", + "url": "https://www.youtube.com/watch?v=oolm2VnJUKw&list=PLOtl7M3yp-DX6ic0HGT0PUX_wiNmkWkXx&index=13", + "type": "video" + }, + { + "title": "CSE373 2020 - Lecture 14 - Minimum Spanning Trees (cont)", + "url": "https://www.youtube.com/watch?v=RktgPx0MarY&list=PLOtl7M3yp-DX6ic0HGT0PUX_wiNmkWkXx&index=14", + "type": "video" + }, + { + "title": "CSE373 2020 - Lecture 15 - Graph Algorithms (cont 2)", + "url": "https://www.youtube.com/watch?v=MUe5DXRhyAo&list=PLOtl7M3yp-DX6ic0HGT0PUX_wiNmkWkXx&index=15", + "type": "video" + }, + { + "title": "6.006 Single-Source 
Shortest Paths Problem", + "url": "https://www.youtube.com/watch?v=Aa2sqUhIn-E&index=15&list=PLUl4u3cNGP61Oq3tWYp6V_F-5jb5L2iHb", + "type": "video" + } + ] + }, + "6Jy8SXHuYA08h9iLjKFWR": { + "title": "Directed Graph", + "description": "A directed graph is a graph, i.e., a set of objects (called vertices or nodes) that are connected together, where all the edges are directed from one vertex to another. A directed graph is sometimes called a digraph or a directed network. In contrast, a graph where the edges are bidirectional is called an undirected graph.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Directed Graph", + "url": "https://en.wikipedia.org/wiki/Directed_graph", + "type": "article" + } + ] + }, + "QGx8it2N_85PiPVjpTGK_": { + "title": "Undirected Graph", + "description": "An undirected graph is a graph, i.e., a set of objects (called vertices or nodes) that are connected together, where all the edges are bidirectional. An undirected graph is sometimes called an undirected network. In contrast, a graph where the edges point in a direction is called a directed graph.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Undirected Graph", + "url": "https://mathinsight.org/definition/undirected_graph", + "type": "article" + } + ] + }, + "L4xtWOdqGUf4SbJkoOsNM": { + "title": "Spanning Tree", + "description": "A spanning tree is a subset of Graph G, which has all the vertices covered with minimum possible number of edges. 
Hence, a spanning tree does not have cycles and it cannot be disconnected.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Spanning Tree", + "url": "https://www.tutorialspoint.com/data_structures_algorithms/spanning_tree.htm", + "type": "article" + }, + { + "title": "CSE373 2020 - Lecture 13 - Minimum Spanning Trees", + "url": "https://www.youtube.com/watch?v=oolm2VnJUKw&list=PLOtl7M3yp-DX6ic0HGT0PUX_wiNmkWkXx&index=13", + "type": "video" + }, + { + "title": "CSE373 2020 - Lecture 14 - Minimum Spanning Trees (cont)", + "url": "https://www.youtube.com/watch?v=RktgPx0MarY&list=PLOtl7M3yp-DX6ic0HGT0PUX_wiNmkWkXx&index=14", + "type": "video" + }, + { + "title": "Greedy Algorithms: Minimum Spanning Tree", + "url": "https://www.youtube.com/watch?v=tKwnms5iRBU&index=16&list=PLUl4u3cNGP6317WaSNfmCvGym2ucw3oGp", + "type": "video" + } + ] + }, + "HZ1kk0TQ13FLC9t13BZl5": { + "title": "Adjacency Matrix", + "description": "", + "links": [] + }, + "rTnKJcPniUtqvfOyC88N0": { + "title": "Adjacency List", + "description": "A graph can either be represented as an adjacency matrix or an adjacency list.\n\nThe adjacency matrix is a 2D array of size `V x V` where `V` is the number of vertices in a graph. Let the 2D array be `adj[][]`, a slot `adj[i][j] = 1` indicates that there is an edge from vertex `i` to vertex `j`.\n\nAdjacency list is an array of vectors. Size of the array is equal to the number of vertices. Let the array be `array[]`. An entry `array[i]` represents the list of vertices adjacent to the ith vertex. This representation can also be used to represent a weighted graph. 
The weights of edges can be represented as lists of pairs.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Adjacency Matrix - Graph Representation", + "url": "https://www.programiz.com/dsa/graph-adjacency-matrix", + "type": "article" + }, + { + "title": "Adjacency List - Graph Representation", + "url": "https://www.programiz.com/dsa/graph-adjacency-list", + "type": "article" + } + ] + }, + "pw3ZCC3HKU7D5SQwte4vE": { + "title": "Heap", + "description": "Heap is a tree-based data structure that follows the properties of a complete binary tree and is either a Min Heap or a Max Heap.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Priority Queue - Introduction", + "url": "https://www.coursera.org/lecture/data-structures/introduction-2OpTs", + "type": "course" + }, + { + "title": "CS 61B Lecture 24: Priority Queues", + "url": "https://archive.org/details/ucberkeley_webcast_yIUFT6AKBGE", + "type": "article" + }, + { + "title": "Heap | Illustrated Data Structures", + "url": "https://www.youtube.com/watch?v=F_r0sJ1RqWk", + "type": "video" + }, + { + "title": "Heaps and Heap Sort", + "url": "https://www.youtube.com/watch?v=B7hVxCmfPtM&list=PLUl4u3cNGP61Oq3tWYp6V_F-5jb5L2iHb&index=5", + "type": "video" + } + ] + }, + "UpBrpmrUwYUB9eesNxCq5": { + "title": "Asymptotic Notation", + "description": "The efficiency of an algorithm depends on the amount of time, storage and other resources required to execute the algorithm. The efficiency is measured with the help of asymptotic notations.\n\nAn algorithm may not have the same performance for different types of inputs. 
With the increase in the input size, the performance will change.\n\nThe study of change in performance of the algorithm with the change in the order of the input size is defined as asymptotic analysis.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Asymptotic Analysis: Big-O Notation and More", + "url": "https://www.programiz.com/dsa/asymptotic-notations", + "type": "article" + }, + { + "title": "CS 61B Lecture 19: Asymptotic Analysis", + "url": "https://archive.org/details/ucberkeley_webcast_VIS4YDpuP98", + "type": "article" + }, + { + "title": "Big-O Cheat Sheet", + "url": "https://www.bigocheatsheet.com/", + "type": "article" + }, + { + "title": "Big O Notation — Calculating Time Complexity", + "url": "https://www.youtube.com/watch?v=Z0bH0cMY0E8", + "type": "video" + }, + { + "title": "Big O Notation in 5 Minutes", + "url": "https://www.youtube.com/watch?v=__vX2sjlpXU", + "type": "video" + }, + { + "title": "Asymptotic Notation - CS50", + "url": "https://www.youtube.com/watch?v=iOq5kSKqeR4", + "type": "video" + } + ] + }, + "c-NrTtJuNihbHzyPEOKTW": { + "title": "Big O", + "description": "Big O Notation describes, how well an algorithm scales with the input size. It is used to describe the worst case scenario of an algorithm. 
It is used to compare algorithms and to determine which algorithm is better.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "CS 61B Lecture 19: Asymptotic Analysis", + "url": "https://archive.org/details/ucberkeley_webcast_VIS4YDpuP98", + "type": "article" + }, + { + "title": "Big O Notation — Calculating Time Complexity", + "url": "https://www.youtube.com/watch?v=Z0bH0cMY0E8", + "type": "video" + }, + { + "title": "Big O Notations", + "url": "https://www.youtube.com/watch?v=V6mKVRU1evU", + "type": "video" + }, + { + "title": "Big Oh Notation (and Omega and Theta)", + "url": "https://www.youtube.com/watch?v=ei-A_wy5Yxw&list=PL1BaGV1cIH4UhkL8a9bJGG356covJ76qN&index=3", + "type": "video" + } + ] + }, + "ThLpVZQIJ4diY5m0dik8m": { + "title": "Big-Theta", + "description": "While Big O Notation refers to the upper bound of a function, Big Theta Notation refers to the exact bound of a function. Big Theta Notation is used to describe the exact growth rate of a function. It is denoted by the symbol Θ.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Big Oh Notation (and Omega and Theta)", + "url": "https://www.youtube.com/watch?v=ei-A_wy5Yxw&list=PL1BaGV1cIH4UhkL8a9bJGG356covJ76qN&index=3", + "type": "video" + }, + { + "title": "Asymptotic Notation - CS50", + "url": "https://www.youtube.com/watch?v=iOq5kSKqeR4", + "type": "video" + } + ] + }, + "X33735aeAVSlJ6yv9GS-h": { + "title": "Big Omega", + "description": "Big Omega notation is used to describe the lower bound of a function. It is the opposite of Big O notation. 
While Big O is used to describe the worst case scenario of an algorithm, Big Omega is used to describe the best case scenario of an algorithm.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Big Oh Notation (and Omega and Theta)", + "url": "https://www.youtube.com/watch?v=ei-A_wy5Yxw&list=PL1BaGV1cIH4UhkL8a9bJGG356covJ76qN&index=3", + "type": "video" + }, + { + "title": "Asymptotic Notation - CS50", + "url": "https://www.youtube.com/watch?v=iOq5kSKqeR4", + "type": "video" + } + ] + }, + "3F_QBv_sU39ehOxpurF88": { + "title": "Constant", + "description": "Constant time algorithms are the simplest and most efficient algorithms. They are algorithms that always take the same amount of time to run, regardless of the size of the input. This is the best case scenario for an algorithm, and is the goal of all algorithms.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Big O Notation — Calculating Time Complexity", + "url": "https://www.youtube.com/watch?v=Z0bH0cMY0E8", + "type": "video" + }, + { + "title": "Big O Notations", + "url": "https://www.youtube.com/watch?v=V6mKVRU1evU", + "type": "video" + } + ] + }, + "nnlMNkQn1HU4U9yPbV9kQ": { + "title": "Logarithmic", + "description": "Logarithmic complexity algorithms are the second fastest algorithms. They are faster than linear algorithms, but slower than constant algorithms.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Big O Notation — Calculating Time Complexity", + "url": "https://www.youtube.com/watch?v=Z0bH0cMY0E8", + "type": "video" + }, + { + "title": "Big O Notations", + "url": "https://www.youtube.com/watch?v=V6mKVRU1evU", + "type": "video" + } + ] + }, + "jymhjv8GiFALQpox6aZeu": { + "title": "Linear", + "description": "Linear algorithms are algorithms that have a runtime that is directly proportional to the size of the input. This means that the runtime of the algorithm will increase linearly with the size of the input. 
For example, if the input size is 10, the runtime will be 10 times the runtime of the algorithm when the input size is 1. If the input size is 100, the runtime will be 100 times the runtime of the algorithm when the input size is 1.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Big O Notation — Calculating Time Complexity", + "url": "https://www.youtube.com/watch?v=Z0bH0cMY0E8", + "type": "video" + }, + { + "title": "Big O Notations", + "url": "https://www.youtube.com/watch?v=V6mKVRU1evU", + "type": "video" + } + ] + }, + "sVFvpsAO1_ZH9aliEj9aF": { + "title": "Polynomial", + "description": "Polynomial algorithms are algorithms that have a runtime that is a polynomial function of the input size. This means that the runtime is a function of the form `n^k` where `k` is a constant. For example, the runtime of the following algorithm is `n^2`:\n\n def polynomial_algorithm(n):\n for i in range(n):\n for j in range(n):\n print(i, j)\n \n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Big O Notation — Calculating Time Complexity", + "url": "https://www.youtube.com/watch?v=Z0bH0cMY0E8", + "type": "video" + }, + { + "title": "Big O Notations", + "url": "https://www.youtube.com/watch?v=V6mKVRU1evU", + "type": "video" + } + ] + }, + "5mMLr6tWbiSZuox1lx0_B": { + "title": "Exponential", + "description": "Exponential algorithms are those that grow at a rate of 2^n. This means that for each additional input, the algorithm will take twice as long to run. The following function is an example of an exponential algorithm:\n\n def exponential(n):\n if n == 0:\n return 1\n return exponential(n - 1) + exponential(n - 1)\n \n\nAs you can see, the algorithm's runtime grows exponentially. 
For each additional input, the algorithm will take twice as long to run.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Big O Notation — Calculating Time Complexity", + "url": "https://www.youtube.com/watch?v=Z0bH0cMY0E8", + "type": "video" + }, + { + "title": "Big O Notations", + "url": "https://www.youtube.com/watch?v=V6mKVRU1evU", + "type": "video" + } + ] + }, + "m0umGQNdvg95UiNpQZsQN": { + "title": "Factorial", + "description": "Factorial complexity algorithms have a runtime of `O(n!)`. This is the worst case scenario for an algorithm. Factorial complexity algorithms are very inefficient and should be avoided.\n\n def generate_permutations(s):\n # Base case: If the string length is 1, return a list containing the string\n if len(s) == 1:\n return [s]\n \n # Initialize the result list\n permutations = []\n \n # Recursively generate all permutations\n for i in range(len(s)):\n # Current character\n current_char = s[i]\n # Remaining characters\n remaining_chars = s[:i] + s[i+1:]\n # Generate all permutations of the remaining characters\n for perm in generate_permutations(remaining_chars):\n # Add the current character to the front of each generated permutation\n permutations.append(current_char + perm)\n \n return permutations", + "links": [] + }, + "7a6-AnBI-3tAU1dkOvPkx": { + "title": "Common Algorithms", + "description": "Here are some common algorithms that you should know. 
You can find more information about them in the [Algorithms](https://www.khanacademy.org/computing/computer-science/algorithms) section of the Computer Science course.\n\n* Sorting\n* Recursion\n* Searching\n* Cache Algorithms\n* Tree Algorithms\n* Graph Algorithms\n* Greedy Algorithms\n* Backtracking\n* Substring Search\n* Suffix Arrays\n* Dynamic Programming", + "links": [] + }, + "0_qNhprnXU3i8koW3XTdD": { + "title": "Tail Recursion", + "description": "Tail recursion is a special kind of recursion where the recursive call is the very last thing in the function. It's a function that does not do anything at all after recursing.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Tail Recursion", + "url": "https://www.coursera.org/lecture/programming-languages/tail-recursion-YZic1", + "type": "course" + }, + { + "title": "What is tail recursion? Why is it so bad?", + "url": "https://www.quora.com/What-is-tail-recursion-Why-is-it-so-bad", + "type": "article" + }, + { + "title": "Explore top posts about Recursion", + "url": "https://app.daily.dev/tags/recursion?ref=roadmapsh", + "type": "article" + } + ] + }, + "iLEOuQgUgU5Jc38iXDpp5": { + "title": "Non-Tail Recursion", + "description": "Tail recursion is when a function can directly return the result of a recursive call - there are no outstanding operations, and there is no need for the call stack frame to be preserved. 
So it can be translated to a “goto with arguments”, and the stack usage will be constant.\n\nIn “non-tail recursion”, there are outstanding operations after the recursive call, and the stack frame cannot be nuked.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is non-tail recursion?", + "url": "https://www.quora.com/What-is-non-tail-recursion", + "type": "article" + }, + { + "title": "Tail vs Non-Tail Recursion", + "url": "https://www.baeldung.com/cs/tail-vs-non-tail-recursion", + "type": "article" + }, + { + "title": "Explore top posts about Recursion", + "url": "https://app.daily.dev/tags/recursion?ref=roadmapsh", + "type": "article" + }, + { + "title": "Recursion (Solved Problem 1)", + "url": "https://www.youtube.com/watch?v=IVLUGb_gDDE", + "type": "video" + }, + { + "title": "Types of Recursion (Part 2) | Tail & Non-tail Recursion", + "url": "https://www.youtube.com/watch?v=HIt_GPuD7wk", + "type": "video" + } + ] + }, + "cEsboPT8nLvo0Zt0_oBq6": { + "title": "LRU Cache", + "description": "LRU cache is a cache that evicts the least recently used item first. It is a very common cache algorithm. It is used in many places, such as in the browser cache, the database cache, and the cache of the operating system.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "The Magic of LRU Cache (100 Days of Google Dev)", + "url": "https://www.youtube.com/watch?v=R5ON3iwx78M", + "type": "video" + }, + { + "title": "Implementing LRU - Udacity", + "url": "https://www.youtube.com/watch?v=bq6N7Ym81iI", + "type": "video" + }, + { + "title": "LeetCode | 146 LRU Cache | C++ | Explanation", + "url": "https://www.youtube.com/watch?v=8-FZRAjR7qU", + "type": "video" + } + ] + }, + "hna0HLu0l4NTNNpyGqlW5": { + "title": "MFU Cache", + "description": "MFU Cache is another cache algorithm. 
The difference is that instead of deleting the least frequently used entry, the MFU Cache deletes the most frequently used entry.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Comparison of MFU and LRU page replacement algorithms", + "url": "https://stackoverflow.com/questions/13597246/comparison-of-mfu-and-lru-page-replacement-algorithms", + "type": "article" + }, + { + "title": "Why does cache use Most Recently Used (MRU) algorithm as evict policy?", + "url": "https://stackoverflow.com/questions/5088128/why-does-cache-use-most-recently-used-mru-algorithm-as-evict-policy", + "type": "article" + } + ] + }, + "8kTg4O9MrTHRUrp6U-ctA": { + "title": "Binary Search", + "description": "Binary search is a search algorithm that finds the position of a target value within a sorted array. Binary search compares the target value to the middle element of the array. If they are not equal, the half in which the target cannot lie is eliminated and the search continues on the remaining half, again taking the middle element to compare to the target value, and repeating this until the target value is found. 
If the search ends with the remaining half being empty, the target is not in the array.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Binary Search - Khan Academy", + "url": "https://www.khanacademy.org/computing/computer-science/algorithms/binary-search/a/binary-search", + "type": "article" + }, + { + "title": "Binary Search", + "url": "https://www.topcoder.com/thrive/articles/Binary%20Search", + "type": "article" + }, + { + "title": "Explore top posts about Binary Search", + "url": "https://app.daily.dev/tags/binary-search?ref=roadmapsh", + "type": "article" + }, + { + "title": "Binary Search in 4 Minutes", + "url": "https://www.youtube.com/watch?v=fDKIpRe8GW4&feature=youtu.be", + "type": "video" + }, + { + "title": "Binary Search - CS50", + "url": "https://www.youtube.com/watch?v=D5SrAga1pno", + "type": "video" + } + ] + }, + "XwyqBK9rgP1MMcJrdIzm5": { + "title": "Linear Search", + "description": "Linear search is a very simple algorithm that is used to search for a value in an array. It sequentially checks each element of the array until a match is found or until all the elements have been searched.\n\nVisit the following resources to learn more:", + "links": [] + }, + "4wGBYFZpcdTt97WTbSazx": { + "title": "Bubble Sort", + "description": "Bubble sort is a simple sorting algorithm that repeatedly steps through the list, compares adjacent elements and swaps them if they are in the wrong order. 
The pass through the list is repeated until the list is sorted.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Bubble Sort", + "url": "https://www.youtube.com/watch?v=P00xJgWzz2c&index=1&list=PL89B61F78B552C1AB", + "type": "video" + }, + { + "title": "Analyzing Bubble Sort", + "url": "https://www.youtube.com/watch?v=ni_zk257Nqo&index=7&list=PL89B61F78B552C1AB", + "type": "video" + }, + { + "title": "Bubble sort in 2 minutes", + "url": "https://youtu.be/xli_FI7CuzA", + "type": "video" + } + ] + }, + "rOsHFXQm5jNz0RyZQ5ZGs": { + "title": "Selection Sort", + "description": "Selection sort is a sorting algorithm that selects the smallest unsorted item in the list and swaps it with index 0, then finds the next smallest and places it into index 1 and so on.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Selection Sort", + "url": "https://www.coursera.org/lecture/algorithms-part1/selection-UQxFT", + "type": "course" + }, + { + "title": "Selection Sort in 3 Minutes", + "url": "https://www.youtube.com/watch?v=g-PGLbMth_g", + "type": "video" + } + ] + }, + "ujDCW6zZE8dV_fpNf-oIl": { + "title": "Insertion Sort", + "description": "Insertion sort is a simple sorting algorithm that builds the final sorted array one item at a time by comparisons. 
It is much less efficient on large lists than more advanced algorithms such as quicksort, heapsort, or merge sort.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Insertion Sort Algorithm", + "url": "https://www.programiz.com/dsa/insertion-sort", + "type": "article" + }, + { + "title": "Insertion Sort — MIT", + "url": "https://www.youtube.com/watch?v=Kg4bqzAqRBM&list=PLUl4u3cNGP61Oq3tWYp6V_F-5jb5L2iHb&index=4", + "type": "video" + }, + { + "title": "Insertion Sort in 3 Minutes", + "url": "https://www.youtube.com/watch?v=JU767SDMDvA", + "type": "video" + } + ] + }, + "CovrRsBY-sYW_xSegcN6N": { + "title": "Heap Sort", + "description": "Heap sort is a comparison based sorting algorithm. It is similar to selection sort where we first find the maximum element and place the maximum element at the end. We repeat the same process for remaining element.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Heap Sort Algorithm", + "url": "https://www.coursera.org/lecture/data-structures/heap-sort-hSzMO", + "type": "course" + }, + { + "title": "Heap Sort Algorithm", + "url": "https://www.programiz.com/dsa/heap-sort", + "type": "article" + }, + { + "title": "Heap Sort in 4 Minutes", + "url": "https://www.youtube.com/watch?v=2DmK_H7IdTo", + "type": "video" + }, + { + "title": "Heap Sort Algorithm - MIT", + "url": "https://www.youtube.com/watch?v=odNJmw5TOEE&list=PLFDnELG9dpVxQCxuD-9BSy2E7BWY3t5Sm&t=3291s", + "type": "video" + }, + { + "title": "Lecture 4 - Heaps and Heap Sort", + "url": "https://www.youtube.com/watch?v=B7hVxCmfPtM&list=PLUl4u3cNGP61Oq3tWYp6V_F-5jb5L2iHb&index=5", + "type": "video" + } + ] + }, + "be0Q8GW9ZX1JbPrBvbMji": { + "title": "Quick Sort", + "description": "Quick Sort is a divide and conquer algorithm. It picks an element as pivot and partitions the given array around the picked pivot. 
There are many different versions of quickSort that pick pivot in different ways.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Quick Sort Implementation in Python", + "url": "https://github.com/jwasham/practice-python/blob/master/quick_sort/quick_sort.py", + "type": "opensource" + }, + { + "title": "Quick Sort Algorithm", + "url": "https://www.programiz.com/dsa/quick-sort", + "type": "article" + }, + { + "title": "Quick Sort Implementation in C", + "url": "http://www.cs.yale.edu/homes/aspnes/classes/223/examples/randomization/quick.c", + "type": "article" + }, + { + "title": "Quick Sort in 4 Minutes", + "url": "https://www.youtube.com/watch?v=Hoixgm4-P4M&feature=youtu.be", + "type": "video" + } + ] + }, + "Bh-whDbcCjl3m6mRm9w02": { + "title": "Merge Sort", + "description": "Merge sort is a divide and conquer algorithm. It divides the input array into two halves, calls itself for the two halves, and then merges the two sorted halves. The `merge()` function is used for merging two halves. 
The `merge(arr, l, m, r)` is key process that assumes that `arr[l..m]` and `arr[m+1..r]` are sorted and merges the two sorted sub-arrays into one.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Merge Sort Algorithm", + "url": "https://www.programiz.com/dsa/merge-sort", + "type": "article" + }, + { + "title": "Merge Sort in 3 Minutes", + "url": "https://www.youtube.com/watch?v=4VqmGXwpLqc", + "type": "video" + } + ] + }, + "psTN5N66xoFHFopgd5faW": { + "title": "Pre-Order Traversal", + "description": "Pre-order traversal is a tree traversal algorithm that visits the root node first, then recursively traverses the left subtree, followed by the right subtree.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Tree | Illustrated Data Structures", + "url": "https://www.youtube.com/watch?v=S2W3SXGPVyU", + "type": "video" + }, + { + "title": "Pre-order tree traversal in 3 minutes", + "url": "https://www.youtube.com/watch?v=1WxLM2hwL-U", + "type": "video" + } + ] + }, + "KTrgf14Q6rg2f0v4dqq2s": { + "title": "In-Order Traversal", + "description": "In-order traversal is a tree traversal algorithm that visits the left subtree, the root, and then the right subtree. This is the most common way to traverse a binary search tree. It is also used to create a sorted list of nodes in a binary search tree.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Tree | Illustrated Data Structures", + "url": "https://www.youtube.com/watch?v=S2W3SXGPVyU", + "type": "video" + } + ] + }, + "4_oGXwjzSY5cX9n7L4iAA": { + "title": "Post Order Traversal", + "description": "Post-order traversal is a type of tree traversal that visits the left subtree, then the right subtree, and finally the root node. 
This is the opposite of pre-order traversal, which visits the root node first, then the left subtree, and finally the right subtree.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Tree | Illustrated Data Structures", + "url": "https://www.youtube.com/watch?v=S2W3SXGPVyU", + "type": "video" + } + ] + }, + "Ke_ch4fnuKIiGAXUM_LPP": { + "title": "Breadth First Search", + "description": "Breadth first search is a graph traversal algorithm that starts at the root node and explores all of the neighbor nodes at the present depth prior to moving on to the nodes at the next depth level.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Breadth-First Search (BFS) and Depth-First Search (DFS) for Binary Trees in Java", + "url": "https://www.digitalocean.com/community/tutorials/breadth-first-search-depth-first-search-bfs-dfs", + "type": "article" + }, + { + "title": "BFS and DFS in a Binary Tree", + "url": "https://www.youtube.com/watch?v=uWL6FJhq5fM", + "type": "video" + }, + { + "title": "Breadth-first search in 4 minutes", + "url": "https://www.youtube.com/watch?v=HZ5YTanv5QE", + "type": "video" + } + ] + }, + "chzP5_t2gMi6MstxEzCi5": { + "title": "Depth First Search", + "description": "Depth first search is a graph traversal algorithm that starts at a root node and explores as far as possible along each branch before backtracking.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Breadth-First Search (BFS) and Depth-First Search (DFS) for Binary Trees in Java", + "url": "https://www.digitalocean.com/community/tutorials/breadth-first-search-depth-first-search-bfs-dfs", + "type": "article" + }, + { + "title": "BFS and DFS in a Binary Tree", + "url": "https://www.youtube.com/watch?v=uWL6FJhq5fM", + "type": "video" + }, + { + "title": "Depth First Search in 4 Minutes", + "url": "https://www.youtube.com/watch?v=Urx87-NMm6c", + "type": "video" + } + ] + }, + "vcV6TEM_PqXxtThdfMKGw": { + 
"title": "Breadth First Search", + "description": "Breadth first search for a graph is a way to traverse the graph. It starts at the root node and explores all of the neighbor nodes at the present depth prior to moving on to the nodes at the next depth level.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Graph Algorithms II - DFS, BFS, Kruskals Algorithm, Union Find Data Structure - Lecture 7", + "url": "https://www.youtube.com/watch?v=ufj5_bppBsA&list=PLFDnELG9dpVxQCxuD-9BSy2E7BWY3t5Sm&index=7", + "type": "video" + }, + { + "title": "Breadth-first search in 4 minutes", + "url": "https://www.youtube.com/watch?v=HZ5YTanv5QE", + "type": "video" + } + ] + }, + "Yf5gOIe7oiL19MjEVcpdw": { + "title": "Depth First Search", + "description": "Depth first search is a graph traversal algorithm that starts at a root node and explores as far as possible along each branch before backtracking.\n\nVisit the following resources to learn more:", + "links": [] + }, + "eY4nK2lPYsrR-a_8y2sao": { + "title": "Bellman Ford's Algorithm", + "description": "Bellman ford's algorithm is a graph algorithm that finds the shortest path from a source vertex to all other vertices in a graph. It is a dynamic programming algorithm that uses a bottom-up approach to find the shortest path. It is similar to Dijkstra's algorithm but it can handle negative weights. 
It is also similar to Floyd-Warshall's algorithm but it can handle negative weights and it is faster than Floyd-Warshall's algorithm.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Explore top posts about Data Science", + "url": "https://app.daily.dev/tags/data-science?ref=roadmapsh", + "type": "article" + }, + { + "title": "Bellman-Ford - MIT", + "url": "https://www.youtube.com/watch?v=f9cVS_URPc0&ab_channel=MITOpenCourseWare", + "type": "video" + }, + { + "title": "Bellman-Ford in 4 Minutes", + "url": "https://www.youtube.com/watch?v=9PHkk0UavIM", + "type": "video" + } + ] + }, + "oJstm-8c-4seWbIWcNgDv": { + "title": "Dijkstra's Algorithm", + "description": "Dijkstra's algorithm is a graph traversal algorithm that finds the shortest path between two nodes in a graph. It is a weighted graph algorithm, meaning that each edge in the graph has a weight associated with it. The algorithm works by finding the shortest path from the starting node to all other nodes in the graph. It does this by keeping track of the distance from the starting node to each node, and then choosing the node with the shortest distance from the starting node to visit next. 
It then updates the distance of each node from the starting node, and repeats the process until all nodes have been visited.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Explore top posts about Data Science", + "url": "https://app.daily.dev/tags/data-science?ref=roadmapsh", + "type": "article" + }, + { + "title": "Dijkstras Algorithm - MIT", + "url": "https://www.youtube.com/watch?v=NSHizBK9JD8&t=1731s&ab_channel=MITOpenCourseWare", + "type": "video" + }, + { + "title": "Dijkstras Algorithm in 3 Minutes", + "url": "https://www.youtube.com/watch?v=_lHSawdgXpI", + "type": "video" + } + ] + }, + "Yrk2PLUa-_FAPlhCkMl3e": { + "title": "A* Algorithm", + "description": "A\\* is a graph traversal algorithm that is used to find the shortest path between two nodes in a graph. It is a modified version of Dijkstra's algorithm that uses heuristics to find the shortest path. It is used in pathfinding and graph traversal.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "A* Search Algorithm - Wikipedia", + "url": "https://en.wikipedia.org/wiki/A*_search_algorithm", + "type": "article" + }, + { + "title": "Explore top posts about Data Science", + "url": "https://app.daily.dev/tags/data-science?ref=roadmapsh", + "type": "article" + }, + { + "title": "A* Pathfinding (E01: algorithm explanation)", + "url": "https://www.youtube.com/watch?v=-L-WgKMFuhE", + "type": "video" + } + ] + }, + "aBjBHpq_OajgQjxdCobXD": { + "title": "Finding Hamiltonian Paths", + "description": "Hamiltonian paths are paths that visit every node in a graph exactly once. They are named after the famous mathematician [Hamilton](https://en.wikipedia.org/wiki/William_Rowan_Hamilton). 
Hamiltonian paths are a special case of [Hamiltonian cycles](https://en.wikipedia.org/wiki/Hamiltonian_cycle), which are cycles that visit every node in a graph exactly once.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Hamiltonian Path", + "url": "https://www.hackerearth.com/practice/algorithms/graphs/hamiltonian-path/tutorial/", + "type": "article" + }, + { + "title": "Hamiltonian Paths and Cycles", + "url": "https://medium.com/stamatics-iit-kanpur/hamiltonian-paths-and-cycles-4f233bfbc53a", + "type": "article" + }, + { + "title": "Hamiltonian Paths - Lecture 7", + "url": "https://people.csail.mit.edu/virgi/6.s078/lecture17.pdf", + "type": "article" + } + ] + }, + "HlAmTY1udqDT2mTfBU9P-": { + "title": "Solving N Queen Problem", + "description": "N Queen Problem is a famous problem in Computer Science. It is a problem of placing n queens on an n x n chessboard such that no two queens attack each other. The problem is to find all possible solutions to the problem.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "N-Queens problem using backtracking in Java/C++", + "url": "https://www.digitalocean.com/community/tutorials/n-queens-problem-java-c-plus-plus", + "type": "article" + }, + { + "title": "6.1 N Queens Problem using Backtracking", + "url": "https://www.youtube.com/watch?v=xFv_Hl4B83A", + "type": "video" + } + ] + }, + "nODFT-i2YsxUVsgwWkIWn": { + "title": "Maze Solving Problem", + "description": "Maze solving problem is a classic problem in computer science. It is a problem where we have to find a path from a starting point to an end point in a maze. The maze is represented as a grid of cells. Each cell can be either a wall or a path. The path cells are connected to each other. The starting point and the end point are also given. The goal is to find a path from the starting point to the end point. The path can only be made up of path cells. 
The path cannot go through the wall cells.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Maze Solving Algorithms", + "url": "https://github.com/john-science/mazelib/blob/main/docs/MAZE_SOLVE_ALGOS.md", + "type": "opensource" + }, + { + "title": "Maze Solving Algorithms", + "url": "https://kidscodecs.com/maze-solving-algorithms/", + "type": "article" + }, + { + "title": "Maze Solving - Computerphile", + "url": "https://www.youtube.com/watch?v=rop0W4QDOUI", + "type": "video" + }, + { + "title": "Python Maze Solving Tutorial (Using Recursion)", + "url": "https://www.youtube.com/watch?v=XP94WC_XnZc", + "type": "video" + } + ] + }, + "NcKW_3vJWL_rVlDBB_Qjs": { + "title": "The Knight's Tour Problem", + "description": "Knight's Tour Problem is a problem where we have to find a path for a knight to visit all the cells of a chessboard without visiting any cell twice.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Knights Tour", + "url": "https://bradfieldcs.com/algos/graphs/knights-tour/", + "type": "article" + }, + { + "title": "Knights Tour Problem", + "url": "https://www.codesdope.com/course/algorithms-knights-tour-problem/", + "type": "article" + }, + { + "title": "Backtracking: The Knight’s Tour Problem", + "url": "https://www.codingninjas.com/codestudio/library/backtracking-the-knights-tour-problem", + "type": "article" + } + ] + }, + "l3X9UrEYTWs5kBXI1NNCf": { + "title": "Dijkstra's Algorithm", + "description": "Dijkstra's algorithm is a greedy algorithm that finds the shortest path between two nodes in a graph. 
It is a very common algorithm used in computer science and is used in many applications such as GPS navigation, network routing, and finding the shortest path in a maze.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Explore top posts about Data Science", + "url": "https://app.daily.dev/tags/data-science?ref=roadmapsh", + "type": "article" + }, + { + "title": "Dijkstras Algorithm in 3 Minutes", + "url": "https://www.youtube.com/watch?v=_lHSawdgXpI", + "type": "video" + }, + { + "title": "Dijkstras Algorithm - MIT", + "url": "https://www.youtube.com/watch?v=NSHizBK9JD8&t=1731s&ab_channel=MITOpenCourseWare", + "type": "video" + }, + { + "title": "Speeding Up Dijkstras Algorithm - MIT", + "url": "https://www.youtube.com/watch?v=CHvQ3q_gJ7E&list=PLUl4u3cNGP61Oq3tWYp6V_F-5jb5L2iHb&index=18", + "type": "video" + } + ] + }, + "QrcijPiVT3bgqfkF-6ssi": { + "title": "Huffman Coding", + "description": "Huffman coding is a lossless data compression algorithm. The idea is to assign variable-length codes to input characters, lengths of the assigned codes are based on the frequencies of corresponding characters. The most frequent character gets the smallest code and the least frequent character gets the largest code.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Huffman Coding", + "url": "https://www.programiz.com/dsa/huffman-coding", + "type": "article" + }, + { + "title": "Huffman Coding - Greedy Method", + "url": "https://www.youtube.com/watch?v=co4_ahEDCho", + "type": "video" + } + ] + }, + "Wqhg7E-lOz1oNcRXjUej8": { + "title": "Kruskal's Algorithm", + "description": "Kruskal's algorithm is a greedy algorithm that finds a minimum spanning tree for a connected weighted graph. It is a minimum spanning tree algorithm that takes a graph as input and finds the subset of the edges of that graph which form a tree that includes every vertex, where the total weight of all the edges in the tree is minimized. 
If the graph is not connected, then it finds a minimum spanning forest (a minimum spanning tree for each connected component).\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Explore top posts about Data Science", + "url": "https://app.daily.dev/tags/data-science?ref=roadmapsh", + "type": "article" + }, + { + "title": "Kruskals Algorithm in 2 Minutes", + "url": "https://www.youtube.com/watch?v=71UQH7Pr9kU", + "type": "video" + }, + { + "title": "Graph Algorithms II - DFS, BFS, Kruskals Algorithm, Union Find Data Structure - Lecture 7", + "url": "https://www.youtube.com/watch?v=ufj5_bppBsA&list=PLFDnELG9dpVxQCxuD-9BSy2E7BWY3t5Sm&index=8", + "type": "video" + } + ] + }, + "Hqw2eGtgfbVggqXBnIOdI": { + "title": "Ford-Fulkerson Algorithm", + "description": "Ford Fulkerson Algorithm is a greedy algorithm that is used to find the maximum flow in a flow network. It is also known as the Edmonds-Karp Algorithm.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Ford-Fulkerson Algorithm", + "url": "https://www.programiz.com/dsa/ford-fulkerson-algorithm", + "type": "article" + }, + { + "title": "Explore top posts about Data Science", + "url": "https://app.daily.dev/tags/data-science?ref=roadmapsh", + "type": "article" + }, + { + "title": "Ford-Fulkerson in 5 minutes", + "url": "https://www.youtube.com/watch?v=Tl90tNtKvxs", + "type": "video" + } + ] + }, + "znpFIKwMJlepKJ8dH3kvC": { + "title": "Prim's Algorithm", + "description": "Prim's algorithm is a greedy algorithm that finds a minimum spanning tree for a weighted undirected graph. A minimum spanning tree is a subset of the edges of a connected, edge-weighted undirected graph that connects all the vertices together, without any cycles and with the minimum possible total edge weight. 
A minimum spanning tree for a weighted undirected graph is also called a minimum weight spanning tree or minimum cost spanning tree.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Prims Algorithm", + "url": "https://www.programiz.com/dsa/prim-algorithm", + "type": "article" + }, + { + "title": "Explore top posts about Data Science", + "url": "https://app.daily.dev/tags/data-science?ref=roadmapsh", + "type": "article" + }, + { + "title": "Graph Algorithms I - Topological Sorting, Prims Algorithm - Lecture 6", + "url": "https://www.youtube.com/watch?v=i_AQT_XfvD8&list=PLFDnELG9dpVxQCxuD-9BSy2E7BWY3t5Sm&index=7", + "type": "video" + } + ] + }, + "l5--4opvZoc_seVn__5R3": { + "title": "Rabin-Karp Algorithm", + "description": "Rabin-Karp algorithm is a string searching algorithm that uses hashing to find any one of a set of pattern strings in a text. For strings of average length `n`, it performs in `O(n+m)` time with `O(m)` space, where `m` is the length of the pattern. 
It is often used in bioinformatics to search for DNA patterns.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Rabin Karps Algorithm", + "url": "https://www.coursera.org/lecture/data-structures/rabin-karps-algorithm-c0Qkw", + "type": "course" + }, + { + "title": "Optimization: Precomputation", + "url": "https://www.coursera.org/learn/data-structures/lecture/nYrc8/optimization-precomputation", + "type": "course" + }, + { + "title": "Optimization: Implementation and Analysis", + "url": "https://www.coursera.org/learn/data-structures/lecture/h4ZLc/optimization-implementation-and-analysis", + "type": "course" + }, + { + "title": "Explore top posts about Data Science", + "url": "https://app.daily.dev/tags/data-science?ref=roadmapsh", + "type": "article" + }, + { + "title": "Lecture 9: Table Doubling, Karp-Rabin", + "url": "https://www.youtube.com/watch?v=BRO7mVIFt08&list=PLUl4u3cNGP61Oq3tWYp6V_F-5jb5L2iHb&index=9", + "type": "video" + }, + { + "title": "Rolling Hashes, Amortized Analysis", + "url": "https://www.youtube.com/watch?v=w6nuXg0BISo&list=PLUl4u3cNGP61Oq3tWYp6V_F-5jb5L2iHb&index=33", + "type": "video" + } + ] + }, + "n11ujPI3of-52l2KW2CDQ": { + "title": "LFU Cache", + "description": "LFU Cache is a data structure that stores key-value pairs. It has a fixed size and when it is full, it removes the least frequently used key-value pair. It is a variation of the LRU Cache and is used in many applications such as caching web pages, caching database queries, and caching images.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "1117. Data Structure - LFU Cache", + "url": "https://jojozhuang.github.io/algorithm/data-structure-lfu-cache/", + "type": "article" + } + ] + }, + "NUWk-7IXmMU-4TT-oS8fF": { + "title": "String Search & Manipulations", + "description": "String search and manipulation is a very important topic in computer science. 
It is used in many different applications, such as searching or replacing a specific pattern, word or character in a string.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "String-searching algorithm", + "url": "https://en.wikipedia.org/wiki/String-searching_algorithm", + "type": "article" + } + ] + }, + "L4M5yFRKKR2axKXGcIA7d": { + "title": "Search Pattern in Text", + "description": "Searching pattern in text is a very common task in computer science. It is used in many applications like spell checkers, text editors, and many more.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Search Pattern in Text", + "url": "https://www.coursera.org/learn/data-structures/lecture/tAfHI/search-pattern-in-text", + "type": "course" + } + ] + }, + "Ld1TUNS8713coMNvwhNIJ": { + "title": "Suffix Arrays", + "description": "Suffix arrays are a data structure that allows us to quickly find all the suffixes of a string in lexicographical order. This is useful for many problems, such as finding the longest common substring between two strings, or finding the number of distinct substrings of a string.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Suffix Arrays - Coursera", + "url": "https://www.coursera.org/learn/algorithms-part2/lecture/TH18W/suffix-arrays", + "type": "course" + }, + { + "title": "Suffix array introduction", + "url": "https://www.youtube.com/watch?v=zqKlL3ZpTqs", + "type": "video" + }, + { + "title": "Advanced Data Structures: Suffix Arrays", + "url": "https://www.youtube.com/watch?v=IzMxbboPcqQ", + "type": "video" + }, + { + "title": "Suffix arrays: building", + "url": "https://www.youtube.com/watch?v=ZWlbhBjjwyA", + "type": "video" + } + ] + }, + "g3b0kt1fCAjjYyS2WelWi": { + "title": "Brute Force Search", + "description": "Brute force search is a simple algorithm that checks for a pattern in a string by comparing each character of the string with the first character of the 
pattern. If the first character matches, it then compares the next character of the string with the next character of the pattern and so on. If all the characters of the pattern match, then the pattern is found. If the first character does not match, then the algorithm compares the second character of the string with the first character of the pattern and so on.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Brute-Force Substring Search", + "url": "https://www.coursera.org/learn/algorithms-part2/lecture/2Kn5i/brute-force-substring-search", + "type": "course" + }, + { + "title": "A beginner guide to Brute Force Algorithm for substring search", + "url": "https://nulpointerexception.com/2019/02/10/a-beginner-guide-to-brute-force-algorithm-for-substring-search/", + "type": "article" + }, + { + "title": "Brute Force Algorithm in Cybersecurity and String Search", + "url": "https://www.baeldung.com/cs/brute-force-cybersecurity-string-search", + "type": "article" + } + ] + }, + "8abFKMfaV9H8F38I0DcMT": { + "title": "Knuth-Morris Pratt", + "description": "Knuth morris pratt is a string searching algorithm that uses a precomputed array to find the substring in a string. This array is known as the prefix function. The prefix function is the longest prefix that is also a suffix of a substring. The prefix function is used to skip the characters that are already matched. 
The algorithm is as follows:\n\n* Compute the prefix function of the substring.\n* Traverse through the string and substring simultaneously.\n* If the characters match, increment the index of both the string and substring.\n* If the characters don't match, increment the index of the string by the value of the prefix function at the index of the substring.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Knuth-Morris Pratt", + "url": "https://www.coursera.org/learn/algorithms-part2/lecture/TAtDr/knuth-morris-pratt", + "type": "course" + }, + { + "title": "The Knuth-Morris-Pratt (KMP) Algorithm", + "url": "https://www.javatpoint.com/daa-knuth-morris-pratt-algorithm", + "type": "article" + }, + { + "title": "9.1 Knuth-Morris-Pratt KMP String Matching Algorithm", + "url": "https://www.youtube.com/watch?v=V5-7GzOfADQ", + "type": "video" + } + ] + }, + "p6qub32jcaGloHXUDUrlG": { + "title": "Boyer-Moore", + "description": "Boyer Moore algorithm is a string searching algorithm that is used to find the index of a substring in a string. It is a very efficient algorithm that is used in many applications. It is used in text editors, compilers, and many other applications.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Boyer Moore Algorithm", + "url": "https://www.coursera.org/learn/algorithms-part2/lecture/CYxOT/boyer-moore", + "type": "course" + }, + { + "title": "The Boyer-Moore Algorithm", + "url": "https://www.javatpoint.com/daa-boyer-moore-algorithm", + "type": "article" + } + ] + }, + "Km5LvcJpyntrW2iA8h5e3": { + "title": "Rabin-Karp", + "description": "Rabin-Karp algorithm is a string searching algorithm that uses hashing to find any one of a set of pattern strings in a text. For strings of average length `n`, it performs in `O(n+m)` time with `O(m)` space, where `m` is the length of the pattern. 
It is often used in bioinformatics to search for DNA patterns.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Rabin Karps Algorithm", + "url": "https://www.coursera.org/lecture/data-structures/rabin-karps-algorithm-c0Qkw", + "type": "course" + }, + { + "title": "Optimization: Precomputation", + "url": "https://www.coursera.org/learn/data-structures/lecture/nYrc8/optimization-precomputation", + "type": "course" + }, + { + "title": "Optimization: Implementation and Analysis", + "url": "https://www.coursera.org/learn/data-structures/lecture/h4ZLc/optimization-implementation-and-analysis", + "type": "course" + }, + { + "title": "Lecture 9: Table Doubling, Karp-Rabin", + "url": "https://www.youtube.com/watch?v=BRO7mVIFt08&list=PLUl4u3cNGP61Oq3tWYp6V_F-5jb5L2iHb&index=9", + "type": "video" + }, + { + "title": "Rolling Hashes, Amortized Analysis", + "url": "https://www.youtube.com/watch?v=w6nuXg0BISo&list=PLUl4u3cNGP61Oq3tWYp6V_F-5jb5L2iHb&index=33", + "type": "video" + } + ] + }, + "E9hf1ux1KKGHvvAShm67w": { + "title": "Substring Search", + "description": "Substring search is the problem of finding a substring in a string. This is a very common problem in computer science, and there are many algorithms for solving it.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Introduction to Substring Search", + "url": "https://www.coursera.org/lecture/algorithms-part2/introduction-to-substring-search-n3ZpG", + "type": "course" + }, + { + "title": "What is the fastest substring search algorithm?", + "url": "https://stackoverflow.com/questions/3183582/what-is-the-fastest-substring-search-algorithm", + "type": "article" + }, + { + "title": "Substring Search - Exercises", + "url": "https://algs4.cs.princeton.edu/53substring/", + "type": "article" + } + ] + }, + "-bawIWfrYuAxy8cP-AGFS": { + "title": "Floating Point Math", + "description": "Floating point numbers are numbers that have a decimal point in them. 
They are used to represent real numbers. For example, 3.14 is a floating point number. 3 is not a floating point number because it does not have a decimal point in it.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Representation of Floating Point Numbers - 1", + "url": "https://www.youtube.com/watch?v=ji3SfClm8TU", + "type": "video" + }, + { + "title": "Why 0.1 + 0.2 != 0.3? | Floating Point Math", + "url": "https://www.youtube.com/watch?v=RIiq4tTt6rI", + "type": "video" + } + ] + }, + "vjMqqFsTxSjaiYxmliwLi": { + "title": "Endianness", + "description": "Endianness is the order in which bytes are stored in memory. The two most common types of endianness are big endian and little endian. Big endian stores the most significant byte first, while little endian stores the least significant byte first.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Big Endian vs Little Endian.mp4", + "url": "https://www.youtube.com/watch?v=JrNF0KRAlyo", + "type": "video" + }, + { + "title": "Endianness Explained With an Egg - Computerphile", + "url": "https://www.youtube.com/watch?v=NcaiHcBvDR4", + "type": "video" + } + ] + }, + "U8AgY6I-62mN5Ohg9MsG6": { + "title": "Big Endian", + "description": "Big endian is the most common type of endianness. In this type, the most significant byte is stored at the lowest memory address. This means that the most significant byte is stored first and the least significant byte is stored last.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Lecture 22. Big Endian and Little Endian", + "url": "https://www.youtube.com/watch?v=T1C9Kj_78ek", + "type": "video" + } + ] + }, + "rD_5n5bjiNMVC0cPhEVyG": { + "title": "Little Endian", + "description": "Little Endian is a way of storing data in memory. It is the opposite of Big Endian. In Little Endian, the least significant byte is stored first. 
In Big Endian, the most significant byte is stored first.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Big Endian vs Little Endian.mp4", + "url": "https://www.youtube.com/watch?v=JrNF0KRAlyo", + "type": "video" + }, + { + "title": "Endianness Explained With an Egg - Computerphile", + "url": "https://www.youtube.com/watch?v=NcaiHcBvDR4", + "type": "video" + } + ] + }, + "DMuMsEowpevTCALtziwTB": { + "title": "Common UML Diagrams", + "description": "UML is a standard way of visualizing a software system. It is a general-purpose, developmental, modeling language in the field of software engineering that is intended to provide a standard way to visualize the design of a system.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "UML Diagrams Full Course (Unified Modeling Language)", + "url": "https://www.youtube.com/watch?v=WnMQ8HlmeXc", + "type": "video" + } + ] + }, + "04hpfxI2x958tQI-0clsp": { + "title": "Unicode", + "description": "Unicode is a standard for encoding characters. It is a superset of ASCII, which means that ASCII is a subset of Unicode. Unicode is a 16-bit encoding, which means that it can encode 2^16 = 65536 characters. This is a lot more than ASCII, which can only encode 128 characters.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "How Unicode Works: What Every Developer Needs to Know About Strings and 🦄", + "url": "https://deliciousbrains.com/how-unicode-works/", + "type": "article" + }, + { + "title": "Characters, Symbols and the Unicode Miracle - Computerphile", + "url": "https://www.youtube.com/watch?v=MijmeoH9LT4", + "type": "video" + } + ] + }, + "hmwsNp60alS43kJg4A_Og": { + "title": "ASCII", + "description": "ASCII is a character encoding standard for electronic communication. It was developed from telegraph code and uses 7 bits to represent 128 different characters. 
The first 32 characters are non-printable control characters used to control devices like printers and typewriters. The remaining 96 characters are printable and include the letters of the English alphabet, numbers, punctuation, and various symbols.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Must Know about Character Encodings", + "url": "https://www.joelonsoftware.com/2003/10/08/the-absolute-minimum-every-software-developer-absolutely-positively-must-know-about-unicode-and-character-sets-no-excuses/", + "type": "article" + }, + { + "title": "Character Encoding", + "url": "https://cs.lmu.edu/~ray/notes/charenc/", + "type": "article" + } + ] + }, + "DtG9EJonWi4_2oQ4fWOKY": { + "title": "Character Encodings", + "description": "Character encodings are a way of representing characters as numbers. They are used to store and transmit text. The most common character encoding is ASCII, which is a 7-bit encoding. This means that each character is represented by a number between 0 and 127. The ASCII character set contains 128 characters, including letters, numbers, punctuation, and control characters. The ASCII character set is a subset of the Unicode character set, which is a 16-bit encoding. Unicode is a superset of ASCII, so ASCII characters can be represented by Unicode. Unicode is the most common character encoding used on the web.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Must Know about Character Encodings", + "url": "https://www.joelonsoftware.com/2003/10/08/the-absolute-minimum-every-software-developer-absolutely-positively-must-know-about-unicode-and-character-sets-no-excuses/", + "type": "article" + }, + { + "title": "Character Encoding", + "url": "https://cs.lmu.edu/~ray/notes/charenc/", + "type": "article" + } + ] + }, + "FZ8znJSz8PvQlnEyRLXmQ": { + "title": "Class Diagrams", + "description": "Class Diagrams are used to model the static structure of a system. 
They are used to show the classes, their attributes, operations (or methods), and the relationships between objects.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "UML Class Diagram Tutorial", + "url": "https://www.youtube.com/watch?v=UI6lqHOVHic", + "type": "video" + }, + { + "title": "UML Class Diagram Tutorial", + "url": "https://www.youtube.com/watch?v=3cmzqZzwNDM&list=PLfoY2ARMh0hC2FcJKP5voAKCpk6PZXSd5&index=2", + "type": "video" + } + ] + }, + "dVvYbrcaxHLpvtX6HbS7g": { + "title": "Usecase Diagrams", + "description": "Usecase diagrams are a type of diagram that are used to model the interactions between the **actors** and the **usecases** of the system.\n\nAn actor is a person or a system that interacts with the system. Actors are represented by a rectangle with the name of the actor written inside it.\n\nA usecase is a task that the system performs. Usecases are represented by an ellipse with the name of the usecase written inside it.\n\nA usecase diagram is a diagram that shows the actors and the usecases of the system. The diagram is represented by a rectangle that contains the name of the system inside it. The actors are represented by rectangles and the usecases are represented by ellipses.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "UML Use Case Diagram Tutorial", + "url": "https://www.lucidchart.com/pages/uml-use-case-diagram", + "type": "article" + }, + { + "title": "What is Use Case Diagram?", + "url": "https://www.visual-paradigm.com/guide/uml-unified-modeling-language/what-is-use-case-diagram/", + "type": "article" + }, + { + "title": "UML Use Case Diagram Tutorial", + "url": "https://www.youtube.com/watch?v=zid-MVo7M-E", + "type": "video" + } + ] + }, + "ptfRNiU0mC0Q5SLA_FWZu": { + "title": "Activity Diagrams", + "description": "Activity diagrams are used to model the flow of control in a system. 
They are used in conjunction with use case diagrams to model the behavior of the system for each use case. They are also used to model the behavior of a single class.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "UML Activity Diagram Tutorial", + "url": "https://www.lucidchart.com/pages/uml-activity-diagram", + "type": "article" + }, + { + "title": "What is Activity Diagram?", + "url": "https://www.visual-paradigm.com/guide/uml-unified-modeling-language/what-is-activity-diagram/", + "type": "article" + } + ] + }, + "X0CKM9LaJUrgI5gIdqFvU": { + "title": "Statemachine Diagrams", + "description": "State machine diagrams are used to show the different states an object can be in at a given time. The object can be in one and only one state at a given time. State machine diagrams are similar to activity diagrams, but they are more focused on the flow of an object's state rather than the flow of the object itself.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is State Machine Diagram?", + "url": "https://www.visual-paradigm.com/guide/uml-unified-modeling-language/what-is-state-machine-diagram/", + "type": "article" + }, + { + "title": "State Machine Diagram Tutorial", + "url": "https://www.lucidchart.com/pages/uml-state-machine-diagram", + "type": "article" + }, + { + "title": "State Machine Diagram", + "url": "https://www.sciencedirect.com/topics/computer-science/state-machine-diagram", + "type": "article" + } + ] + }, + "gaUT5K2xS-WQMrIA0Bkb_": { + "title": "Sequence Diagrams", + "description": "Sequence diagrams are a way to show how objects or systems interact with each other over time.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "How to Make a UML Sequence Diagram", + "url": "https://www.youtube.com/watch?v=pCK6prSq8aw", + "type": "video" + }, + { + "title": "Sequence Diagrams Tutorial", + "url": 
"https://www.youtube.com/watch?v=cxG-qWthxt4&list=PLfoY2ARMh0hBthB9VqsQzogSouTjzkMHe&index=2", + "type": "video" + } + ] + }, + "-De1hU2ONGwhQmUpsyrCO": { + "title": "Design Patterns", + "description": "Design patterns are solutions to common problems in software design. They are formalized best practices that the programmer can use to solve common problems when designing an application or system.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Design Patterns for Humans", + "url": "https://github.com/kamranahmedse/design-patterns-for-humans", + "type": "opensource" + }, + { + "title": "Design Patterns - Wikipedia", + "url": "https://en.wikipedia.org/wiki/Software_design_pattern", + "type": "article" + }, + { + "title": "Refactoring Guru - Design Patterns", + "url": "https://refactoring.guru/design-patterns/", + "type": "article" + }, + { + "title": "Explore top posts about Design Patterns", + "url": "https://app.daily.dev/tags/design-patterns?ref=roadmapsh", + "type": "article" + } + ] + }, + "iPN9mSyFwYZER5HSkj6oL": { + "title": "GoF Design Patterns", + "description": "Gang of Four (GoF) design patterns are a set of 23 design patterns that were first described in the book \"Design Patterns: Elements of Reusable Object-Oriented Software\" by Erich Gamma, Richard Helm, Ralph Johnson, and John Vlissides. The book is commonly referred to as the \"Gang of Four book\".\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Design Patterns for Humans", + "url": "https://github.com/kamranahmedse/design-patterns-for-humans", + "type": "opensource" + } + ] + }, + "BGhJNtszbYJtKyhqr2jax": { + "title": "Architectural Patterns", + "description": "Architectural patterns are a high-level design pattern that focuses on the overall structure of the system. They are similar to design patterns, but they are more concerned with the structure of the system. 
They are used to solve problems that are common to many software systems.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "10 Common Software Architectural Patterns in a nutshell", + "url": "https://towardsdatascience.com/10-common-software-architectural-patterns-in-a-nutshell-a0b47a1e9013", + "type": "article" + }, + { + "title": "Architectural Pattern - Wikipedia", + "url": "https://en.wikipedia.org/wiki/Architectural_pattern", + "type": "article" + } + ] + }, + "J7DKRklMtJ94Y-18Jo50r": { + "title": "Dependency Injection", + "description": "Dependency injection is a software design pattern that allows us to decouple the dependencies of a class from the class itself. This allows us to write more flexible and testable code.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Dependency Injection - StackOverflow", + "url": "https://stackoverflow.com/questions/130794/what-is-dependency-injection", + "type": "article" + }, + { + "title": "Explore top posts about Dependency Injection", + "url": "https://app.daily.dev/tags/dependency-injection?ref=roadmapsh", + "type": "article" + }, + { + "title": "What is Dependency Injection?", + "url": "https://www.youtube.com/watch?v=0yc2UANSDiw", + "type": "video" + } + ] + }, + "PpFQ0zmObZLvmBvqhTEMz": { + "title": "Null Object Pattern", + "description": "Null object pattern is a design pattern that is used to represent a null value with an object. It is a way to avoid null reference exceptions by providing a default object that does nothing. 
It is a way to provide a default behavior in case data is not available.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Design Patterns - Null Object Pattern", + "url": "https://www.tutorialspoint.com/design_pattern/null_object_pattern.htm", + "type": "article" + } + ] + }, + "_2Jddpz_cdwyeIgrg5qt_": { + "title": "Type Object Pattern", + "description": "Type object pattern is a creational design pattern that allows us to create a new object of a type without exposing the object creation logic to the client. It is used when we need to create a new object of a type, but we don't know which type we need to create until runtime. It is like a factory pattern, but instead of returning a new object of a type, it returns a new object of a type that is already created.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Type Object Pattern", + "url": "https://gameprogrammingpatterns.com/type-object.html", + "type": "article" + } + ] + }, + "cdNi0EYrQ5nsgNxFVnXL8": { + "title": "Basic Math Skills", + "description": "Math is a fundamental skill for computer science.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Computer Science 70, 001 - Spring 2015 - Discrete Mathematics and Probability Theory", + "url": "http://www.infocobuild.com/education/audio-video-courses/computer-science/cs70-spring2015-berkeley.html", + "type": "article" + }, + { + "title": "Discrete Mathematics By IIT Ropar NPTEL", + "url": "https://nptel.ac.in/courses/106/106/106106183/", + "type": "article" + }, + { + "title": "Explore top posts about Math", + "url": "https://app.daily.dev/tags/math?ref=roadmapsh", + "type": "article" + }, + { + "title": "Lec 1 | MIT 6.042J Mathematics for Computer Science, Fall 2010", + "url": "https://www.youtube.com/watch?v=L3LMbpZIKhQ&list=PLB7540DEDD482705B", + "type": "video" + }, + { + "title": "Integer Arithmetic, Karatsuba Multiplication", + "url": 
"https://www.youtube.com/watch?v=eCaXlAaN2uE&index=11&list=PLUl4u3cNGP61Oq3tWYp6V_F-5jb5L2iHb", + "type": "video" + }, + { + "title": "The Chinese Remainder Theorem (used in cryptography)", + "url": "https://www.youtube.com/watch?v=ru7mWZJlRQg", + "type": "video" + }, + { + "title": "Discrete Mathematics by Shai Simonson (19 videos)", + "url": "https://www.youtube.com/playlist?list=PLWX710qNZo_sNlSWRMVIh6kfTjolNaZ8t", + "type": "video" + }, + { + "title": "MIT 6.042J - Probability Introduction", + "url": "https://www.youtube.com/watch?v=SmFwFdESMHI&index=18&list=PLB7540DEDD482705B", + "type": "video" + }, + { + "title": "MIT 6.042J - Conditional Probability", + "url": "https://www.youtube.com/watch?v=E6FbvM-FGZ8&index=19&list=PLB7540DEDD482705B", + "type": "video" + }, + { + "title": "MIT 6.042J - Independence", + "url": "https://www.youtube.com/watch?v=l1BCv3qqW4A&index=20&list=PLB7540DEDD482705B", + "type": "video" + }, + { + "title": "MIT 6.042J - Random Variables", + "url": "https://www.youtube.com/watch?v=MOfhhFaQdjw&list=PLB7540DEDD482705B&index=21", + "type": "video" + }, + { + "title": "MIT 6.042J - Expectation I", + "url": "https://www.youtube.com/watch?v=gGlMSe7uEkA&index=22&list=PLB7540DEDD482705B", + "type": "video" + }, + { + "title": "MIT 6.042J - Expectation II", + "url": "https://www.youtube.com/watch?v=oI9fMUqgfxY&index=23&list=PLB7540DEDD482705B", + "type": "video" + }, + { + "title": "MIT 6.042J - Large Deviations", + "url": "https://www.youtube.com/watch?v=q4mwO2qS2z4&index=24&list=PLB7540DEDD482705B", + "type": "video" + }, + { + "title": "MIT 6.042J - Random Walks", + "url": "https://www.youtube.com/watch?v=56iFMY8QW2k&list=PLB7540DEDD482705B&index=25", + "type": "video" + } + ] + }, + "HZ3_xyphbjhBPwwQo_rHH": { + "title": "Probability", + "description": "Probability is the study of how likely an event is to occur. 
It is a measure of how certain we are that an event will happen.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Explore top posts about Statistics", + "url": "https://app.daily.dev/tags/statistics?ref=roadmapsh", + "type": "article" + }, + { + "title": "MIT 6.042J - Probability Introduction", + "url": "https://www.youtube.com/watch?v=SmFwFdESMHI&index=18&list=PLB7540DEDD482705B", + "type": "video" + }, + { + "title": "MIT 6.042J - Conditional Probability", + "url": "https://www.youtube.com/watch?v=E6FbvM-FGZ8&index=19&list=PLB7540DEDD482705B", + "type": "video" + }, + { + "title": "MIT 6.042J - Independence", + "url": "https://www.youtube.com/watch?v=l1BCv3qqW4A&index=20&list=PLB7540DEDD482705B", + "type": "video" + }, + { + "title": "MIT 6.042J - Random Variables", + "url": "https://www.youtube.com/watch?v=MOfhhFaQdjw&list=PLB7540DEDD482705B&index=21", + "type": "video" + }, + { + "title": "MIT 6.042J - Expectation I", + "url": "https://www.youtube.com/watch?v=gGlMSe7uEkA&index=22&list=PLB7540DEDD482705B", + "type": "video" + }, + { + "title": "MIT 6.042J - Expectation II", + "url": "https://www.youtube.com/watch?v=oI9fMUqgfxY&index=23&list=PLB7540DEDD482705B", + "type": "video" + }, + { + "title": "MIT 6.042J - Large Deviations", + "url": "https://www.youtube.com/watch?v=q4mwO2qS2z4&index=24&list=PLB7540DEDD482705B", + "type": "video" + }, + { + "title": "MIT 6.042J - Random Walks", + "url": "https://www.youtube.com/watch?v=56iFMY8QW2k&list=PLB7540DEDD482705B&index=25", + "type": "video" + } + ] + }, + "lWnAY0DgrUOmT6yqnxeBN": { + "title": "Combinatorics", + "description": "Combinatorics is the study of counting. It is a branch of mathematics that is used to solve problems in a variety of fields, including computer science, statistics, and physics. In computer science, combinatorics is used to solve problems related to counting the number of possible outcomes of a given problem. 
For example, if you are given a set of 10 objects, how many different ways can you arrange them? Or, if you are given a set of 10 objects, how many different ways can you choose 3 objects from that set? These are examples of combinatorial problems.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Probability and Combinatorics Topic", + "url": "https://www.khanacademy.org/math/probability/probability-and-combinatorics-topic", + "type": "article" + }, + { + "title": "Math Skills: How to find Factorial, Permutation and Combination", + "url": "https://www.youtube.com/watch?v=8RRo6Ti9d0U", + "type": "video" + }, + { + "title": "Make School: Probability", + "url": "https://www.youtube.com/watch?v=sZkAAk9Wwa4", + "type": "video" + }, + { + "title": "Make School: More Probability and Markov Chains", + "url": "https://www.youtube.com/watch?v=dNaJg-mLobQ", + "type": "video" + } + ] + }, + "YLCmZvoLkhOk9wlgYW2Ms": { + "title": "Complexity Classes", + "description": "In computer science, there exist some problems whose solutions are not yet found, the problems are divided into classes known as Complexity Classes. In complexity theory, a Complexity Class is a set of problems with related complexity. These classes help scientists to groups problems based on how much time and space they require to solve problems and verify the solutions. 
It is the branch of the theory of computation that deals with the resources required to solve a problem.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Trying to understand P vs NP vs NP Complete vs NP Hard", + "url": "https://softwareengineering.stackexchange.com/questions/308178/trying-to-understand-p-vs-np-vs-np-complete-vs-np-hard", + "type": "article" + }, + { + "title": "Complexity: P, NP, NP-completeness, Reductions", + "url": "https://www.youtube.com/watch?v=eHZifpgyH_4&list=PLUl4u3cNGP6317WaSNfmCvGym2ucw3oGp&index=22", + "type": "video" + }, + { + "title": "Complexity: Approximation Algorithms", + "url": "https://www.youtube.com/watch?v=MEz1J9wY2iM&list=PLUl4u3cNGP6317WaSNfmCvGym2ucw3oGp&index=24", + "type": "video" + }, + { + "title": "Complexity: Fixed-Parameter Algorithms", + "url": "https://www.youtube.com/watch?v=4q-jmGrmxKs&index=25&list=PLUl4u3cNGP6317WaSNfmCvGym2ucw3oGp", + "type": "video" + }, + { + "title": "Lecture 23: Computational Complexity", + "url": "https://www.youtube.com/watch?v=moPtwq_cVH8&list=PLUl4u3cNGP61Oq3tWYp6V_F-5jb5L2iHb&index=24", + "type": "video" + }, + { + "title": "Greedy Algs. 
II & Intro to NP Completeness", + "url": "https://youtu.be/qcGnJ47Smlo?list=PLFDnELG9dpVxQCxuD-9BSy2E7BWY3t5Sm&t=2939", + "type": "video" + }, + { + "title": "NP Completeness II & Reductions", + "url": "https://www.youtube.com/watch?v=e0tGC6ZQdQE&index=16&list=PLFDnELG9dpVxQCxuD-9BSy2E7BWY3t5Sm", + "type": "video" + }, + { + "title": "NP Completeness III", + "url": "https://www.youtube.com/watch?v=fCX1BGT3wjE&index=17&list=PLFDnELG9dpVxQCxuD-9BSy2E7BWY3t5Sm", + "type": "video" + }, + { + "title": "NP Completeness IV", + "url": "https://www.youtube.com/watch?v=NKLDp3Rch3M&list=PLFDnELG9dpVxQCxuD-9BSy2E7BWY3t5Sm&index=18", + "type": "video" + }, + { + "title": "CSE373 2020 - Lecture 23 - NP-Completeness", + "url": "https://www.youtube.com/watch?v=ItHp5laE1VE&list=PLOtl7M3yp-DX6ic0HGT0PUX_wiNmkWkXx&index=23", + "type": "video" + }, + { + "title": "CSE373 2020 - Lecture 24 - Satisfiability", + "url": "https://www.youtube.com/watch?v=inaFJeCzGxU&list=PLOtl7M3yp-DX6ic0HGT0PUX_wiNmkWkXx&index=24", + "type": "video" + }, + { + "title": "CSE373 2020 - Lecture 25 - More NP-Completeness", + "url": "https://www.youtube.com/watch?v=B-bhKxjZLlc&list=PLOtl7M3yp-DX6ic0HGT0PUX_wiNmkWkXx&index=25", + "type": "video" + }, + { + "title": "CSE373 2020 - Lecture 26 - NP-Completeness Challenge", + "url": "https://www.youtube.com/watch?v=_EzetTkG_Cc&list=PLOtl7M3yp-DX6ic0HGT0PUX_wiNmkWkXx&index=26", + "type": "video" + } + ] + }, + "3aM17dPKNi8tRJsW8lesI": { + "title": "P", + "description": "The P in the P class stands for Polynomial Time. 
It is the collection of decision problems(problems with a “yes” or “no” answer) that can be solved by a deterministic machine in polynomial time.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Trying to understand P vs NP vs NP Complete vs NP Hard", + "url": "https://softwareengineering.stackexchange.com/questions/308178/trying-to-understand-p-vs-np-vs-np-complete-vs-np-hard", + "type": "article" + }, + { + "title": "Complexity: P, NP, NP-completeness, Reductions", + "url": "https://www.youtube.com/watch?v=eHZifpgyH_4&list=PLUl4u3cNGP6317WaSNfmCvGym2ucw3oGp&index=22", + "type": "video" + }, + { + "title": "Complexity: Approximation Algorithms", + "url": "https://www.youtube.com/watch?v=MEz1J9wY2iM&list=PLUl4u3cNGP6317WaSNfmCvGym2ucw3oGp&index=24", + "type": "video" + }, + { + "title": "Complexity: Fixed-Parameter Algorithms", + "url": "https://www.youtube.com/watch?v=4q-jmGrmxKs&index=25&list=PLUl4u3cNGP6317WaSNfmCvGym2ucw3oGp", + "type": "video" + }, + { + "title": "Lecture 23: Computational Complexity", + "url": "https://www.youtube.com/watch?v=moPtwq_cVH8&list=PLUl4u3cNGP61Oq3tWYp6V_F-5jb5L2iHb&index=24", + "type": "video" + }, + { + "title": "Greedy Algs. 
II & Intro to NP Completeness", + "url": "https://youtu.be/qcGnJ47Smlo?list=PLFDnELG9dpVxQCxuD-9BSy2E7BWY3t5Sm&t=2939", + "type": "video" + }, + { + "title": "NP Completeness II & Reductions", + "url": "https://www.youtube.com/watch?v=e0tGC6ZQdQE&index=16&list=PLFDnELG9dpVxQCxuD-9BSy2E7BWY3t5Sm", + "type": "video" + }, + { + "title": "NP Completeness III", + "url": "https://www.youtube.com/watch?v=fCX1BGT3wjE&index=17&list=PLFDnELG9dpVxQCxuD-9BSy2E7BWY3t5Sm", + "type": "video" + }, + { + "title": "NP Completeness IV", + "url": "https://www.youtube.com/watch?v=NKLDp3Rch3M&list=PLFDnELG9dpVxQCxuD-9BSy2E7BWY3t5Sm&index=18", + "type": "video" + }, + { + "title": "CSE373 2020 - Lecture 23 - NP-Completeness", + "url": "https://www.youtube.com/watch?v=ItHp5laE1VE&list=PLOtl7M3yp-DX6ic0HGT0PUX_wiNmkWkXx&index=23", + "type": "video" + }, + { + "title": "CSE373 2020 - Lecture 24 - Satisfiability", + "url": "https://www.youtube.com/watch?v=inaFJeCzGxU&list=PLOtl7M3yp-DX6ic0HGT0PUX_wiNmkWkXx&index=24", + "type": "video" + }, + { + "title": "CSE373 2020 - Lecture 25 - More NP-Completeness", + "url": "https://www.youtube.com/watch?v=B-bhKxjZLlc&list=PLOtl7M3yp-DX6ic0HGT0PUX_wiNmkWkXx&index=25", + "type": "video" + }, + { + "title": "CSE373 2020 - Lecture 26 - NP-Completeness Challenge", + "url": "https://www.youtube.com/watch?v=_EzetTkG_Cc&list=PLOtl7M3yp-DX6ic0HGT0PUX_wiNmkWkXx&index=26", + "type": "video" + } + ] + }, + "ewXUHpOnjm8YrLhce1dAQ": { + "title": "NP", + "description": "The NP in NP class stands for Non-deterministic Polynomial Time. 
It is the collection of decision problems that can be solved by a non-deterministic machine in polynomial time.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Trying to understand P vs NP vs NP Complete vs NP Hard", + "url": "https://softwareengineering.stackexchange.com/questions/308178/trying-to-understand-p-vs-np-vs-np-complete-vs-np-hard", + "type": "article" + }, + { + "title": "Complexity: P, NP, NP-completeness, Reductions", + "url": "https://www.youtube.com/watch?v=eHZifpgyH_4&list=PLUl4u3cNGP6317WaSNfmCvGym2ucw3oGp&index=22", + "type": "video" + }, + { + "title": "Complexity: Approximation Algorithms", + "url": "https://www.youtube.com/watch?v=MEz1J9wY2iM&list=PLUl4u3cNGP6317WaSNfmCvGym2ucw3oGp&index=24", + "type": "video" + }, + { + "title": "Complexity: Fixed-Parameter Algorithms", + "url": "https://www.youtube.com/watch?v=4q-jmGrmxKs&index=25&list=PLUl4u3cNGP6317WaSNfmCvGym2ucw3oGp", + "type": "video" + }, + { + "title": "Lecture 23: Computational Complexity", + "url": "https://www.youtube.com/watch?v=moPtwq_cVH8&list=PLUl4u3cNGP61Oq3tWYp6V_F-5jb5L2iHb&index=24", + "type": "video" + }, + { + "title": "Greedy Algs. 
II & Intro to NP Completeness", + "url": "https://youtu.be/qcGnJ47Smlo?list=PLFDnELG9dpVxQCxuD-9BSy2E7BWY3t5Sm&t=2939", + "type": "video" + }, + { + "title": "NP Completeness II & Reductions", + "url": "https://www.youtube.com/watch?v=e0tGC6ZQdQE&index=16&list=PLFDnELG9dpVxQCxuD-9BSy2E7BWY3t5Sm", + "type": "video" + }, + { + "title": "NP Completeness III", + "url": "https://www.youtube.com/watch?v=fCX1BGT3wjE&index=17&list=PLFDnELG9dpVxQCxuD-9BSy2E7BWY3t5Sm", + "type": "video" + }, + { + "title": "NP Completeness IV", + "url": "https://www.youtube.com/watch?v=NKLDp3Rch3M&list=PLFDnELG9dpVxQCxuD-9BSy2E7BWY3t5Sm&index=18", + "type": "video" + }, + { + "title": "CSE373 2020 - Lecture 23 - NP-Completeness", + "url": "https://www.youtube.com/watch?v=ItHp5laE1VE&list=PLOtl7M3yp-DX6ic0HGT0PUX_wiNmkWkXx&index=23", + "type": "video" + }, + { + "title": "CSE373 2020 - Lecture 24 - Satisfiability", + "url": "https://www.youtube.com/watch?v=inaFJeCzGxU&list=PLOtl7M3yp-DX6ic0HGT0PUX_wiNmkWkXx&index=24", + "type": "video" + }, + { + "title": "CSE373 2020 - Lecture 25 - More NP-Completeness", + "url": "https://www.youtube.com/watch?v=B-bhKxjZLlc&list=PLOtl7M3yp-DX6ic0HGT0PUX_wiNmkWkXx&index=25", + "type": "video" + }, + { + "title": "CSE373 2020 - Lecture 26 - NP-Completeness Challenge", + "url": "https://www.youtube.com/watch?v=_EzetTkG_Cc&list=PLOtl7M3yp-DX6ic0HGT0PUX_wiNmkWkXx&index=26", + "type": "video" + } + ] + }, + "mJJ8DGUpBuHEJ7I6UTy1T": { + "title": "NP Hard", + "description": "An NP-hard problem is at least as hard as the hardest problem in NP and it is the class of the problems such that every problem in NP reduces to NP-hard.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Trying to understand P vs NP vs NP Complete vs NP Hard", + "url": "https://softwareengineering.stackexchange.com/questions/308178/trying-to-understand-p-vs-np-vs-np-complete-vs-np-hard", + "type": "article" + }, + { + "title": "Complexity: P, NP, NP-completeness, 
Reductions", + "url": "https://www.youtube.com/watch?v=eHZifpgyH_4&list=PLUl4u3cNGP6317WaSNfmCvGym2ucw3oGp&index=22", + "type": "video" + }, + { + "title": "Complexity: Approximation Algorithms", + "url": "https://www.youtube.com/watch?v=MEz1J9wY2iM&list=PLUl4u3cNGP6317WaSNfmCvGym2ucw3oGp&index=24", + "type": "video" + }, + { + "title": "Complexity: Fixed-Parameter Algorithms", + "url": "https://www.youtube.com/watch?v=4q-jmGrmxKs&index=25&list=PLUl4u3cNGP6317WaSNfmCvGym2ucw3oGp", + "type": "video" + }, + { + "title": "Lecture 23: Computational Complexity", + "url": "https://www.youtube.com/watch?v=moPtwq_cVH8&list=PLUl4u3cNGP61Oq3tWYp6V_F-5jb5L2iHb&index=24", + "type": "video" + }, + { + "title": "Greedy Algs. II & Intro to NP Completeness", + "url": "https://youtu.be/qcGnJ47Smlo?list=PLFDnELG9dpVxQCxuD-9BSy2E7BWY3t5Sm&t=2939", + "type": "video" + }, + { + "title": "NP Completeness II & Reductions", + "url": "https://www.youtube.com/watch?v=e0tGC6ZQdQE&index=16&list=PLFDnELG9dpVxQCxuD-9BSy2E7BWY3t5Sm", + "type": "video" + }, + { + "title": "NP Completeness III", + "url": "https://www.youtube.com/watch?v=fCX1BGT3wjE&index=17&list=PLFDnELG9dpVxQCxuD-9BSy2E7BWY3t5Sm", + "type": "video" + }, + { + "title": "NP Completeness IV", + "url": "https://www.youtube.com/watch?v=NKLDp3Rch3M&list=PLFDnELG9dpVxQCxuD-9BSy2E7BWY3t5Sm&index=18", + "type": "video" + }, + { + "title": "CSE373 2020 - Lecture 23 - NP-Completeness", + "url": "https://www.youtube.com/watch?v=ItHp5laE1VE&list=PLOtl7M3yp-DX6ic0HGT0PUX_wiNmkWkXx&index=23", + "type": "video" + }, + { + "title": "CSE373 2020 - Lecture 24 - Satisfiability", + "url": "https://www.youtube.com/watch?v=inaFJeCzGxU&list=PLOtl7M3yp-DX6ic0HGT0PUX_wiNmkWkXx&index=24", + "type": "video" + }, + { + "title": "CSE373 2020 - Lecture 25 - More NP-Completeness", + "url": "https://www.youtube.com/watch?v=B-bhKxjZLlc&list=PLOtl7M3yp-DX6ic0HGT0PUX_wiNmkWkXx&index=25", + "type": "video" + }, + { + "title": "CSE373 2020 - Lecture 26 - 
NP-Completeness Challenge", + "url": "https://www.youtube.com/watch?v=_EzetTkG_Cc&list=PLOtl7M3yp-DX6ic0HGT0PUX_wiNmkWkXx&index=26", + "type": "video" + } + ] + }, + "Lwkz7qozXRlVNA20zJbSw": { + "title": "NP Complete", + "description": "A problem is NP-complete if it is both NP and NP-hard. NP-complete problems are the hard problems in NP.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Trying to understand P vs NP vs NP Complete vs NP Hard", + "url": "https://softwareengineering.stackexchange.com/questions/308178/trying-to-understand-p-vs-np-vs-np-complete-vs-np-hard", + "type": "article" + }, + { + "title": "Complexity: P, NP, NP-completeness, Reductions", + "url": "https://www.youtube.com/watch?v=eHZifpgyH_4&list=PLUl4u3cNGP6317WaSNfmCvGym2ucw3oGp&index=22", + "type": "video" + }, + { + "title": "P vs. NP and the Computational Complexity Zoo", + "url": "https://www.youtube.com/watch?v=YX40hbAHx3s", + "type": "video" + }, + { + "title": "Complexity: Approximation Algorithms", + "url": "https://www.youtube.com/watch?v=MEz1J9wY2iM&list=PLUl4u3cNGP6317WaSNfmCvGym2ucw3oGp&index=24", + "type": "video" + }, + { + "title": "Complexity: Fixed-Parameter Algorithms", + "url": "https://www.youtube.com/watch?v=4q-jmGrmxKs&index=25&list=PLUl4u3cNGP6317WaSNfmCvGym2ucw3oGp", + "type": "video" + }, + { + "title": "Lecture 23: Computational Complexity", + "url": "https://www.youtube.com/watch?v=moPtwq_cVH8&list=PLUl4u3cNGP61Oq3tWYp6V_F-5jb5L2iHb&index=24", + "type": "video" + }, + { + "title": "Greedy Algs. 
II & Intro to NP Completeness", + "url": "https://youtu.be/qcGnJ47Smlo?list=PLFDnELG9dpVxQCxuD-9BSy2E7BWY3t5Sm&t=2939", + "type": "video" + }, + { + "title": "NP Completeness II & Reductions", + "url": "https://www.youtube.com/watch?v=e0tGC6ZQdQE&index=16&list=PLFDnELG9dpVxQCxuD-9BSy2E7BWY3t5Sm", + "type": "video" + }, + { + "title": "NP Completeness III", + "url": "https://www.youtube.com/watch?v=fCX1BGT3wjE&index=17&list=PLFDnELG9dpVxQCxuD-9BSy2E7BWY3t5Sm", + "type": "video" + }, + { + "title": "NP Completeness IV", + "url": "https://www.youtube.com/watch?v=NKLDp3Rch3M&list=PLFDnELG9dpVxQCxuD-9BSy2E7BWY3t5Sm&index=18", + "type": "video" + }, + { + "title": "CSE373 2020 - Lecture 23 - NP-Completeness", + "url": "https://www.youtube.com/watch?v=ItHp5laE1VE&list=PLOtl7M3yp-DX6ic0HGT0PUX_wiNmkWkXx&index=23", + "type": "video" + }, + { + "title": "CSE373 2020 - Lecture 24 - Satisfiability", + "url": "https://www.youtube.com/watch?v=inaFJeCzGxU&list=PLOtl7M3yp-DX6ic0HGT0PUX_wiNmkWkXx&index=24", + "type": "video" + }, + { + "title": "CSE373 2020 - Lecture 25 - More NP-Completeness", + "url": "https://www.youtube.com/watch?v=B-bhKxjZLlc&list=PLOtl7M3yp-DX6ic0HGT0PUX_wiNmkWkXx&index=25", + "type": "video" + }, + { + "title": "CSE373 2020 - Lecture 26 - NP-Completeness Challenge", + "url": "https://www.youtube.com/watch?v=_EzetTkG_Cc&list=PLOtl7M3yp-DX6ic0HGT0PUX_wiNmkWkXx&index=26", + "type": "video" + } + ] + }, + "0btHNkzWL1w_-pUgU_k2y": { + "title": "P = NP", + "description": "The P = NP problem is one of the most famous problems in computer science. It asks if the problem of determining if a given input belongs to a certain class of problems is as hard as the problem of solving the given input. In other words, it asks if the problem of determining if a given input belongs to a certain class of problems is as hard as the problem of determining if a given input belongs to a certain class of problems. 
This problem is also known as the Halting Problem.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Whats P=NP?, and why is it such a famous question?", + "url": "https://stackoverflow.com/questions/111307/whats-p-np-and-why-is-it-such-a-famous-question", + "type": "article" + } + ] + }, + "ZG-hWjVoS3p9XfrtBL0sD": { + "title": "Travelling Salesman Problem", + "description": "The Travelling Salesman Problem (TSP) is a classic problem in computer science. It is a problem that is NP-complete, which means that it is a problem that is hard to solve. It is also a problem that is used to test the efficiency of algorithms.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is the Traveling Salesman Problem?", + "url": "https://www.youtube.com/watch?v=1pmBjIZ20pE", + "type": "video" + }, + { + "title": "4.7 Traveling Salesperson Problem - Dynamic Programming", + "url": "https://www.youtube.com/watch?v=XaXsJJh-Q5Y", + "type": "video" + }, + { + "title": "Traveling Salesman Problem | Dynamic Programming | Graph Theory", + "url": "https://www.youtube.com/watch?v=cY4HiiFHO1o", + "type": "video" + } + ] + }, + "yHeCRPhfAOWiggZeUHPU9": { + "title": "Knapsack Problem", + "description": "KnapSack Problem is a classic problem in computer science. 
It is a problem in which we are given a set of items, each with a weight and a value, and we need to determine which items to include in a collection so that the total weight is less than or equal to a given limit and the total value is as large as possible.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "How to solve the Knapsack Problem with dynamic programming", + "url": "https://medium.com/@fabianterh/how-to-solve-the-knapsack-problem-with-dynamic-programming-eb88c706d3cf", + "type": "article" + }, + { + "title": "3.1 Knapsack Problem - Greedy Method", + "url": "https://www.youtube.com/watch?v=oTTzNMHM05I", + "type": "video" + } + ] + }, + "4QVVYjDODMWsjnrE-4UBs": { + "title": "Longest Path Problem", + "description": "Longest path problem is a problem that asks us to find the longest path in a graph.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Shortest/Longest path on a Directed Acyclic Graph (DAG) | Graph Theory", + "url": "https://www.youtube.com/watch?v=TXkDpqjDMHA", + "type": "video" + }, + { + "title": "Longest Simple Path - Intro to Algorithms", + "url": "https://www.youtube.com/watch?v=lRH0tax5dFA", + "type": "video" + } + ] + }, + "4qUVacMEz3XFiL_dMre6P": { + "title": "Tries", + "description": "Tries are a data structure that can be used to store strings. The idea is to store the characters of the string in a tree-like structure, where each node of the tree represents a single character. We can use this structure to store strings in a way that allows us to quickly search for strings with a common prefix.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "0. Tries - Coursera", + "url": "https://www.coursera.org/learn/algorithms-part2/home/week/4", + "type": "course" + }, + { + "title": "1. R Way Tries", + "url": "https://www.coursera.org/learn/algorithms-part2/lecture/CPVdr/r-way-tries", + "type": "course" + }, + { + "title": "2. 
Ternary Search Tries", + "url": "https://www.coursera.org/learn/algorithms-part2/lecture/yQM8K/ternary-search-tries", + "type": "course" + }, + { + "title": "3. Character Based Operations", + "url": "https://www.coursera.org/learn/algorithms-part2/lecture/jwNmV/character-based-operations", + "type": "course" + }, + { + "title": "Tries - DataStructure Notes", + "url": "http://www.cs.yale.edu/homes/aspnes/classes/223/notes.html#Tries", + "type": "article" + }, + { + "title": "The Trie: A Neglected Data Structure", + "url": "https://www.toptal.com/java/the-trie-a-neglected-data-structure", + "type": "article" + }, + { + "title": "TopCoder - Using Tries", + "url": "https://www.topcoder.com/thrive/articles/Using%20Tries", + "type": "article" + }, + { + "title": "Stanford Lecture (real world use case)", + "url": "https://www.youtube.com/watch?v=TJ8SkcUSdbU", + "type": "video" + }, + { + "title": "MIT, Advanced Data Structures, Strings (can get pretty obscure about halfway through)", + "url": "https://www.youtube.com/watch?v=NinWEPPrkDQ&index=16&list=PLUl4u3cNGP61hsJNdULdudlRL493b-XZf", + "type": "video" + } + ] + }, + "7DF3UhefOSqqTuccH8c8X": { + "title": "Balanced Search Trees", + "description": "Balanced search trees are a type of data structure that allow for fast insertion, deletion, and lookup of data. They are a type of self-balancing binary search tree, which means that they are a binary tree that maintains the binary search tree property while also keeping the tree balanced. 
This means that the tree is always approximately balanced, which allows for fast insertion, deletion, and lookup of data.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Self-balancing binary search tree - Wikipedia", + "url": "https://en.wikipedia.org/wiki/Self-balancing_binary_search_tree", + "type": "article" + }, + { + "title": "Balanced Search Trees Operations and Applications 11 min", + "url": "https://www.youtube.com/watch?v=IbNZ-x1I2IM", + "type": "video" + }, + { + "title": "Balanced binary search tree rotations", + "url": "https://www.youtube.com/watch?v=q4fnJZr8ztY", + "type": "video" + } + ] + }, + "OUistS7H7hQQxVV-lOg39": { + "title": "AVL Trees", + "description": "AVL trees are a type of self-balancing binary search tree. They are named after their inventors, Adelson-Velskii and Landis. AVL trees are the most popular self-balancing binary search tree.\n\nIn practice: From what I can tell, these aren't used much in practice, but I could see where they would be: The AVL tree is another structure supporting O(log n) search, insertion, and removal. It is more rigidly balanced than red–black trees, leading to slower insertion and removal but faster retrieval. 
This makes it attractive for data structures that may be built once and loaded without reconstruction, such as language dictionaries (or program dictionaries, such as the opcodes of an assembler or interpreter)\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "AVL Trees", + "url": "https://www.coursera.org/learn/data-structures/lecture/Qq5E0/avl-trees", + "type": "course" + }, + { + "title": "AVL Tree Implementation", + "url": "https://www.coursera.org/learn/data-structures/lecture/PKEBC/avl-tree-implementation", + "type": "course" + }, + { + "title": "Split And Merge", + "url": "https://www.coursera.org/learn/data-structures/lecture/22BgE/split-and-merge", + "type": "course" + }, + { + "title": "MIT AVL Trees / AVL Sort", + "url": "https://www.youtube.com/watch?v=FNeL18KsWPc&list=PLUl4u3cNGP61Oq3tWYp6V_F-5jb5L2iHb&index=6", + "type": "video" + } + ] + }, + "eOL0_SzxBn7_xhNcBDi2D": { + "title": "Red / Black Trees", + "description": "In computer science, a red–black tree is a kind of self-balancing binary search tree. Each node stores an extra bit representing \"color\", used to ensure that the tree remains balanced during insertions and deletions.\n\nThese are a translation of a 2-3 tree (see below).\n\nIn practice: Red–black trees offer worst-case guarantees for insertion time, deletion time, and search time. Not only does this make them valuable in time-sensitive applications such as real-time applications, but it makes them valuable building blocks in other data structures which provide worst-case guarantees; for example, many data structures used in computational geometry can be based on red–black trees, and the Completely Fair Scheduler used in current Linux kernels uses red–black trees. 
In version 8 of Java, the Collection HashMap has been modified such that instead of using a LinkedList to store identical elements with poor hashcodes, a Red-Black tree is used.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Red-Black Tree - Wikipedia", + "url": "https://en.wikipedia.org/wiki/Red%E2%80%93black_tree", + "type": "article" + }, + { + "title": "An Introduction To Binary Search And Red Black Tree", + "url": "https://www.topcoder.com/thrive/articles/An%20Introduction%20to%20Binary%20Search%20and%20Red-Black%20Trees", + "type": "article" + }, + { + "title": "Red-Black Trees (playlist) in 30 minutes", + "url": "https://www.youtube.com/playlist?list=PL9xmBV_5YoZNqDI8qfOZgzbqahCUmUEin", + "type": "video" + }, + { + "title": "Aduni - Algorithms - Lecture 4 (link jumps to starting point)", + "url": "https://youtu.be/1W3x0f_RmUo?list=PLFDnELG9dpVxQCxuD-9BSy2E7BWY3t5Sm&t=3871", + "type": "video" + }, + { + "title": "Aduni - Algorithms - Lecture 5", + "url": "https://www.youtube.com/watch?v=hm2GHwyKF1o&list=PLFDnELG9dpVxQCxuD-9BSy2E7BWY3t5Sm&index=5", + "type": "video" + } + ] + }, + "3jiV9R82qxpqIGfpEq_wK": { + "title": "2 3 Search Trees", + "description": "In practice: 2-3 trees have faster inserts at the expense of slower searches (since height is more compared to AVL trees).\n\nYou would use 2-3 tree very rarely because its implementation involves different types of nodes. 
Instead, people use Red Black trees.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "23-Tree Intuition and Definition", + "url": "https://www.youtube.com/watch?v=C3SsdUqasD4&list=PLA5Lqm4uh9Bbq-E0ZnqTIa8LRaL77ica6&index=2", + "type": "video" + }, + { + "title": "Binary View of 23-Tree", + "url": "https://www.youtube.com/watch?v=iYvBtGKsqSg&index=3&list=PLA5Lqm4uh9Bbq-E0ZnqTIa8LRaL77ica6", + "type": "video" + }, + { + "title": "2-3 Trees (student recitation)", + "url": "https://www.youtube.com/watch?v=TOb1tuEZ2X4&index=5&list=PLUl4u3cNGP6317WaSNfmCvGym2ucw3oGp", + "type": "video" + } + ] + }, + "IaPd_zuLbiOCwoSHQLoIG": { + "title": "2 3 4 Trees", + "description": "In practice: For every 2-4 tree, there are corresponding red–black trees with data elements in the same order. The insertion and deletion operations on 2-4 trees are also equivalent to color-flipping and rotations in red–black trees. This makes 2-4 trees an important tool for understanding the logic behind red–black trees, and this is why many introductory algorithm texts introduce 2-4 trees just before red–black trees, even though 2-4 trees are not often used in practice.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "CS 61B Lecture 26: Balanced Search Trees", + "url": "https://archive.org/details/ucberkeley_webcast_zqrqYXkth6Q", + "type": "article" + }, + { + "title": "Bottom Up 234-Trees", + "url": "https://www.youtube.com/watch?v=DQdMYevEyE4&index=4&list=PLA5Lqm4uh9Bbq-E0ZnqTIa8LRaL77ica6", + "type": "video" + }, + { + "title": "Top Down 234-Trees", + "url": "https://www.youtube.com/watch?v=2679VQ26Fp4&list=PLA5Lqm4uh9Bbq-E0ZnqTIa8LRaL77ica6&index=5", + "type": "video" + } + ] + }, + "UOYeM-hqIKCrB9hGez4Q_": { + "title": "K-ary / M-ary Tree", + "description": "Note: the N or K is the branching factor (max branches)\n\nBinary trees are a 2-ary tree, with branching factor = 2\n\n2-3 trees are 3-ary\n\nVisit the following resources to learn 
more:", + "links": [ + { + "title": "K-Ary Tree", + "url": "https://en.wikipedia.org/wiki/K-ary_tree", + "type": "article" + } + ] + }, + "s0Pi6CemUUsmZSEu2j2gH": { + "title": "B-Tree", + "description": "Fun fact: it's a mystery, but the B could stand for Boeing, Balanced, or Bayer (co-inventor).\n\nIn Practice: B-Trees are widely used in databases. Most modern filesystems use B-trees (or Variants). In addition to its use in databases, the B-tree is also used in filesystems to allow quick random access to an arbitrary block in a particular file. The basic problem is turning the file block i address into a disk block (or perhaps to a cylinder-head-sector) address\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "B-Tree - Wikipedia", + "url": "https://en.wikipedia.org/wiki/B-tree", + "type": "article" + }, + { + "title": "B-Tree Datastructure", + "url": "http://btechsmartclass.com/data_structures/b-trees.html", + "type": "article" + }, + { + "title": "Introduction to B-Trees", + "url": "https://www.youtube.com/watch?v=I22wEC1tTGo&list=PLA5Lqm4uh9Bbq-E0ZnqTIa8LRaL77ica6&index=6", + "type": "video" + }, + { + "title": "B-Tree Definition and Insertion", + "url": "https://www.youtube.com/watch?v=s3bCdZGrgpA&index=7&list=PLA5Lqm4uh9Bbq-E0ZnqTIa8LRaL77ica6", + "type": "video" + }, + { + "title": "B-Tree Deletion", + "url": "https://www.youtube.com/watch?v=svfnVhJOfMc&index=8&list=PLA5Lqm4uh9Bbq-E0ZnqTIa8LRaL77ica6", + "type": "video" + }, + { + "title": "MIT 6.851 - Memory Hierarchy Models", + "url": "https://www.youtube.com/watch?v=V3omVLzI0WE&index=7&list=PLUl4u3cNGP61hsJNdULdudlRL493b-XZf", + "type": "video" + }, + { + "title": "B-Trees (playlist) in 26 minutes", + "url": "https://www.youtube.com/playlist?list=PL9xmBV_5YoZNFPPv98DjTdD9X6UI9KMHz", + "type": "video" + } + ] + }, + "w-fvEiSJysywR3AOAB0ve": { + "title": "System Design", + "description": "System design is the process of defining the architecture, modules, interfaces, and data for 
a system to satisfy specified requirements. It is a very broad topic, and there are many ways to approach it.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "System Design Primer", + "url": "https://github.com/donnemartin/system-design-primer", + "type": "opensource" + }, + { + "title": "System Design: The complete course", + "url": "https://dev.to/karanpratapsingh/system-design-the-complete-course-10fo", + "type": "article" + }, + { + "title": "Explore top posts about Career", + "url": "https://app.daily.dev/tags/career?ref=roadmapsh", + "type": "article" + }, + { + "title": "System Design 101", + "url": "https://www.youtube.com/watch?v=Y-Gl4HEyeUQ", + "type": "video" + }, + { + "title": "Scaling the Unscalable", + "url": "https://www.youtube.com/watch?v=a2rcgzludDU", + "type": "video" + }, + { + "title": "System design interview: Scale to 1 million users", + "url": "https://www.youtube.com/watch?v=YkGHxOg9d3M", + "type": "video" + } + ] + }, + "-34WGppX6QC5fkCvfCghp": { + "title": "Horizontal vs Vertical Scaling", + "description": "Horizontal scaling is the process of adding more machines to your system. This is also known as scaling out. Vertical scaling is the process of adding more power to a single machine. This is also known as scaling up.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Scaling Horizontally vs. Scaling Vertically", + "url": "https://www.digitalocean.com/resources/article/horizontal-scaling-vs-vertical-scaling", + "type": "article" + }, + { + "title": "System Design Basics: Horizontal vs. Vertical Scaling", + "url": "https://www.youtube.com/watch?v=xpDnVSmNFX0", + "type": "video" + }, + { + "title": "Vertical vs. 
Horizontal Scaling for Database Servers", + "url": "https://www.youtube.com/watch?v=R99R-SNbo9g", + "type": "video" + } + ] + }, + "xDiS0HmrEoPjkQg9x2O3o": { + "title": "Load Balancing", + "description": "Load balancing is the process of distributing network or application traffic across a cluster of servers. Load balancing is used to improve responsiveness and reliability of applications, maximize throughput, minimize response time, and avoid overload of any single server.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Load Balancers 101", + "url": "https://www.youtube.com/watch?v=galcDRNd5Ow", + "type": "video" + }, + { + "title": "What is Load Balancing?", + "url": "https://www.youtube.com/watch?v=gGLophKzJs8", + "type": "video" + } + ] + }, + "wAsZ-M5N9ERufQnBaXbAE": { + "title": "Clustering", + "description": "At a high level, a computer cluster is a group of two or more computers, or nodes, that run in parallel to achieve a common goal. This allows workloads consisting of a high number of individual, parallelizable tasks to be distributed among the nodes in the cluster. As a result, these tasks can leverage the combined memory and processing power of each computer to increase overall performance.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "System Design: Clustering", + "url": "https://dev.to/karanpratapsingh/system-design-clustering-3726", + "type": "article" + } + ] + }, + "4u5w9QCptLWuY9O7_UU8V": { + "title": "Caching", + "description": "Caching is a way of storing data in a temporary storage to make future requests faster. 
It is one of the most important tools in the computer science toolbox.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "System Design - Caching", + "url": "https://dev.to/karanpratapsingh/system-design-the-complete-course-10fo#caching", + "type": "article" + }, + { + "title": "What is Caching | System Design Basics", + "url": "https://www.youtube.com/watch?v=joifNgoXXFk", + "type": "video" + } + ] + }, + "r8V9bZpc98SrLvXc070bZ": { + "title": "CDN", + "description": "A CDN is a network of servers that are distributed geographically. The servers are connected to each other and to the internet. The servers are used to deliver content to users. The content is delivered to the user from the server that is closest to the user. This is done to reduce latency and improve the performance of the content delivery.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Content Delivery Network (CDN) - System Design", + "url": "https://dev.to/karanpratapsingh/system-design-the-complete-course-10fo#content-delivery-network-cdn", + "type": "article" + }, + { + "title": "Content Delivery Networks", + "url": "https://www.youtube.com/watch?v=6DXEPcXKQNY", + "type": "video" + } + ] + }, + "qLLJ_ehC8tSRfXsorYMQz": { + "title": "Proxy", + "description": "A proxy server is an intermediary piece of hardware/software sitting between the client and the backend server. It receives requests from clients and relays them to the origin servers. 
Typically, proxies are used to filter requests, log requests, or sometimes transform requests (by adding/removing headers, encrypting/decrypting, or compression).\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Proxy - System Design", + "url": "https://dev.to/karanpratapsingh/system-design-the-complete-course-10fo#proxy", + "type": "article" + }, + { + "title": "Proxy Servers", + "url": "https://roadmap.sh/guides/proxy-servers", + "type": "article" + } + ] + }, + "GZI8EsEqiMJX3fSbO_E-7": { + "title": "CAP Theorem", + "description": "The CAP theorem states that it is impossible for a distributed data store to simultaneously provide more than two out of Consistency, Availability and Partition Tolerance.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "CAP Theorem - Wikipedia", + "url": "https://en.wikipedia.org/wiki/CAP_theorem", + "type": "article" + }, + { + "title": "What is CAP Theorem?", + "url": "https://www.youtube.com/watch?v=_RbsFXWRZ10", + "type": "video" + } + ] + }, + "LCPEzpyAuHY3ggN3nYrNN": { + "title": "Queues", + "description": "Messaging queues are a common way to decouple systems. They are used to decouple the producer of a message from the consumer of a message. This allows the producer to send a message and not have to wait for the consumer to process it. 
It also allows the consumer to process the message at their own pace.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Message Queues - System Design", + "url": "https://dev.to/karanpratapsingh/system-design-the-complete-course-10fo#message-queues", + "type": "article" + }, + { + "title": "What is a Message Queue?", + "url": "https://www.youtube.com/watch?v=xErwDaOc-Gs", + "type": "video" + }, + { + "title": "What is a Message Queue and Where is it used?", + "url": "https://www.youtube.com/watch?v=oUJbuFMyBDk", + "type": "video" + } + ] + }, + "X1wv7xV69IEzepg8bF8oZ": { + "title": "Architectural Styles", + "description": "Architectural patterns are the fundamental organization of a system, defining how the system is composed and how its components interact. Architectural patterns are identified by their name, like client-server, peer-to-peer, and layered.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "List of software architecture styles and patterns", + "url": "https://en.wikipedia.org/wiki/List_of_software_architecture_styles_and_patterns", + "type": "article" + } + ] + }, + "Kvz3Sn7L8CxP5ZXO1hlcU": { + "title": "REST", + "description": "REST, or REpresentational State Transfer, is an architectural style for providing standards between computer systems on the web, making it easier for systems to communicate with each other.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is a REST API?", + "url": "https://www.redhat.com/en/topics/api/what-is-a-rest-api", + "type": "article" + }, + { + "title": "Roy Fieldings dissertation chapter, Representational State Transfer (REST)", + "url": "https://www.ics.uci.edu/~fielding/pubs/dissertation/rest_arch_style.htm", + "type": "article" + }, + { + "title": "Learn REST: A RESTful Tutorial", + "url": "https://restapitutorial.com/", + "type": "article" + }, + { + "title": "Explore top posts about REST API", + "url": 
"https://app.daily.dev/tags/rest-api?ref=roadmapsh", + "type": "article" + } + ] + }, + "DeE_dZecLmKCjePovTfFS": { + "title": "GraphQL", + "description": "GraphQL is a query language for APIs and a runtime for fulfilling those queries with your existing data. GraphQL provides a complete and understandable description of the data in your API, gives clients the power to ask for exactly what they need and nothing more, makes it easier to evolve APIs over time, and enables powerful developer tools.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Apollo GraphQL Tutorials", + "url": "https://www.apollographql.com/tutorials/", + "type": "article" + }, + { + "title": "Explore top posts about GraphQL", + "url": "https://app.daily.dev/tags/graphql?ref=roadmapsh", + "type": "article" + } + ] + }, + "M4vdwAbjLPrLjWUsG_P1I": { + "title": "gRPC", + "description": "gRPC is a platform agnostic serialization protocol that is used to communicate between services. Designed by Google in 2015, it is a modern alternative to REST APIs. It is a binary protocol that uses HTTP/2 as a transport layer. It is a high performance, open source, general-purpose RPC framework that puts mobile and HTTP/2 first.\n\nIt's main use case is for communication between two different languages within the same application. 
You can use Python to communicate with Go, or Java to communicate with C#.\n\ngRPC uses the protocol buffer language to define the structure of the data that is transmitted.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "gRPC Website", + "url": "https://grpc.io/", + "type": "article" + }, + { + "title": "gRPC Introduction", + "url": "https://grpc.io/docs/what-is-grpc/introduction/", + "type": "article" + }, + { + "title": "gRPC Core Concepts", + "url": "https://grpc.io/docs/what-is-grpc/core-concepts/", + "type": "article" + }, + { + "title": "Explore top posts about gRPC", + "url": "https://app.daily.dev/tags/grpc?ref=roadmapsh", + "type": "article" + }, + { + "title": "Stephane Maarek - gRPC Introduction", + "url": "https://youtu.be/XRXTsQwyZSU", + "type": "video" + } + ] + }, + "YMO9oD_sbzyDZPNU2xZwe": { + "title": "Cloud Design Patterns", + "description": "These design patterns are useful for building reliable, scalable, secure applications in the cloud.\n\nThe link below has cloud design patterns where each pattern describes the problem that the pattern addresses, considerations for applying the pattern, and an example based on Microsoft Azure. Most patterns include code samples or snippets that show how to implement the pattern on Azure. However, most patterns are relevant to any distributed system, whether hosted on Azure or other cloud platforms.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Cloud Design Patterns", + "url": "https://learn.microsoft.com/en-us/azure/architecture/patterns/", + "type": "article" + }, + { + "title": "Explore top posts about Cloud", + "url": "https://app.daily.dev/tags/cloud?ref=roadmapsh", + "type": "article" + } + ] + }, + "XeJi6TwpI5Uaszj00Uv_2": { + "title": "Long Polling", + "description": "Long polling is a technique used to implement server push functionality over HTTP. 
It is a method of opening a request on the server and keeping it open until an event occurs, at which point the server responds. This is in contrast to a regular HTTP request, where the server responds immediately with whatever data is available at the time.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Long polling", + "url": "https://javascript.info/long-polling", + "type": "article" + }, + { + "title": "What are Long-Polling, Websockets, Server-Sent Events (SSE) and Comet?", + "url": "https://stackoverflow.com/questions/11077857/what-are-long-polling-websockets-server-sent-events-sse-and-comet", + "type": "article" + } + ] + }, + "gGmNJ8dK28iqrlQHPz6md": { + "title": "Short Polling", + "description": "In short polling, the client requests information from the server. The server processes the request. If data is available for the request, server responds to the request with the required information. However, if the server has no data available for the client, server returns an empty response. In both the situation, the connection will be closed after returning the response. Clients keep issuing new requests even after server sends the empty responses. This mechanism increases the network cost on the server.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What are Long-Polling, Websockets, Server-Sent Events (SSE) and Comet?", + "url": "https://stackoverflow.com/questions/11077857/what-are-long-polling-websockets-server-sent-events-sse-and-comet", + "type": "article" + } + ] + }, + "bVjI14VismTHNCyA0mEBP": { + "title": "Web Sockets", + "description": "Web sockets are a bidirectional communication protocol between a client and a server. They are used for real-time applications like chat, multiplayer games, and live data updates. Web sockets are also used to establish a connection between a server and a client. 
This connection is then used to send data in both directions.\n\nVisit the following resources to learn more:", + "links": [] + }, + "JckRqZA8C6IqQLPpTCgf4": { + "title": "SSE", + "description": "Server-Sent Events is a server push technology enabling a client to receive automatic updates from a server via an HTTP connection, and describes how servers can initiate data transmission towards clients once an initial client connection has been established.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is Server-Sent Events (SSE) and how to implement it?", + "url": "https://medium.com/yemeksepeti-teknoloji/what-is-server-sent-events-sse-and-how-to-implement-it-904938bffd73", + "type": "article" + }, + { + "title": "Using server-sent events", + "url": "https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events", + "type": "article" + }, + { + "title": "Explore top posts about Cryptography", + "url": "https://app.daily.dev/tags/cryptography?ref=roadmapsh", + "type": "article" + } + ] + }, + "zvlTQ0A-My4QDrslp_lru": { + "title": "Databases", + "description": "A database is a collection of useful data of one or more related organizations structured in a way to make data an asset to the organization. 
A database management system is a software designed to assist in maintaining and extracting large collections of data in a timely fashion.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Oracle: What is a Database?", + "url": "https://www.oracle.com/database/what-is-database/", + "type": "article" + }, + { + "title": "Prisma.io: What are Databases?", + "url": "https://www.prisma.io/dataguide/intro/what-are-databases", + "type": "article" + }, + { + "title": "Explore top posts about Backend Development", + "url": "https://app.daily.dev/tags/backend?ref=roadmapsh", + "type": "article" + }, + { + "title": "DBMS by Stanford", + "url": "https://www.youtube.com/watch?v=D-k-h0GuFmE&list=PL9ysvtVnryGpnIj9rcIqNDxakUn6v72Hm", + "type": "video" + } + ] + }, + "nprZscHdC_RdRIcGeGyVk": { + "title": "SQL vs NoSQL Databases", + "description": "SQL stands for Structured Query Language. It's used for relational databases. A SQL database is a collection of tables that stores a specific set of structured data. Some examples are PostgreSQL, MySQL, MariaDB etc.\n\nNoSQL stands for Not Only SQL. It's used for non-relational databases. A NoSQL database is a collection of collections that stores a specific set of unstructured data. Some examples are MongoDB, CouchDB, Redis etc.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Explore top posts about NoSQL", + "url": "https://app.daily.dev/tags/nosql?ref=roadmapsh", + "type": "article" + }, + { + "title": "SQL vs. 
NoSQL: Whats the difference?", + "url": "https://www.youtube.com/watch?v=Q5aTUc7c4jg", + "type": "video" + }, + { + "title": "Database Design Tips | Choosing the Best Database in a System Design Interview", + "url": "https://www.youtube.com/watch?v=cODCpXtPHbQ&t=22s", + "type": "video" + }, + { + "title": "NoSQL vs SQL – Which Type of Database Should You Use?", + "url": "https://www.youtube.com/watch?v=FzlpwoeSrE0", + "type": "video" + } + ] + }, + "Rw7QjLC8hLtXSwmU_9kHC": { + "title": "Normalization / Denormalization", + "description": "Database normalization is a process used to organize a database into tables and columns. The idea is that a table should be about a specific topic and that only those columns which support that topic are included. This limits the number of duplicate data contained within your database. This makes the database more flexible by eliminating issues stemming from database modifications.\n\nDenormalization is the opposite of normalization. It is the process of adding redundant data to a database to improve read performance. This is done by adding duplicate data into multiple tables to avoid expensive joins. This is done at the expense of increased storage and decreased write performance.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Normalization vs. Denormalization | Events and Event Streaming", + "url": "https://www.youtube.com/watch?v=sDU94hraq8g", + "type": "video" + }, + { + "title": "Normalization - 1NF, 2NF, 3NF and 4NF", + "url": "https://www.youtube.com/watch?v=UrYLYV7WSHM", + "type": "video" + } + ] + }, + "XnxxEmr2TyW2kOpB4gtR5": { + "title": "Entity-Relationship Model", + "description": "Entity relationship model is a high-level data model that describes the logical structure of a database. 
It is a graphical representation of entities and their relationships to each other, typically used in modeling the organization of data within databases or information systems.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Entity Relationship Diagram (ERD) Tutorial - Part 1", + "url": "https://www.youtube.com/watch?v=QpdhBUYk7Kk", + "type": "video" + }, + { + "title": "Entity Relationship Diagram (ERD) Tutorial - Part 2", + "url": "https://www.youtube.com/watch?v=-CuY5ADwn24", + "type": "video" + } + ] + }, + "ii1vF74u3yrFNlw_21b3B": { + "title": "DDL", + "description": "DDL or Data Definition Language actually consists of the SQL commands that can be used to define the database schema. It simply deals with descriptions of the database schema and is used to create and modify the structure of database objects in the database. DDL is a set of SQL commands used to create, modify, and delete database structures but not data. These commands are normally not used by a general user, who should be accessing the database via an application.\n\nVisit the following resources to learn more:", + "links": [] + }, + "tcQSH-eAvJUZuePTDjAIb": { + "title": "DML", + "description": "The SQL commands that deals with the manipulation of data present in the database belong to DML or Data Manipulation Language and this includes most of the SQL statements. It is the component of the SQL statement that controls access to data and to the database. Basically, DCL statements are grouped with DML statements.\n\nVisit the following resources to learn more:", + "links": [] + }, + "05lkb3B86Won7Rkf-8DeD": { + "title": "DQL", + "description": "DQL statements are used for performing queries on the data within schema objects. The purpose of the DQL Command is to get some schema relation based on the query passed to it. We can define DQL as follows it is a component of SQL statement that allows getting data from the database and imposing order upon it. 
It includes the SELECT statement. This command allows getting the data out of the database to perform operations with it. When a SELECT is fired against a table or tables the result is compiled into a further temporary table, which is displayed or perhaps received by the program i.e. a front-end.\n\nVisit the following resources to learn more:", + "links": [] + }, + "4bUmfuP2qgcli8I2Vm9zh": { + "title": "DCL", + "description": "DCL includes commands such as GRANT and REVOKE which mainly deal with the rights, permissions, and other controls of the database system.\n\nVisit the following resources to learn more:", + "links": [] + }, + "_sm63rZNKoibVndeNgOpW": { + "title": "Locking", + "description": "Locks are used to prevent data from being modified by multiple processes at the same time. This is important because if two processes are modifying the same data at the same time, the data can become corrupted. Locks are used to prevent this from happening.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Locking in Databases and Isolation Mechanisms", + "url": "https://medium.com/inspiredbrilliance/what-are-database-locks-1aff9117c290", + "type": "article" + }, + { + "title": "Understanding Database Lock Timeouts and Deadlocks", + "url": "https://www.dbta.com/Columns/DBA-Corner/Understanding-Database-Lock-Timeouts-and-Deadlocks-148659.aspx", + "type": "article" + }, + { + "title": "Row-Level Database Locks Explained - (Read vs Exclusive)", + "url": "https://www.youtube.com/watch?v=nuBi2XbHH18", + "type": "video" + } + ] + }, + "W5B-v-BFcCRmuN0L1m6PI": { + "title": "ACID Model", + "description": "ACID are the four properties of any database system that help in making sure that we are able to perform the transactions in a reliable manner. 
It's an acronym which refers to the presence of four properties: atomicity, consistency, isolation and durability\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is ACID Compliant Database?", + "url": "https://retool.com/blog/whats-an-acid-compliant-database/", + "type": "article" + }, + { + "title": "What is ACID Compliance?: Atomicity, Consistency, Isolation", + "url": "https://fauna.com/blog/what-is-acid-compliance-atomicity-consistency-isolation", + "type": "article" + }, + { + "title": "ACID Explained: Atomic, Consistent, Isolated & Durable", + "url": "https://www.youtube.com/watch?v=yaQ5YMWkxq4", + "type": "video" + } + ] + }, + "q3nRhTYS5wg9tYnQe2sCF": { + "title": "BASE", + "description": "The rise in popularity of NoSQL databases provided a flexible and fluidity with ease to manipulate data and as a result, a new database model was designed, reflecting these properties. The acronym BASE is slightly more confusing than ACID but however, the words behind it suggest ways in which the BASE model is different and acronym BASE stands for:-\n\n* **B**asically **A**vailable\n* **S**oft state\n* **E**ventual consistency\n\nVisit the following resources to learn more:", + "links": [] + }, + "uqfeiQ9K--QkGNwks4kjk": { + "title": "CAP Theorem", + "description": "CAP is an acronym for Consistency, Availability, and Partition Tolerance. According to the CAP theorem, any distributed system can only guarantee two of the three properties at any time. 
You can't guarantee all three properties at once.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is CAP Theorem?", + "url": "https://www.bmc.com/blogs/cap-theorem/", + "type": "article" + }, + { + "title": "CAP Theorem - Wikipedia", + "url": "https://en.wikipedia.org/wiki/CAP_theorem", + "type": "article" + }, + { + "title": "An Illustrated Proof of the CAP Theorem", + "url": "https://mwhittaker.github.io/blog/an_illustrated_proof_of_the_cap_theorem/", + "type": "article" + }, + { + "title": "CAP Theorem and its applications in NoSQL Databases", + "url": "https://www.ibm.com/uk-en/cloud/learn/cap-theorem", + "type": "article" + }, + { + "title": "What is CAP Theorem?", + "url": "https://www.youtube.com/watch?v=_RbsFXWRZ10", + "type": "video" + } + ] + }, + "g6HeyLptaAYx9QBKuHQyM": { + "title": "PACELC", + "description": "The PACELC Theorem is an extension of the CAP Theorem. One of the questions that CAP Theorem wasn’t able to answer was “what happens when there is no Partition, What Logical Combination then a Distributed System have?“. So to answer this, In addition to Consistency, Availability, and Partition Tolerance it also includes Latency as one of the desired properties of a Distributed System. 
The acronym PACELC stands for: if Partitioned (P), then Availability (A) versus Consistency (C); Else (E), Latency (L) versus Consistency (C).\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "PACELC Theorem", + "url": "https://www.scylladb.com/glossary/pacelc-theorem/", + "type": "article" + } + ] + }, + "BEbsUA39kZ5itqCwD585f": { + "title": "Indexes", + "description": "An index is a data structure that you build and assign on top of an existing table that basically looks through your table and tries to analyze and summarize so that it can create shortcuts.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Database Indexing Explained", + "url": "https://www.youtube.com/watch?v=-qNSXK7s7_w", + "type": "video" + } + ] + }, + "lOj_ReWI1kQ3WajJZYOWU": { + "title": "Views", + "description": "Views in SQL are kind of virtual tables. A view also has rows and columns as they are in a real table in the database. We can create a view by selecting fields from one or more tables present in the database. A View can either have all the rows of a table or specific rows based on certain condition.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Database Views", + "url": "https://www.ibm.com/docs/en/eamfoc/7.6.0?topic=structure-views", + "type": "article" + }, + { + "title": "SQL Views - Wikipedia", + "url": "https://en.wikipedia.org/wiki/View_(SQL)", + "type": "article" + } + ] + }, + "KhvYJtSCUBOpEZXjHpQde": { + "title": "Transactions", + "description": "In short, a database transaction is a sequence of multiple operations performed on a database, and all served as a single logical unit of work — taking place wholly or not at all. 
In other words, there's never a case where only half of the operations are performed and the results saved.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What are Transactions?", + "url": "https://fauna.com/blog/database-transaction", + "type": "article" + } + ] + }, + "FjhZGhzJjYyDn0PShKmpX": { + "title": "Stored Procedures", + "description": "Stored Procedures are created to perform one or more DML operations on Database. It is nothing but the group of SQL statements that accepts some input in the form of parameters and performs some task and may or may not returns a value.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "SQL Stored Procedures", + "url": "https://www.programiz.com/sql/stored-procedures", + "type": "article" + } + ] + }, + "3r21avMlo0xFB9i7PHccX": { + "title": "Database Federation", + "description": "Federation (or functional partitioning) splits up databases by function. The federation architecture makes several distinct physical databases appear as one logical database to end-users.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Database Federation", + "url": "https://dev.to/karanpratapsingh/system-design-the-complete-course-10fo#database-federation", + "type": "article" + }, + { + "title": "Explore top posts about Database", + "url": "https://app.daily.dev/tags/database?ref=roadmapsh", + "type": "article" + } + ] + }, + "WUEmEtlszbISSIWDKpvg6": { + "title": "Replication", + "description": "Replication is a process that involves sharing information to ensure consistency between redundant resources such as multiple databases, to improve reliability, fault-tolerance, or accessibility.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Database Replication", + "url": "https://dev.to/karanpratapsingh/system-design-the-complete-course-10fo#database-replication", + "type": "article" + }, + { + "title": "Replication (computing)", + 
"url": "https://en.wikipedia.org/wiki/Replication_(computing)", + "type": "article" + } + ] + }, + "3K65efPag2076dy-MeTg4": { + "title": "Sharding", + "description": "Database sharding is a method of distributing data across multiple machines. It is a horizontal scaling technique, as opposed to vertical scaling, which is scaling by adding more power to a single machine. Sharding is a common way to scale a database.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Sharding", + "url": "https://dev.to/karanpratapsingh/system-design-the-complete-course-10fo#sharding", + "type": "article" + }, + { + "title": "Explore top posts about Backend Development", + "url": "https://app.daily.dev/tags/backend?ref=roadmapsh", + "type": "article" + }, + { + "title": "Sharding & Database Partitioning | System Design Basics", + "url": "https://www.youtube.com/watch?v=RynPj8C0BXA", + "type": "video" + }, + { + "title": "Database Sharding - Watch", + "url": "https://www.youtube.com/watch?v=hdxdhCpgYo8", + "type": "video" + }, + { + "title": "Database Sharding in 5 minutes", + "url": "https://www.youtube.com/watch?v=kSH4bt8ypOQ", + "type": "video" + } + ] + }, + "zG5t3HqbZnh9CGRqp1Sb-": { + "title": "Networking", + "description": "Networking is the process of connecting two or more computing devices together for the purpose of sharing data. In a data network, shared data may be as simple as a printer or as complex as a global financial transaction.\n\nIf you have networking experience or want to be a reliability engineer or operations engineer, expect questions from these topics. 
Otherwise, this is just good to know.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Khan Academy - Networking", + "url": "https://www.khanacademy.org/computing/code-org/computers-and-the-internet", + "type": "article" + }, + { + "title": "Explore top posts about Networking", + "url": "https://app.daily.dev/tags/networking?ref=roadmapsh", + "type": "article" + }, + { + "title": "Computer Networking Course - Network Engineering", + "url": "https://www.youtube.com/watch?v=qiQR5rTSshw", + "type": "video" + }, + { + "title": "Networking Video Series (21 videos)", + "url": "https://www.youtube.com/playlist?list=PLEbnTDJUr_IegfoqO4iPnPYQui46QqT0j", + "type": "video" + } + ] + }, + "pZ5x_zDYGzW9VxYycyXtN": { + "title": "OSI Model", + "description": "The OSI and TCP/IP model is used to help the developer to design their system for interoperability. The OSI model has 7 layers while the TCP/IP model has a more summarized form of the OSI model only consisting of 4 layers. This is important if you are trying to design a system to communicate with other systems.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Cloudflare - What is the OSI model", + "url": "https://www.cloudflare.com/learning/ddos/glossary/open-systems-interconnection-model-osi/", + "type": "article" + }, + { + "title": "TCP/IP and the OSI Model Explained!", + "url": "https://www.youtube.com/watch?v=e5DEVa9eSN0", + "type": "video" + } + ] + }, + "Fed5y1D95WPpqoVg7kmob": { + "title": "TCP/IP Model", + "description": "The OSI and TCP/IP model is used to help the developer to design their system for interoperability. The OSI model has 7 layers while the TCP/IP model has a more summarized form of the OSI model only consisting of 4 layers. 
This is important if you are trying to design a system to communicate with other systems.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Cloudflare - What is the OSI model", + "url": "https://www.cloudflare.com/learning/ddos/glossary/open-systems-interconnection-model-osi/", + "type": "article" + }, + { + "title": "TCP/IP and the OSI Model Explained!", + "url": "https://www.youtube.com/watch?v=e5DEVa9eSN0", + "type": "video" + } + ] + }, + "fYjoXB4rnkM5gg46sqVz5": { + "title": "DNS", + "description": "The Domain Name System (DNS) is the phonebook of the Internet. Humans access information online through domain names, like [nytimes.com](http://nytimes.com) or [espn.com](http://espn.com). Web browsers interact through Internet Protocol (IP) addresses. DNS translates domain names to IP addresses so browsers can load Internet resources.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is DNS?", + "url": "https://www.cloudflare.com/en-gb/learning/dns/what-is-dns/", + "type": "article" + }, + { + "title": "How DNS works (comic)", + "url": "https://howdns.works/", + "type": "article" + }, + { + "title": "Explore top posts about DNS", + "url": "https://app.daily.dev/tags/dns?ref=roadmapsh", + "type": "article" + }, + { + "title": "DNS and How does it Work?", + "url": "https://www.youtube.com/watch?v=Wj0od2ag5sk", + "type": "video" + }, + { + "title": "DNS Records", + "url": "https://www.youtube.com/watch?v=7lxgpKh_fRY", + "type": "video" + }, + { + "title": "Complete DNS mini-series", + "url": "https://www.youtube.com/watch?v=zEmUuNFBgN8&list=PLTk5ZYSbd9MhMmOiPhfRJNW7bhxHo4q-K", + "type": "video" + } + ] + }, + "2tUwl-br-SRuwADSzmQag": { + "title": "HTTP", + "description": "HTTP is the `TCP/IP` based application layer communication protocol which standardizes how the client and server communicate with each other. 
It defines how the content is requested and transmitted across the internet.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Everything you need to know about HTTP", + "url": "https://cs.fyi/guide/http-in-depth", + "type": "article" + }, + { + "title": "What is HTTP?", + "url": "https://www.cloudflare.com/en-gb/learning/ddos/glossary/hypertext-transfer-protocol-http/", + "type": "article" + }, + { + "title": "An overview of HTTP", + "url": "https://developer.mozilla.org/en-US/docs/Web/HTTP/Overview", + "type": "article" + }, + { + "title": "HTTP/3 From A To Z: Core Concepts", + "url": "https://www.smashingmagazine.com/2021/08/http3-core-concepts-part1/", + "type": "article" + }, + { + "title": "HTTP/1 to HTTP/2 to HTTP/3", + "url": "https://www.youtube.com/watch?v=a-sBfyiXysI", + "type": "video" + }, + { + "title": "HTTP Crash Course & Exploration", + "url": "https://www.youtube.com/watch?v=iYM2zFP3Zn0", + "type": "video" + } + ] + }, + "3pu2bGhoCLFIs6kNanwtz": { + "title": "TLS & HTTPS", + "description": "TLS (Transport Layer Security) is a cryptographic protocol that provides privacy and data integrity between two communicating applications. It is widely used to secure HTTP, although it can be used with any protocol. TLS is often used in combination with HTTPS, which is HTTP over TLS.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "SSL and HTTPS", + "url": "https://www.youtube.com/watch?v=S2iBR2ZlZf0", + "type": "video" + }, + { + "title": "SSL/TLS - Cristina Formaini", + "url": "https://www.youtube.com/watch?v=Rp3iZUvXWlM", + "type": "video" + } + ] + }, + "u-c-UFegRb7xqsmvj9gVb": { + "title": "Sockets", + "description": "A socket is an interface for network communication. It is a way for two programs to communicate with each other over a network. 
It is a way for a client to send a request to a server and for the server to send a response back to the client.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "A Beginners Guide to WebSockets", + "url": "https://www.youtube.com/watch?v=8ARodQ4Wlf4", + "type": "video" + }, + { + "title": "WebSockets in 100 Seconds & Beyond with Socket.io", + "url": "https://www.youtube.com/watch?v=1BfCnjr_Vjg", + "type": "video" + } + ] + }, + "JnJbJtsqKbVETY2vdLqCO": { + "title": "Security", + "description": "Web security refers to the protective measures taken by the developers to protect the web applications from threats that could affect the business.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "OWASP Web Application Security Testing Checklist", + "url": "https://github.com/0xRadi/OWASP-Web-Checklist", + "type": "opensource" + }, + { + "title": "Why HTTPS Matters", + "url": "https://developers.google.com/web/fundamentals/security/encrypt-in-transit/why-https", + "type": "article" + }, + { + "title": "Wikipedia - OWASP", + "url": "https://en.wikipedia.org/wiki/OWASP", + "type": "article" + }, + { + "title": "OWASP Top 10 Security Risks", + "url": "https://sucuri.net/guides/owasp-top-10-security-vulnerabilities-2021/", + "type": "article" + }, + { + "title": "OWASP Cheatsheets", + "url": "https://cheatsheetseries.owasp.org/cheatsheets/AJAX_Security_Cheat_Sheet.html", + "type": "article" + }, + { + "title": "Content Security Policy (CSP)", + "url": "https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP", + "type": "article" + }, + { + "title": "Explore top posts about Security", + "url": "https://app.daily.dev/tags/security?ref=roadmapsh", + "type": "article" + }, + { + "title": "MIT 6.858 Computer Systems Security, Fall 2014", + "url": "https://www.youtube.com/playlist?list=PLUl4u3cNGP62K2DjQLRxDNRi0z2IRWnNh", + "type": "video" + } + ] + }, + "bDZ34BPm3lX06ERSE10cY": { + "title": "Public Key Cryptography", + 
"description": "Public-key cryptography, or asymmetric cryptography, is the field of cryptographic systems that use pairs of related keys. Each key pair consists of a public key and a corresponding private key. Key pairs are generated with cryptographic algorithms based on mathematical problems termed one-way functions.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Public-key cryptography - Wikipedia", + "url": "https://en.wikipedia.org/wiki/Public-key_cryptography", + "type": "article" + }, + { + "title": "Explore top posts about Cryptography", + "url": "https://app.daily.dev/tags/cryptography?ref=roadmapsh", + "type": "article" + }, + { + "title": "Public Key Cryptography - Computerphile", + "url": "https://www.youtube.com/watch?v=GSIDS_lvRv4", + "type": "video" + }, + { + "title": "Public Key Cryptography: RSA Encryption Algorithm", + "url": "https://www.youtube.com/watch?v=wXB-V_Keiu8", + "type": "video" + } + ] + }, + "7r7o8pYhFHVAJIv0wNT6X": { + "title": "Hashing / Encryption / Encoding", + "description": "Hashing is a one-way function that takes an input and produces a fixed-length output. The output is called a hash. The hash is a unique representation of the input. The hash is deterministic, meaning that the same input will always produce the same hash. The hash is irreversible, meaning that it is impossible to go from the hash back to the original input. The hash is collision-resistant, meaning that it is impossible to find two different inputs that produce the same hash.\n\nEncryption is a two-way function that takes an input and produces an output. The output is called ciphertext. The ciphertext is a unique representation of the input. The ciphertext is deterministic, meaning that the same input will always produce the same ciphertext. The ciphertext is reversible, meaning that it is possible to go from the ciphertext back to the original input. 
The ciphertext is collision-resistant, meaning that it is impossible to find two different inputs that produce the same ciphertext.\n\nEncoding is a two-way function that takes an input and produces an output. The output is called encoded text. The encoded text is a unique representation of the input. The encoded text is deterministic, meaning that the same input will always produce the same encoded text. The encoded text is reversible, meaning that it is possible to go from the encoded text back to the original input. The encoded text is not collision-resistant, meaning that it is possible to find two different inputs that produce the same encoded text.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Explore top posts about Encryption", + "url": "https://app.daily.dev/tags/encryption?ref=roadmapsh", + "type": "article" + }, + { + "title": "Encoding, Encryption and Hashing -- Whats the Difference?", + "url": "https://www.youtube.com/watch?v=-bAnBzvMLig", + "type": "video" + } + ] + }, + "EX_e4B6G07zTb4JjJ7482": { + "title": "Hashing Algorithms", + "description": "Hashing algorithms are used to generate a unique value for a given input. This value is called a hash. 
Hashing algorithms are used to verify the integrity of data, to store passwords, and to generate unique identifiers for data.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Explore top posts about Algorithms", + "url": "https://app.daily.dev/tags/algorithms?ref=roadmapsh", + "type": "article" + }, + { + "title": "Hashing Algorithms and Security - Computerphile", + "url": "https://www.youtube.com/watch?v=b4b8ktEV4Bg", + "type": "video" + }, + { + "title": "Top Hashing Algorithms In Cryptography | MD5 and SHA 256 Algorithms Explained | Simplilearn", + "url": "https://www.youtube.com/watch?v=Plp4F3ZfC7A", + "type": "video" + }, + { + "title": "SHA: Secure Hashing Algorithm - Computerphile", + "url": "https://www.youtube.com/watch?v=DMtFhACPnTY", + "type": "video" + } + ] + }, + "3rPSp135TdSCyvXzEzn4p": { + "title": "OWASP Top 10", + "description": "OWASP or Open Web Application Security Project is an online community that produces freely-available articles, methodologies, documentation, tools, and technologies in the field of web application security.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "OWASP Web Application Security Testing Checklist", + "url": "https://github.com/0xRadi/OWASP-Web-Checklist", + "type": "opensource" + }, + { + "title": "Wikipedia - OWASP", + "url": "https://en.wikipedia.org/wiki/OWASP", + "type": "article" + }, + { + "title": "OWASP Top 10 Security Risks", + "url": "https://sucuri.net/guides/owasp-top-10-security-vulnerabilities-2021/", + "type": "article" + }, + { + "title": "OWASP Cheatsheets", + "url": "https://cheatsheetseries.owasp.org/cheatsheets/AJAX_Security_Cheat_Sheet.html", + "type": "article" + } + ] + }, + "-emdKpD5uRNuvem5rbFXJ": { + "title": "How Computers Work", + "description": "Computers are everywhere. They are in our phones, our cars, our homes, and even in our pockets. But how do they actually work? 
How do they take in information, and how do they output information?\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "How CPU executes a program", + "url": "https://www.youtube.com/watch?v=XM4lGflQFvA", + "type": "video" + }, + { + "title": "How computers calculate - ALU", + "url": "https://youtu.be/1I5ZMmrOfnA", + "type": "video" + }, + { + "title": "Registers and RAM", + "url": "https://youtu.be/fpnE6UAfbtU", + "type": "video" + }, + { + "title": "The Central Processing Unit (CPU)", + "url": "https://youtu.be/FZGugFqdr60", + "type": "video" + }, + { + "title": "Instructions and Programs", + "url": "https://youtu.be/zltgXvg6r3k", + "type": "video" + } + ] + }, + "1eglba39q426Nh0E0qcdj": { + "title": "How CPU Executes Programs", + "description": "Visit the following resources to learn more:", + "links": [ + { + "title": "Explore top posts about Computing", + "url": "https://app.daily.dev/tags/computing?ref=roadmapsh", + "type": "article" + }, + { + "title": "How CPU executes a program", + "url": "https://www.youtube.com/watch?v=XM4lGflQFvA", + "type": "video" + } + ] + }, + "GDLKJkKgB-i7n0YcV2NDa": { + "title": "How Computers Calculate", + "description": "Visit the following resources to learn more:", + "links": [ + { + "title": "How computers calculate - ALU", + "url": "https://youtu.be/1I5ZMmrOfnA", + "type": "video" + } + ] + }, + "U3379F4AO1KSmGtVmPr27": { + "title": "Registers and RAM", + "description": "**_Registers_** are the smallest data-holding elements built into the processor itself. Registers are the memory locations that are directly accessible by the processor. The registers hold the instruction or operands currently accessed by the CPU.\n\nRegisters are the high-speed accessible storage elements. The processor accesses the registers within one CPU clock cycle. 
The processor can decode the instructions and perform operations on the register contents at more than one operation per CPU clock cycle.\n\n**_Memory_** is a hardware device that stores computer programs, instructions, and data. The memory that is internal to the processor is primary memory (RAM), and the memory that is external to the processor is secondary (**Hard Drive**). Primary memory or RAM is a volatile memory, meaning the primary memory data exist when the system's power is on, and the data vanishes as the system is switched off. The primary memory contains the data required by the currently executing program in the CPU. If the data required by the processor is not in primary memory, then the data is transferred from secondary storage to primary memory, and then it is fetched by the processor.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Registers and RAM", + "url": "https://youtu.be/fpnE6UAfbtU", + "type": "video" + } + ] + }, + "AxiGqbteK7ZSXEUt_zckH": { + "title": "Instructions and Programs", + "description": "Visit the following resources to learn more:", + "links": [ + { + "title": "Instructions and Programs", + "url": "https://youtu.be/zltgXvg6r3k", + "type": "video" + } + ] + }, + "DjTQjMbika4_yTzrBpcmB": { + "title": "CPU Cache", + "description": "Visit the following resources to learn more:", + "links": [ + { + "title": "Explore top posts about Computing", + "url": "https://app.daily.dev/tags/computing?ref=roadmapsh", + "type": "article" + }, + { + "title": "MIT 6.004 L15: The Memory Hierarchy", + "url": "https://www.youtube.com/watch?v=vjYF_fAZI5E&list=PLrRW1w6CGAcXbMtDFj205vALOGmiRc82-&index=24", + "type": "video" + }, + { + "title": "MIT 6.004 L16: Cache Issues", + "url": "https://www.youtube.com/watch?v=ajgC3-pyGlk&index=25&list=PLrRW1w6CGAcXbMtDFj205vALOGmiRc82-", + "type": "video" + } + ] + }, + "ETEUA7jaEGyOEX8tAVNWs": { + "title": "Processes and Threads", + "description": "Processes and threads are the 
basic building blocks of a computer program. They are the smallest units of execution in a program. A process is an instance of a program that is being executed. A thread is a sequence of instructions within a process that can be executed independently of other code.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Whats the difference between Process and a Thread?", + "url": "https://www.quora.com/What-is-the-difference-between-a-process-and-a-thread", + "type": "article" + }, + { + "title": "Operating Systems and System Programming", + "url": "https://archive.org/details/ucberkeley-webcast-PL-XXv-cvA_iBDyz-ba4yDskqMDY6A1w_c", + "type": "article" + } + ] + }, + "RbdT5MOE4L-E7PPWKRITX": { + "title": "Process Forking", + "description": "Process forking is a way to create a new process from an existing process. The new process is a copy of the existing process. The new process is called a child process and the existing process is called a parent process.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Understanding fork() system call for new process creation", + "url": "https://www.youtube.com/watch?v=PwxTbksJ2fo", + "type": "video" + }, + { + "title": "fork() and exec() System Calls", + "url": "https://www.youtube.com/watch?v=IFEFVXvjiHY", + "type": "video" + }, + { + "title": "The fork() function in C", + "url": "https://www.youtube.com/watch?v=cex9XrZCU14", + "type": "video" + } + ] + }, + "jVsZFTzyrYEDyR8LiBQL0": { + "title": "Memory Management", + "description": "Memory management is the process of allocating and deallocating memory. 
It is a very important part of any programming language.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "MIT 6.004 L15: The Memory Hierarchy", + "url": "https://www.youtube.com/watch?v=vjYF_fAZI5E&list=PLrRW1w6CGAcXbMtDFj205vALOGmiRc82-&index=24", + "type": "video" + }, + { + "title": "MIT 6.004 L16: Cache Issues", + "url": "https://www.youtube.com/watch?v=ajgC3-pyGlk&index=25&list=PLrRW1w6CGAcXbMtDFj205vALOGmiRc82-", + "type": "video" + } + ] + }, + "O6-vG3FuoYet4D0hbiyrv": { + "title": "Lock / Mutex / Semaphore", + "description": "A lock allows only one thread to enter the part that's locked and the lock is not shared with any other processes.\n\nA mutex is the same as a lock but it can be system wide (shared by multiple processes).\n\nA semaphore does the same as a mutex but allows x number of threads to enter, this can be used for example to limit the number of cpu, io or ram intensive tasks running at the same time.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is the difference between lock, mutex and semaphore?", + "url": "https://stackoverflow.com/questions/2332765/what-is-the-difference-between-lock-mutex-and-semaphore", + "type": "article" + }, + { + "title": "What is a Semaphore", + "url": "https://stackoverflow.com/questions/34519/what-is-a-semaphore/40238#40238", + "type": "article" + } + ] + }, + "xUo5Ox_HTgGyeQMDIkVyK": { + "title": "Concurrency in Multiple Cores", + "description": "Visit the following resources to learn more:", + "links": [ + { + "title": "What is the difference between multicore and concurrent programming?", + "url": "https://stackoverflow.com/questions/5372861/what-is-the-difference-between-multicore-and-concurrent-programming", + "type": "article" + }, + { + "title": "Concurrency in Multicore systems", + "url": "https://cs.stackexchange.com/questions/140793/concurrency-in-multiple-core", + "type": "article" + } + ] + }, + "Ge2nagN86ofa2y-yYR1lv": { + "title": 
"Scheduling Algorithms", + "description": "CPU Scheduling is the process of selecting a process from the ready queue and allocating the CPU to it. The selection of a process is based on a particular scheduling algorithm. The scheduling algorithm is chosen depending on the type of system and the requirements of the processes.\n\nHere is the list of some of the most commonly used scheduling algorithms:\n\n* **First Come First Serve (FCFS):** The process that arrives first is allocated the CPU first. It is a non-preemptive algorithm.\n* **Shortest Job First (SJF):** The process with the smallest execution time is allocated the CPU first. It is a non-preemptive algorithm.\n* **Shortest Remaining Time First (SRTF):** The process with the smallest remaining execution time is allocated the CPU first. It is a preemptive algorithm.\n* **Round Robin (RR):** The process is allocated the CPU for a fixed time slice. The time slice is usually 10 milliseconds. It is a preemptive algorithm.\n* **Priority Scheduling:** The process with the highest priority is allocated the CPU first. It is a preemptive algorithm.\n* **Multi-level Queue Scheduling:** The processes are divided into different queues based on their priority. The process with the highest priority is allocated the CPU first. It is a preemptive algorithm.\n* **Multi-level Feedback Queue Scheduling:** The processes are divided into different queues based on their priority. The process with the highest priority is allocated the CPU first. If a process is preempted, it is moved to the next queue. It is a preemptive algorithm.\n* **Lottery Scheduling:** The process is allocated the CPU based on a lottery system. It is a preemptive algorithm.\n* **Multilevel Feedback Queue Scheduling:** The processes are divided into different queues based on their priority. The process with the highest priority is allocated the CPU first. If a process is preempted, it is moved to the next queue. 
It is a preemptive algorithm.", + "links": [] + }, + "cpQvB0qMDL3-NWret7oeA": { + "title": "CPU Interrupts", + "description": "CPU Interrupts are a way for the CPU to communicate with the rest of the computer. They are a way for the CPU to tell the rest of the computer that it needs to do something. For example, if the CPU is running a program and it needs to read from the keyboard, it will send an interrupt to the keyboard to tell it to send the data to the CPU. The CPU will then wait for the keyboard to send the data and then continue running the program.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Explore top posts about Computing", + "url": "https://app.daily.dev/tags/computing?ref=roadmapsh", + "type": "article" + }, + { + "title": "Video on Interrupts", + "url": "https://youtu.be/iKlAWIKEyuw", + "type": "video" + } + ] + }, + "IEX6v_MYpE5Ylk_28K2ZU": { + "title": "Processes vs Threads", + "description": "Processes and threads are the basic building blocks of a computer program. They are the smallest units of execution in a program. A process is an instance of a program that is being executed. A thread is a sequence of instructions within a process that can be executed independently of other code.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Whats the difference between Process and a Thread?", + "url": "https://www.quora.com/What-is-the-difference-between-a-process-and-a-thread", + "type": "article" + }, + { + "title": "Operating Systems and System Programming", + "url": "https://archive.org/details/ucberkeley-webcast-PL-XXv-cvA_iBDyz-ba4yDskqMDY6A1w_c", + "type": "article" + } + ] + }, + "C1eNsPjiQo8INbvL2Lt7L": { + "title": "K-D Trees", + "description": "K-D Trees are a data structure that allow for fast nearest neighbor search in high dimensional spaces. 
They are a generalization of binary search trees, and are used in a variety of applications, including computer vision and computational geometry.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "K-D Tree Algorithm", + "url": "https://www.youtube.com/watch?v=Y4ZgLlDfKDg", + "type": "video" + }, + { + "title": "K-d Trees - Computerphile", + "url": "https://www.youtube.com/watch?v=BK5x7IUTIyU", + "type": "video" + } + ] + }, + "K96ggeWqd5OwoNnkL04pc": { + "title": "Skip Lists", + "description": "Skip lists are a data structure that allows you to perform operations on a sorted list in O(log n) time. Skip lists are a probabilistic data structure, which means that the probability of a certain operation taking a certain amount of time is a certain value. In the case of skip lists, the probability of an operation taking O(log n) time is 1.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Skip Lists - Wikipedia", + "url": "https://en.wikipedia.org/wiki/Skip_list", + "type": "article" + }, + { + "title": "Randomization: Skip Lists", + "url": "https://www.youtube.com/watch?v=2g9OSRKJuzM&index=10&list=PLUl4u3cNGP6317WaSNfmCvGym2ucw3oGp", + "type": "video" + } + ] + }, + "pLBfkzi0qfgVRqi_4AmMI": { + "title": "Co-NP", + "description": "Co-NP stands for the complement of NP Class. 
It means if the answer to a problem in Co-NP is No, then there is proof that can be checked in polynomial time.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Trying to understand P vs NP vs NP Complete vs NP Hard", + "url": "https://softwareengineering.stackexchange.com/questions/308178/trying-to-understand-p-vs-np-vs-np-complete-vs-np-hard", + "type": "article" + }, + { + "title": "Complexity: P, NP, NP-completeness, Reductions", + "url": "https://www.youtube.com/watch?v=eHZifpgyH_4&list=PLUl4u3cNGP6317WaSNfmCvGym2ucw3oGp&index=22", + "type": "video" + }, + { + "title": "Complexity: Approximation Algorithms", + "url": "https://www.youtube.com/watch?v=MEz1J9wY2iM&list=PLUl4u3cNGP6317WaSNfmCvGym2ucw3oGp&index=24", + "type": "video" + }, + { + "title": "Complexity: Fixed-Parameter Algorithms", + "url": "https://www.youtube.com/watch?v=4q-jmGrmxKs&index=25&list=PLUl4u3cNGP6317WaSNfmCvGym2ucw3oGp", + "type": "video" + }, + { + "title": "Lecture 23: Computational Complexity", + "url": "https://www.youtube.com/watch?v=moPtwq_cVH8&list=PLUl4u3cNGP61Oq3tWYp6V_F-5jb5L2iHb&index=24", + "type": "video" + }, + { + "title": "Greedy Algs. 
II & Intro to NP Completeness", + "url": "https://youtu.be/qcGnJ47Smlo?list=PLFDnELG9dpVxQCxuD-9BSy2E7BWY3t5Sm&t=2939", + "type": "video" + }, + { + "title": "NP Completeness II & Reductions", + "url": "https://www.youtube.com/watch?v=e0tGC6ZQdQE&index=16&list=PLFDnELG9dpVxQCxuD-9BSy2E7BWY3t5Sm", + "type": "video" + }, + { + "title": "NP Completeness III", + "url": "https://www.youtube.com/watch?v=fCX1BGT3wjE&index=17&list=PLFDnELG9dpVxQCxuD-9BSy2E7BWY3t5Sm", + "type": "video" + }, + { + "title": "NP Completeness IV", + "url": "https://www.youtube.com/watch?v=NKLDp3Rch3M&list=PLFDnELG9dpVxQCxuD-9BSy2E7BWY3t5Sm&index=18", + "type": "video" + }, + { + "title": "CSE373 2020 - Lecture 23 - NP-Completeness", + "url": "https://www.youtube.com/watch?v=ItHp5laE1VE&list=PLOtl7M3yp-DX6ic0HGT0PUX_wiNmkWkXx&index=23", + "type": "video" + }, + { + "title": "CSE373 2020 - Lecture 24 - Satisfiability", + "url": "https://www.youtube.com/watch?v=inaFJeCzGxU&list=PLOtl7M3yp-DX6ic0HGT0PUX_wiNmkWkXx&index=24", + "type": "video" + }, + { + "title": "CSE373 2020 - Lecture 25 - More NP-Completeness", + "url": "https://www.youtube.com/watch?v=B-bhKxjZLlc&list=PLOtl7M3yp-DX6ic0HGT0PUX_wiNmkWkXx&index=25", + "type": "video" + }, + { + "title": "CSE373 2020 - Lecture 26 - NP-Completeness Challenge", + "url": "https://www.youtube.com/watch?v=_EzetTkG_Cc&list=PLOtl7M3yp-DX6ic0HGT0PUX_wiNmkWkXx&index=26", + "type": "video" + } + ] + }, + "hTPLcuOW7eEE5oYhNfmOG": { + "title": "Bitwise Operators", + "description": "Bitwise operators are used to perform operations on individual bits of a number. 
They are used in cryptography, image processing, and other applications.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Bit Manipulation", + "url": "https://www.youtube.com/watch?v=7jkIUgLC29I", + "type": "video" + }, + { + "title": "Binary: Plusses & Minuses (Why We Use Twos Complement) - Computerphile", + "url": "https://www.youtube.com/watch?v=lKTsv6iVxV4", + "type": "video" + }, + { + "title": "Algorithms: Bit Manipulation", + "url": "https://www.youtube.com/watch?v=NLKQEOgBAnw", + "type": "video" + } + ] + } +} \ No newline at end of file diff --git a/public/roadmap-content/cyber-security.json b/public/roadmap-content/cyber-security.json new file mode 100644 index 000000000..d19159fb3 --- /dev/null +++ b/public/roadmap-content/cyber-security.json @@ -0,0 +1,2243 @@ +{ + "oimYzZYFXKjgvc7D4c-2u": { + "title": "Fundamental IT Skills", + "description": "Basic IT skills are the foundation for understanding and navigating the digital world, as well as playing a crucial role in cyber security. Given below are some essential IT skills that will help you enhance your experience with technology and better protect your digital assets.\n\nComputer Navigation\n-------------------\n\nUnderstanding how to navigate a computer's operating system is a vital skill. This includes knowing how to:\n\n* Power on/off the device\n* Manage files and folders\n* Use shortcuts and right-click options\n* Install and uninstall software\n* Customize settings\n\nInternet Usage\n--------------\n\nHaving a working knowledge of how to navigate the internet will allow you to access information and resources more efficiently. Key skills include:\n\n* Web browsing\n* Internet searching\n* Bookmark management\n* Downloading files\n* Understanding hyperlinks and web addresses\n* Recognizing secure websites\n\nEmail Management\n----------------\n\nCommunication using email is an essential aspect of the modern digital world. 
Important email management skills are:\n\n* Creating and organizing contacts\n* Composing, sending, and receiving emails\n* Detecting and avoiding spam and phishing emails\n* Managing email attachments\n* Understanding email etiquette\n\nWord Processing\n---------------\n\nWord processing is a basic IT skill that is useful in both personal and professional environments. Skills related to word processing include:\n\n* Formatting text (font, size, bold, italic, etc.)\n* Creating and editing documents\n* Copying and pasting text\n* Inserting images and tables\n* Saving and printing documents\n\nSoftware and Application Installation\n-------------------------------------\n\nBeing able to install and manage software can make your experience with technology more efficient and tailored to your needs. Basic software-related skills include:\n\n* Identifying reliable sources for downloading software\n* Installing and updating applications\n* Uninstalling unwanted or unnecessary programs\n* Configuring applications according to your preferences\n* Updating software to prevent vulnerabilities\n\nDigital Security Awareness\n--------------------------\n\nAs the digital world is constantly evolving, so too are cyber threats. Therefore, remaining vigilant and familiarizing yourself with common cyber security practices is crucial. 
Some fundamental digital security skills include:\n\n* Creating strong, unique passwords\n* Ensuring a secure and updated Wi-Fi connection\n* Recognizing and avoiding phishing attempts\n* Keeping software and operating systems updated\n* Regularly backing up data\n\nBy honing these basic IT skills, you will be better prepared to navigate and protect your digital life, as well as making the most of the technology at your fingertips.", + "links": [ + { + "title": "Explore top posts about Career", + "url": "https://app.daily.dev/tags/career?ref=roadmapsh", + "type": "article" + }, + { + "title": "IT skills Training for beginners | Complete Course", + "url": "https://www.youtube.com/watch?v=On6dsIp5yw0", + "type": "video" + } + ] + }, + "Ih0YZt8u9vDwYo8y1t41n": { + "title": "Computer Hardware Components", + "description": "When it comes to understanding basic IT skills, one cannot overlook the importance of familiarizing yourself with the essential computer hardware components. These are the physical parts that make up a computer system, and understanding their functions will help you troubleshoot issues and maintain your device better. Here's a brief overview of some of the primary computer hardware components:\n\nCentral Processing Unit (CPU)\n-----------------------------\n\nThe CPU serves as the heart and brain of a computer. It performs all the processing inside the computer and is responsible for executing instructions, performing calculations, and managing the flow of data.\n\n**Key Points:**\n\n* Considered the \"brain\" of the computer.\n* Performs all the major processes and calculations.\n\nMotherboard\n-----------\n\nThe motherboard is the main circuit board that connects all components of the computer. 
It provides a central hub for communication between the CPU, memory, and other hardware components.\n\n**Key Points:**\n\n* Connects all other hardware components.\n* Allows components to communicate with each other.\n\nMemory (RAM)\n------------\n\nRandom Access Memory (RAM) is where data is temporarily stored while the computer is powered on. The data is constantly accessed, written, and rewritten by the CPU. The more RAM a system has, the more tasks it can process simultaneously.\n\n**Key Points:**\n\n* Temporary storage for data while the computer is on.\n* More RAM allows for better multitasking.\n\nStorage (Hard Drives)\n---------------------\n\nStorage devices like hard disk drives (HDD) or solid-state drives (SSD) are used to store data permanently on the computer, even when the device is powered off. Operating systems, software, and user files are stored on these drives.\n\n**Key Points:**\n\n* Permanent storage for data.\n* Comes in HDD and SSD types, with SSDs being faster but more expensive.\n\nGraphics Processing Unit (GPU)\n------------------------------\n\nThe GPU is responsible for rendering images, videos, and animations on the computer screen. Its main function is to handle and display graphics, making your visuals smooth and responsive.\n\n**Key Points:**\n\n* Handles and processes graphics and visuals.\n* Important for gaming, video editing, and graphic design tasks.\n\nPower Supply Unit (PSU)\n-----------------------\n\nThe power supply unit provides the necessary power to all components in the computer. It converts the AC power from the wall socket into the DC power that the computer's components require.\n\n**Key Points:**\n\n* Provides power to all computer components.\n* Converts AC power to DC power.\n\nInput/Output Devices\n--------------------\n\nInput devices, such as a mouse, keyboard, or scanner, are used to interact with and input data into the computer. 
Output devices, like the display monitor and speakers, present information and data in a format we can understand.\n\n**Key Points:**\n\n* Input devices allow users to interact with the computer.\n* Output devices present information to the user.\n\nBy understanding these essential computer hardware components, you can enhance your knowledge of how a computer functions and improve your IT troubleshooting and maintenance skills. Happy computing!", + "links": [ + { + "title": "Explore top posts about Hardware", + "url": "https://app.daily.dev/tags/hardware?ref=roadmapsh", + "type": "article" + }, + { + "title": "What does what in your computer? Computer parts Explained", + "url": "https://youtu.be/ExxFxD4OSZ0", + "type": "video" + } + ] + }, + "F1QVCEmGkgvz-_H5lTxY2": { + "title": "Connection Types and their function", + "description": "In the realm of cyber security, understanding various connection types is crucial in maintaining a secure network environment. This section will provide you with an overview of different connection types commonly encountered in IT and their impact on security.\n\nWired Connections\n-----------------\n\nEthernet is the most widespread and commonly used wired connection type. It provides a secure, high-speed data transmission between devices, such as computers, routers, and switches, using Category 5 (Cat5) or higher cables. Ethernet connections are generally considered more reliable and secure compared to wireless connections because they are less vulnerable to interference and unauthorized access.\n\nUSB (Universal Serial Bus)\n--------------------------\n\nUSB is a popular connection type, primarily used for connecting peripheral devices such as keyboards, mice, and storage devices to computers. While USB provides a convenient way of expanding a computer's functionality, it also poses security risks. 
Using untrusted USB devices can lead to the spread of malware, making it essential to ensure that only trusted devices are connected to your system.\n\nWireless Connections\n--------------------\n\nWi-Fi is the most prevalent wireless connection type, allowing devices to connect to the internet and each other without the need for physical cables. Although Wi-Fi provides greater flexibility and mobility, it introduces additional security risks. To minimize these risks, always use encryption (preferably WPA3 or WPA2), strong passwords, and update your router's firmware regularly.\n\nBluetooth\n---------\n\nBluetooth is another widely used wireless connection type, primarily designed for short-range communication between devices such as smartphones, speakers, and headsets. While Bluetooth offers convenience, it can also be susceptible to attacks, such as Bluesnarfing and Bluejacking. To mitigate these risks, keep your devices updated, use Bluetooth 4.0 or higher, and disable Bluetooth when not in use.\n\nNetwork Connections\n-------------------\n\nA VPN is a secure tunnel that creates a private network connection over a public network (such as the internet) by encrypting data transfers between devices. VPNs help protect sensitive information from being intercepted by unauthorized parties and are especially useful when accessing public Wi-Fi hotspots. Always use trusted VPN providers to ensure your data remains encrypted and private.\n\nPeer-to-Peer (P2P)\n------------------\n\nP2P is a decentralized connection type where devices connect directly with each other, without the need for a central server. P2P is commonly used for file-sharing services and can pose significant security risks if utilized without adequate security measures in place. To minimize risks, avoid using untrusted P2P services and refrain from sharing sensitive information on such networks.\n\nIn summary, understanding and managing different connection types is an essential aspect of cyber security. 
By using secure connections and taking preventive measures, you can reduce the risk of unauthorized access, data breaches, and other malicious activities.", + "links": [ + { + "title": "Connection & Service Types Pt. 1", + "url": "https://youtu.be/TzEMiD2mc-Q", + "type": "video" + }, + { + "title": "Connection & Services Types Pt. 2", + "url": "https://youtu.be/4N3M1aKzoyQ", + "type": "video" + } + ] + }, + "pJUhQin--BGMuXHPwx3JJ": { + "title": "OS-Independent Troubleshooting", + "description": "OS-independent troubleshooting techniques are essential for every cybersecurity professional since they allow you to effectively diagnose and resolve issues on any operating system (OS). By using these OS-agnostic skills, you can quickly resolve problems and minimize downtime.\n\nUnderstanding Common Symptoms\n-----------------------------\n\nIn order to troubleshoot effectively, it is important to recognize and understand the common symptoms encountered in IT systems. These can range from hardware-related issues, such as overheating or physical damage, to software-related problems, such as slow performance or unresponsiveness.\n\nBasic Troubleshooting Process\n-----------------------------\n\nFollowing a systematic troubleshooting process is critical, regardless of the operating system. Here are the basic steps you might follow:\n\n* **Identify the problem**: Gather information on the issue and its symptoms, and attempt to reproduce the problem, if possible. Take note of any error messages or unusual behaviors.\n* **Research and analyze**: Search for potential causes and remedies on relevant forums, web resources, or vendor documentation.\n* **Develop a plan**: Formulate a strategy to resolve the issue, considering the least disruptive approach first, where possible.\n* **Test and implement**: Execute the proposed solution(s) and verify if the problem is resolved. 
If not, repeat the troubleshooting process with a new plan until the issue is fixed.\n* **Document the process and findings**: Record the steps taken, solutions implemented, and results to foster learning and improve future troubleshooting efforts.\n\nIsolating the Problem\n---------------------\n\nTo pinpoint the root cause of an issue, it's important to isolate the problem. You can perform this by:\n\n* **Disabling or isolating hardware components**: Disconnect any peripherals or external devices, then reconnect and test them one by one to identify the defective component(s).\n* **Checking resource usage**: Utilize built-in or third-party tools to monitor resource usage (e.g., CPU, memory, and disk) to determine whether a bottleneck is causing the problem.\n* **Verifying software configurations**: Analyze the configuration files or settings for any software or applications that could be contributing to the problem.\n\nNetworking and Connectivity Issues\n----------------------------------\n\nEffective troubleshooting of network-related issues requires an understanding of various protocols, tools, and devices involved in networking. Here are some basic steps you can follow:\n\n* **Verify physical connectivity**: Inspect cables, connectors, and devices to ensure all components are securely connected and functioning correctly.\n* **Confirm IP configurations**: Check the system's IP address and related settings to ensure it has a valid IP configuration.\n* **Test network services**: Use command-line tools, such as `ping` and `traceroute` (or `tracert` in Windows), to test network connections and diagnose potential problems.\n\nLog Analysis\n------------\n\nLogs are records of system events, application behavior, and user activity, which can be invaluable when troubleshooting issues. 
To effectively analyze logs, you should:\n\n* **Identify relevant logs**: Determine which log files contain information related to the problem under investigation.\n* **Analyze log content**: Examine events, error messages, or patterns that might shed light on the root cause of the issue.\n* **Leverage log-analysis tools**: Utilize specialized tools or scripts to help parse, filter, and analyze large or complex log files.\n\nIn conclusion, developing OS-independent troubleshooting skills allows you to effectively diagnose and resolve issues on any system. By following a structured approach, understanding common symptoms, and utilizing the appropriate tools, you can minimize downtime and maintain the security and efficiency of your organization's IT systems.", + "links": [ + { + "title": "How to identify 9 signs of Operating System.", + "url": "https://bro4u.com/blog/how-to-identify-9-signs-of-operating-system", + "type": "article" + }, + { + "title": "Troubleshooting guide", + "url": "https://cdnsm5-ss6.sharpschool.com/userfiles/servers/server_20856499/file/teacher%20pages/lindsay%20dolezal/it%20essentials/5.6.pdf", + "type": "article" + } + ] + }, + "_7RjH4Goi0x6Noy6za0rP": { + "title": "Understand Basics of Popular Suites", + "description": "Software suites are widely used in professional and personal environments and provide various tools to perform tasks such as word processing, data management, presentations, and communication. 
Familiarity with these suites will allow you to perform essential tasks while also maintaining cyber hygiene.\n\nMicrosoft Office\n----------------\n\nMicrosoft Office is the most widely used suite of applications, consisting of programs such as:\n\n* _Word_: A powerful word processor used for creating documents, reports, and letters.\n* _Excel_: A versatile spreadsheet application used for data analysis, calculations, and visualizations.\n* _PowerPoint_: A presentation software for designing and displaying slideshows.\n* _Outlook_: A comprehensive email and calendar management tool.\n* _OneNote_: A digital notebook for organizing and storing information.\n\nMicrosoft Office is available both as a standalone product and as part of the cloud-based Office 365 subscription, which includes additional features and collaboration options.\n\nGoogle Workspace (formerly G Suite)\n-----------------------------------\n\nGoogle Workspace is a cloud-based suite of productivity tools by Google, which includes widely known applications such as:\n\n* _Google Docs_: A collaborative word processor that seamlessly integrates with other Google services.\n* _Google Sheets_: A robust spreadsheet application with a wide array of functions and capabilities.\n* _Google Slides_: A user-friendly presentation tool that makes collaboration effortless.\n* _Google Drive_: A cloud storage service that allows for easy storage, sharing, and syncing of files.\n* _Gmail_: A popular email service with advanced filtering and search capabilities.\n* _Google Calendar_: A scheduling and event management application that integrates with other Google services.\n\nGoogle Workspace is particularly popular for its real-time collaboration capabilities and ease of access through web browsers.\n\nLibreOffice\n-----------\n\nLibreOffice is a free, open-source suite of applications that offers a solid alternative to proprietary productivity suites. 
It includes tools such as:\n\n* _Writer_: A word processor that supports various document formats.\n* _Calc_: A powerful spreadsheet application with extensive formula and function libraries.\n* _Impress_: A presentation software that supports customizable templates and animations.\n* _Base_: A database management tool for creating and managing relational databases.\n* _Draw_: A vector graphics editor for creating and editing images and diagrams.\n\nLibreOffice is compatible with various platforms, including Windows, macOS, and Linux, and provides excellent support for standard file formats.\n\nIn conclusion, being proficient in using these popular software suites will not only improve your basic IT skills but also help you maintain good cybersecurity practices. Familiarity with these suites will enable you to effectively manage and secure your digital assets while also identifying potential vulnerabilities that may arise during their use. Stay tuned for further topics on enhancing your cybersecurity knowledge.", + "links": [] + }, + "T0aU8ZQGShmF9uXhWY4sD": { + "title": "Basics of Computer Networking", + "description": "Computer networking refers to the practice of connecting two or more computing devices, creating an infrastructure in which they can exchange data, resources, and software. It is a fundamental part of cyber security and IT skills. In this chapter, we will cover five aspects of computer networking, including networking devices, network types, network protocols, IP addresses, and the OSI model.\n\nNetworking Devices\n------------------\n\nSeveral devices enable and facilitate communication between different devices. 
Common networking devices include:\n\n* **Hubs**: Devices that connect different devices together, transmitting data packets to all devices on the network.\n* **Switches**: Similar to hubs, but transmit data packets only to specific devices instead of broadcasting to all.\n* **Routers**: Devices that direct data packets between networks and provide the best path for data packets to reach their destination.\n* **Firewalls**: Devices or software that monitor and filter incoming and outgoing network traffic, allowing only authorized data to pass through.\n\nNetwork Types\n-------------\n\nThere are various types of networks based on the distance they cover, and the number of devices they connect. A few common network types are:\n\n* **Personal Area Network (PAN)**: Connects devices within an individual workspace, typically within a range of 10 meters.\n* **Local Area Network (LAN)**: Covers a small geographical area, such as a home or office, connecting multiple computers and other devices.\n* **Wide Area Network (WAN)**: Covers a larger geographical area, interconnecting different LANs, often using leased telecommunication lines or wireless links.\n* **Virtual Private Network (VPN)**: A secure network established over the public internet, encrypting the data transferred and restricting access to authorized users only.\n\nNetwork Protocols\n-----------------\n\nProtocols are sets of rules that govern the communication between devices within a network. 
Some of the most common protocols include:\n\n* **Transmission Control Protocol (TCP)**: Ensures the reliable transmission of data and establishes connections between devices.\n* **Internet Protocol (IP)**: Facilitates the transmission of data packets, assigning unique IP addresses to identify devices.\n* **User Datagram Protocol (UDP)**: A lightweight, fast, but less reliable protocol compared to TCP, often used for streaming and gaming applications.\n\nIP Addresses\n------------\n\nAn IP address is a unique identifier assigned to every device in a network. There are two types of IP addresses:\n\n* **IPv4**: Uses a 32-bit addressing system, allowing for approximately 4.3 billion unique IP addresses.\n* **IPv6**: Uses a 128-bit addressing system, providing a significantly larger number of available IP addresses.\n\nIP addresses can also be categorized as dynamic or static, depending on whether they change over time or remain constant for a device.\n\nOSI Model\n---------\n\nThe Open Systems Interconnection (OSI) model is a conceptual framework used to understand and describe how different network protocols interact. 
It divides networking functions into seven distinct layers:\n\n* **Physical Layer**: Deals with the physical connection between devices, including cabling and hardware.\n* **Data Link Layer**: Handles the communication between adjacent devices on the same network.\n* **Network Layer**: Identifies the best route for data packets and manages IP addresses.\n* **Transport Layer**: Ensures the reliable transmission of data, including error checking and flow control.\n* **Session Layer**: Establishes, maintains, and terminates connections between applications on different devices.\n* **Presentation Layer**: Translates data into a format that is suitable for transmission between devices.\n* **Application Layer**: Represents the user interface with which applications interact.\n\nMastering the basics of computer networking is key to understanding and implementing effective cyber security measures. This chapter has covered essential networking concepts, but it is important to continually expand your knowledge in this ever-evolving field.", + "links": [ + { + "title": "What is Computer Networking?", + "url": "https://tryhackme.com/room/whatisnetworking", + "type": "article" + }, + { + "title": "Explore top posts about Networking", + "url": "https://app.daily.dev/tags/networking?ref=roadmapsh", + "type": "article" + }, + { + "title": "Learn Networking in 3 hours (basics for cybersecurity and DevOps)", + "url": "https://www.youtube.com/watch?v=iSOfkw_YyOU&t=1549s", + "type": "video" + } + ] + }, + "hwAUFLYpc_ftCfXq95dey": { + "title": "NFC", + "description": "**Near Field Communication**, or **NFC**, is a short-range wireless communication technology that enables devices to interact with each other within a close proximity, typically within a few centimeters. 
It operates at a frequency of 13.56 MHz and can be used for various applications, such as contactless payment systems, secure access control, and data sharing between devices like smartphones, tablets, and other compatible gadgets.\n\nHow NFC works\n-------------\n\nWhen two NFC-enabled devices are brought close to each other, a connection is established, and they can exchange data with each other. This communication is enabled through _NFC Tags_ and _NFC Readers_. NFC Tags are small integrated circuits that store and transmit data, while NFC Readers are devices capable of reading the data stored in NFC Tags.\n\nNFC Modes\n---------\n\nNFC operates primarily in three modes:\n\n* **Reader/Writer Mode**: This mode enables the NFC device to read or write data from or to NFC Tags. For example, you can scan an NFC Tag on a poster to access more information about a product or service.\n* **Peer-to-Peer Mode**: This mode allows two NFC-enabled devices to exchange information directly. Examples include sharing data such as contact information, photos, or connecting devices for multiplayer gaming.\n* **Card Emulation Mode**: This mode allows an NFC device to act like a smart card or access card, enabling contactless payment and secure access control applications.\n\nSecurity Concerns\n-----------------\n\nWhile NFC brings convenience through its numerous applications, it also poses security risks, and it's essential to be aware of these. 
Some possible concerns include:\n\n* **Eavesdropping**: Attackers can potentially intercept data exchange between NFC devices if they manage to get into the communication range.\n* **Data manipulation**: Attackers might alter or manipulate the data exchanged between the devices.\n* **Unauthorized access**: An attacker can potentially exploit a vulnerability in your device, and gain unauthorized access to sensitive information.\n\nSecurity Best Practices\n-----------------------\n\nTo minimize the risks associated with NFC, follow these best practices:\n\n* Keep your device's firmware and applications updated to minimize known vulnerabilities.\n* Use strong and unique passwords for secure NFC applications and services.\n* Turn off NFC when not in use to prevent unauthorized access.\n* Be cautious when scanning unknown NFC Tags and interacting with unfamiliar devices.\n* Ensure you're using trusted and secure apps to handle your NFC transactions.\n\nIn conclusion, understanding the basics of NFC and adhering to security best practices will help ensure that you can safely and effectively use this innovative technology.", + "links": [ + { + "title": "The Beginner's Guide to NFCs", + "url": "https://www.spiceworks.com/tech/networking/articles/what-is-near-field-communication/", + "type": "article" + }, + { + "title": "NFC Guide: All You Need to Know About Near Field Communication", + "url": "https://squareup.com/us/en/the-bottom-line/managing-your-finances/nfc", + "type": "article" + }, + { + "title": "NFC Explained: What is NFC? How NFC Works? Applications of NFC", + "url": "https://youtu.be/eWPtt2hLnJk", + "type": "video" + } + ] + }, + "fUBNKHNPXbemRYrnzH3VT": { + "title": "WiFi", + "description": "**WiFi** stands for \"wireless fidelity\" and is a popular way to connect to the internet without the need for physical cables. 
It uses radio frequency (RF) technology to communicate between devices, such as routers, computers, tablets, smartphones, and other hardware.\n\nAdvantages of WiFi\n------------------\n\nWiFi has several advantages over wired connections, including:\n\n* **Convenience**: Users can access the internet from anywhere within the WiFi signal's range, providing flexibility and mobility.\n \n* **Easy Setup**: WiFi devices connect to the internet simply by entering a password once, without the need for any additional cables or adapters.\n \n* **Scalability**: WiFi networks can easily expand to accommodate additional devices without the need for significant infrastructure changes.\n \n\nSecurity Risks and WiFi Threats\n-------------------------------\n\nDespite its numerous benefits, WiFi also brings potential security risks. Some common threats include:\n\n* **Eavesdropping**: Hackers can intercept data transmitted over a WiFi connection, potentially accessing sensitive information such as personal or financial details.\n \n* **Rogue access points**: An unauthorized user could set up a fake WiFi network that appears legitimate, tricking users into connecting and providing access to their devices.\n \n* **Man-in-the-middle attacks**: An attacker intercepts data transmission between your device and the WiFi network, potentially altering data or injecting malware.\n \n\nBest Practices for Secure WiFi Connections\n------------------------------------------\n\nTo protect yourself and your devices, follow these best practices:\n\n* **Use strong encryption**: Ensure your WiFi network uses the latest available encryption standards, such as WPA3 or, at minimum, WPA2.\n \n* **Change default credentials**: Change the default username and password for your WiFi router to prevent unauthorized access and configuration.\n \n* **Keep your router firmware up to date**: Regularly check for and install any available firmware updates to prevent potential security vulnerabilities.\n \n* 
**Create a guest network**: If you have visitors or clients, set up a separate guest network for them to use. This ensures your primary network remains secure.\n \n* **Disable WiFi Protected Setup (WPS)**: Although WPS can simplify the connection process, it may also create security vulnerabilities. Disabling it forces users to connect via the more secure password method.\n \n* **Use a Virtual Private Network (VPN)**: Connect to the internet using a VPN, which provides a secure, encrypted tunnel for data transmission.\n \n\nBy understanding the potential security risks associated with WiFi connections and following these best practices, you can enjoy the convenience, flexibility, and mobility of WiFi while ensuring a secure browsing experience.", + "links": [ + { + "title": "Wireless Networks - Howstuffworks", + "url": "https://computer.howstuffworks.com/wireless-network.htm", + "type": "article" + }, + { + "title": "That's How Wi-Fi Works", + "url": "https://youtu.be/hePLDVbULZc", + "type": "video" + } + ] + }, + "DbWf5LdqiByPiJa4xHtl_": { + "title": "Bluetooth", + "description": "**Bluetooth** is a wireless technology used to transfer data between devices over short distances. It operates in the 2.4 GHz frequency band and offers a reasonably secure means of communication between devices like smartphones, computers, headphones, and more.\n\nBelow are some key points about Bluetooth:\n\n* **Short-range communication**: Bluetooth typically works within a radius of 10 meters (33 feet), giving it a significant advantage in terms of power consumption when compared to other wireless technologies such as Wi-Fi. The short range also reduces the chances of interference between devices.\n \n* **Low power consumption**: Bluetooth devices are designed to use relatively low power compared to other wireless technologies. 
This aspect contributes to their widespread adoption in battery-powered devices like wearable gadgets and IoT sensors.\n \n* **Convenience**: Bluetooth allows for easy, automatic connection between devices once they have been paired. This 'pair and play' functionality ensures users can quickly establish connectivity between their devices with minimal effort.\n \n* **Security**: Bluetooth includes security features like encryption and authentication, which ensure secure communication between paired devices. However, users must remain vigilant in terms of keeping their devices up-to-date with the latest Bluetooth security patches and protocols.\n \n* **Potential vulnerabilities**: Despite its built-in security measures, Bluetooth is not immune to cyber attacks. Some common risks include \"bluejacking\" (unauthorized sending of messages or files), \"bluesnarfing\" (unauthorized access to device data), and \"BlueBorne\" (an attack vector that exploits Bluetooth connections to infiltrate devices and spread malware). Users should be cautious in their usage of Bluetooth and follow best practices like not accepting unknown connection requests and turning off Bluetooth when not in use.\n \n\nIn conclusion, Bluetooth offers a convenient means of connecting devices wirelessly. 
While it provides reasonably secure communication, users must stay informed about potential vulnerabilities and follow good security practices to safeguard their devices.", + "links": [ + { + "title": "Bluetooth security risks to know (and how to avoid them)", + "url": "https://us.norton.com/blog/mobile/bluetooth-security", + "type": "article" + }, + { + "title": "Bluetooth security best practices from official website", + "url": "https://www.bluetooth.com/learn-about-bluetooth/key-attributes/bluetooth-security/", + "type": "article" + }, + { + "title": "Explore top posts about Bluetooth", + "url": "https://app.daily.dev/tags/bluetooth?ref=roadmapsh", + "type": "article" + } + ] + }, + "KsZ63c3KQLLn373c5CZnp": { + "title": "Infrared", + "description": "Infrared (IR) is a type of wireless communication technology that utilizes light waves in the electromagnetic spectrum to transmit data between devices. Infrared connections are widely used in short-range communication, commonly found in devices like remote controls, wireless keyboards and mice, and computer-to-printer communication. 
Let's take a closer look at the features of infrared connectivity:\n\nAdvantages of Infrared Connections\n----------------------------------\n\n* **Privacy:** Since IR signals don't penetrate walls, there's less chance of interference or eavesdropping from neighboring devices.\n* **Ease of setup:** Infrared devices often require minimal setup, making them easy to use and hassle-free.\n* **Low power consumption:** Infrared connections typically consume little power, which is suitable for battery-operated devices.\n\nDisadvantages of Infrared Connections\n-------------------------------------\n\n* **Limited range:** Infrared transmissions have a short range, usually up to only a few meters.\n* **Line-of-sight transmission:** The signal gets blocked if objects are in the way between the sender and the receiver, as IR uses line-of-sight transmission.\n* **Slower data transfer rates:** Infrared connections have slower data transfer rates compared to other wireless technologies like Wi-Fi or Bluetooth.\n\nInfrared Security Considerations\n--------------------------------\n\nWhile infrared connections are generally secure due to their limited range and inability to penetrate walls, they are still susceptible to attacks. An attacker with direct access to the transmission path can intercept, modify or inject data into the communication.\n\nTo maintain security in infrared connections, consider the following precautions:\n\n* **Encryption:** Use encryption methods to protect sensitive data transmitted over infrared connections.\n* **Authentication:** Implement authentication mechanisms that confirm the identities of devices before allowing access.\n* **Physical security:** Ensure that devices using infrared communication are located in secure areas, limiting the possibility of tampering or eavesdropping.\n\nIn summary, infrared is a useful technology for short-range communication purposes with certain benefits, such as privacy and low power consumption. 
However, it also has limitations and security considerations that must be addressed.", + "links": [] + }, + "E7yfALgu9E2auOYDOTmex": { + "title": "iCloud", + "description": "iCloud is a cloud storage and cloud computing service provided by Apple Inc. It allows users to store data, such as documents, photos, and music, on remote servers and synchronize them across their Apple devices, including iPhones, iPads, and MacBooks.\n\nFeatures and Benefits\n---------------------\n\niCloud offers a range of features and benefits that enhance the user experience and improve security. Here are some key aspects of the service:\n\n* **iCloud Storage**: Users are provided with 5 GB of free storage space on iCloud, and they can upgrade to higher plans (50 GB, 200 GB, or 2 TB) for an additional cost. This storage can be used for documents, photos, videos, backups, and app data.\n \n* **iCloud Backup**: iCloud automatically backs up essential data from iOS devices when they are connected to Wi-Fi and charging. This includes app data, device settings, messages, and much more. In case of device loss or replacement, users can restore the backup to the new device.\n \n* **iCloud Photos**: This feature allows users to automatically upload and store their photos and videos on iCloud, making them accessible across all their devices. iCloud also syncs edits, deletions, and album organization, ensuring that the photo library stays updated across all devices.\n \n* **Find My**: This service helps users locate their lost Apple devices using their iCloud account on another device. It also offers features like remote device lock and erase, ensuring that user data remains secure even if the device cannot be recovered.\n \n* **iCloud Drive**: Users can store documents and files of various types in iCloud Drive, making them accessible from all devices. 
This feature is built into the Mac Finder and can also be accessed via the Files app on iOS devices or the iCloud website.\n \n* **App-specific Data Sync**: Many apps can make use of iCloud to sync their data across devices. This enables a seamless experience, ensuring that users can pick up where they left off regardless of the device they are using.\n \n\nSecurity\n--------\n\nApple takes the security of iCloud very seriously and has implemented multiple layers of protection to keep user data safe. Some of these measures include:\n\n* **Encryption**: Data stored on iCloud is encrypted during transit and on the server. Photos, documents, and other data are secured using a minimum of 128-bit AES encryption.\n* **Two-Factor Authentication (2FA)**: Users can enable 2FA for their Apple ID to add an extra layer of security. This requires an additional verification step (such as entering a code received on a trusted device) when signing into iCloud or any Apple service.\n* **Secure Tokens**: Apple uses secure tokens for authentication, which means that your iCloud password is not stored on your devices or on Apple's servers.\n\nOverall, iCloud is a convenient and secure way for Apple device users to store and synchronize their data across devices. This cloud-based service offers numerous features to ensure seamless access and enhanced protection for user data.", + "links": [ + { + "title": "All about iCloud", + "url": "https://www.intego.com/mac-security-blog/everything-you-can-do-with-icloud-the-complete-guide/", + "type": "article" + } + ] + }, + "IOK_FluAv34j3Tj_NvwdO": { + "title": "Google Suite", + "description": "Google Suite, also known as G Suite or Google Workspace, is a collection of cloud-based productivity and collaboration tools developed by Google. These tools are designed to help individuals and businesses collaborate more efficiently and effectively. 
Here is a summary of some of the most popular tools in Google Suite:\n\nGoogle Drive\n------------\n\nGoogle Drive is a cloud storage service that allows users to store files, sync them across devices, and easily share them with others. With Google Drive, users get 15 GB of free storage, while more storage can be purchased as needed.\n\nGoogle Docs, Sheets, and Slides\n-------------------------------\n\nThese are the office suite tools that include a word processor (Docs), a spreadsheet program (Sheets), and a presentation program (Slides). All of these applications are web-based, allowing users to create, edit, and share documents in real-time with colleagues or collaborators. They also come with a variety of built-in templates, making it easier for users to quickly create and format their documents.\n\nGoogle Forms\n------------\n\nGoogle Forms is a tool for creating custom online forms and surveys. Users can design forms with various question types, including multiple-choice, dropdown, and text-based questions. The data collected from the forms can be automatically organized and analyzed in Google Sheets.\n\nGoogle Calendar\n---------------\n\nA powerful scheduling tool, Google Calendar allows users to create and manage individual or shared calendars. Users can create events, invite attendees, and set reminders for themselves or others. Google Calendar also integrates with Gmail, allowing users to create and update events directly from their email.\n\nGmail\n-----\n\nGmail is a widely-used email service that provides a clean and user-friendly interface, powerful search capabilities, and excellent spam filtering. Gmail also integrates with other Google tools, making it a seamless part of the overall suite.\n\nGoogle Meet\n-----------\n\nGoogle Meet is a video conferencing tool that allows users to host and join secure video meetings. With a G Suite account, users can schedule and join meetings directly from Google Calendar. 
Google Meet also supports screen sharing, breakout rooms, and live captioning during meetings.\n\nGoogle Chat\n-----------\n\nGoogle Chat is a communication platform for teams that provides direct messaging, group conversations, and virtual meeting spaces. Users can create chat rooms for specific projects or topics, collaborate on documents in real-time, and make use of Google Meet for video chats.\n\nThese are just some of the many tools offered by Google Suite. This platform is a popular choice for individuals, teams, and organizations looking for a comprehensive and efficient way to manage their work and communication needs.", + "links": [] + }, + "-5haJATqlmj0SFSFAqN6A": { + "title": "MS Office Suite", + "description": "Microsoft Office Suite, often referred to as MS Office, is one of the most widely-used software suites for productivity, communication, and document creation. It is a comprehensive set of applications designed to increase efficiency in both professional and personal settings. Below is an overview of the key applications within the MS Office Suite:\n\n* **Microsoft Word:** A versatile word processing application that allows users to create, format, and edit text documents. It is equipped with various tools for formatting, spell-checking, and collaborating in real-time with others.\n \n* **Microsoft Excel:** Excel is a powerful spreadsheet application that enables users to create, edit, and analyze data in a tabulated format. Functions and formulas simplify complicated calculations while charts and graphs help visualize data.\n \n* **Microsoft PowerPoint:** PowerPoint is a widely-used presentation software that allows users to create visually engaging slides with various multimedia elements. It is an effective tool for sharing ideas, data and presenting complex concepts in an understandable format.\n \n* **Microsoft Outlook:** Outlook is an email management system that integrates emails, calendars, tasks, and contacts into a single platform. 
It enables users to efficiently manage their inboxes, organize schedules and manage contacts.\n \n* **Microsoft OneNote:** OneNote is a digital notebook that allows users to take notes, annotate, and capture and store information from various sources (including web pages), organize it intuitively, and sync it across devices.\n \n* **Microsoft Access:** Access is a relational database management system that provides users with the tools needed to create, modify, and store data in an organized manner.\n \n\nAs part of Microsoft's Office 365 subscription, users also have access to cloud-based services like OneDrive, Skype for Business, and Microsoft Teams, which further enhance collaboration and productivity.\n\nWhen considering your cyber security strategy, it is essential to ensure that your MS Office applications are always up-to-date. Regular updates improve security, fix bugs, and protect against new threats. Additionally, it is crucial to follow best practices, such as using strong passwords and only downloading reputable add-ins, to minimize potential risks.", + "links": [] + }, + "wkuE_cChPZT2MHyGjUuU4": { + "title": "HackTheBox", + "description": "Hack The Box (HTB) is a popular online platform designed for security enthusiasts, penetration testers, and ethical hackers to develop and enhance their skills by engaging in real-world cybersecurity challenges. The platform provides a wide array of virtual machines (VMs), known as \"boxes,\" each with a unique set of security vulnerabilities to exploit.\n\nFeatures of Hack The Box\n------------------------\n\n* **Lab Environment:** HTB offers a secure and legal environment for hacking challenges. 
The platform provides a VPN connection to a private network where the vulnerable machines (boxes) are hosted.\n \n* **Various Difficulty Levels:** The boxes on HTB come in varying levels of difficulty (easy, medium, hard, and insane), allowing users of different skill levels to participate and learn progressively.\n \n* **New Challenges Regularly:** New boxes are added to the platform regularly, ensuring that participants can continuously learn and enhance their cybersecurity skills.\n \n* **Community-driven:** The HTB community often collaborates and shares knowledge, techniques, and experiences, fostering a sense of camaraderie among members.\n \n* **Competition:** Users can compete against one another by attempting to solve challenges as quickly as possible and get to the top of the leaderboard.\n \n\nParticipation Process\n---------------------\n\n* **Registration:** To get started with HTB, you will need to register for an account on the platform. Interestingly, the registration itself is a hacking challenge where you are required to find an invite code using your web application penetration testing skills. This unique invitation process ensures that only interested and skilled individuals join the community.\n \n* **Connect to the VPN:** After registration, connect to the HTB private network using the provided VPN configuration file. This allows you to access the lab environment and the boxes.\n \n* **Select a Box and Hack it:** Browse the list of available boxes, select one that suits your skill level, and start hacking! Each box has a specific set of objectives like finding particular files, referred to as \"flags,\" that are hidden on the machines. These flags contain proof of your exploit and are used for scoring and ranking purposes.\n \n* **Submit Flags and Write-ups:** Upon solving a challenge, submit the flags you found to gain points and secure your spot on the leaderboard. 
Additionally, once a box is retired from the platform, you can create and share write-ups of your solution technique with the community.\n \n\nHack The Box is an excellent resource for anyone looking to enhance their cybersecurity skills or explore the ethical hacking domain. Whether you're a beginner or a seasoned expert, HTB offers an engaging and collaborative environment to learn and grow as a cybersecurity professional.", + "links": [ + { + "title": "HackTheBox website", + "url": "https://www.hackthebox.com/", + "type": "article" + }, + { + "title": "HTB Academy", + "url": "https://academy.hackthebox.com/", + "type": "article" + }, + { + "title": "Explore top posts about Security", + "url": "https://app.daily.dev/tags/security?ref=roadmapsh", + "type": "article" + } + ] + }, + "kht-L7_v-DbglMYUHuchp": { + "title": "TryHackMe", + "description": "[TryHackMe](https://tryhackme.com/) is an online platform for learning and practicing cyber security skills. It offers a wide range of cybersecurity challenges, known as \"rooms\", which are designed to teach various aspects of cybersecurity, such as ethical hacking, penetration testing, and digital forensics.\n\nKey Features:\n-------------\n\n* **Rooms**: Rooms are tasks and challenges that cover a wide range of topics and difficulty levels. 
Each room has specific learning objectives, resources, and guidance to help you learn and apply cybersecurity concepts.\n \n* **Hands-on Learning**: TryHackMe focuses on providing practical, hands-on experience by giving participants access to virtual machines to put their knowledge to the test.\n \n* **Gamification**: TryHackMe incorporates gamification elements such as points, badges, and leaderboards to engage users and encourage friendly competition.\n \n* **Community Collaboration**: The platform has a strong and supportive community, where users can share knowledge, ask questions, and collaborate on challenges.\n \n* **Educational Pathways**: TryHackMe offers learning pathways to guide users through a series of related rooms, helping them develop specific skills and knowledge in a structured way.\n \n\nGetting Started:\n----------------\n\nTo get started with TryHackMe, follow these steps:\n\n* Sign up for a free account at [tryhackme.com](https://tryhackme.com/).\n* Join a room based on your interests or skill level.\n* Follow the instructions and resources provided in the room to learn new concepts and complete the challenges.\n* Progress through various rooms and pathways to enhance your cybersecurity skills and knowledge.\n\nBy using TryHackMe, you'll have access to a constantly growing repository of cybersecurity challenges, tools, and resources, ensuring that you stay up-to-date with the latest developments in the field.", + "links": [] + }, + "W94wY_otBuvVW_-EFlKA6": { + "title": "VulnHub", + "description": "[VulnHub](https://www.vulnhub.com/) is a platform that provides a wide range of vulnerable virtual machines for you to practice your cybersecurity skills in a safe and legal environment. 
These machines, also known as virtual labs or boot-to-root (B2R), often mimic real-world scenarios, and are designed to train and challenge security enthusiasts, researchers, and students who want to learn how to find and exploit vulnerabilities.\n\nHow does VulnHub work?\n----------------------\n\n* **Download**: You can download a variety of virtual machines (VMs) from the VulnHub website. These VMs are usually available in `.ova`, `.vmx`, or `.vmdk` formats, which can be imported into virtualization platforms like VMware or VirtualBox.\n* **Configure**: After importing the VM, you'll need to configure the networking settings to ensure the host machine and the VM can communicate with each other.\n* **Attack**: You can now start exploring the VM, searching for vulnerabilities, and trying to exploit them. The ultimate goal is often to gain root or administrative access on the target machine.\n\nLearning Resources\n------------------\n\nVulnHub also provides learning resources like walkthroughs and hints from its community. These resources can be very helpful if you're a beginner and feeling stuck or just curious about another approach to solve a challenge. Remember that it's essential to experiment, learn from your mistakes, and improve your understanding of various cybersecurity concepts.\n\nCTF Integration\n---------------\n\nVulnHub can also be a great resource to practice for Capture The Flag (CTF) challenges. Many of the virtual machines and challenges available on VulnHub mirror the type of challenges you might encounter in a CTF competition. By practicing with these VMs, you will gain valuable experience that can be applied in a competitive CTF environment.\n\nIn summary, VulnHub is an excellent platform for anyone looking to improve their cybersecurity skills and gain hands-on experience by exploiting vulnerabilities in a safe and legal environment. 
The range of challenge difficulty ensures that both beginners and experienced security professionals can benefit from the platform while preparing for real-world scenarios and CTF competitions.", + "links": [] + }, + "pou5xHwnz9Zsy5J6lNlKq": { + "title": "picoCTF", + "description": "[PicoCTF](https://picoctf.org/) is a popular online Capture The Flag (CTF) competition designed for beginners and experienced cyber security enthusiasts alike. It is organized annually by the [Plaid Parliament of Pwning (PPP)](https://ppp.cylab.cmu.edu/) team, a group of cyber security researchers and students from Carnegie Mellon University.\n\nFeatures\n--------\n\n* **Level-based Challenges**: PicoCTF offers a wide range of challenges sorted by difficulty levels. You will find challenges in topics like cryptography, web exploitation, forensics, reverse engineering, binary exploitation, and much more. These challenges are designed to build practical cybersecurity skills and engage in real-world problem-solving.\n \n* **Learning Resources**: The platform includes a collection of learning resources to help participants better understand the topics they are tackling. This allows you to quickly learn the necessary background information to excel in each challenge.\n \n* **Collaborative Environment**: Users can collaborate with a team or join a group to work together and share ideas. 
Working with others allows for hands-on practice in communication, organization, and critical thinking skills that are vital in the cybersecurity field.\n \n* **Leaderboard and Competitive Spirit**: PicoCTF maintains a growing leaderboard where participants can see their ranking, adding an exciting competitive aspect to the learning experience.\n \n* **Open for All Ages**: The competition is open to individuals of all ages, with a focus on students in middle and high school in order to cultivate the next generation of cybersecurity professionals.\n \n\nIn conclusion, PicoCTF is an excellent platform for beginners to start learning about cybersecurity, as well as for experienced individuals looking to improve their skills and compete. By participating in PicoCTF, you can enhance your knowledge, engage with the cyber security community, and hone your skills in this ever-growing field.", + "links": [] + }, + "WCeJrvWl837m1BIjuA1Mu": { + "title": "SANS Holiday Hack Challenge", + "description": "The **SANS Holiday Hack Challenge** is a popular and engaging annual cybersecurity event that features a unique blend of digital forensics, offensive security, defensive security, and other cybersecurity topics. It is hosted by the SANS Institute, one of the largest and most trusted sources for information security training, certification, and research worldwide.\n\nOverview\n--------\n\nThe SANS Holiday Hack Challenge incorporates a series of challenging and entertaining cybersecurity puzzles, with a festive holiday theme, for participants of all skill levels. The event typically takes place during the December holiday season, and participants have around a month to complete the challenges. 
It is free to participate, making the event accessible to a wide range of cybersecurity enthusiasts, from beginners to seasoned professionals.\n\nFormat\n------\n\nThe SANS Holiday Hack Challenge presents a compelling storyline where participants assume the role of a security practitioner tasked with solving various security issues and puzzles. Details of the challenges are weaved into the storyline, which may contain videos, images, and other forms of multimedia. Solving the challenges requires creative problem-solving and the application of various cybersecurity skills, including:\n\n* Digital Forensics\n* Penetration Testing\n* Reverse Engineering\n* Web Application Security\n* Cryptography\n* Defensive Security Techniques\n\nEach year, the Holiday Hack Challenge presents a new storyline and set of challenges aimed at providing real-world learning opportunities for those looking to improve their cybersecurity skills.\n\nPrizes\n------\n\nParticipants have a chance to win prestigious recognition for their performance in the challenge. By successfully solving the holiday-themed cybersecurity puzzles, participants may be awarded prizes, SANS training courses, certifications, or other recognition in the cybersecurity community.\n\nWhy Participate\n---------------\n\nThe SANS Holiday Hack Challenge is a valuable experience for people with an interest in cybersecurity, offering an entertaining and educational challenge. 
Reasons to participate include:\n\n* **Skill Development**: The challenge provides an opportunity to sharpen your technical skills in various cybersecurity domains.\n* **Networking**: Work with like-minded security enthusiasts to solve problems, share knowledge, and build connections in the industry.\n* **Recognition**: Achieve recognition for your skills and contribution to tackling real-world cybersecurity issues.\n* **Fun**: Experience the thrill of solving complex security problems while enjoying the festive theme and engaging storyline.\n\nIn conclusion, the SANS Holiday Hack Challenge offers a unique opportunity to develop your cybersecurity skills in a fun and challenging environment. Whether you are new to the field or an industry veteran, participating in this event will help you grow professionally and make valuable connections in the cybersecurity community. Don't miss the next SANS Holiday Hack Challenge!", + "links": [ + { + "title": "SANS Holiday Hack Challenge", + "url": "https://www.sans.org/holidayhack", + "type": "article" + }, + { + "title": "Explore top posts about Security", + "url": "https://app.daily.dev/tags/security?ref=roadmapsh", + "type": "article" + } + ] + }, + "lbAgU5lR1O7L_5mCbNz_D": { + "title": "CompTIA A+", + "description": "CompTIA A+ is an entry-level certification for IT professionals that focuses on essential knowledge and skills in computer hardware, software, and troubleshooting. 
This certification is widely recognized in the IT industry and can serve as a stepping stone for individuals looking to start a career in the field of information technology.\n\nObjectives\n----------\n\nThe CompTIA A+ certification aims to test and validate foundational IT knowledge and skills, including:\n\n* Installation, configuration, and upgrading of computer hardware, peripherals, and operating systems\n* Basic networking concepts and maintenance of wired and wireless networks\n* Troubleshooting and repair of computer hardware, software, and networks\n* Understanding the basics of mobile device hardware and networking\n* Familiarity with security concepts, operating system maintenance, and disaster recovery\n\nExams\n-----\n\nTo earn the CompTIA A+ certification, you'll need to pass two exams:\n\n* **CompTIA A+ 220-1101 (Core 1)**: This exam covers topics like mobile devices, networking technology, hardware, virtualization, and cloud computing.\n* **CompTIA A+ 220-1102 (Core 2)**: This exam focuses on topics such as operating systems, security, software troubleshooting, and operational procedures.\n\nBoth exams consist of 90 questions each, which you'll need to complete within 90 minutes. The passing score is 675 for Core 1 and 700 for Core 2 (on a scale of 100-900).\n\nRecommended Experience\n----------------------\n\nThough the CompTIA A+ certification is designed for beginners, it's recommended that you have at least 9-12 months of hands-on experience in the lab or field before attempting the exams. 
If you don't have prior experience, you could consider taking a training course or working through hands-on labs to gain the required knowledge and skills.\n\nBenefits\n--------\n\nAchieving a CompTIA A+ certification can offer several benefits, such as:\n\n* Establishing your credibility as an IT professional with a strong foundation in hardware, software, and networking\n* Demonstrating your commitment to continuing education and career growth in the IT industry\n* Improving your employability and widening your job prospects, especially for entry-level IT roles\n* Serving as a prerequisite for more advanced certifications, such as CompTIA Network+ and CompTIA Security+\n\nOverall, if you're an aspiring IT professional, the CompTIA A+ certification is a great starting point to kick off your IT career and begin acquiring the skills and knowledge needed to thrive in this ever-evolving industry.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Total Seminars - CompTIA A+ Core 1 (220-1101)", + "url": "https://www.udemy.com/course/comptia-aplus-core-1/", + "type": "course" + }, + { + "title": "Total Seminars - CompTIA A+ Core 2 (220-1102)", + "url": "https://www.udemy.com/course/comptia-aplus-core-2/", + "type": "course" + }, + { + "title": "Dion Training - CompTIA A+ Core 1 (220-1101)", + "url": "https://www.udemy.com/course/comptia-a-core-1/", + "type": "course" + }, + { + "title": "Dion Training - CompTIA A+ Core 2 (220-1102)", + "url": "https://www.udemy.com/course/comptia-a-core-2/", + "type": "course" + }, + { + "title": "CompTIA A+ Certification", + "url": "https://www.comptia.org/certifications/a", + "type": "article" + }, + { + "title": "CompTIA A+ 220-1101 - Professor Messer's Course FREE", + "url": "https://www.professormesser.com/free-a-plus-training/220-1101/220-1101-video/220-1101-training-course/", + "type": "article" + }, + { + "title": "CompTIA A+ 220-1102 - Professor Messer's Course FREE", + "url": 
"https://www.professormesser.com/free-a-plus-training/220-1102/220-1102-video/220-1102-training-course/", + "type": "article" + } + ] + }, + "p34Qwlj2sjwEPR2ay1WOK": { + "title": "CompTIA Linux+", + "description": "The CompTIA Linux+ certification is an entry-level certification aimed at individuals who are seeking to learn and demonstrate their skills and knowledge of the Linux operating system. This certification is widely recognized in the IT industry as an essential qualification for entry-level Linux administrators and helps them gain a strong foundation in Linux system administration tasks.\n\nOverview\n--------\n\n* **Difficulty Level:** Beginner\n* **Certification Type:** Professional\n* **Exam Format:** Multiple-choice and performance-based\n* **Duration:** 90 minutes\n* **Number of Questions:** Maximum of 90\n* **Passing Score:** 720 (on a scale of 100-900)\n\nTopics Covered\n--------------\n\nThe CompTIA Linux+ certification covers various aspects related to Linux, including:\n\n* **System Architecture:** Hardware settings, boot sequence, kernel modules, and system boot.\n \n* **Linux Installation and Package Management:** Designing hard disk layout, installing a boot manager, managing shared libraries, using Debian and RPM package management.\n \n* **GNU and Unix Commands:** Bash commands, text processing, redirection and pipes, and managing processes.\n \n* **Devices, Linux Filesystems, and Filesystem Hierarchy Standard:** Creating and configuring filesystems, maintaining the integrity of filesystems, managing disk quotas, and using file permissions to control access.\n \n* **Shells, Scripting, and Data Management:** Customizing and writing shell scripts, managing SQL data, and using regular expressions.\n \n* **User Interfaces and Desktops:** Installing X11, setting up display managers, and managing accessibility settings.\n \n* Administrative Tasks: Managing user and group accounts, automating system administration tasks, localization, and system 
logging.\n \n* Essential System Services: Configuring, managing, and troubleshooting network services, time synchronization, and system logging.\n \n* Network Fundamentals: Addressing and routing fundamentals, troubleshooting network issues, and configuring DNS clients.\n \n* Security: Perform security administration tasks, set up host security, and secure data with encryption.\n \n\nSkills Gained\n-------------\n\nBy earning the CompTIA Linux+ certification, you will be equipped with the knowledge and skills to:\n\n* Install, configure, and maintain Linux systems.\n* Perform essential Linux system administration tasks.\n* Troubleshoot and resolve issues related to Linux systems.\n* Implement basic security measures on Linux systems.\n\nExam Preparation\n----------------\n\nCompTIA provides a range of study materials and resources, including:\n\n* CompTIA Linux+ Study Guide: Thoroughly covers the exam objectives to help you prepare for the certification.\n* CompTIA Linux+ CertMaster Practice: A comprehensive online practice platform that helps you assess your knowledge and identify areas for improvement.\n* CompTIA Linux+ CertMaster Learn: Interactive learning experience offering a customizable learning path, flashcards, quizzes, and assessments.\n\nConclusion\n----------\n\nThe CompTIA Linux+ certification is an excellent starting point for aspiring Linux professionals, as it validates essential skills required for entry-level Linux administration roles. By obtaining this certification, you can enhance your career prospects and demonstrate your competence to potential employers. 
So, buckle up and start your Linux journey with the CompTIA Linux+ certification!", + "links": [ + { + "title": "Dion Training", + "url": "https://www.udemy.com/course/comptia-linux/", + "type": "course" + }, + { + "title": "Official CompTIA Linux+", + "url": "https://www.comptia.org/certifications/linux", + "type": "article" + }, + { + "title": "CompTIA Linux+ Exam Prep (XK0-005 revision)", + "url": "https://youtube.com/playlist?list=PL78ppT-_wOmuwT9idLvuoKOn6UYurFKCp&si=0OAFuOOsjko8Gg61", + "type": "video" + } + ] + }, + "4RGbNOfMPDbBcvUFWTTCV": { + "title": "CompTIA Network+", + "description": "The CompTIA Network+ is a highly sought-after certification for IT professionals who aim to build a solid foundation in networking concepts and practices. This certification is vendor-neutral, meaning that it covers a broad range of knowledge that can be applied to various network technologies, products, and solutions. The Network+ certification is designed for beginners in the world of IT networking, and it is recommended that you first obtain the [CompTIA A+ certification](#) before moving on to Network+.\n\nTopics Covered\n--------------\n\nThe CompTIA Network+ certification covers several essential networking topics, such as:\n\n* **Networking Concepts**: This includes understanding network architectures, devices, protocols, and services.\n* **Infrastructure**: Learn about the various network components such as cabling, network devices, and storage.\n* **Network Operations**: Gain knowledge on how to monitor, analyze, and optimize network performance, as well as maintain network documentation and policies.\n* **Network Security**: Understand the fundamentals of securing a network, including access control, encryption, and firewalls.\n* **Network Troubleshooting and Tools**: Learn how to troubleshoot and resolve network issues using various diagnostic tools and techniques.\n\nExam Details\n------------\n\nTo become Network+ certified, you must pass the [N10-008 
exam](https://www.comptia.org/certifications/network) or [N10-009 exam](https://www.comptia.org/certifications/network). The exam consists of:\n\n* Up to 90 questions, including multiple-choice and performance-based questions\n* Duration: 90 minutes\n* Passing Score: 720 out of 900\n* Exam Cost: $369 USD\n\nBenefits of CompTIA Network+ Certification\n------------------------------------------\n\nBy earning the CompTIA Network+ certification, you can demonstrate your competency in networking fundamentals and start your journey as an IT professional. The benefits of this certification include:\n\n* **Increased job opportunities**: A Network+ certification showcases your knowledge in networking, which can help you land entry-level positions such as network administrator or network technician.\n* **Higher salary potential**: Professionals with the Network+ certification typically enjoy higher salaries compared to their non-certified counterparts.\n* **Professional growth**: Gaining the Network+ certification helps you stay up-to-date with networking technologies and sets the stage for more advanced certifications, such as [CompTIA Security+](#) or [Cisco CCNA](#).\n* **Vendor-neutral**: Since the Network+ certification covers a broad range of networking topics, it is applicable to many different network environments and technologies.\n\nTo get started with your CompTIA Network+ certification journey, [visit the official CompTIA website](https://www.comptia.org/certifications/network) for more information on the certification, exam preparation, and testing centers.\n\nPreparation Resources\n---------------------\n\n* **Strengthen Networking Fundamentals:** The CompTIA Network+ exam emphasizes understanding networking fundamentals. 
To build a solid foundation, grasp concepts like TCP/IP protocols, subnetting, the OSI model, network devices, and addressing schemes.\n \n* **Engage in Hands-on Practice:**: Theory alone won't suffice for excelling in the N10-008 or N10-009 exam. Practical experience is crucial for understanding networking concepts and troubleshooting scenarios. Take practice exams to assess your readiness and get familiar with the exam format. Additionally, work with virtual labs to enhance your practical understanding of network configurations and troubleshooting.\n \n\nRecommended resources include:", + "links": [ + { + "title": "Total Seminars", + "url": "https://www.udemy.com/course/comptia-networkplus-certification/", + "type": "course" + }, + { + "title": "Dion Training", + "url": "https://www.udemy.com/course/comptia-network-009/", + "type": "course" + }, + { + "title": "Official CompTIA Network+", + "url": "https://www.comptia.org/certifications/network", + "type": "article" + }, + { + "title": "Professor Messer’s CompTIA N10-008 Network+ Course FREE", + "url": "https://www.professormesser.com/network-plus/n10-008/n10-008-video/n10-008-training-course/", + "type": "article" + }, + { + "title": "CompTIA Network+ Full Course FREE [23+ Hours]", + "url": "https://www.youtube.com/watch?v=xmpYfyNmWbw", + "type": "video" + } + ] + }, + "4RD22UZATfL8dc71YkJwQ": { + "title": "CCNA", + "description": "The Cisco Certified Network Associate (CCNA) certification is an entry-level certification for IT professionals who want to specialize in networking, specifically within the realm of Cisco products. This certification validates an individual's ability to install, configure, operate, and troubleshoot medium-sized routed and switched networks. 
It also covers the essentials of network security and management.\n\nKey Concepts\n------------\n\nAs a CCNA candidate, you will learn the following concepts:\n\n* Network fundamentals: understanding the basics of networking technologies, such as how devices communicate and how data is transmitted\n* LAN switching technologies: understanding how switches work and how to configure them for optimal performance\n* IPv4 and IPv6 routing technologies: learning how routers process packets and route data between networks\n* WAN technologies: understanding Wide Area Networks (WANs) and how they are used to connect geographically dispersed networks\n* Infrastructure services: learning about DHCP, DNS, and other essential network services\n* Infrastructure security: understanding how to secure network devices and implement basic security measures\n* Infrastructure management: learning about SNMP, Syslog, and other tools for network monitoring and management\n\nCCNA Exam\n---------\n\nTo obtain the CCNA certification, you will need to pass a single exam, currently the \"200-301 CCNA\" exam. This exam tests your knowledge and skills in the aforementioned key concepts. The exam consists of multiple-choice, drag-and-drop, and simulation questions that assess your understanding of networking theory, as well as your ability to perform practical tasks.\n\nWhy CCNA?\n---------\n\nA CCNA certification can provide you with a solid foundation in networking and open doors to various career opportunities, such as network administrator, network engineer, or security specialist roles. Many employers value CCNA-certified professionals for their validated skills in working with Cisco networking products and their understanding of networking fundamentals. 
Additionally, attaining a CCNA certification can serve as a stepping stone towards more advanced Cisco certifications, such as the Cisco Certified Network Professional (CCNP) and the Cisco Certified Internetwork Expert (CCIE).", + "links": [ + { + "title": "Free CCNA 200-301 | Complete Course 2023 by Jeremy's IT Lab", + "url": "https://www.youtube.com/playlist?list=PLxbwE86jKRgMpuZuLBivzlM8s2Dk5lXBQ", + "type": "video" + } + ] + }, + "AxeDcKK3cUtEojtHQPBw7": { + "title": "CompTIA Security+", + "description": "CompTIA Security+ is a highly recognized and respected certification for individuals seeking to start their careers in the field of cybersecurity. This certification is vendor-neutral, meaning it doesn't focus on any specific technology or platform, and provides a solid foundation in cybersecurity principles, concepts, and best practices.\n\nOverview\n--------\n\nThe CompTIA Security+ certification covers a variety of essential topics, including:\n\n* Network security\n* Threat management\n* Application, data, and host security\n* Access control and identity management\n* Cryptography\n* Compliance and operational security\n\nEarning the Security+ certification can open the door to various entry-level cybersecurity roles such as Security Analyst, Security Engineer, or Network Security Specialist.\n\nExam Details\n------------\n\nTo earn the CompTIA Security+ certification, candidates must pass the SY0-701 exam. The exam consists of 90 questions, which are a mix of multiple-choice and performance-based questions. Candidates are given 90 minutes to complete the exam, and a score of 750 out of 900 is required to pass.\n\nPreparation Resources\n---------------------\n\nPreparation for the CompTIA Security+ exam involves a combination of self-study, instructor-led courses, and hands-on experience in the cybersecurity field. 
Recommended resources include:\n\nWhile there are no formal prerequisites to take the Security+ exam, CompTIA recommends candidates have two years of experience in IT administration, focusing on security, and a CompTIA Network+ certification.\n\nOverall, the CompTIA Security+ certification is an excellent choice for those looking to begin their journey in cybersecurity. It provides candidates with a strong foundational knowledge, while also serving as a stepping stone for more advanced certifications in the field.", + "links": [ + { + "title": "Dion Training", + "url": "https://www.udemy.com/course/securityplus/", + "type": "course" + }, + { + "title": "Total Seminars", + "url": "https://www.udemy.com/course/total-comptia-security-plus/", + "type": "course" + }, + { + "title": "CompTIA Security+ 701 Audio Course Podcast", + "url": "https://open.spotify.com/show/1Ch1IPQc9V9FULKSBc6UfO?si=994f9ee5a0a24ee6", + "type": "podcast" + }, + { + "title": "Official CompTIA Security+ Study Guide", + "url": "https://www.comptia.org/training/books/security-sy0-701-study-guide", + "type": "article" + }, + { + "title": "CompTIA Security+ Certification Exam Details", + "url": "https://www.comptia.org/certifications/security#examdetails", + "type": "article" + }, + { + "title": "Professor Messer's Free Security+ Video Course", + "url": "https://youtube.com/playlist?list=PLG49S3nxzAnl4QDVqK-hOnoqcSKEIDDuv&si=nwydzQ13lug4ymbl", + "type": "video" + } + ] + }, + "BqvijNoRzSGYLCMP-6hhr": { + "title": "CISSP", + "description": "The Certified Information Systems Security Professional (CISSP) is a globally recognized certification offered by the International Information System Security Certification Consortium (ISC)². 
It is designed for experienced security professionals to validate their knowledge and expertise in the field of information security.\n\nWho Should Obtain the CISSP Certification?\n------------------------------------------\n\nThe CISSP certification is ideal for security consultants, managers, IT directors, security auditors, security analysts, and other professionals who are responsible for designing, implementing, and managing security for their organization. This certification is aimed at professionals with at least five years of full-time experience in two or more of the eight CISSP domains:\n\n* Security and Risk Management\n* Asset Security\n* Security Architecture and Engineering\n* Communication and Network Security\n* Identity and Access Management (IAM)\n* Security Assessment and Testing\n* Security Operations\n* Software Development Security\n\nCertification Process\n---------------------\n\nTo obtain the CISSP certification, candidates must meet the following requirements:\n\n* **Experience:** Possess a minimum of five years of cumulative, paid, full-time work experience in two or more of the eight domains of the CISSP Common Body of Knowledge (CBK).\n \n* **Exam:** Pass the CISSP examination with a minimum scaled score of 700 out of 1000 points. 
The exam consists of 100 to 150 multiple-choice and advanced innovative questions that must be completed within three hours.\n \n* **Endorsement:** After passing the exam, candidates must submit an endorsement application to be reviewed and endorsed by an (ISC)² CISSP holder within nine months of passing the exam.\n \n* **Continuing Professional Education (CPE):** To maintain the CISSP certification, professionals must earn 120 CPE credits every three years, with a minimum of 40 credits earned each year, and pay an annual maintenance fee.\n \n\nBenefits of CISSP Certification\n-------------------------------\n\nObtaining the CISSP certification comes with numerous benefits, such as:\n\n* Enhanced credibility, as the CISSP is often considered the gold standard in information security certifications.\n* Increased job opportunities, as many organizations and government agencies require or prefer CISSP-certified professionals.\n* Improved knowledge and skills, as the certification covers a broad range of security topics and best practices.\n* Higher salary potential, as CISSP-certified professionals often command higher salaries compared to their non-certified counterparts.\n* Access to a network of other CISSP-certified professionals and resources, enabling continuous learning and professional development.", + "links": [] + }, + "lqFp4VLY_S-5tAbhNQTew": { + "title": "CISA", + "description": "The **Certified Information Systems Auditor (CISA)** is a globally recognized certification for professionals who audit, control, monitor, and assess an organization's information technology and business systems.\n\nOverview\n--------\n\nCISA was established by the Information Systems Audit and Control Association (ISACA) and is designed to demonstrate an individual's expertise in managing vulnerabilities, ensuring compliance with industry regulations, and instituting controls within the business environment.\n\nWho Should Pursue CISA?\n-----------------------\n\nCISA is most 
suitable for professionals with roles such as:\n\n* IT auditors\n* IT security professionals\n* IT risk analysts\n* IT compliance analysts\n* Security consultants\n\nExam and Prerequisites\n----------------------\n\nTo earn the CISA certification, candidates must pass a comprehensive exam. The prerequisites for the CISA certification include:\n\n* Five years of professional experience in information systems auditing, control, assurance, or security work. Some substitutions and waivers can be made for education, but a minimum of two years of experience in information systems audit or control is required.\n* Agree to the ISACA Code of Professional Ethics.\n* Adherence to the CISA Continuing Professional Education (CPE) Program, which requires a minimum of 20 CPE hours annually and 120 hours of CPE in a 3-year period.\n\nThe exam itself has a duration of four hours and consists of 150 multiple-choice questions. It covers five domains:\n\n* The Process of Auditing Information Systems (21%)\n* Governance and Management of IT (16%)\n* Information Systems Acquisition, Development, and Implementation (18%)\n* Information Systems Operations, Maintenance, and Service Management (20%)\n* Protection of Information Assets (25%)\n\nBenefits of CISA Certification\n------------------------------\n\nUpon obtaining the CISA certification, some of the benefits include:\n\n* Increased credibility and recognition in the industry\n* Enhanced career prospects and job security\n* A competitive edge over non-certified professionals\n* The potential for salary increase and promotions\n* Access to a global community of certified professionals and resources\n\nOverall, the CISA certification can be a valuable asset for those looking to advance their careers in cybersecurity, particularly in the area of auditing and controlling information systems.", + "links": [] + }, + "s86x24SHPEbbOB9lYNU-w": { + "title": "CISM", + "description": "The [Certified Information Security Manager 
(CISM)](https://www.isaca.org/credentialing/cism) is an advanced cybersecurity certification offered by ISACA that focuses on information security management. It is designed for professionals who have a strong understanding of information security and are responsible for overseeing, designing, and managing an organization's information security programs.\n\nWho Should Pursue CISM Certification?\n-------------------------------------\n\nThe CISM certification is ideal for:\n\n* Information security managers\n* IT consultants\n* IT auditors\n* Senior IT professionals responsible for information security\n* Security architects and engineers\n\nExam Requirements and Process\n-----------------------------\n\nTo obtain the CISM certification, candidates must:\n\n* **Register for the CISM Exam**: You must [register](https://www.isaca.org/exams) for the exam, pay the registration fee, and select an exam date during one of the three annual exam windows.\n \n* **Meet the Experience Requirements**: You must have at least five years of experience in information security management across at least three of the four CISM domains. There is the option to waive up to two years of experience based on your education or other certifications.\n \n* **Study for the Exam**: Thorough exam preparation is essential for success. ISACA provides a range of study materials, including the [CISM Review Manual](https://www.isaca.org/bookstore), online question banks, and instructor-led courses.\n \n* **Take the Exam**: The CISM exam consists of 150 multiple-choice questions, and you have four hours to complete it. 
It covers four main domains:\n \n * Information Security Governance\n * Information Risk Management\n * Information Security Program Development and Management\n * Information Security Incident Management\n* **Maintain Your Certification**: Once you pass the exam and meet the experience requirements, you need to [apply for certification](https://www.isaca.org/credentialing/certified-information-security-manager/get-cism-certified). To maintain your CISM credential, you must earn Continuing Professional Education (CPE) hours and renew your certification every three years.\n \n\nThe CISM certification is globally recognized for its emphasis on the strategic and managerial aspects of information security. Professionals with this certification are in high demand, as they possess the knowledge and skills to develop and manage comprehensive information security programs in various organizations.", + "links": [] + }, + "nlmATCTgHoIoMcEOW8bUW": { + "title": "GSEC", + "description": "The **GIAC Security Essentials Certification (GSEC)** is an advanced cybersecurity certification that demonstrates an individual's knowledge and skills in addressing security threats and vulnerabilities in various systems. 
Developed by the Global Information Assurance Certification (GIAC), this certification is suitable for security professionals, IT managers, and network administrators who want to enhance their expertise in the core cybersecurity concepts and practices.\n\nKey Features of GSEC\n--------------------\n\n* **Comprehensive coverage of security concepts**: GSEC covers a wide range of cybersecurity topics, including risk management, cryptography, access control, authentication, network security, wireless security, web application security, and incident response.\n* **Hands-on approach**: GSEC focuses on practical, real-world situations and encourages students to develop problem-solving skills through hands-on labs and exercises.\n* **Vendor-neutral**: Unlike other certifications that focus on specific technologies or tools, GSEC is vendor-neutral and teaches concepts and techniques that can be applied in various environments and platforms.\n* **Globally recognized**: GSEC is a widely acknowledged certification among security professionals, and receiving it can help boost an individual's career in the cybersecurity industry.\n\nGSEC Exam Details\n-----------------\n\nThe GSEC exam consists of 180 questions, and candidates have a total of 5 hours to complete the test. The minimum passing score is 73%. The exam covers the following domains:\n\n* Active defense concepts\n* Authentication and access control\n* Basic understanding of cryptographic concepts\n* Incident handling and response\n* IP networking concepts and network security\n* Security policy and contingency planning\n\nPreparing for the GSEC Exam\n---------------------------\n\nTo prepare for the GSEC exam, you can use the following resources:\n\n* **GIAC's official training courses**: GIAC offers a comprehensive training course, known as \"SEC401: Security Essentials Bootcamp Style,\" to help students develop the necessary knowledge and skills for the GSEC certification exam. 
This course is available in various formats, including online, classroom-based, and on-demand.\n* **Study materials**: You can find several study guides, practice exams, and books specifically designed for GSEC exam preparation. These resources can help you deepen your understanding of the GSEC exam objectives and practice your skills through hands-on exercises.\n* **Online forums and study groups**: Participate in online forums and study groups related to GSEC and cybersecurity in general. These platforms can provide valuable insights, tips, and experiences from other security professionals and candidates preparing for the exam.\n* **GSEC Practice Exams**: GIAC offers two practice exams for the GSEC certification, which are an excellent way to assess your knowledge and identify areas that may require further attention.\n\nBy obtaining the GSEC certification, you will demonstrate your advanced knowledge and skills in cybersecurity, showcasing your ability to protect information systems and networks effectively. This certification can be a significant asset to your career and help you stand out in the competitive cybersecurity job market.", + "links": [] + }, + "t4h9rEKWz5Us0qJKXhxlX": { + "title": "GPEN", + "description": "The **GIAC Penetration Tester (GPEN)** certification is an advanced-level credential designed for professionals who want to demonstrate their expertise in the field of penetration testing and ethical hacking. 
Created by the Global Information Assurance Certification (GIAC) organization, GPEN validates an individual's ability to conduct legal, systematic, and effective penetration tests to assess the security of computer networks, systems, and applications.\n\nKey Topics\n----------\n\n* **Reconnaissance:** Utilize various methods to gather information on a target's infrastructure, services, and vulnerabilities.\n* **Scanning:** Employ tools and techniques to actively probe and evaluate target systems, including Nmap, Nessus, and Metasploit.\n* **Exploitation:** Understand how to exploit vulnerabilities effectively, including buffer overflow attacks, SQL injection, and browser-based attacks.\n* **Password Attacks:** Employ password cracking tools and techniques to bypass authentication mechanisms.\n* **Wireless and Monitoring**: Identify and exploit wireless networks, as well as monitor network traffic to uncover useful information.\n* **Post Exploitation**: Perform post-exploitation activities like privilege escalation, lateral movement, and data exfiltration.\n* **Legal and Compliance**: Understand the legal considerations involved in penetration testing, and follow industry best practices and standards.\n\nTarget Audience\n---------------\n\nThe GPEN certification is primarily aimed at cybersecurity professionals, network administrators, security consultants, and penetration testers looking to enhance their skills and reinforce their credibility in the industry.\n\nPreparing for the GPEN Exam\n---------------------------\n\nTo prepare for the GPEN exam, candidates are recommended to have a strong foundation in the fundamentals of cybersecurity, networking, and ethical hacking. GIAC offers a comprehensive training course called \"SEC560: Network Penetration Testing and Ethical Hacking\" which aligns with the GPEN exam objectives. 
However, self-study using other resources like books, articles, and online tutorials is also a viable option.\n\nExam Details\n------------\n\n* **Number of Questions:** 115\n* **Type of Questions:** Multiple-choice\n* **Duration:** 3 hours\n* **Passing Score:** 74%\n* **Exam Delivery:** Proctored, Online or at a testing center\n* **Cost:** $1,999 USD (Includes one retake)\n\nUpon successfully passing the exam, candidates will receive the GIAC Penetration Tester certification, which is valid for four years. To maintain the certification, professionals must earn 36 Continuing Professional Education (CPE) credits every four years and pay a maintenance fee to keep their credentials active.", + "links": [] + }, + "rwniCTWfYpKP5gi02Pa9f": { + "title": "GWAPT", + "description": "The **GIAC Web Application Penetration Tester (GWAPT)** certification validates an individual's ability to perform in-depth web application security assessments and exploit vulnerabilities. GWAPT focuses on using ethical hacking methodologies to conduct web application penetration testing with the goal of identifying, evaluating, and mitigating security risks.\n\nKey Concepts\n------------\n\nThe GWAPT certification covers several key concepts and areas, including but not limited to:\n\n* **Web Application Security:** Knowledge of various web application security concepts, such as authentication mechanisms, session management, input validation, and access control.\n* **Testing Methodologies:** Understanding and application of web application penetration testing methodologies, such as OWASP Testing Guide and OWASP ASVS.\n* **Vulnerability Identification and Exploitation:** Identifying, exploiting, and assessing the impact of common web application vulnerabilities such as XSS, CSRF, SQL Injection, and others.\n* **Tools and Techniques:** Mastery of various web application testing tools, such as Burp Suite, WebInspect, and others.\n* **Report Preparation and Presentation:** Ability to document 
and present findings in a clear, concise manner, which can be understood by both technical and non-technical audiences.\n\nCertification Process\n---------------------\n\nTo attain the GWAPT certification, candidates must:\n\n* Register for the GWAPT exam through the GIAC website ([www.giac.org](http://www.giac.org)).\n* Prepare for the exam by undergoing various training methods, such as attending the SEC542: Web App Penetration Testing and Ethical Hacking course by SANS, self-study, attending workshops, or gaining hands-on experience.\n* Pass the proctored 75-question multiple-choice exam with a minimum score of 68% within the 2-hour time limit.\n* Maintain the certification by earning 36 Continuing Professional Education (CPE) credits every four years and paying the renewal fee.\n\nWho Should Pursue GWAPT Certification?\n--------------------------------------\n\nThe GWAPT certification is aimed at professionals who are involved in web application security, such as penetration testers, security analysts, or application developers. Obtaining this certification demonstrates a high level of technical skill and knowledge in web application security testing, making it a valuable addition to any cybersecurity professional's credentials.\n\nBenefits of GWAPT Certification\n-------------------------------\n\n* Validates your skills and knowledge in web application security testing.\n* Enhances your professional credibility and marketability in the cybersecurity industry.\n* Provides a competitive edge over non-certified individuals.\n* Demonstrates a commitment to staying current with industry advancements and best practices.\n* Assists in advancing your career by meeting employer or client requirements for certified professionals.", + "links": [] + }, + "ZiUT-lyIBfHTzG-dwSy96": { + "title": "GIAC", + "description": "GIAC is a globally recognized organization that provides certifications for information security professionals. 
Established in 1999, its primary aim is to validate the knowledge and skills of professionals in various cybersecurity domains. GIAC certifications focus on practical and hands-on abilities to ensure that certified individuals possess the necessary expertise to tackle real-world cybersecurity challenges.\n\nGIAC Certification Categories\n-----------------------------\n\nGIAC certifications are divided into several categories, catering to different aspects of information security:\n\n* **Cyber Defense**: Certifications tailored to secure an organization's information infrastructure and develop incident response capabilities.\n* **Penetration Testing**: Certifications targeting professionals who conduct penetration tests to identify and mitigate security vulnerabilities.\n* **Incident Response and Forensics**: Certifications focusing on incident handling, forensics, and the legal aspects of cybersecurity.\n* **Management, Audit, Legal and Security Awareness**: Certifications aimed at security managers, auditors, and executives who are responsible for developing and managing security policies and procedures.\n* **Industrial Control Systems**: Certifications addressing the unique security requirements of industrial control systems and critical infrastructure.\n* **Developer**: Certifications targeting software developers and programmers to help them develop secure applications.\n\nGIAC Certification Process\n--------------------------\n\nTo obtain a GIAC certification, candidates must pass a comprehensive proctored exam that tests their knowledge and practical skills. The exams are usually associated with corresponding training courses offered by SANS Institute, a leading provider of cybersecurity training. However, taking a SANS course is not mandatory to sit for the exam. 
Individuals with sufficient knowledge and experience can directly register for a GIAC exam.\n\nThe exams typically consist of multiple-choice questions and can range from 75 to 150 questions, depending on the certification. Candidates are given 2-5 hours to complete the exam, and a passing score varies between 63% and 80%.\n\nBenefits of GIAC Certifications\n-------------------------------\n\nGIAC-certified professionals are highly sought after due to the rigorous assessment and practical skills they possess. Obtaining a GIAC certification can lead to:\n\n* Enhanced career prospects\n* Higher salary potential\n* Peer recognition\n* Demonstrated commitment to professional development\n\nIn summary, GIAC certifications are valuable and respected credentials that pave the way for a successful cybersecurity career. By completing a GIAC certification, you validate your expertise and increase your employability in the competitive field of cybersecurity.", + "links": [] + }, + "SwVGVP2bbCFs2uNg9Qtxb": { + "title": "OSCP", + "description": "Offensive Security Certified Professional (OSCP)\n------------------------------------------------\n\nThe **Offensive Security Certified Professional (OSCP)** is a highly respected and sought-after certification in the field of cybersecurity. 
This certification is designed to test your practical knowledge and skills in the identification and exploitation of vulnerabilities in a target environment, as well as your ability to effectively implement offensive security techniques to assess the security posture of networks and systems.\n\nKey Topics Covered:\n-------------------\n\n* Penetration testing methodologies\n* Advanced information gathering techniques\n* Buffer overflow attacks\n* Web application attacks\n* Various exploitation techniques\n* Privilege escalation\n* Client-side attacks\n* Post-exploitation techniques\n* Basic scripting and automation\n\nPrerequisites:\n--------------\n\nThere are no strict prerequisites for the OSCP, but it is recommended that candidates have a solid understanding of networking, system administration, and Linux/Unix command-line environments. Familiarity with basic programming concepts, scripting languages (e.g., Python, Bash), and operating system concepts will also be helpful.\n\nExam Format:\n------------\n\nTo obtain the OSCP certification, you must successfully complete the 24-hour hands-on exam, where you are required to attack and penetrate a target network, compromising several machines and completing specific objectives within the given time frame.\n\nBefore attempting the exam, candidates must complete the accompanying training course, **Penetration Testing with Kali Linux (PWK)**, which provides the necessary knowledge and practical experience required for the OSCP exam.\n\nWhy Pursue the OSCP Certification?\n----------------------------------\n\n* **Hands-on Approach:** OSCP emphasizes a practical, hands-on approach, ensuring that certified professionals possess both the theoretical knowledge and practical skills required to succeed in the cybersecurity field.\n* **Industry Recognition:** OSCP is widely recognized and respected within the cybersecurity community as a rigorous and demanding certification that validates a candidate's ability to perform 
under pressure.\n* **Career Advancement:** With the OSCP certification, you can demonstrate your advanced skills in offensive security techniques, making you a valuable asset to any security team and potentially opening up opportunities for career growth, higher salaries, and challenging roles in the industry.\n* **Continuous Learning:** Pursuing the OSCP certification will help you develop a deeper understanding of underlying vulnerabilities and attack vectors. This knowledge, combined with constantly evolving offensive security techniques, ensures that you stay ahead in the ever-changing cybersecurity landscape.\n\nObtaining the OSCP certification can be a challenging and rewarding journey that provides you with practical skills and industry recognition, enabling you to stand out as a cybersecurity professional and advance your career in the field.", + "links": [] + }, + "rA1skdztev3-8VmAtIlmr": { + "title": "CREST", + "description": "CREST is a non-profit, accreditation and certification body that represents the technical information security industry. Established in 2008, its mission is to promote the development and professionalization of the cyber security sector. CREST provides certifications for individuals and accreditations for companies, helping customers find knowledgeable and experienced professionals in the field.\n\nCREST Examinations and Certifications\n-------------------------------------\n\nCREST offers various examinations and certifications, including:\n\n* **CREST Practitioner Security Analyst (CPSA)**: This is an entry-level certification for individuals looking to demonstrate their knowledge and competence in vulnerability assessment and penetration testing. Passing the CPSA exam is a prerequisite for taking other CREST technical examinations.\n \n* **CREST Registered Penetration Tester (CRT)**: This certification is aimed at professionals with a solid understanding of infrastructure and web application penetration testing. 
CRT holders have demonstrated practical skills in identifying and exploiting vulnerabilities in a controlled environment.\n \n* **CREST Certified Infrastructure Tester (CCIT)** and **CREST Certified Web Application Tester (CCWAT)**: These advanced certifications require candidates to have a deep technical understanding and practical skills in infrastructure or web application testing, respectively. These certifications are intended for experienced professionals who can perform in-depth technical assessments and identify advanced security vulnerabilities.\n \n* **CREST Certified Simulated Attack Manager (CCSAM)** and **CREST Certified Simulated Attack Specialist (CCSAS)**: These certifications focus on the planning, scoping, and management of simulated attack engagements, or red teaming. They require candidates to have experience in both the technical and managerial aspects of coordinated cyber attacks.\n \n\nBenefits of CREST Certifications\n--------------------------------\n\nObtaining CREST certifications provides several benefits, such as:\n\n* Increased credibility and recognition within the cyber security industry\n* Validation of your technical knowledge and expertise\n* Access to resources and support through the CREST community\n* Assurance for employers and clients that you're skilled and trustworthy\n\nIn the rapidly evolving field of cyber security, CREST certifications demonstrate a commitment to continuous learning, growth, and professionalism.", + "links": [] + }, + "AAo7DXB7hyBzO6p05gx1i": { + "title": "CEH", + "description": "**Certified Ethical Hacker (CEH)** is an advanced certification focused on equipping cybersecurity professionals with the knowledge and skills required to defend against the continuously evolving landscape of cyber threats. 
This certification is facilitated by the EC-Council, an internationally recognized organization for information security certifications.\n\nObjectives\n----------\n\nThe CEH certification aims to provide professionals with the following skills:\n\n* Understand the ethics and legal requirements of ethical hacking\n* Identify and analyze common cyber threats, including malware, social engineering, and various network attacks\n* Utilize the latest penetration testing tools and methodologies to uncover vulnerabilities in systems, networks, and applications\n* Implement defensive countermeasures to protect against cyber attacks\n\nTarget Audience\n---------------\n\nThe CEH certification is ideal for:\n\n* Cybersecurity professionals seeking to expand their skill set\n* IT administrators responsible for securing their organization's systems and network\n* Penetration testers looking to demonstrate their ethical hacking capabilities\n* Security consultants who want a recognized certification in the IT security field\n\nExam Details\n------------\n\nTo become a Certified Ethical Hacker, you must pass the CEH exam, which consists of the following:\n\n* Number of Questions: 125\n* Exam Type: Multiple choice questions\n* Duration: 4 hours\n* Passing Score: 70%\n\nPreparation\n-----------\n\nTo prepare for the CEH exam, candidates can follow the EC-Council's official training course or opt for self-study. The recommended resources include:\n\n* EC-Council's [_CEH v11: Certified Ethical Hacker_](https://www.eccouncil.org/programs/certified-ethical-hacker-ceh/) training course\n* Official CEH study guide and practice exams\n* CEH-related books, articles, and online resources\n\nRecertification\n---------------\n\nCEH holders need to earn 120 ECE (Education Credits) within three years of obtaining their certification to retain their credentials. 
These credits can be obtained through training, workshops, conferences, and other continuous learning opportunities in the field of information security.", + "links": [] + }, + "UY6xdt_V3YMkZxZ1hZLvW": { + "title": "Operating Systems", + "description": "An **operating system (OS)** is a crucial component of a computer system as it manages and controls both the hardware and software resources. It provides a user-friendly interface and ensures the seamless functioning of the various applications installed on the computer.\n\nIn the context of cybersecurity, selection and proper maintenance of an operating system is paramount. This section will discuss the three major operating systems: Windows, macOS, and Linux, along with security considerations.\n\nWindows\n-------\n\nMicrosoft Windows is ubiquitous amongst desktop and laptop users, making it a primary target for cybercriminals. Attackers often focus on finding and exploiting vulnerabilities within Windows due to its extensive user-base. That said, Windows continues to enhance its built-in security features with updates and patches. Key features include:\n\n* Windows Defender: An antivirus program that detects and removes malware.\n* Windows Firewall: Monitors and controls incoming and outgoing network traffic.\n* BitLocker: A full disk encryption feature for securing data.\n\nAs a Windows user, keeping your system up-to-date and using additional security tools such as anti-malware software is vital.\n\nmacOS\n-----\n\nThe macOS, Apple's operating system for Macintosh computers, holds a reputation for strong security. Apple designed macOS with several built-in features to protect user privacy and data:\n\n* Gatekeeper: Ensures downloaded apps originate from trusted sources.\n* FileVault 2: Offers full-disk encryption for data protection.\n* XProtect: An antivirus tool that scans newly installed apps for malware.\n\nDespite macOS's sound security measures, no operating system is completely immune to threats. 
Running reputable security software and keeping your macOS updated is essential to safeguard against potential cyberattacks.\n\nLinux\n-----\n\nLinux is an open-source operating system considered to be more secure than its commercial counterparts. Linux uses a multi-user environment, mitigating the impact of potential threats by separating user information and privileges. Other notable features include:\n\n* Software Repositories: Official software repositories maintained by Linux distributions provide trusted sources for software installation.\n* SELinux (Security-Enhanced Linux): A security architecture that allows administrators to control system access.\n* System/package updates: Regular updates offered by distributions hold essential security fixes.\n\nAlthough Linux distributions are less targeted by cybercriminals, it is vital to follow security best practices, such as keeping your system updated and employing security tools like antivirus software and firewalls.\n\nRemember, the security of your operating system relies on timely updates, proper configuration, and the use of appropriate security tools. Stay vigilant and informed to ensure your system remains secure against ever-evolving cyber threats.", + "links": [] + }, + "BNUKEQ4YpZmIhSPQdkHgU": { + "title": "Windows", + "description": "Windows is a popular operating system (OS) developed by Microsoft Corporation. It was first introduced in 1985 and has since evolved to become one of the most widely used OS worldwide. 
Windows is known for its graphical user interface (GUI), and it supports a wide variety of applications, making it a versatile choice for both personal and professional use.\n\nKey Features\n------------\n\n* **Ease of use:** Windows is designed with a user-friendly interface, making it easy for users to navigate, manage files, and access applications.\n \n* **Compatibility:** Windows is compatible with a vast range of hardware and software, including most peripherals like printers, webcams, and more.\n \n* **Regular updates:** Microsoft provides regular updates for Windows, which helps maintain security, fix bugs, and enhance features.\n \n* **Large user community:** Due to its widespread use, there is a vast online community of users who provide support, solutions, and information about the platform.\n \n* **Versatile application support:** Windows supports a plethora of applications, including office productivity tools, games, multimedia software, and more.\n \n\nSecurity Features\n-----------------\n\nWindows has made significant strides to improve its security over the years. 
Some of the security features include:\n\n* **Windows Defender:** A built-in antivirus software that provides real-time protection against malware, ransomware, and other threats.\n \n* **Windows Firewall:** This feature helps protect your device from unauthorized access or intrusion by blocking potentially harmful network connections.\n \n* **User Account Control (UAC):** UAC helps prevent unauthorized changes to the system settings by prompting users for administrative permission when making system modifications.\n \n* **Windows Update:** Regular updates ensure that your system is up-to-date with the latest security patches, bug fixes, and feature improvements.\n \n* **BitLocker:** A disk encryption feature available in certain Windows editions, BitLocker helps secure your data by providing encryption for your hard drive or external storage devices.\n \n\nEssential Security Tips for Windows Users\n-----------------------------------------\n\nTo improve the security of Windows devices, users should:\n\n* Ensure that the Windows OS and all installed software are up-to-date.\n \n* Regularly update and run antivirus and anti-malware software.\n \n* Enable the built-in Windows Firewall to protect the device from unauthorized access.\n \n* Use strong and unique passwords for user accounts and enable two-factor authentication wherever possible.\n \n* Regularly back up important data to an external storage device or a secure cloud service to avoid data loss.\n \n\nBy following these security tips and staying informed about potential threats, Windows users can protect their devices and data from various cyber-attacks.", + "links": [ + { + "title": "Windows Security", + "url": "https://learn.microsoft.com/en-us/windows/security/", + "type": "article" + }, + { + "title": "Explore top posts about Windows", + "url": "https://app.daily.dev/tags/windows?ref=roadmapsh", + "type": "article" + } + ] + }, + "4frVcjYI1VlVU9hQgpwcT": { + "title": "Linux", + "description": "Linux is an 
open-source operating system (OS) that is widely popular due to its flexibility, stability, and security features. As a Unix-based OS, Linux has a command-line interface, which allows users to perform various tasks through text commands. However, graphical user interfaces (GUIs) can also be installed for ease of use.\n\nKey Features\n------------\n\n* **Open-source**: Anyone can view, modify, and distribute the Linux source code, promoting collaboration and continuous improvement within the OS community.\n* **Modular design**: Linux can be customized for various computing environments, such as desktops, servers, and embedded systems.\n* **Stability and performance**: Linux is well-known for its ability to handle heavy loads without crashing, making it an ideal choice for servers.\n* **Strong Security**: Linux has robust security mechanisms, such as file permissions, a built-in firewall, and an extensive user privilege system.\n* **Large Community**: Linux has a vast, active user community that offers a wealth of knowledge, user-contributed software, and support forums.\n\nPopular Linux Distributions\n---------------------------\n\nThere are numerous Linux distributions available, catering to specific user needs and preferences. 
Some popular distributions include:\n\n* **Ubuntu**: A user-friendly distribution suitable for beginners, often used for desktop environments.\n* **Fedora**: A cutting-edge distribution with frequent updates and innovative features, ideal for developers and advanced users.\n* **Debian**: A very stable distribution that prioritizes free software and benefits from a large, active community.\n* **Arch Linux**: A highly customizable distribution that allows users to build their system from the ground up, suited for experienced users.\n* **CentOS**: A distribution focused on stability, security, and manageability, making it a popular choice for server environments.\n\nSecurity Best Practices for Linux\n---------------------------------\n\nWhile Linux is inherently secure, there are best practices to enhance your system's security further:\n\n* Keep your system updated: Regularly update your kernel, OS packages, and installed software to ensure you have the latest security patches.\n* Enable a firewall: Configure and enable a firewall, such as `iptables`, to control incoming and outgoing network traffic.\n* Use strong passwords and user accounts: Create separate accounts with strong passwords for different users and grant them only the required privileges.\n* Disable unused services: Unnecessary services can be potential security risks; ensure only required services are running on your system.\n* Implement a Security-Enhanced Linux (SELinux) policy: SELinux provides a mandatory access control (MAC) system that restricts user and process access to system resources.\n\nBy understanding Linux's features and best practices, you can leverage its powerful capabilities and robust security features to enhance your computing environment's performance and safety.", + "links": [ + { + "title": "Linux from scratch- Cisco", + "url": "https://www.netacad.com/courses/os-it/ndg-linux-unhatched", + "type": "course" + }, + { + "title": "Learn Linux", + "url": "https://linuxjourney.com/", 
+ "type": "article" + }, + { + "title": "Linux Commands Cheat Sheet", + "url": "https://cdn.hostinger.com/tutorials/pdf/Linux-Commands-Cheat-Sheet.pdf", + "type": "article" + }, + { + "title": "Explore top posts about Linux", + "url": "https://app.daily.dev/tags/linux?ref=roadmapsh", + "type": "article" + }, + { + "title": "Linux in 100 Seconds", + "url": "https://www.youtube.com/watch?v=rrB13utjYV4", + "type": "video" + }, + { + "title": "Introduction to Linux", + "url": "https://youtu.be/sWbUDq4S6Y8", + "type": "video" + } + ] + }, + "dztwr-DSckggQbcNIi4_2": { + "title": "MacOS", + "description": "**macOS** is a series of proprietary graphical operating systems developed and marketed by Apple Inc. It is the primary operating system for Apple's Mac computers. macOS is widely recognized for its sleek design, robust performance, and innovative features, making it one of the most popular operating systems globally.\n\nKey Features\n------------\n\n* **User-friendly interface**: macOS is known for its simple and intuitive user interface, which makes it easy for users to navigate and use the system efficiently.\n \n* **Security**: macOS has several built-in security features, such as XProtect, Gatekeeper, and FileVault, to provide a secure computing environment. 
Additionally, macOS is based on UNIX, which is known for its strong security and stability.\n \n* **Integration with Apple ecosystem**: macOS is seamlessly integrated with Apple's software and hardware ecosystem, including iOS, iCloud, and other Apple devices, providing a consistent and well-connected user experience.\n \n* **App Store**: Apple's App Store offers a large and diverse selection of applications for macOS, ensuring easy and secure software downloads and installations.\n \n* **Time Machine**: macOS's Time Machine feature provides an easy and automatic way to back up your data, ensuring you never lose important files and can recover from system crashes.\n \n\nSecurity Tips\n-------------\n\n* **Keep your macOS up-to-date**: Always ensure that your macOS is running the latest version and security updates, as Apple regularly releases patches to fix potential vulnerabilities.\n \n* **Enable the Firewall**: Make sure to enable macOS's built-in firewall to protect your system from unauthorized access and potential intrusions.\n \n* **Use strong, unique passwords**: Ensure that your macOS user account is protected with a strong, unique password and enable two-factor authentication for your Apple ID.\n \n* **Be cautious with downloads**: Be careful when downloading and installing software from unknown sources. 
Use the macOS App Store whenever possible, and avoid downloading from third-party websites.\n \n* **Install antivirus software**: To add an extra layer of security, consider installing a reputable antivirus program on your Mac to protect against malware and other threats.\n \n\nBy following these security tips and staying vigilant, users can ensure their Mac remains a secure and enjoyable computing environment.", + "links": [] + }, + "02aaEP9E5tlefeGBxf_Rj": { + "title": "Installation and Configuration", + "description": "To effectively protect your systems and data, it is vital to understand how to securely install software and configure settings, as well as assess the implications and potential vulnerabilities during installation and configuration processes.\n\nImportance of Proper Installation and Configuration\n---------------------------------------------------\n\nImproper installation or configuration of software can lead to an array of security risks, including unauthorized access, data breaches, and other harmful attacks. To ensure that your system is safeguarded against these potential threats, it is essential to follow best practices for software installation and configuration:\n\n* **Research the Software**: Before installing any software or application, research its security features and reputation. Check for any known vulnerabilities, recent patches, and the software's overall trustworthiness.\n \n* **Use Official Sources**: Always download software from trusted sources, such as the software vendor's official website. Avoid using third-party download links, as they may contain malicious code or altered software.\n \n* **Verify File Integrity**: Verify the integrity of the downloaded software by checking its cryptographic hash, often provided by the software vendor. 
This ensures that the software has not been tampered with or corrupted during the download process.\n \n* **Install Updates**: During the installation process, ensure that all available updates and patches are installed, as they may contain vital security fixes.\n \n* **Secure Configurations**: Following the installation, properly configure the software by following the vendor's documentation or industry best practices. This can include adjusting settings related to authentication, encryption, and access control, among other important security parameters.\n \n\nConfiguration Considerations\n----------------------------\n\nWhile software configurations will vary depending on the specific application or system being utilized, there are several key aspects to keep in mind:\n\n* **Least Privilege**: Configure user accounts and permissions with the principle of least privilege. Limit user access to the minimal level necessary to accomplish their tasks, reducing the potential attack surface.\n \n* **Password Policies**: Implement strong password policies, including complexity requirements, minimum password length, and password expiration periods.\n \n* **Encryption**: Enable data encryption to protect sensitive information from unauthorized access. This can include both storage encryption and encryption of data in transit.\n \n* **Firewalls and Network Security**: Configure firewalls and other network security measures to limit the attack surface and restrict unauthorized access to your systems.\n \n* **Logging and Auditing**: Configure logging and auditing to capture relevant security events and allow for analysis in the event of a breach or security incident.\n \n* **Disable Unnecessary Services**: Disable any unused or unnecessary services on your systems. 
Unnecessary services can contribute to an increased attack surface and potential vulnerabilities.\n \n\nBy following these guidelines, you can establish a robust foundation for system security through proper installation and configuration. Remember that maintaining strong cybersecurity is an ongoing process that requires continuous learning and adaptation to stay ahead of evolving threats.", + "links": [] + }, + "yXOGqlufAZ69uiBzKFfh6": { + "title": "Different Versions and Differences", + "description": "In the field of cyber security, it is essential to stay up-to-date with different versions of software, tools, and technology, as well as understanding the differences between them. Regularly updating software ensures that you have the latest security features in place to protect yourself from potential threats.\n\nImportance of Versions\n----------------------\n\n* **Security**: Newer versions of software often introduce patches to fix security vulnerabilities. Using outdated software can leave your system exposed to cyber attacks.\n \n* **Features**: Upgrading to a newer version of software can provide access to new features and functionalities, improving the user experience and performance.\n \n* **Compatibility**: As technology evolves, staying up-to-date with versions helps ensure that software or tools are compatible across various platforms and devices.\n \n\nUnderstanding Differences\n-------------------------\n\nWhen we talk about differences in the context of cybersecurity, they can refer to:\n\n* **Software Differences**: Different software or tools offer different features and capabilities, so it's crucial to choose one that meets your specific needs. Additionally, open-source tools may differ from proprietary tools in terms of functionalities, licensing, and costs.\n \n* **Operating System Differences**: Cybersecurity practices may differ across operating systems such as Windows, Linux, or macOS. 
Each operating system has its own security controls, vulnerabilities, and potential attack vectors.\n \n* **Protocol Differences**: Understanding the differences between various network protocols (HTTP, HTTPS, SSH, FTP, etc.) can help you choose the most secure method for your purposes.\n \n* **Threat Differences**: Various types of cyber threats exist (e.g., malware, phishing, denial-of-service attacks), and it is crucial to understand their differences in order to implement the most effective countermeasures.\n \n\nTo sum up, keeping up with different versions of software and understanding the differences between technologies and threats are vital steps in maintaining a strong cyber security posture. Always update your software to the latest version, and continuously educate yourself on emerging threats and technologies to stay one step ahead of potential cyber attacks.", + "links": [] + }, + "MGitS_eJBoY99zOR-W3F4": { + "title": "Navigating using GUI and CLI", + "description": "Graphical User Interface (GUI) and Command Line Interface (CLI) are the two essential methods to navigate through a computer system or a network device. Both these interfaces are crucial for understanding and managing cyber security.\n\nGraphical User Interface (GUI)\n------------------------------\n\nA Graphical User Interface (GUI) is a type of user interface that allows users to interact with a software program, computer, or network device using images, icons, and visual indicators. The GUI is designed to make the user experience more intuitive, as it enables users to perform tasks using a mouse and a keyboard without having to delve into complex commands. 
Most modern operating systems (Windows, macOS, and Linux) offer GUIs as the primary means of interaction.\n\n**Advantages of GUI:**\n\n* User-friendly and visually appealing\n* Easier for beginners to learn and navigate\n* Reduces the need to memorize complex commands\n\n**Disadvantages of GUI:**\n\n* Consumes more system resources (memory, CPU) than CLI\n* Some advanced features might not be available or accessible as quickly compared to CLI\n\nCommand Line Interface (CLI)\n----------------------------\n\nA Command Line Interface (CLI) is a text-based interface that allows users to interact with computer programs or network devices directly through commands that are entered via a keyboard. CLIs are used in a variety of contexts, including operating systems (e.g., Windows Command Prompt or PowerShell, macOS Terminal, and Linux shell), network devices (such as routers and switches), and some software applications.\n\n**Advantages of CLI:**\n\n* Faster and more efficient in performing tasks once commands are known\n* Requires fewer system resources (memory, CPU) than GUI\n* Provides more control and advanced features for experienced users\n\n**Disadvantages of CLI:**\n\n* Steeper learning curve for beginners\n* Requires memorization or reference material for commands and syntax\n\nBy understanding how to navigate and use both GUI and CLI, you will be better equipped to manage and secure your computer systems and network devices, as well as perform various cyber security tasks that may require a combination of these interfaces. It is essential to be familiar with both methods, as some tasks may require the precision and control offered by CLI, while others may be more efficiently performed using a GUI.\n\nIn the following sections, we will discuss some common CLI tools and their usage, along with how to secure and manage your computer systems and network devices using these interfaces. 
Stay tuned!", + "links": [] + }, + "bTfL7cPOmBBFl-eHxUJI6": { + "title": "Understand Permissions", + "description": "Understanding permissions is crucial for maintaining a secure environment in any system. Permissions determine the level of access and control users have over files, applications, and other system resources. By setting the appropriate permissions, you can effectively limit the potential for unauthorized access and data breaches.\n\nDifferent Types of Permissions\n------------------------------\n\nPermissions can be broadly categorized into three types:\n\n* **Read (R)**: This permission level allows users to view the content of a file or folder, without the ability to make any changes or execute actions.\n* **Write (W)**: This permission level grants users the ability to create, modify, or delete files and folders.\n* **Execute (X)**: This permission level allows users to run a file or application and execute actions within it.\n\nThese permissions can be combined in different ways to form the desired access level. For example, a user may have read and write permissions for a file, allowing them to view and modify its contents, but not execute any actions within it.\n\nSetting and Managing Permissions\n--------------------------------\n\nPermissions can be set and managed using various tools and methods, depending on the operating system being used:\n\n* **Windows**: Permissions are set through Access Control Lists (ACLs) in the security properties of a file or folder. 
This allows you to grant or deny specific permissions to users and groups.\n* **Mac**: Mac uses POSIX permissions to manage access control, which can be set using the \"Get Info\" window for a file or folder, or through Terminal commands.\n* **Linux**: Permissions on Linux systems are managed using the `chmod` command, along with the `chown` and `chgrp` commands to change the ownership of files and groups.\n\nIt's essential to understand how these tools work and use them effectively to maintain a secure environment.\n\nBest Practices for Implementing Permissions\n-------------------------------------------\n\nTo ensure cyber security with permissions, follow these best practices:\n\n* **Least Privilege Principle**: Grant users the minimum level of access they need to perform their tasks. People should not have unnecessary access to sensitive information or resources.\n* **Regularly Review Permissions**: Regularly audit permissions to ensure they are up-to-date and align with the current organizational roles and responsibilities.\n* **Use Groups and Roles**: Group users based on their job roles and assign permissions to groups instead of individuals. This simplifies the permission management process.\n* **Implement Security Training**: Educate users about the importance of permissions and their responsibilities to maintain a secure environment.\n\nBy understanding permissions and following best practices, you can enhance cyber security and minimize the risk of unauthorized access and data breaches.", + "links": [ + { + "title": "Linux File Permissions (Linux Journey)", + "url": "https://linuxjourney.com/lesson/file-permissions", + "type": "article" + } + ] + }, + "Ot3LGpM-CT_nKsNqIKIye": { + "title": "Installing Software and Applications", + "description": "In the realm of cyber security, installing apps safely and securely is vital to protect your devices and personal information. 
In this guide, we'll cover some essential steps to follow when installing apps on your devices.\n\nChoose trusted sources\n----------------------\n\nTo ensure the safety of your device, always choose apps from trusted sources, such as official app stores (e.g., Google Play Store for Android or Apple's App Store for iOS devices). These app stores have strict guidelines and often review apps for malicious content before making them available for download.\n\nResearch the app and its developer\n----------------------------------\n\nBefore installing an app, it is essential to research the app and its developer thoroughly. Check for app reviews from other users and look for any red flags related to security or privacy concerns. Investigate the developer's web presence and reputation to ensure they can be trusted.\n\nCheck app permissions\n---------------------\n\nBefore installing an app, always review the permissions requested. Be aware of any unusual permissions that do not correspond with the app's functionality. If an app is asking for access to your contacts, GPS, or microphone, and there isn't a reasonable explanation for why it needs this information, it could be a potential security risk.\n\nKeep your device and apps updated\n---------------------------------\n\nTo maintain your device's security, always install updates as soon as they become available. This applies not only to the apps but also to the operating system of your device. Updates often include security patches that fix known vulnerabilities, so it is essential to keep everything up to date.\n\nInstall a security app\n----------------------\n\nConsider installing a security app from a reputable company to protect your device against malware, viruses, and other threats. 
These apps can monitor for suspicious activity, scan for malicious software, and help keep your device secure.\n\nUninstall unused apps\n---------------------\n\nRegularly review the apps on your device and uninstall any that are no longer being used. This will not only free up storage space but also reduce potential security risks that might arise if these apps are not maintained or updated by their developers.\n\nBy following these guidelines, you can significantly increase your device's security and protect your valuable data from cyber threats.", + "links": [] + }, + "zRXyoJMap9irOYo3AdHE8": { + "title": "Performing CRUD on Files", + "description": "When working with files in any system or application, understanding and performing CRUD operations (Create, Read, Update, and Delete) is essential for implementing robust cyber security measures.\n\nFile Creation\n-------------\n\n* **Windows**: You can create new files using the built-in text editor (Notepad) or dedicated file creation software. You can also use PowerShell commands for quicker file creation. The `New-Item` command followed by the file path creates a file.\n \n New-Item -Path \"C:\\Example\\example.txt\" -ItemType \"file\"\n \n \n* **Linux**: Unlike Windows, file creation is usually done through the terminal. The `touch` command helps create a file in the desired directory.\n \n touch /example/example.txt\n \n \n\nFile Reading\n------------\n\n* **Windows**: You can read a file using standard file readers, such as Notepad, Wordpad, etc., or you can utilize PowerShell commands. The `Get-Content` command provides the file content.\n \n Get-Content -Path \"C:\\Example\\example.txt\"\n \n \n* **Linux**: The `cat` command is the most common way to read the contents of a file in Linux.\n \n cat /example/example.txt\n \n \n\nFile Updating\n-------------\n\n* **Windows**: File updating can be accomplished using the previously mentioned text editors or PowerShell. 
The `Set-Content` or `Add-Content` commands are useful for updating a file.\n \n Set-Content -Path \"C:\\Example\\example.txt\" -Value \"Updated content\"\n Add-Content -Path \"C:\\Example\\example.txt\" -Value \"Appended content\"\n \n \n* **Linux**: Linux uses the built-in text editors, such as `nano` or `vim`, to update files. Alternatively, the `echo` command can append content to a file.\n \n echo \"Appended content\" >> /example/example.txt\n \n \n\nFile Deletion\n-------------\n\n* **Windows**: File deletion is performed by right-clicking the file and selecting 'Delete' or using PowerShell commands. The `Remove-Item` command followed by the file path can delete a file.\n \n Remove-Item -Path \"C:\\Example\\example.txt\"\n \n \n* **Linux**: The `rm` command allows you to delete a file in Linux.\n \n rm /example/example.txt\n \n \n\nBy mastering these CRUD operations, you can enhance your cyber security knowledge and implement effective incident response and file management strategies.", + "links": [] + }, + "xeRWOX1fWQDLNLWMAFTEe": { + "title": "Troubleshooting", + "description": "**Troubleshooting** is a crucial skill in the realm of cyber security, as it involves identifying, analyzing, and resolving various issues with computer systems, networks, and software. It is a systematic approach that requires logical thinking and the ability to deduce the possible cause of a problem from various symptoms. As an aspiring cyber security professional, sharpening your troubleshooting skills means you'll be better equipped to handle any security threats, vulnerabilities, and attacks on your organization's digital infrastructure.\n\nBelow, we have outlined some key steps and best practices for effective troubleshooting in cyber security:\n\nIdentifying the Problem\n-----------------------\n\nThe first step in troubleshooting is to identify the problem itself. This may involve recognizing unusual system behavior, error messages, or even end-user reports. 
To identify the problem, look for symptoms such as slow performance, application crashes, or network connectivity issues.\n\nGathering Information\n---------------------\n\nOnce the problem has been identified, gather as much information as possible about it. This means consulting event logs, system documentation, and users who may have experienced the issue firsthand. Additionally, pay attention to any error messages or anomalies in the system behavior that can provide valuable insights.\n\nFormulate a Hypothesis\n----------------------\n\nAfter gathering all available information, come up with a hypothesis or an educated guess about what may be causing the issue. Keep in mind that you may not be able to determine a single cause at this stage, so try to identify all possible causes and prioritize them based on the available evidence.\n\nTest the Hypothesis\n-------------------\n\nTest your hypothesis by attempting to confirm or refute it. To do this, apply a specific solution and observe any changes that occur. If there is no change, reconsider your hypothesis and apply another solution. Repeat this process until you've identified a cause or have exhausted all possible solutions.\n\nDocument and Communicate Findings\n---------------------------------\n\nOnce you've identified and resolved the problem, document your findings and communicate them to relevant stakeholders. 
This will help to ensure that issues are addressed efficiently in the future and will also contribute to your organization's knowledge base.\n\nTroubleshooting Best Practices\n------------------------------\n\n* Develop a methodical approach: Take a step-by-step approach and use logic, pattern recognition, and experience to guide you through the troubleshooting process.\n* Collaborate: Engage with other professionals to discuss potential solutions, as well as share insights and experiences.\n* Stay informed: Continuously update your knowledge and skillset with the latest technologies, trends, and methods in the cyber security landscape.\n* Invest in tools: Utilize effective troubleshooting tools like network analyzers, penetration testing tools, or log analyzers to help you diagnose and resolve issues more efficiently.\n\nMastering the art of troubleshooting is essential for successful cyber security professionals, and by employing the strategies laid out above, you'll be well on your way to enhancing your problem-solving capabilities in the field.", + "links": [] + }, + "WDrSO7wBNn-2jB8mcyT7j": { + "title": "Common Commands", + "description": "In this guide, we will cover essential common commands you need to know when starting your journey in cyber security. By becoming proficient in these commands, you will be able to navigate, analyze, and manage different aspects of systems and networks. 
The list will cover command prompts, shell commands, and other tools.\n\n_Please note this guide assumes you already have basic knowledge of command line interfaces (CLI)_\n\nOperating System Commands\n-------------------------\n\nThese commands are useful for managing and understanding your operating system and its components.\n\nWindows\n-------\n\n* `ipconfig`: Display the IP configuration for all network interfaces on the device.\n \n* `netstat`: Display active network connections, listening ports, and routing tables.\n \n* `systeminfo`: Display detailed information about the computer's hardware and software configuration.\n \n* `nslookup`: Look up the IP address of a domain or host.\n \n* `ping`: Send a series of network packets to test network connectivity.\n \n\nLinux/Unix/MacOS\n----------------\n\n* `ifconfig`: Display the IP configuration for all network interfaces on the device.\n \n* `netstat`: Display active network connections, listening ports, and routing tables.\n \n* `uname -a`: Display detailed information about the operating system.\n \n* `dig`: Look up the IP address of a domain or host.\n \n* `ping`: Send a series of network packets to test network connectivity.\n \n\nFile System Commands\n--------------------\n\nThese commands are useful for navigating and managing file systems on your device.\n\nWindows\n-------\n\n* `dir`: List files and directories in the current directory.\n \n* `cd`: Change the current directory.\n \n* `copy`: Copy files from one location to another.\n \n* `move`: Move files from one location to another.\n \n* `del`: Delete specified files.\n \n\nLinux/Unix/MacOS\n----------------\n\n* `ls`: List files and directories in the current directory.\n \n* `cd`: Change the current directory.\n \n* `cp`: Copy files from one location to another.\n \n* `mv`: Move files from one location to another.\n \n* `rm`: Delete specified files.\n \n\nNetwork Analysis Commands\n-------------------------\n\nThese commands are useful for 
analyzing and troubleshooting network connections.\n\n* `traceroute` (Linux/Unix/MacOS) / `tracert` (Windows): Display the route and transit delay of packets across a network.\n \n* `tcpdump` (Linux/Unix/MacOS) / `Wireshark` (Windows): Capture and analyze network traffic.\n \n\nCyber Security Tools\n--------------------\n\n* `nmap`: Scan networks and hosts for open ports and network services.\n \n* `Metasploit`: A penetration testing framework that simplifies the discovery and exploitation of vulnerabilities.\n \n* `John the Ripper`: A password-cracking tool that automatically detects and cracks multiple password formats.\n \n* `Wireshark`: A network protocol analyzer that captures and analyzes network traffic.\n \n* `Aircrack-ng`: A suite of tools for auditing wireless networks.\n \n\nBy familiarizing yourself with these common commands and tools, you'll have a solid foundation to build upon in your cyber security journey. As you progress, you will encounter more advanced tools and techniques, so keep learning and stay curious!", + "links": [] + }, + "gSLr-Lc119eX9Ig-kDzJ2": { + "title": "Networking Knowledge", + "description": "In the world of cyber security, having a strong foundation in networking knowledge is crucial. It's important to understand the fundamental concepts and mechanisms that govern how data is transferred, communicated, and secured across digital networks.\n\nTopics\n------\n\n* **Network Architecture**: Learn about the different networking models, such as the OSI model and TCP/IP model, which define how data is structured, transmitted, and received in a network.\n \n* **Network Protocols**: Familiarize yourself with various network protocols that are essential for effective communication between devices, including HTTP, HTTPS, FTP, and more. 
These protocols ensure that data is transmitted reliably and securely across networks.\n \n* **IP Addressing and Subnetting**: Gain an understanding of IP addresses (both IPv4 and IPv6), how they are assigned, and how subnetting works to divide networks into smaller segments for better management and security.\n \n* **Routing and Switching**: Learn about the roles of routers and switches in a network, as well as related technologies and protocols like DHCP, NAT, and various routing protocols (such as OSPF and BGP).\n \n* **Wireless Networking**: Delve into the world of wireless networks by studying the different types of wireless technologies like Wi-Fi, Bluetooth, and cellular networks. Understand the security concerns and best practices associated with wireless communication.\n \n* **Network Security**: Explore various techniques and tools used to defend networks from cyber threats, including firewalls, intrusion detection systems (IDS), intrusion prevention systems (IPS), and VPNs. Learn about security protocols like SSL/TLS, encryption algorithms, and secure access control mechanisms.\n \n* **Network Troubleshooting**: Understand common network issues and how to resolve them, using various network troubleshooting tools and methodologies like ping, traceroute, and Wireshark.\n \n\nBy developing a strong foundation in networking knowledge, you will be well-equipped to tackle various cyber security challenges and protect your digital assets from potential threats. Remember, the ever-evolving landscape of cyber security demands continuous learning and updating of skills to stay ahead in the game.", + "links": [] + }, + "OXUd1UPPsBhNoUGLKZJGV": { + "title": "Understand the OSI Model", + "description": "The **Open Systems Interconnection (OSI) model** is a framework that standardizes the functions of a telecommunication or computing system into seven distinct layers. 
This model is widely used to understand how different networking protocols and technologies work together to enable data transmission and communication.\n\nGiven below are different layers of the OSI model, the primary functions they perform, and their relevance to network security.\n\nPhysical Layer\n--------------\n\nThe **Physical layer** deals with the physical connection between devices, like cables or wireless signals. It is responsible for transmitting raw data (in the form of bits) between devices over a physical medium, such as copper wires or fiber optic cables.\n\nData Link Layer\n---------------\n\nThe **Data Link layer** is responsible for creating a reliable link between two devices on a network. It establishes communication between devices by dividing the data into frames (small data units) and assigning each frame with a unique address. This layer also offers error detection and correction mechanisms to ensure reliable data transfer.\n\nNetwork Layer\n-------------\n\nThe **Network layer** is responsible for routing data packets between different devices on a network, regardless of the physical connection medium. It determines the optimal path to transfer data between the source and destination devices and assigns logical addresses (IP addresses) to devices on the network.\n\nTransport Layer\n---------------\n\nThe **Transport layer** is in charge of ensuring error-free and reliable data transmissions between devices. It achieves this by managing flow control, error checking, and data segmentation. This layer also establishes connections between devices and manages data transfer using protocols like Transmission Control Protocol (TCP) and User Datagram Protocol (UDP).\n\nSession Layer\n-------------\n\nThe **Session layer** manages sessions, which are continuous connections between devices. 
It establishes, maintains, and terminates connections between devices while ensuring proper synchronization and data exchange between the communication devices.\n\nPresentation Layer\n------------------\n\nThe **Presentation layer** is responsible for translating or converting the data format between different devices, allowing them to understand each other's data. This layer also deals with data encryption and decryption, which is an essential aspect of network security.\n\nApplication Layer\n-----------------\n\nThe **Application layer** is the interface between the user and the communication system. It is responsible for providing networking services for various applications, like email, web browsing, or file sharing.\n\nEach of these layers interacts with the adjacent layers to pass data packets back and forth. Understanding the OSI model is crucial for addressing potential security threats and vulnerabilities that can occur at each layer. By implementing strong network security measures at each layer, you can minimize the risk of cyber attacks and keep your data safe.\n\nIn the next section, we will discuss network protocols and how they play an essential role in network communication and security.", + "links": [ + { + "title": "What is OSI Model?", + "url": "https://www.youtube.com/watch?v=Ilk7UXzV_Qc", + "type": "video" + }, + { + "title": "Lecture - OSI Model", + "url": "https://www.youtube.com/watch?v=0Rb8AkTEASw", + "type": "video" + }, + { + "title": "OSI Model Animation", + "url": "https://www.youtube.com/watch?v=vv4y_uOneC0", + "type": "video" + } + ] + }, + "ViF-mpR17MB3_KJ1rV8mS": { + "title": "Common Protocols and their Uses", + "description": "In this section, we will discuss some of the most common protocols used in networking and their importance in maintaining cyber security. 
Protocols are a set of rules and procedures that define how data should be transmitted, formatted, and processed over a network.\n\nHyperText Transfer Protocol (HTTP) and HTTPS\n--------------------------------------------\n\nHTTP, or HyperText Transfer Protocol, is the foundation of data communication on the World Wide Web. It defines how data should be formatted and transmitted between a client (like your browser) and a web server. HTTP is a stateless protocol, meaning each request and response pair is independent from others.\n\nHTTPS, or HTTP Secure, is a secure version of HTTP that encrypts data between the client and server using Secure Sockets Layer (SSL) or Transport Layer Security (TLS) to protect sensitive data from being intercepted or tampered with.\n\nTransmission Control Protocol (TCP)\n-----------------------------------\n\nTCP, or Transmission Control Protocol, is a reliable, connection-oriented protocol that ensures data is delivered correctly between applications over a network. It ensures accurate and complete data delivery by establishing a connection, segmenting data into smaller packets, verifying the receipt of packets, and reordering packets to their original sequence.\n\nInternet Protocol (IP)\n----------------------\n\nInternet Protocol (IP) is responsible for delivering packets from the source host to the destination host based on their IP addresses. IP is the primary protocol in the Internet Layer of the Internet Protocol Suite and has two main versions - IPv4 and IPv6.\n\nUser Datagram Protocol (UDP)\n----------------------------\n\nUDP, or User Datagram Protocol, is a connectionless communication protocol used for fast and efficient data transmission. 
Unlike TCP, UDP does not provide error checking or guarantee delivery, making it suitable for real-time applications like video streaming and online gaming where low latency is crucial.\n\nDomain Name System (DNS)\n------------------------\n\nThe Domain Name System (DNS) is responsible for translating human-readable domain names (like [www.example.com](http://www.example.com)) into corresponding IP addresses that computers understand. This process is called domain name resolution. DNS is an essential component of internet communication, as it allows users to access websites using easy-to-remember names instead of numerical IP addresses.\n\nFile Transfer Protocol (FTP)\n----------------------------\n\nFile Transfer Protocol (FTP) is a standard network protocol used for transferring files from one host to another over a TCP-based network, such as the Internet. FTP is commonly used for sharing files and transferring files between a client and a server.\n\nSimple Mail Transfer Protocol (SMTP)\n------------------------------------\n\nSimple Mail Transfer Protocol (SMTP) is the standard protocol for sending email messages across a network. It defines how email messages should be formatted, encrypted, and relayed between email clients, servers, and other email systems.\n\nUnderstanding these common protocols and their roles in network communication is vital for ensuring the proper implementation of cyber security measures. It will help you better identify potential vulnerabilities and make informed decisions on network defense strategies.", + "links": [] + }, + "0tx2QYDYXhm85iYrCWd9U": { + "title": "Common Ports and their Uses", + "description": "Ports are crucial in networking, as they facilitate communication between devices and applications. They act as endpoints in the networking process, enabling data transfer. 
We've compiled a list of commonly used ports to help you understand their significance in cyber security.\n\nTransmission Control Protocol (TCP) Ports\n-----------------------------------------\n\n* **FTP (File Transfer Protocol) - Ports 20 and 21**: FTP is a widely used protocol for transferring files.\n \n* **SSH (Secure Shell) - Port 22**: SSH allows secure communication and remote access to devices over an unsecured network.\n \n* **Telnet - Port 23**: Telnet is a text-based protocol that allows you to interact with remote devices over networks.\n \n* **SMTP (Simple Mail Transfer Protocol) - Port 25**: SMTP is a protocol for sending and receiving emails.\n \n* **DNS (Domain Name System) - Port 53**: DNS translates human-readable domain names into IP addresses to facilitate communication between devices.\n \n* **HTTP (Hypertext Transfer Protocol) - Port 80**: HTTP is the primary protocol used for communication on the World Wide Web.\n \n* **POP3 (Post Office Protocol 3) - Port 110**: POP3 is a protocol for receiving emails from your email server.\n \n* **IMAP (Internet Message Access Protocol) - Port 143**: IMAP is a more advanced email protocol that allows you to access and manage your emails on the email server.\n \n* **HTTPS (Hypertext Transfer Protocol Secure) - Port 443**: HTTPS is an encrypted and secure version of HTTP.\n \n* **RDP (Remote Desktop Protocol) - Port 3389**: RDP is a Microsoft-developed protocol for remotely accessing Windows devices.\n \n\nUser Datagram Protocol (UDP) Ports\n----------------------------------\n\n* **DHCP (Dynamic Host Configuration Protocol) - Ports 67 and 68**: DHCP is used to allocate IP addresses to devices within a network.\n \n* **DNS (Domain Name System) - Port 53**: (same function as in TCP)\n \n* **TFTP (Trivial File Transfer Protocol) - Port 69**: TFTP is a simplified version of FTP for quick and easy file transfer.\n \n* **SNMP (Simple Network Management Protocol) - Port 161**: SNMP enables monitoring and managing 
network devices, including printers, routers, and switches.\n \n* **NTP (Network Time Protocol) - Port 123**: NTP is a standard protocol used to synchronize time across network devices.\n \n\nUnderstanding these common ports and their functions is essential for network administrators and cyber security professionals. Proper knowledge of these ports will help you identify and assess potential security risks, as well as implement robust network defense measures.", + "links": [] + }, + "dJ0NUsODFhk52W2zZxoPh": { + "title": "SSL and TLS Basics", + "description": "Secure Sockets Layer (SSL) and Transport Layer Security (TLS) are cryptographic protocols designed to provide secure communication over a computer network. They play a vital role in protecting sensitive information transmitted online, such as login credentials, financial information, and private user data.\n\nSecure Sockets Layer (SSL)\n--------------------------\n\nSSL is the predecessor to TLS and was first introduced in the 1990s. It creates an encrypted connection between a client (typically a web browser) and a server to ensure that any data transmitted remains private and secure. SSL uses a combination of symmetric and asymmetric encryption methods, as well as digital certificates, to establish and maintain secure communication.\n\nTransport Layer Security (TLS)\n------------------------------\n\nTLS is an improved and more secure version of SSL, with TLS 1.0 being released as an upgrade to SSL 3.0. The current version, as of this guide, is TLS 1.3. TLS provides a more robust and flexible security framework, addressing many of the vulnerabilities present in SSL. 
While many people still refer to SSL when discussing secure web communication, it's important to note that SSL has been deprecated, and TLS is the best-practice standard for secure communication.\n\nKey Components\n--------------\n\n* **Encryption**: SSL and TLS use powerful algorithms to protect data through encryption, ensuring it's unreadable by anyone without the proper decryption keys.\n* **Authentication**: SSL/TLS digital certificates verify the identities of clients and servers, providing trust and authenticity.\n* **Integrity**: These security protocols use message authentication codes to ensure that the data sent between clients and servers has not been tampered with during transmission.\n\nHandshake Process\n-----------------\n\nSSL and TLS follow a series of steps, known as the \"handshake process,\" to create a secure connection:\n\n* **Client hello**: The client initiates the handshake process by sending a message with supported cryptographic algorithms, random numbers, and session information.\n* **Server hello**: The server responds with its chosen cryptographic algorithms, random numbers, and its digital certificate. Optionally, the server can request the client's certificate for mutual authentication.\n* **Client verification**: The client verifies the server's certificate and may send its own if requested. It then creates a pre-master secret, encrypts it with the server's public key, and sends it to the server.\n* **Key generation and exchange**: Both the client and server generate the master secret and session keys using the pre-master secret and shared random numbers. These keys are used for encrypting and decrypting the data transmitted.\n* **Secured connection**: Once the keys are exchanged, the client and server can now communicate securely using the established encryption and keys.\n\nSecure communication is critical for any organization handling sensitive data. 
SSL and TLS serve as the backbone for protecting data in transit and play a significant role in ensuring the confidentiality, integrity, and authenticity of online communications.", + "links": [] + }, + "umbMBQ0yYmB5PgWfY6zfO": { + "title": "Basics of NAS and SAN", + "description": "Network Attached Storage (NAS) and Storage Area Network (SAN) technologies play a crucial role in managing data within an organization and serve as the building blocks for a more comprehensive IT infrastructure.\n\nNetwork Attached Storage (NAS)\n------------------------------\n\nNAS is a high-capacity storage solution that operates on a data file level, allowing multiple users and clients to access, store, and retrieve data from a centralized location over a network. NAS devices are generally connected to a local area network (LAN) and use various file-sharing protocols, such as NFS (Network File System), SMB/CIFS (Server Message Block/Common Internet File System), or AFP (Apple Filing Protocol).\n\nSome key features of a NAS system include:\n\n* **Ease of Deployment**: NAS devices are simple to install and configure, facilitating quick integration into existing network infrastructures.\n* **Scalability**: NAS systems can be easily expanded to accommodate growing storage needs by adding more drives or units.\n* **Data Protection**: Most NAS devices offer data protection features such as RAID (Redundant Array of Independent Disks), data backup, and data encryption.\n\nStorage Area Network (SAN)\n--------------------------\n\nSAN is a high-performance, dedicated storage network designed to provide block-level data storage for applications and servers. 
Unlike NAS, which uses file-sharing protocols, SANs utilize block-based protocols such as Fibre Channel (FC) and iSCSI (Internet Small Computer System Interface) to handle storage requests.\n\nSANs offer several advantages in terms of performance, reliability, and scalability:\n\n* **Performance**: SANs can handle low-latency, high-speed data transfers, providing optimal performance for mission-critical applications and large-scale virtualization.\n* **Fault Tolerance**: SANs are designed to provide redundancy and failover capabilities, ensuring continued access to data in the event of hardware failures.\n* **Scalability**: SANs can be easily scaled by adding more disk arrays, switches, or connections to meet growing storage demands.\n\nNAS vs. SAN: Choosing the Right Solution\n----------------------------------------\n\nWhen it comes to deciding between NAS and SAN, there are several factors to consider:\n\n* **Cost**: NAS devices are generally more affordable than SANs, making them an attractive option for smaller organizations or environments with limited budgets.\n* **Infrastructure**: NAS solutions can be more easily integrated into existing network infrastructures, whereas SANs may require dedicated hardware, connections, and management tools.\n* **Performance Requirements**: If you need high-performance storage for intensive applications, SANs may be a more appropriate choice than NAS.\n* **Data Management**: While NAS solutions excel in handling file-based storage, SANs provide better support for block-level storage and can deliver improved performance for virtualized environments and database applications.\n\nIt's essential to evaluate your organization's specific needs and requirements to determine which storage solution is the most appropriate fit. 
As you expand your knowledge in cyber security, a solid understanding of both NAS and SAN technologies will prove invaluable in implementing secure and efficient data storage systems.", + "links": [ + { + "title": "NAS vs SAN", + "url": "https://youtu.be/3yZDDr0JKVc", + "type": "video" + } + ] + }, + "E8Z7qFFW-I9ivr0HzoXCq": { + "title": "Basics of Subnetting", + "description": "Subnetting is the process of dividing an IP network into smaller sub-networks called subnets. It allows better allocation of IP addresses and provides better organization, control, and security for the network. Here we go through some of the basic concepts of subnetting and why it's crucial for cybersecurity.\n\nIP Addresses and Subnet Masks\n-----------------------------\n\nAn IP address is a unique identifier for devices on a network. It consists of two parts: the network address and the host address. The network address indicates the network to which a device belongs, while the host address identifies the specific device within that network.\n\nSubnet masks are used to define which portion of an IP address is the network address and which is the host address. 
For example, in the IP address `192.168.1.5`, and subnet mask `255.255.255.0`, the network address is `192.168.1.0`, and the host address is `5`.\n\nWhy Subnetting?\n---------------\n\nSubnetting has several advantages, including:\n\n* **Improved Network Performance**: Breaking a large network into smaller subnets helps reduce congestion and improve overall performance.\n* **Enhanced Security**: By isolating different parts of a network, you can control access and limit the spread of potential threats.\n* **Easier Administration**: Smaller networks are easier to manage and maintain, as it's simpler to track issues and allocate resources.\n\nSubnetting Process\n------------------\n\nThe process of subnetting involves the following steps:\n\n* **Choose the Appropriate Subnet Mask**: Determine the right subnet mask for your network based on the number of required subnets and hosts. The more subnets you need, the more bits you will \"borrow\" from the host portion of the IP address.\n \n* **Divide the Network into Subnets**: Calculate the subnet addresses by incrementing the network portion of the IP address by the value of the borrowed bits.\n \n* **Determine Host Ranges**: Calculate the valid host addresses within each subnet by identifying the first and last usable IP addresses. Remember that the first address in a subnet is the network address, and the last address is used for broadcasting.\n \n* **Assign IP Addresses**: Allocate IP addresses to devices within their respective subnets, and configure devices with the correct subnet mask.\n \n\nExample\n-------\n\nLet's suppose we have the network `192.168.1.0` with a subnet mask of `255.255.255.0`. We want to create four smaller subnets. Here's how we can do it:\n\n* `255.255.255.0` in binary is `11111111.11111111.11111111.00000000`. 
We can borrow 2 bits from the host portion to create four subnets: `11111111.11111111.11111111.11000000`, which is `255.255.255.192` in decimal format.\n \n* Our subnets will have the following network addresses:\n \n * `192.168.1.0`\n * `192.168.1.64`\n * `192.168.1.128`\n * `192.168.1.192`\n* The valid host ranges within each subnet are:\n \n * `192.168.1.1 - 192.168.1.62`\n * `192.168.1.65 - 192.168.1.126`\n * `192.168.1.129 - 192.168.1.190`\n * `192.168.1.193 - 192.168.1.254`\n* Allocate IP addresses from these host ranges to devices within their respective subnets, and configure devices with the correct subnet mask (`255.255.255.192`).\n \n\nUnderstanding the basics of subnetting is essential to properly configuring and securing your network. By efficiently dividing your network into smaller subnets, you can optimize performance, organization, and security.", + "links": [] + }, + "2nQfhnvBjJg1uDZ28aE4v": { + "title": "Public vs Private IP Addresses", + "description": "When it comes to IP addresses, they are categorized in two major types: Public IP Addresses and Private IP Addresses. Both play a key role in network communication; however, they serve different purposes. Let's examine them more closely:\n\nPublic IP Addresses\n-------------------\n\nA public IP address is a globally unique IP address that is assigned to a device or a network. 
This type of IP address is reachable over the Internet and enables devices to communicate with other devices, servers, and networks located anywhere in the world.\n\nHere are some key features of public IP addresses:\n\n* Routable over the Internet.\n* Assigned by the Internet Assigned Numbers Authority (IANA).\n* Usually assigned to an organization or Internet Service Provider (ISP).\n* Can be either static (permanent) or dynamic (changes periodically).\n\nExample: `72.14.207.99`\n\nPrivate IP Addresses\n--------------------\n\nPrivate IP addresses, on the other hand, are used within local area networks (LANs) and are not visible on the Internet. These addresses are reserved for internal use within an organization, home, or local network. They are often assigned by a router or a network administrator for devices within the same network, such as your computer, printer, or smartphone.\n\nHere are some key features of private IP addresses:\n\n* Not routable over the Internet (requires Network Address Translator (NAT) to communicate with public IP addresses).\n* Assigned by local network devices, such as routers or network administrators.\n* Reusable in different private networks (as they are not globally unique).\n* Static or dynamic (depending on the network's configuration).\n\nPrivate IP address ranges:\n\n* `10.0.0.0` to `10.255.255.255` (Class A)\n* `172.16.0.0` to `172.31.255.255` (Class B)\n* `192.168.0.0` to `192.168.255.255` (Class C)\n\nExample: `192.168.1.100`\n\nIn summary, public IP addresses are used for communication over the Internet, whereas private IP addresses are used within local networks. 
Understanding the difference between these two types of IP addresses is essential for grasping the basics of network connectivity and cyber security.", + "links": [] + }, + "0TWwox-4pSwuXojI8ixFO": { + "title": "localhost", + "description": "Localhost (also known as loopback address) is a term used to define a network address that is used by a device (usually a computer or a server) to refer to itself. In other words, it's a way for your device to establish a network connection to itself. The most commonly used IP address for localhost is `127.0.0.1`, which is reserved as a loopback address in IPv4 networks. For IPv6 networks, it's represented by `::1`.\n\nPurpose and Usage of Localhost\n------------------------------\n\nLocalhost is useful for a variety of reasons, such as:\n\n* **Testing and Development**: Developers can use localhost to develop and test web applications or software without the need for connecting to external network resources.\n \n* **Network Services**: Some applications and servers use localhost to provide network services to the local system only, optimizing performance and security.\n \n* **Troubleshooting**: Localhost can be used as a diagnostic tool to test if the network stack on the device is functioning correctly.\n \n\nConnecting to Localhost\n-----------------------\n\nTo connect to localhost, you can use several methods depending on the tasks you want to accomplish:\n\n* **Web Browser**: If you're running a local web server, you can simply enter `http://127.0.0.1` or `http://localhost` in your browser's address bar and access the locally hosted web application.\n \n* **Command Line**: You can use utilities like `ping`, `traceroute`, or `telnet` at the command prompt to verify connectivity and network functionality using localhost.\n \n* **Application Settings**: Some applications, such as web servers or database servers, may have configuration settings that allow you to bind them to the loopback address (`127.0.0.1` or `::1`). 
This will restrict the services to the local system and prevent them from being accessed by external sources.\n \n\nRemember, connections to localhost do not pass through your computer's physical network interfaces, and as such, they're not subject to the same security risks or performance limitations that a real network connection might have.", + "links": [] + }, + "W_oloLu2Euz5zRSy7v_T8": { + "title": "loopback", + "description": "Loopback is an essential concept in IP terminology that refers to a test mechanism used to validate the operation of various network protocols, and software or hardware components. The primary function of the loopback feature is to enable a device to send a data packet to itself to verify if the device's network stack is functioning correctly.\n\nImportance of Loopback\n----------------------\n\nThe concept of loopback is critical for the following reasons:\n\n* **Troubleshooting**: Loopback helps in diagnosing and detecting network connectivity issues. It can also help ascertain whether an application or device is correctly processing and responding to incoming network traffic.\n* **Testing**: Loopback can be used extensively by developers to test software applications or components without external network access. This ensures that the software behaves as expected even without a working network connection.\n\nLoopback Address\n----------------\n\nIn IP terminology, there's a pre-allocated IP address for loopback. For IPv4, the reserved address is `127.0.0.1`. For IPv6, the loopback address is `::1`. When a device sends a packet to either of these addresses, the packet is rerouted to the local device, making it the source and destination simultaneously.\n\nLoopback Interface\n------------------\n\nApart from loopback addresses, there's also a network device known as the \"loopback interface.\" This interface is a virtual network interface implemented in software. 
The loopback interface is assigned a loopback address and can be used to emulate network connections for various purposes, such as local services or inter-process communications.\n\nSummary\n-------\n\nLoopback plays a crucial role in IP technology by enabling devices to run diagnostic tests and validate the correct functioning of software and hardware components. Using the loopback addresses for IPv4 (`127.0.0.1`) and IPv6 (`::1`), it allows network packets to circulate internally within the local device, facilitating developers to test and verify network operations.", + "links": [] + }, + "PPIH1oHW4_ZDyD3U3shDg": { + "title": "CIDR", + "description": "CIDR, or Classless Inter-Domain Routing, is a method of allocating IP addresses and routing Internet Protocol packets in a more flexible and efficient way, compared to the older method of Classful IP addressing. Developed in the early 1990s, CIDR helps to slow down the depletion of IPv4 addresses and reduce the size of routing tables, resulting in better performance and scalability of the Internet.\n\nHow CIDR works\n--------------\n\nCIDR achieves its goals by replacing the traditional Class A, B, and C addressing schemes with a system that allows for variable-length subnet masking (VLSM). In CIDR, an IP address and its subnet mask are written together as a single entity, referred to as a _CIDR notation_.\n\nA CIDR notation looks like this: `192.168.1.0/24`. Here, `192.168.1.0` is the IP address, and `/24` represents the subnet mask. The number after the slash (/) is called the _prefix length_, which indicates how many bits of the subnet mask should be set to 1 (bitmask). The remaining bits of the subnet mask are set to 0.\n\nFor example, a `/24` prefix length corresponds to a subnet mask of `255.255.255.0`, because the first 24 bits are set to 1. 
This allows for 256 total IP addresses in the subnet, with 254 of these IPs available for devices (The first and last IP are reserved for the network address and broadcast address, respectively).\n\nAdvantages of CIDR\n------------------\n\n* **Efficient IP allocation:** CIDR allows for more granular allocation of IPv4 addresses, reducing wasted IP space.\n* **Reduction of routing table size:** CIDR enables route aggregation (route summarization), which combines multiple network routes to a single routing table entry.\n* **Decreased routing updates:** By allowing routers to share more generalized routing information, the number of routing updates gets significantly reduced, improving network stability and reducing router workload.\n\nCIDR in IPv6\n------------\n\nCIDR also plays a crucial role in the IPv6 addressing system, where the use of CIDR notation and address aggregation has become even more critical in managing the immense address space of IPv6 efficiently.\n\nIn conclusion, CIDR is an essential component of modern IP networking systems, enabling better utilization of IP address space and improving the overall scalability and performance of the Internet. It's crucial for network administrators and security professionals to have a solid understanding of CIDR, as it plays a significant role in configuring, managing, and securing IP networks.", + "links": [] + }, + "f-v8qtweWXFY_Ryo3oYUF": { + "title": "subnet mask", + "description": "A **subnet mask** is a crucial component of Internet Protocol (IP) addressing, acting as a \"mask\" to separate the network portion of an IP address from the host portion. 
It is a 32-bit number representing a sequence of 1's followed by a sequence of 0's, used to define the boundary of a subnet within a given IP address.\n\nThe primary purpose of a subnet mask is to:\n\n* Define network boundaries\n* Facilitate IP routing\n* Break down large IP networks into smaller, manageable subnetworks (subnets)\n\nFormat\n------\n\nThe subnet mask is written in the same dotted-decimal format as IP addresses (i.e., four octets separated by dots). For instance, the default subnet mask for a Class A IP address is `255.0.0.0`, for Class B is `255.255.0.0`, and for Class C is `255.255.255.0`.\n\nImportance in Cybersecurity\n---------------------------\n\nUnderstanding and configuring subnet masks correctly is crucial in cybersecurity, as they:\n\n* Help to isolate different segments of your network, leading to greater security control and more efficient usage of resources\n* Facilitate the division of IP networks into smaller subnets, which can then be assigned to different departments, groups, or functions within an organization\n* Enhance network efficiency by preventing unnecessary broadcast traffic\n* Improve the overall network stability and monitoring capabilities\n\nTo determine the appropriate subnet mask for different requirements, you can use various subnetting tools available online. Proper management of subnet masks is crucial for maintaining a secure, efficient, and well-functioning network.", + "links": [ + { + "title": "Wildcard mask", + "url": "https://en.wikipedia.org/wiki/Wildcard_mask", + "type": "article" + } + ] + }, + "5rKaFtjYx0n2iF8uTLs8X": { + "title": "default gateway", + "description": "In our journey through IP terminology, we now arrive at the topic of **Default Gateway**. 
Understanding the role and importance of the default gateway in a network is crucial for grasping the fundamentals of cyber security and data routing.\n\nOverview\n--------\n\nThe default gateway is basically a device (usually a router) on a network which serves as an access point for data traffic to travel from the local network to other networks, such as the internet. This device acts as a \"middleman\" between your computer and external networks, and is often set up by your internet service provider (ISP) or during the configuration of your own router.\n\nRole in Networks\n----------------\n\nIn a nutshell, the default gateway plays the following roles:\n\n* **Packet Routing**: It directs the network packets from your local computer or device to their ultimate destination. When a packet with a destination IP address is not on the same network as the source device, the default gateway routes the packet to the appropriate external network.\n \n* **Address Resolution Protocol (ARP)**: The default gateway obtains the physical address (MAC address) of a computer that is located on another network by using ARP.\n \n* **Protection**: In many cases, the default gateway also serves as a layer of network protection by restricting access to certain external networks, as well as regulating traffic from the internet.\n \n\nConfiguration\n-------------\n\nTo benefit from the services of a default gateway, your device needs to be properly configured. Most devices and operating systems obtain their network settings (including the default gateway address) automatically using DHCP. But you can also configure network settings manually if needed.\n\n**Note**: Each device connected to a network must have a unique IP address. 
Also, remember that devices on the same network should use the same default gateway address.\n\nIn conclusion, recognizing the significance of the default gateway and having a working knowledge of how it functions is an essential part of IP terminology, affecting both cyber security and efficient data routing. Continuing your education on the subject will better equip you to take advantage of your devices' networking features, as well as protect your valuable data from potential cyber threats.", + "links": [] + }, + "d5Cv3EXf6OXW19yPJ4x6e": { + "title": "VLAN", + "description": "A **VLAN** or **Virtual Local Area Network** is a logical grouping of devices or users within a network, based on shared attributes like location, department, or security requirements. VLANs play a crucial role in improving network security, enabling better resource allocation, and simplifying network management.\n\nKey Features of VLANs\n---------------------\n\n* **Isolation:** VLANs isolate traffic between different groups, helping to minimize the risk of unauthorized access to sensitive data.\n* **Scalability:** VLANs allow network administrators to grow and change networks with ease, without causing disruptions.\n* **Cost Effectiveness:** VLANs can reduce the need for additional hardware by reusing existing switches and networks for added functionality.\n* **Improved Performance:** By narrowing the broadcast domain, VLANs can improve network performance by reducing unnecessary traffic.\n\nTypes of VLANs\n--------------\n\n* **Port-based VLANs:** In this type, devices are separated based on their physical connection to the switch. Each port is assigned to a specific VLAN.\n* **Protocol-based VLANs:** Devices are grouped based on the network protocol they use. For example, all IP devices can be assigned to one VLAN, while IPX devices can be assigned to another.\n* **MAC-based VLANs:** Devices are assigned to VLANs based on their MAC addresses. 
This approach offers better security and flexibility but requires more administrative effort.\n\nCreating and Managing VLANs\n---------------------------\n\nVLANs are created and managed through network switches that support VLAN configuration. Switches use a VLAN ID (ranging from 1 to 4094) to uniquely identify each VLAN. VLAN Trunking Protocol (VTP) and IEEE 802.1Q standard are typically used to manage VLANs between different switches.\n\nSecurity Considerations\n-----------------------\n\nVLANs play a crucial role in network security; however, they are not foolproof. VLAN hopping and unauthorized access can still occur if proper measures, such as Private VLANs and Access Control Lists (ACLs), are not implemented to secure the network.\n\nIn summary, VLANs offer a flexible and secure way to manage and segment networks based on needs and requirements. By understanding their purpose, types, and security considerations, network administrators can efficiently use VLANs to improve overall network performance and security.", + "links": [ + { + "title": "VLAN Explained", + "url": "https://www.youtube.com/watch?v=jC6MJTh9fRE", + "type": "video" + } + ] + }, + "gfpvDQz61I3zTB7tGu7vp": { + "title": "DMZ", + "description": "A **DMZ**, also known as a **Demilitarized Zone**, is a specific part of a network that functions as a buffer or separation between an organization's internal, trusted network and the external, untrusted networks like the internet. The primary purpose of a DMZ is to isolate critical systems and data from the potentially hostile external environment and provide an extra layer of security.\n\nPurpose of DMZ\n--------------\n\n* **Security**: By segregating critical systems, a DMZ reduces the risk of unauthorized access and potential damage from external threats. 
This is achieved by implementing strong access controls, firewalls, and intrusion detection and prevention systems (IDS/IPS) to monitor and filter traffic between the DMZ and internal networks.\n* **Content Filtering**: It enables organizations to place publicly accessible servers (e.g., web and email servers) within the DMZ without exposing the entire internal network to potential attacks. This ensures that only authorized traffic is allowed to pass through.\n* **Ease of Management**: DMZ aids in simplifying security management processes as it provides a centralized location for implementing, auditing, and monitoring security policies, rules, and configurations for public-facing resources.\n\nComponents of DMZ\n-----------------\n\nThe key components in a DMZ include:\n\n* **Firewalls**: These devices are used to control and manage traffic between the DMZ, internal, and external networks. They can be configured to allow, deny, or restrict access based on pre-defined security policies and rules.\n* **Proxies**: Proxy servers act as intermediaries between the internal network and the internet. They help to screen and filter incoming and outgoing web traffic, providing an additional layer of security.\n* **Intrusion Detection and Prevention Systems (IDS/IPS)**: These tools continuously monitor and analyze network traffic, looking for signs of unauthorized access or malicious activities, and automatically take appropriate actions to mitigate threats.\n* **Public-Facing Servers**: These are the servers hosted within the DMZ, designed to serve content and resources to external users. They are typically configured with additional security measures to further reduce the risk of compromise.\n\nIn summary, a DMZ serves as a protective buffer between an organization's internal network and untrusted external networks, and plays an important role in protecting networks and data from external threats.", + "links": [ + { + "title": "What is DMZ? 
(Demilitarized Zone)", + "url": "https://www.youtube.com/watch?v=dqlzQXo1wqo", + "type": "video" + } + ] + }, + "M52V7hmG4ORf4TIVw3W3J": { + "title": "ARP", + "description": "ARP is a protocol used by the Internet Protocol (IP) to map an IP address to a physical address, also known as a Media Access Control (MAC) address. ARP is essential for routing data between devices in a Local Area Network (LAN) as it allows for the translation of IP addresses to specific hardware on the network.\n\nHow It Works\n------------\n\nWhen a device wants to communicate with another device on the same LAN, it needs to determine the corresponding MAC address for the target IP address. ARP helps in this process by broadcasting an ARP request containing the target IP address. All devices within the broadcast domain receive this ARP request and compare the target IP address with their own IP address. If a match is found, the device with the matching IP address sends an ARP reply which contains its MAC address.\n\nThe device that initiated the ARP request can now update its ARP cache (a table that stores IP-to-MAC mappings) with the new information, and then proceed to send data to the target's MAC address.\n\nSecurity Concerns\n-----------------\n\nWhile ARP is crucial for the functioning of most networks, it also presents certain security risks. ARP poisoning, for example, occurs when an attacker sends fake ARP messages with the goal to associate their MAC address with the IP address of a target device. 
This can lead to Man-in-the-Middle (MITM) attacks where the attacker can intercept, modify, or block traffic intended for the target device.\n\nTo mitigate ARP poisoning attacks, organizations can implement security measures such as static ARP entries, dynamic ARP inspection, and ensuring that their network devices are updated with the latest security patches.\n\nBy understanding ARP and the potential security risks it presents, you can help protect your network by incorporating appropriate security solutions and staying vigilant against potential threats.", + "links": [ + { + "title": "ARP Explained - Address Resolution Protocol", + "url": "https://www.youtube.com/watch?v=cn8Zxh9bPio", + "type": "video" + } + ] + }, + "ZTC5bLWEIQcdmowc7sk_E": { + "title": "VM", + "description": "A **Virtual Machine (VM)** is a software-based emulation of a computer system that operates on a physical hardware, also known as a host. VMs provide an additional layer of isolation and security as they run independent of the host's operating system. They can execute their own operating system (called the guest OS) and applications, allowing users to run multiple operating systems on the same hardware simultaneously.\n\nVirtual machines are commonly used in cybersecurity for tasks such as:\n\n* **Testing and analysis**: Security researchers often use VMs to study malware and vulnerabilities in a safe and contained environment without risking their primary system.\n \n* **Network segmentation**: VMs can be used to isolate different network segments within an organization, to help prevent the spread of malware or limit the impact of an attack.\n \n* **System recovery**: VMs can act as backups for critical systems or applications. 
In the event of a system failure, a VM can be spun up to provide continuity in business operations.\n \n* **Software development and testing**: Developers can use VMs to build and test software in a controlled and reproducible environment, reducing the risks of incompatibilities or unexpected behaviors when the software is deployed on a live system.\n \n\nKey terminologies associated with VMs include:\n\n* **Hypervisor**: Also known as Virtual Machine Monitor (VMM), is a software or hardware component that creates, runs, and manages virtual machines. Hypervisors are divided into two types - Type 1 (bare-metal) and Type 2 (hosted).\n \n* **Snapshot**: A snapshot is a point-in-time image of a virtual machine that includes the state of the guest OS, applications, and data. Snapshots are useful for quickly reverting a VM back to a previous state if needed.\n \n* **Live Migration**: This refers to the process of moving a running virtual machine from one physical host to another with minimal or no disruption to the guest OS and its applications. Live migration enables load balancing and ensures minimal downtime during hardware maintenance.\n \n\nUnderstanding and effectively utilizing virtual machines plays a significant role in enhancing the security posture of an organization, allowing for agile incident response and proactive threat analysis.", + "links": [ + { + "title": "Explore top posts about Infrastructure", + "url": "https://app.daily.dev/tags/infrastructure?ref=roadmapsh", + "type": "article" + }, + { + "title": "Virtualization Explained", + "url": "https://www.youtube.com/watch?v=UBVVq-xz5i0", + "type": "video" + } + ] + }, + "T4312p70FqRBkzVfWKMaR": { + "title": "DHCP", + "description": "**Dynamic Host Configuration Protocol (DHCP)** is a network protocol that enables automatic assignment of IP addresses to devices on a network. 
It is an essential component of IP networking and aims to simplify the process of configuring devices to communicate over an IP-based network.\n\nKey Features of DHCP\n--------------------\n\n* **Automatic IP Address Assignment**: DHCP eliminates the need for manual IP address assignment by automatically providing devices with the necessary IP addresses, reducing the risk of duplicate addressing.\n* **Network Configuration**: In addition to IP addresses, DHCP can also provide other essential network information such as subnet mask, default gateway, and DNS server information.\n* **IP Address Reuse**: When a device leaves the network or no longer needs an IP address, DHCP allows the address to be reused and assigned to a different device.\n* **Lease Duration**: DHCP assigns IP addresses for a specific period called a \"lease.\" After a lease expires, the device must request a new IP address or get its current address renewed.\n\nHow DHCP Works\n--------------\n\nThe DHCP process consists of four main steps:\n\n* **DHCP Discover**: A device (client) looking to join a network sends a broadcast message known as a \"DHCP Discover\" message to locate a DHCP server.\n* **DHCP Offer**: Upon receiving the \"DHCP Discover\" broadcast, the DHCP server responds with a unicast \"DHCP Offer\" message containing the necessary network configuration information (e.g., IP address) for the client.\n* **DHCP Request**: The client receives the offer and sends back a \"DHCP Request\" message to confirm the IP address assignment and other network information.\n* **DHCP Acknowledgment (ACK)**: Finally, the DHCP server sends an \"ACK\" message confirming the successful assignment of IP address and network settings. The client can now use the allocated IP address to communicate over the network.\n\nImportance in Cyber Security\n----------------------------\n\nUnderstanding DHCP is crucial for network professionals and cyber security experts as it can be a potential attack vector. 
Adversaries can exploit DHCP by setting up rogue DHCP servers on the network, conducting man-in-the-middle attacks or even conducting denial-of-service attacks. Consequently, securing DHCP servers, monitoring network traffic for anomalies, and employing strong authentication and authorization methods are essential practices for maintaining network security.", + "links": [] + }, + "ORIdKG8H97VkBUYpiDtXf": { + "title": "DNS", + "description": "**DNS** is a key component in the internet infrastructure that translates human-friendly domain names (e.g., `www.example.com`) into IP addresses (e.g., `192.0.2.44`). This translation process enables us to easily connect to websites and other online resources without having to remember complex numeric IP addresses.\n\nThe DNS operates as a distributed and hierarchical system which involves the following components:\n\n* **DNS Resolver**: Your device's initial contact point with the DNS infrastructure, often provided by your Internet Service Provider (ISP) or a third-party service like Google Public DNS.\n \n* **Root Servers**: The authoritative servers on the top of the DNS hierarchy that guide DNS queries to the appropriate Top-Level Domain (TLD) servers.\n \n* **TLD Servers**: These servers manage the allocation of domain names for top-level domains, such as `.com`, `.org`, etc.\n \n* **Authoritative Name Servers**: These are the servers responsible for storing the DNS records pertaining to a specific domain (e.g., `example.com`).\n \n\nSome common DNS record types you might encounter include:\n\n* **A (Address) Record**: Maps a domain name to an IPv4 address.\n* **AAAA (Address) Record**: Maps a domain name to an IPv6 address.\n* **CNAME (Canonical Name) Record**: Maps an alias domain name to a canonical domain name.\n* **MX (Mail Exchange) Record**: Specifies the mail servers responsible for handling email for the domain.\n* **TXT (Text) Record**: Contains human-readable or machine-readable text, often used for 
verification purposes or providing additional information about a domain.\n\nAs an essential part of the internet, the security and integrity of the DNS infrastructure are crucial. However, it's vulnerable to various types of cyber attacks, such as DNS cache poisoning, Distributed Denial of Service (DDoS) attacks, and DNS hijacking. Proper DNS security measures, such as DNSSEC (DNS Security Extensions) and monitoring unusual DNS traffic patterns, can help mitigate risks associated with these attacks.", + "links": [ + { + "title": "DNS in detail (TryHackMe)", + "url": "https://tryhackme.com/room/dnsindetail", + "type": "article" + }, + { + "title": "Explore top posts about DNS", + "url": "https://app.daily.dev/tags/dns?ref=roadmapsh", + "type": "article" + }, + { + "title": "DNS Explained in 100 Seconds (YouTube)", + "url": "https://www.youtube.com/watch?v=UVR9lhUGAyU", + "type": "video" + } + ] + }, + "Kkd3f_0OYNCdpDgrJ-_Ju": { + "title": "NAT", + "description": "Network Address Translation (NAT) is a key element in modern network security. It acts as a middleman between devices on your local area network (LAN) and the external internet. NAT helps to conserve IP addresses and improve privacy and security by translating IP addresses within private networks to public IP addresses for communication on the internet.\n\nHow NAT works\n-------------\n\nNAT is implemented on a router, firewall or a similar networking device. When devices in the LAN communicate with external networks, NAT allows these devices to share a single public IP address, which is registered on the internet. This is achieved through the following translation types:\n\n* **Static NAT:** A one-to-one mapping between a private IP address and a public IP address. 
Each private address is mapped to a unique public address.\n* **Dynamic NAT:** A one-to-one mapping between a private IP address and a public IP address, but the public address is chosen from a pool rather than being pre-assigned.\n* **Port Address Translation (PAT):** Also known as NAT Overload, PAT maps multiple private IP addresses to a single public IP address, using unique source port numbers to differentiate the connections.\n\nAdvantages of NAT\n-----------------\n\n* **Conservation of IP addresses:** NAT helps mitigate the shortage of IPv4 addresses by allowing multiple devices to share a single public IP address, reducing the need for organizations to purchase additional IP addresses.\n* **Security and Privacy:** By hiding internal IP addresses, NAT adds a layer of obscurity, making it harder for attackers to target specific devices within your network.\n* **Flexibility:** NAT enables you to change your internal IP address scheme without having to update the public IP address, reducing time and effort in reconfiguring your network.\n\nDisadvantages of NAT\n--------------------\n\n* **Compatibility issues:** Certain applications and protocols may encounter issues when operating behind a NAT environment, such as IP-based authentication or peer-to-peer networking.\n* **Performance impact:** The translation process may introduce latency and reduce performance in high-traffic networks.\n* **End-to-End Connectivity:** NAT generally breaks the end-to-end communication model of the internet, which can cause issues in some scenarios.\n\nIn summary, NAT plays a crucial role in modern cybersecurity by conserving IP addresses, obscuring internal networks and providing a level of security against external threats. 
While there are some disadvantages, its benefits make it an essential component in network security.", + "links": [] + }, + "FdoqB2---uDAyz6xZjk_u": { + "title": "IP", + "description": "IP, or Internet Protocol, is a fundamental concept in cybersecurity that refers to the way data is transferred across networks, specifically the internet. It is a core component of the internet's architecture and serves as the primary building block for communication between devices connected to the network.\n\nIP Address\n----------\n\nAn IP address is a unique identifier assigned to each device connected to a network, like a computer or smartphone. It comprises a series of numbers separated by dots (e.g., 192.168.1.1). IP addresses can be either IPv4 (32-bit) or the newer IPv6 (128-bit) format, which provides more available addresses. They allow devices to send and receive data packets to and from other devices on the internet.\n\nIP Routing\n----------\n\nIP routing is the process of directing data packets from one IP address to another via routers. These routers help find the most efficient path for the data to take as it travels across networks, ensuring that communication is fast and reliable.\n\nIP Protocols\n------------\n\nTwo main IP protocols exist for transferring data over the internet: Transmission Control Protocol (TCP) and User Datagram Protocol (UDP). 
Each protocol has its own unique characteristics and use cases.\n\n* **TCP**: Designed to ensure error-free, in-order transmission of data packets, TCP is used for applications where reliability is more important than speed, such as file transfers, email, and web browsing.\n* **UDP**: A faster, connectionless protocol that doesn't guarantee the order or integrity of data packets, making it suitable for real-time applications like video streaming and online gaming.\n\nIP Security Risks\n-----------------\n\nIP-based attacks can disrupt communication between devices and even result in unauthorized access to sensitive data. Such attacks include:\n\n* **IP Spoofing**: Manipulating an IP address to disguise the source of traffic or impersonate another device on the network.\n* **DDoS Attacks**: Overwhelming a target IP address or network with a massive amount of traffic, making services unavailable to users.\n* **Man-in-the-Middle Attacks**: Interceptors intercept and potentially modify data in transit between two IP addresses, enabling eavesdropping, data theft, or message alteration.\n\nIP Security Best Practices\n--------------------------\n\nTo safeguard against IP-based threats, consider implementing the following cybersecurity best practices:\n\n* Deploy firewalls to filter out malicious traffic and block unauthorized access.\n* Use VPNs to encrypt data in transit and hide your IP address from potential attackers.\n* Regularly update network devices and software to patch vulnerabilities.\n* Employ intrusion detection and prevention systems (IDPS) to monitor and counter threats.\n* Educate users about safe internet habits and the importance of strong, unique passwords.\n\nUnderstanding IP and its associated security risks is crucial in ensuring the safe and efficient transfer of data across networks. 
By following best practices, you can help protect your network and devices from potential cyber threats.", + "links": [] + }, + "lwSFIbIX-xOZ0QK2sGFb1": { + "title": "Router", + "description": "A **router** is a networking device responsible for forwarding data packets between computer networks. It acts as a traffic coordinator, choosing the best possible path for data transmission, thus ensuring smooth communication between networks. Routers are an integral part of the internet, helping to establish and maintain connections between different networks and devices.\n\nFunctionality of Routers\n------------------------\n\n* **Routing Decisions**: Routers analyze incoming data packets and make decisions on which path to forward the data based on destination IP addresses and network conditions.\n \n* **Connecting Networks**: Routers are essential in connecting different networks together. They enable communication between your home network and the broader internet, as well as between different networks within an organization.\n \n* **Managing Traffic**: Routers manage the flow of data to ensure optimal performance and avoid network congestion. They can prioritize certain types of data, such as video streaming, to ensure a better user experience.\n \n\nTypes of Routers\n----------------\n\n* **Wired Routers**: Utilize Ethernet cables to connect devices to the network. They typically come with multiple ethernet ports for devices such as computers, gaming consoles, and smart TVs.\n \n* **Wireless Routers**: Provide network access without needing physical cables. Wireless routers use Wi-Fi to transmit data between devices and are the most common type of router found in homes and offices.\n \n* **Core Routers**: Operate within the backbone of the internet, directing data packets between major networks (such as ISPs). 
These routers are high-performance devices capable of handling massive amounts of data traffic.\n \n\nRouter Security\n---------------\n\nAs routers are a critical gateway between your network and the internet, it's essential to keep them secure. Some common router security practices include:\n\n* Changing default passwords and usernames: Manufacturers often set simple default passwords, which can be easily guessed or discovered by attackers. It's important to set a strong, unique password for your router.\n \n* Regular firmware updates: Router manufacturers release updates to address security vulnerabilities and improve performance. Keep your router's software up to date.\n \n* Disable remote management: Some routers have a feature that allows remote access, which can be exploited by hackers. If you don't need this feature, disable it.\n \n* Create a guest network: If your router supports it, create a separate network for guests to use. This isolates them from your primary network, ensuring that they cannot access your devices or data.\n \n\nBy understanding routers and their role in cybersecurity, you can take the necessary steps to secure your network and protect your data.", + "links": [] + }, + "r9byGV8XuBPzoqj5ZPf2W": { + "title": "Switch", + "description": "A **switch** is a networking device that connects devices together on a computer network. It filters and forwards data packets between different devices by using their MAC (Media Access Control) addresses to identify them. Switches play an essential role in managing traffic and ensuring that data reaches its intended destination efficiently.\n\nKey Features and Functions\n--------------------------\n\n* **Intelligent Traffic Management:** Switches monitor the data packets as they travel through the network, only forwarding them to the devices that need to receive the data. 
This optimizes network performance and reduces congestion.\n* **Layer 2 Switching:** Switches operate at the data link layer (Layer 2) of the OSI (Open Systems Interconnection) model. They use MAC addresses to identify devices and determine the appropriate path for data packets.\n* **Broadcast Domains:** A switch creates separate collision domains, breaking up a single broadcast domain into multiple smaller ones, which helps minimize the impact of broadcast traffic on network performance.\n* **MAC Address Table:** Switches maintain a MAC address table, storing the mapping of MAC addresses to the appropriate physical interfaces, helping the switch identify the destination of the data packets efficiently.\n\nTypes of Switches\n-----------------\n\nSwitches can be categorized into two main types:\n\n* **Unmanaged Switch:** These switches are simple plug-and-play devices that require no configuration. They are best suited for small networks or places where advanced features and customized settings are not necessary.\n* **Managed Switch:** These switches offer a higher level of control and customization, allowing network administrators to monitor, manage, and secure network traffic. Managed switches are typically used in enterprise-level networks or environments that require advanced security features and traffic optimization.\n\nBy understanding the role and functionality of switches within computer networks, you can better navigate the complexities of cyber security and make informed decisions for optimizing network performance and security.", + "links": [] + }, + "gTozEpxJeG1NTkVBHH-05": { + "title": "VPN", + "description": "A **Virtual Private Network** (VPN) is a technology that provides secure and encrypted connections between devices over a public network, such as the internet. 
VPNs are primarily used to protect your internet activity and privacy from being accessed or monitored by external parties, such as hackers or government agencies.\n\nThe main components of a VPN are:\n\n* **VPN client**: The software installed on your device that connects to the VPN server.\n* **VPN server**: A remote server that handles and encrypts your internet traffic before sending it to its intended destination.\n* **Encryption**: The process of converting your data into unreadable code to protect it from unauthorized access.\n\nWhen you connect to a VPN, your device's IP address is replaced with the VPN server's IP address, making it seem as if your internet activity is coming from the server's location. This allows you to access content and websites that may be blocked or restricted in your region, and also helps to protect your identity and location online.\n\nUsing a reliable VPN service is an essential part of maintaining good cyber security, especially when using public Wi-Fi networks or accessing sensitive information online.\n\nKeep in mind, however, that not all VPNs are created equal. Make sure to do your research and choose a reputable VPN provider with a strong focus on privacy and security. Some popular and trusted VPN services include ExpressVPN, NordVPN, and CyberGhost.", + "links": [] + }, + "LrwTMH_1fTd8iB9wJg-0t": { + "title": "MAN", + "description": "A Metropolitan Area Network **(MAN)** is a type of computer network that spans across a metropolitan area or a large geographical area, typically covering a city or a region. It is designed to interconnect various local area networks **(LANs)** and wide area networks **(WANs)** to enable communication and data exchange between different locations within the metropolitan area.\n\nExamples of MAN\n---------------\n\nSome examples of Metropolitan Area Networks **(MANs)** include:\n\n1. 
**Cable TV Networks:** Many cable TV networks also offer internet services to their subscribers, creating a MAN that covers a specific metropolitan area.\n2. **Educational Institutions:** Universities, colleges, and research institutions often have their own MANs to interconnect their campuses and facilities spread across a metropolitan area.\n3. **City-Wide Wi-Fi Networks:** Some cities have established their own Wi-Fi networks to provide internet access to residents and businesses, creating a MAN that covers the entire city.\n4. **Public Transportation Networks:** Some metropolitan areas have implemented MANs to provide internet connectivity on public transportation networks such as buses and trains.\n\nAdvantages of MAN\n-----------------\n\n* **Improved Connectivity:** MANs provide a high-speed and reliable means of communication between different locations within a metropolitan area, facilitating efficient data exchange and collaboration among organizations, businesses, and individuals.\n \n* **Cost-Effective:** Compared to establishing multiple separate networks for each location, implementing a MAN can be more cost-effective as it allows for shared infrastructure and resources, reducing overall costs of networking equipment and maintenance.\n \n* **Scalability:** MANs are highly scalable and can be expanded to accommodate new locations or increased network traffic as the metropolitan area grows, making it a flexible solution for evolving connectivity needs.\n \n* **Centralized Management:** A MAN allows for centralized management of the network, making it easier to monitor and control network operations, troubleshoot issues, and implement security measures.\n \n\nDisadvantages of MAN\n--------------------\n\n* **Complexity:** MANs can be complex to design, implement, and maintain due to their large scale and geographical spread. 
They require skilled network administrators and engineers to manage and troubleshoot the network effectively.\n \n* **Cost of Implementation:** Establishing a MAN requires significant upfront investment in networking infrastructure and equipment, which can be a barrier to entry for smaller organizations or municipalities.\n \n* **Limited Coverage:** MANs are typically limited to metropolitan areas, and their coverage may not extend to remote or rural areas outside the metropolitan region, which can pose connectivity challenges for organizations located in those areas.\n \n* **Vulnerability to Single Point of Failure:** Since MANs are centralized networks, they are susceptible to a single point of failure, such as a failure in the main network node, which can disrupt the entire network and impact communication and data exchange among connected locations.", + "links": [] + }, + "xWxusBtMEWnd-6n7oqjHz": { + "title": "LAN", + "description": "A **Local Area Network (LAN)** is a vital component of cyber security that you must understand. This chapter covers a brief introduction to LAN, its basic functionalities and importance in maintaining a secure network environment.\n\nWhat is LAN?\n------------\n\nLAN stands for Local Area Network, which is a group of computers and other devices interconnected within a limited geographical area, like an office, school campus or even a home. These networks facilitate sharing of resources, data and applications among connected devices. 
They can be wired (Ethernet) or wireless (Wi-Fi).\n\nKey Components of LAN\n---------------------\n\nLAN comprises several key components, including:\n\n* **Workstations**: End user devices like computers, laptops or smartphones connected to the network.\n* **Servers**: Computers that provide resources and services to the workstations.\n* **Switches**: Networking devices that connect workstations and servers, and distribute network traffic efficiently.\n* **Routers**: Devices that connect the LAN to the internet or other networks (e.g., Wide Area Networks or WANs).\n\nImportance of LAN\n-----------------\n\nLANs play a fundamental role in modern organizations, providing:\n\n* **Resource Sharing**: They allow sharing of resources such as printers, scanners, storage drives and software applications across multiple users.\n* **Communication**: They enable faster communication between connected devices and allow users to collaborate effectively using email, chat or VoIP services.\n* **Data Centralization**: They allow data storage and retrieval from central servers rather than individual devices, which simplifies data management and backups.\n* **Scalability**: LANs can be easily expanded to accommodate more users and resources to support business growth.\n\nLAN Security\n------------\n\nUnderstanding LAN is crucial for maintaining a secure network environment. Since a LAN connects multiple devices, it forms the central point of various security vulnerabilities. Implementing effective security measures is vital to prevent unauthorized access, data leaks, and malware infections. 
Some best practices for securing your LAN include:\n\n* **Firewalls**: Deploy hardware-based and software-based firewalls to protect your network from external and internal threats.\n* **Antivirus Software**: Use antivirus applications on workstations and servers to prevent malware infections.\n* **Wireless Security**: Implement robust Wi-Fi security measures like WPA2 encryption and strong passwords to prevent unauthorized access.\n* **Access Controls**: Implement network access controls to grant authorized users access to specific resources and data.\n* **Network Segmentation**: Divide the network into separate zones based on required access levels and functions to contain potential threats.\n* **Regular Updates**: Keep your workstations, servers and network devices up-to-date with security patches and updates to fix vulnerabilities.\n* **Network Monitoring**: Use network monitoring tools to keep track of network traffic and identify potential threats or anomalies.\n\nBy understanding the components and importance of LAN, you can effectively contribute to improving your organization's cyber security posture. In the next chapter, we will discuss additional cyber security topics that you need to be familiar with.", + "links": [] + }, + "vCkTJMkDXcQmwsmeNUAX5": { + "title": "WAN", + "description": "A **Wide Area Network (WAN)** is a telecommunication network that extends over a large geographical area, such as interconnecting multiple local area networks (LANs). WANs commonly use leased lines, circuit switching, or packet switching to transmit data between LANs, allowing them to share resources and communicate with one another. 
A WAN can be privately owned and managed, or leased from telecommunication service providers.\n\nCharacteristics of WANs\n-----------------------\n\n* **Large geographic coverage**: WANs can span across cities, states, and even countries, making them suitable for businesses with multiple locations requiring connectivity.\n \n* **Communication technologies**: WANs rely on multiple technologies for communication, such as fiber optic cables, leased line connections, satellite links, and even cellular networks.\n \n* **Data transmission rates**: WANs generally offer lower data transfer rates as compared to LANs, primarily due to the longer distances and increased complexity.\n \n* **Higher latency**: WANs can suffer from higher latency (delay in data transmission) due to the physical distance involved and routing of traffic through various devices and service providers.\n \n* **Security concerns**: Given the broad scope and involvement of third-party service providers, securing WAN connections is crucial to protect sensitive data transmission and maintain privacy.\n \n\nCommon WAN Technologies\n-----------------------\n\nHere are a few widely-used WAN technologies:\n\n* **Leased Line**: A dedicated, point-to-point communication link provided by telecommunication service providers. It offers a fixed bandwidth and guaranteed quality of service (QoS), making it suitable for businesses requiring high-speed and consistent connectivity.\n \n* **Multiprotocol Label Switching (MPLS)**: A protocol for high-speed data transfer between network nodes. 
MPLS enables traffic engineering, Quality of Service (QoS), and efficient use of bandwidth by labeling data packets and directing them over a predetermined path.\n \n* **Virtual Private Network (VPN)**: A VPN works by creating an encrypted tunnel over the internet between the two communicating sites, effectively creating a private and secure connection over a public network.\n \n* **Software-Defined WAN (SD-WAN)**: A technology that simplifies the management and operation of WANs by decoupling the networking hardware from its control mechanism. It allows businesses to use a combination of transport resources, optimize network traffic, and improve application performance.\n \n\nConclusion\n----------\n\nUnderstanding the concept of WAN is essential in the context of cyber security, as it forms the backbone of connectivity between remote LANs. Ensuring security measures are taken to protect data transmission over WANs is crucial to maintaining the overall protection of businesses and their sensitive information.", + "links": [] + }, + "QCVYF1rmPsMVtklBNDNaB": { + "title": "WLAN", + "description": "A **Wireless Local Area Network (WLAN)** is a type of local area network that uses wireless communication to connect devices, such as computers and smartphones, within a specific area. Unlike a wired network, which requires physical cables to establish connections, WLANs facilitate connections through radio frequency (RF) signals, providing a more flexible networking option.\n\nKey Components of WLAN\n----------------------\n\nThere are two main components in a WLAN:\n\n* **Wireless Access Point (WAP)**: A WAP is a networking device that enables wireless devices to connect to the network. It acts as a bridge between the devices and the wired network, converting RF signals into data that can travel through a wired connection.\n* **Wireless Client**: Wireless clients are devices like laptops, smartphones, and tablets that are fitted with WLAN adapters. 
These adapters enable devices to send and receive wireless signals to connect with the WAP.\n\nKey WLAN Standards\n------------------\n\nThere are several WLAN standards, defined by the Institute of Electrical and Electronics Engineers (IEEE) 802.11 series. Some of the most common standards include:\n\n* **802.11a**: Supports throughput up to 54 Mbps in the 5 GHz frequency band.\n* **802.11b**: Supports throughput up to 11 Mbps in the 2.4 GHz frequency band.\n* **802.11g**: Supports throughput up to 54 Mbps in the 2.4 GHz frequency band and is backward compatible with 802.11b.\n* **802.11n**: Supports throughput up to 600 Mbps and operates in both 2.4 GHz and 5 GHz frequency bands.\n* **802.11ac**: Supports throughput up to several Gigabits per second and operates in the 5 GHz frequency band. This is currently the most widely adopted standard.\n\nWLAN Security\n-------------\n\nAs WLANs use wireless signals to transmit data, they can be susceptible to various security threats. Some essential security measures include:\n\n* **Wired Equivalent Privacy (WEP)**: An early security protocol that uses encryption to protect wireless communications. Due to several security flaws, it has been replaced by more secure protocols.\n \n* **Wi-Fi Protected Access (WPA)**: WPA is an enhanced security protocol that addressed the vulnerabilities of WEP. It uses Temporal Key Integrity Protocol (TKIP) for encryption and provides better authentication and encryption methods.\n \n* **Wi-Fi Protected Access II (WPA2)**: WPA2 is an advanced security protocol that uses Advanced Encryption Standard (AES) encryption and replaces TKIP from WPA. This protocol provides a high level of security and is currently the recommended standard for securing WLANs.\n \n* **Wi-Fi Protected Access 3 (WPA3)**: WPA3 is the latest security standard with enhanced encryption and authentication features. 
It addresses the vulnerabilities in WPA2 and provides even stronger security for WLANs.\n \n\nTo maintain a secure WLAN, it's essential to use the appropriate security standard, change default settings, and regularly update firmware to address any security vulnerabilities.", + "links": [] + }, + "R5HEeh6jwpQDo27rz1KSH": { + "title": "DHCP", + "description": "**Dynamic Host Configuration Protocol (DHCP)** is a network protocol that enables automatic assignment of IP addresses to devices on a network. It is an essential component of IP networking and aims to simplify the process of configuring devices to communicate over an IP-based network.\n\nKey Features of DHCP\n--------------------\n\n* **Automatic IP Address Assignment**: DHCP eliminates the need for manual IP address assignment by automatically providing devices with the necessary IP addresses, reducing the risk of duplicate addressing.\n* **Network Configuration**: In addition to IP addresses, DHCP can also provide other essential network information such as subnet mask, default gateway, and DNS server information.\n* **IP Address Reuse**: When a device leaves the network or no longer needs an IP address, DHCP allows the address to be reused and assigned to a different device.\n* **Lease Duration**: DHCP assigns IP addresses for a specific period called a \"lease.\" After a lease expires, the device must request a new IP address or get its current address renewed.\n\nHow DHCP Works\n--------------\n\nThe DHCP process consists of four main steps:\n\n* **DHCP Discover**: A device (client) looking to join a network sends a broadcast message known as a \"DHCP Discover\" message to locate a DHCP server.\n* **DHCP Offer**: Upon receiving the \"DHCP Discover\" broadcast, the DHCP server responds with a unicast \"DHCP Offer\" message containing the necessary network configuration information (e.g., IP address) for the client.\n* **DHCP Request**: The client receives the offer and sends back a \"DHCP Request\" message 
to confirm the IP address assignment and other network information.\n* **DHCP Acknowledgment (ACK)**: Finally, the DHCP server sends an \"ACK\" message confirming the successful assignment of IP address and network settings. The client can now use the allocated IP address to communicate over the network.\n\nImportance in Cyber Security\n----------------------------\n\nUnderstanding DHCP is crucial for network professionals and cyber security experts as it can be a potential attack vector. Adversaries can exploit DHCP by setting up rogue DHCP servers on the network, conducting man-in-the-middle attacks or even conducting denial-of-service attacks. Consequently, securing DHCP servers, monitoring network traffic for anomalies, and employing strong authentication and authorization methods are essential practices for maintaining network security.", + "links": [] + }, + "r1IKvhpwg2umazLGlQZL1": { + "title": "DNS", + "description": "**DNS** is a key component in the internet infrastructure that translates human-friendly domain names (e.g., `www.example.com`) into IP addresses (e.g., `192.0.2.44`). 
This translation process enables us to easily connect to websites and other online resources without having to remember complex numeric IP addresses.\n\nThe DNS operates as a distributed and hierarchical system which involves the following components:\n\n* **DNS Resolver**: Your device's initial contact point with the DNS infrastructure, often provided by your Internet Service Provider (ISP) or a third-party service like Google Public DNS.\n \n* **Root Servers**: The authoritative servers on the top of the DNS hierarchy that guide DNS queries to the appropriate Top-Level Domain (TLD) servers.\n \n* **TLD Servers**: These servers manage the allocation of domain names for top-level domains, such as `.com`, `.org`, etc.\n \n* **Authoritative Name Servers**: These are the servers responsible for storing the DNS records pertaining to a specific domain (e.g., `example.com`).\n \n\nSome common DNS record types you might encounter include:\n\n* **A (Address) Record**: Maps a domain name to an IPv4 address.\n* **AAAA (Address) Record**: Maps a domain name to an IPv6 address.\n* **CNAME (Canonical Name) Record**: Maps an alias domain name to a canonical domain name.\n* **MX (Mail Exchange) Record**: Specifies the mail servers responsible for handling email for the domain.\n* **TXT (Text) Record**: Contains human-readable or machine-readable text, often used for verification purposes or providing additional information about a domain.\n\nAs an essential part of the internet, the security and integrity of the DNS infrastructure are crucial. However, it's vulnerable to various types of cyber attacks, such as DNS cache poisoning, Distributed Denial of Service (DDoS) attacks, and DNS hijacking. 
Proper DNS security measures, such as DNSSEC (DNS Security Extensions) and monitoring unusual DNS traffic patterns, can help mitigate risks associated with these attacks.", + "links": [ + { + "title": "DNS in detail (TryHackMe)", + "url": "https://tryhackme.com/room/dnsindetail", + "type": "article" + }, + { + "title": "Explore top posts about DNS", + "url": "https://app.daily.dev/tags/dns?ref=roadmapsh", + "type": "article" + }, + { + "title": "DNS Explained in 100 Seconds (YouTube)", + "url": "https://www.youtube.com/watch?v=UVR9lhUGAyU", + "type": "video" + } + ] + }, + "tf0TymdPHbplDHvuVIIh4": { + "title": "NTP", + "description": "**NTP** (Network Time Protocol) is a crucial aspect of cybersecurity, as it helps in synchronizing the clocks of computer systems and other devices within a network. Proper time synchronization is vital for various functions, including authentication, logging, and ensuring the accuracy of digital signatures. In this section, we will discuss the importance, primary functions, and potential security risks associated with NTP.\n\nImportance of NTP in Cybersecurity\n----------------------------------\n\n* **Authentication**: Many security protocols, such as Kerberos, rely on accurate timekeeping for secure authentication. Time discrepancies may lead to authentication failures, causing disruptions in network services and affecting the overall security of the system.\n* **Logging and Auditing**: Accurate timestamps on log files are essential for identifying and investigating security incidents. Inconsistent timing can make it challenging to track malicious activities and correlate events across systems.\n* **Digital Signatures**: Digital signatures often include a timestamp to indicate when a document was signed. 
Accurate time synchronization is necessary to prevent tampering or repudiation of digital signatures.\n\nPrimary Functions of NTP\n------------------------\n\n* **Clock Synchronization**: NTP helps in coordinating the clocks of all devices within a network by synchronizing them with a designated reference time source, usually a central NTP server.\n* **Time Stratum Hierarchy**: NTP uses a hierarchical system of time servers called \"stratum\" to maintain time accuracy. Servers at a higher stratum provide time to lower stratum servers, which in turn synchronize the clocks of client devices.\n* **Polling**: NTP clients continually poll their configured NTP servers at regular intervals to maintain accurate time synchronization. This process allows for the clients to adjust their clocks based on the information received from the server.\n\nSecurity Risks and Best Practices with NTP\n------------------------------------------\n\nWhile NTP is essential for maintaining accurate time synchronization across a network, it is not without security risks:\n\n* **NTP Reflection/Amplification Attacks**: These are a type of DDoS (Distributed Denial of Service) attack that leverages misconfigured NTP servers to amplify malicious traffic targeted at a victim's system. To mitigate this risk, ensure your NTP server is securely configured to prevent abuse by attackers.\n* **Time Spoofing**: An attacker can manipulate NTP traffic to alter the time on client devices, potentially causing authentication failures or allowing unauthorized access. Use authentication keys with NTP to ensure the integrity of time updates by verifying the server's identity.\n* **Untrusted Servers**: Obtain time from a reliable time source to prevent tampering. 
Always configure clients to use trusted NTP servers, like [pool.ntp.org](http://pool.ntp.org), which provides access to a global group of well-maintained NTP servers.\n\nBy understanding and implementing these crucial aspects of NTP, you can improve the overall security posture of your network by ensuring accurate time synchronization across all systems.", + "links": [] + }, + "hN8p5YBcSaPm-byQUIz8L": { + "title": "IPAM", + "description": "IP Address Management (IPAM) is a critical aspect of cyber security, as it helps organizations efficiently manage and track their IP addresses, DNS, and DHCP services. In any network, devices like servers, routers, and switches are assigned unique IP addresses, which enables them to communicate with each other. Efficient and secure management of these IP addresses is vital for maintaining network security and preventing unauthorized access.\n\nFunctions of IPAM\n-----------------\n\n* **IPv4 and IPv6 address management:** IPAM enables organizations to manage and keep track of their IPv4 and IPv6 addresses. It allows for the allocation, assignment, and control of IP addresses in networks, preventing conflicts and errors.\n \n* **DNS integration:** A well-organized IPAM system can integrate with DNS services to provide consistent and accurate information about the network. This helps organizations in keeping their DNS records up-to-date and secure.\n \n* **DHCP integration:** IPAM works hand-in-hand with DHCP services to manage and monitor IP address leases within the network. This ensures that devices are assigned dynamic IP addresses and automatically updated when a lease expires.\n \n* **Network discovery and auditing:** IPAM enables network discovery, scanning, and auditing to ensure that all connected devices are accounted for and comply with security policies. 
Regular network discovery can also identify rogue devices or unauthorized access.\n \n* **Policy compliance:** IPAM can help enforce policies related to IP address assignment and usage within an organization. This may include restrictions on the use of certain types of addresses or preventing specific devices from obtaining an IP address.\n \n* **Inventory management and allocation:** IPAM allows organizations to maintain an inventory of available IP addresses, subnets, and address pools. This streamlines IP allocation processes and ensures that addresses are optimally utilized.\n \n* **Reporting and analytics:** An IPAM system can provide detailed reports on IP address usage, allocation history, and other statistics. This information can help organizations identify trends, optimize their networks, and improve overall security.\n \n\nIn conclusion, IPAM plays a vital role in cyber security by enabling organizations to manage and monitor their IP address spaces efficiently. Implementing a comprehensive IPAM solution can help organizations maintain secure and effective network communication, comply with policies, and prevent unauthorized access.", + "links": [] + }, + "P0ZhAXd_H-mTOMr13Ag31": { + "title": "Star", + "description": "In a star network topology, all devices (nodes) are connected to a central device, called a hub or switch. 
The central device manages the data transmission between the devices connected to it, creating a star-like structure.\n\nAdvantages\n----------\n\n* **Easy to Install and Configure**: Adding new devices or removing existing ones is quite simple, as they only have to connect or disconnect from the central hub or switch.\n* **Fault-Tolerance**: If a device fails or a connection is broken, the rest of the devices can continue to communicate with each other without any major impact.\n* **Centralized Management**: The central hub or switch can easily manage and monitor the network devices, which makes troubleshooting and maintenance more efficient.\n* **Scalability**: It is easy to expand a star network by connecting additional devices to the central hub or switch, allowing for network growth without affecting performance.\n\nDisadvantages\n-------------\n\n* **Dependency on Central Hub or Switch**: If the central device fails, the entire network becomes inoperable. It is essential to ensure the reliability of the central device in a star network.\n* **Cost**: Since a central hub or switch is required, star topologies can be more expensive compared to other network topologies, especially when dealing with larger networks. Additionally, cabling costs can be higher due to individual connections to the central device.\n* **Limited Range**: The distance between devices is determined by the length of the cables connecting to the central hub or switch. Longer cable runs can increase latency and decrease network performance.\n\nApplications\n------------\n\nStar topology is commonly used in home and office networks, as well as in local area networks (LANs). 
It is a suitable choice when centralized control and easier network management are necessary, or when scalability and easy addition of new devices are a priority.", + "links": [] + }, + "9vEUVJ8NTh0wKyIE6-diY": { + "title": "Ring", + "description": "Ring topology is a type of network configuration where each device is connected to two other devices, forming a circular layout or ring. In this topology, data packets travel from one device to another in a unidirectional manner until they reach the intended recipient or return to the sender, indicating that the recipient was not found in the network.\n\nAdvantages of Ring Topology\n---------------------------\n\n* **Easy to Install and Configure:** Ring topology is relatively simpler to set up and maintain as it involves connecting each device to the two adjacent devices only.\n* **Predictable Data Transfer Time:** As data packets move in a circular pattern, it becomes easier to predict the maximum time required for a packet to reach its destination.\n* **Minimal Network Congestion:** The unidirectional flow of packets can significantly reduce the chances of network congestion, as the collision of data packets is less likely.\n\nDisadvantages of Ring Topology\n------------------------------\n\n* **Dependency on All Devices:** The malfunctioning of a single device or cable can interrupt the entire network, making it difficult to isolate the cause of the issue.\n* **Limited Scalability:** Adding or removing devices in a ring topology can temporarily disrupt the network as the circular pattern needs to be re-established.\n* **Slower Data Transfer:** Since data packets must pass through multiple devices before reaching the destination, the overall speed of data transfer can be slower compared to other topologies.\n\nDespite its drawbacks, ring topology can be a suitable option for small networks with a predictable data transfer pattern that require minimal maintenance and setup effort. 
However, for larger and more complex networks, other topologies like star, mesh, or hybrid configurations may provide better flexibility, reliability, and performance.", + "links": [] + }, + "PYeF15e7iVB9seFrrO7W6": { + "title": "Mesh", + "description": "Mesh topology is a network configuration that involves direct connections between each node or device within the network. In other words, each node is connected to every other node in the network, resulting in a highly interconnected structure. This topology is commonly used in wireless communication systems, where devices communicate with one another directly without the need for a centralized hub or switch.\n\nAdvantages of Mesh Topology\n---------------------------\n\n* **Increased reliability**: Mesh topology is highly reliable, as the failure of one node or connection does not affect the performance of the entire network. If a connection fails, data can still travel through alternative routes within the network, ensuring uninterrupted communication.\n* **Fault tolerance**: Mesh networks have a high level of fault tolerance, as they can easily recover from hardware failures or network errors. This is especially useful for critical systems that require high availability and resilience.\n* **Scalability**: Mesh networks are highly scalable, as there are no limitations on the number of devices that can be added to the network. This is particularly useful for large organizations or rapidly changing environments that require the ability to easily grow and adapt.\n* **Improved data transmission**: The direct connections between nodes in a mesh network provide multiple pathways for data transmission, resulting in faster, more efficient communication with fewer bottlenecks or congestion points.\n\nDisadvantages of Mesh Topology\n------------------------------\n\n* **Complexity**: Mesh topology can be quite complex, particularly as the number of devices increases. 
This can lead to challenges in configuring, managing, and troubleshooting the network.\n* **High costs**: Implementing a mesh topology can be expensive due to the large number of connections and high-quality hardware required to maintain a reliable, efficient network.\n* **Increased latency**: As data travels through multiple nodes before reaching its destination, this can sometimes result in increased latency compared to other network topologies.\n* **Power consumption**: Wireless mesh networks, in particular, can consume more power than other topologies due to the need for each node to maintain multiple connections, potentially reducing the battery life of devices.\n\nIn summary, mesh topology offers a robust, fault-tolerant, and scalable network configuration ideal for systems that demand high reliability and flexible growth. However, its complexity, costs, and potential latency and power consumption issues need to be carefully considered when deciding whether it is the most suitable network topology for a specific scenario.", + "links": [] + }, + "0DWh4WmLK_ENDuqQmQcu4": { + "title": "Bus", + "description": "A **bus topology** is a type of network configuration where all the devices or nodes in the network are connected to a single, central cable known as the bus, backbone or trunk. This common shared path serves as the medium for data transmission and communication amongst the nodes.\n\nHow Bus Topology Works\n----------------------\n\nIn a bus topology, every node has a unique address that identifies it on the network. When a node wants to communicate with another node in the network, it broadcasts a message containing the destination node's address as well as its own address. 
All the nodes connected to the bus receive the message, but only the intended recipient with the matching address responds.\n\nAdvantages of Bus Topology\n--------------------------\n\n* **Easy to set up**: Bus topology is relatively simple in terms of installation, as it requires less cable and minimal hardware.\n* **Cost-effective**: Due to its simplicity and reduced cabling requirements, it's typically more affordable to implement than other topologies.\n* **Expandable**: New nodes can be easily added to the network by connecting them to the bus.\n\nDisadvantages of Bus Topology\n-----------------------------\n\n* **Limited Scalability**: As the number of nodes increases, network performance may decrease due to increased collisions and data transmission time.\n* **Single point of failure**: If the central cable (bus) fails or gets damaged, the entire network will be affected and may result in a complete breakdown.\n* **Maintenance difficulty**: Troubleshooting and identifying issues within the network can be challenging due to the shared path for data transmission.\n\nBus topology can be an effective solution for small networks with minimal devices. However, as network size and complexity increase, other topologies such as star, ring, or mesh may be more suitable for maintaining efficiency and reliability.", + "links": [] + }, + "8Mog890Lj-gVBpWa05EzT": { + "title": "SSH", + "description": "SSH, or Secure Shell, is a cryptographic network protocol that provides a secure and encrypted method for managing network devices and accessing remote servers. 
SSH is widely used by administrators and developers to enable secure remote access, file transfers, and remote command execution over unsecured networks, such as the internet.\n\nKey Features\n------------\n\n* **Encryption**: SSH uses a variety of encryption algorithms to ensure the confidentiality and integrity of data transmitted between the client and server.\n \n* **Authentication**: SSH supports multiple authentication methods, including password-based, public key, and host-based authentication, providing flexibility in securely verifying the identities of communicating parties.\n \n* **Port Forwarding**: SSH allows forwarding of network ports, enabling users to tunnel other protocols securely, such as HTTP or FTP, through an encrypted connection.\n \n* **Secure File Transfer**: SSH provides two file transfer protocols, SCP (Secure Copy Protocol) and SFTP (SSH File Transfer Protocol), to securely transfer files between a local client and remote server.\n \n\nCommon Use Cases\n----------------\n\n* **Remote System Administration**: Administrators can securely access and manage remote systems, such as servers and network devices, using SSH to execute commands and configure settings.\n \n* **Secure File Transfers**: Developers and administrators can transfer files securely between systems using SCP or SFTP, protecting sensitive data from eavesdropping.\n \n* **Remote Application Access**: Users can securely access remote applications by creating an SSH tunnel, allowing them to connect to services that would otherwise be inaccessible due to firewalls or other network restrictions.\n \n\nTips for Secure SSH Usage\n-------------------------\n\n* **Disable root login**: To reduce the risk of unauthorized access, it is recommended to disable direct root login and use a standard user account with sudo privileges for administration tasks.\n \n* **Use Key-Based Authentication**: To further enhance security, disallow password-based authentication and use public key 
authentication instead, making it more difficult for attackers to gain access through brute-force attacks.\n \n* **Limit SSH Access**: Restrict SSH access to specific IP addresses or networks, minimizing the potential attack surface.\n \n* **Keep SSH Software Updated**: Regularly update your SSH client and server software to ensure you have the latest security patches and features.\n \n\nIn summary, SSH is a vital protocol for ensuring secure communication, remote access, and file transfers. By understanding its key features, use cases, and best practices, users can leverage the security benefits of SSH to protect their sensitive data and systems.", + "links": [] + }, + "Ia6M1FKPNpqLDiWx7CwDh": { + "title": "RDP", + "description": "**Remote Desktop Protocol (RDP)**, developed by Microsoft, is a proprietary protocol that enables users to connect to a remote computer over a network, and access and control its resources, as if they were using the computer locally. This is useful for users who need to work remotely, manage servers or troubleshoot issues on another computer.\n\nHow RDP Works\n-------------\n\nRDP uses a client-server architecture, where the remote computer being accessed acts as the server and the user's computer acts as the client. 
The client establishes a connection with the server to access its resources, such as display, keyboard, mouse, and other peripherals.\n\nThe protocol primarily operates on standard Transmission Control Protocol (TCP) port 3389 (although it can be customized) and uses the User Datagram Protocol (UDP) to provide a more robust and fault-tolerant communication channel.\n\nFeatures of RDP\n---------------\n\n* **Multi-platform support:** Although developed by Microsoft, RDP clients are available for various platforms, including Windows, macOS, Linux, and even mobile devices like Android and iOS.\n* **Secure connection:** RDP can provide encryption and authentication to secure the connection between client and server, ensuring that data transmitted over the network remains confidential and protected from unauthorized access.\n* **Dynamic resolution adjustment:** RDP can adapt the remote computer's screen resolution to fit the client's screen, providing a better user experience.\n* **Clipboard sharing:** RDP allows users to copy and paste content between the local and remote computers.\n* **Printer and file sharing:** Users can access and print files from their local computer to the remote one, and vice versa.\n\nSecurity Considerations\n-----------------------\n\nThough RDP is popular and useful, it does come with its share of security concerns. 
Some common risks include:\n\n* Unauthorized access: If an attacker successfully gains access to an RDP session, they may be able to compromise and control the remote computer.\n* Brute force attacks: Attackers may use brute force techniques to guess login credentials, especially if the server has a weak password policy.\n* Vulnerabilities: As a proprietary protocol, RDP can be susceptible to vulnerabilities that could lead to system breaches.\n\nTo mitigate these risks, you should:\n\n* Use strong, unique passwords for RDP accounts and consider implementing two-factor authentication.\n* Limit RDP access to specific IP addresses or Virtual Private Networks (VPNs) to reduce exposure.\n* Apply security patches regularly to keep RDP up-to-date and minimize the risk of exploits.\n* Employ network-level authentication (NLA) to offer an additional layer of security.", + "links": [] + }, + "ftYYMxRpVer-jgSswHLNa": { + "title": "FTP", + "description": "**File Transfer Protocol (FTP)** is a standard network protocol used to transfer files from one host to another host over a TCP-based network, such as the Internet. Originally developed in the 1970s, it's one of the earliest protocols for transferring files between computers and remains widely used today.\n\nHow FTP Works\n-------------\n\nFTP operates on a client-server model, where one computer acts as the client (the sender or requester) and the other acts as the server (the receiver or provider). 
The client initiates a connection to the server, usually by providing a username and password for authentication, and then requests a file transfer.\n\nFTP uses two separate channels to carry out its operations:\n\n* **Control Channel:** This channel is used to establish the connection between the client and the server and send commands, such as specifying the file to be transferred, the transfer mode, and the directory structure.\n* **Data Channel:** This channel is used to transfer the actual file data between the client and the server.\n\nFTP Modes\n---------\n\nFTP offers two modes of file transfer:\n\n* **ASCII mode:** This mode is used for transferring text files. It converts the line endings of the files being transferred to match the format used on the destination system. For example, if the file is being transferred from a Unix system to a Windows system, the line endings will be converted from LF (Unix) to CR+LF (Windows).\n* **Binary mode:** This mode is used for transferring binary files, such as images, audio files, and executables. No conversion of the data is performed during the transfer process.\n\nFTP Security Concerns\n---------------------\n\nFTP has some significant security issues, primarily because it was designed before the widespread use of encryption and authentication mechanisms. 
Some of these concerns include:\n\n* Usernames and passwords are transmitted in plain text, allowing anyone who can intercept the data to view them.\n* Data transferred between the client and server is not encrypted by default, making it vulnerable to eavesdropping.\n* FTP does not provide a way to validate a server's identity, leaving it vulnerable to man-in-the-middle attacks.\n\nTo mitigate these security risks, several secure alternatives to the FTP protocol have been developed, such as FTPS (FTP Secure) and SFTP (SSH File Transfer Protocol), which encrypt data transfers and provide additional security features.\n\nIn conclusion, FTP is a commonly used protocol for transferring files between computers over a network. While it is easy to use, it has significant security vulnerabilities that make it a less desirable option for secure file transfers. It's essential to use more secure alternatives like FTPS or SFTP for transferring sensitive data.", + "links": [ + { + "title": "What Is FTP: FTP Explained for Beginners", + "url": "https://www.hostinger.com/tutorials/what-is-ftp", + "type": "article" + } + ] + }, + "YEy6o-clTBKZp1yOkLwNb": { + "title": "SFTP", + "description": "**SFTP** (Secure File Transfer Protocol) is a network protocol designed to securely transfer files over an encrypted connection, usually via SSH (Secure Shell). 
SFTP provides file access, file transfer, and file management functionalities, making it a popular choice for secure file transfers between a client and a server.\n\nKey features of SFTP\n--------------------\n\n* **Security**: SFTP automatically encrypts data before it is sent, ensuring that your files and sensitive data are protected from unauthorized access while in transit.\n \n* **Authentication**: SFTP relies on SSH for user authentication, allowing you to use password-based, public key, or host-based authentication methods.\n \n* **File Integrity**: SFTP uses checksums to verify that transferred files have maintained their integrity during transport, allowing you to confirm that files received are identical to those sent.\n \n* **Resume Capability**: SFTP offers support for resuming interrupted file transfers, making it an ideal choice for transferring large files or transferring files over potentially unreliable connections.\n \n\nHow SFTP works\n--------------\n\nSFTP operates over an established SSH connection between the client and server. Upon successful SSH authentication, the client can issue commands to the server, such as to list, upload, or download files. The data transferred between the client and server is encrypted, ensuring that sensitive information is not exposed during the transfer process.\n\nWhen to use SFTP\n----------------\n\nSFTP is an ideal choice whenever you need to securely transfer files between a client and a server. 
Examples of when you might want to use SFTP instead of other protocols include:\n\n* Transferring sensitive data such as customer information, financial records, or intellectual property.\n* Uploading or downloading files to/from a remote server in a secure manner, especially when dealing with confidential data.\n* Managing files on a remote server, which may involve creating, renaming, or deleting files and directories.\n\nOverall, SFTP provides a secure and reliable way of transferring files over the internet, making it an essential tool for maintaining the integrity and confidentiality of your data in today's cyber security landscape.", + "links": [] + }, + "3Awm221OJHxXNLiL9yxfd": { + "title": "HTTP / HTTPS", + "description": "HTTP (Hypertext Transfer Protocol) and HTTPS (Hypertext Transfer Protocol Secure) are two important protocols that are crucial for transferring data over the internet. They form the primary means of communication between web servers and clients (browsers).\n\nHTTP\n----\n\nHTTP is an application-layer protocol that allows clients and servers to exchange information, such as web pages, images, and other content. When you visit a website, your browser sends an HTTP request to the server, which then responds with the requested data. This data is then rendered by your browser.\n\nHTTP operates on a stateless, request-response model. This means that each request is independent of the others, making it a fast and efficient way of transmitting data.\n\nHowever, HTTP has one significant drawback — it's not secure. Since it's transmitted in plain text, anyone intercepting the traffic can easily read the content of the messages. This makes HTTP unsuitable for sensitive information like passwords or credit card numbers.\n\nHTTPS\n-----\n\nTo address the security concerns of HTTP, HTTPS was introduced as a secure alternative. 
HTTPS uses encryption to ensure that data transmitted between the client and server is confidential and cannot be deciphered by a third-party.\n\nHTTPS uses either SSL (Secure Sockets Layer) or TLS (Transport Layer Security) to encrypt data. These cryptographic protocols provide end-to-end security, ensuring data integrity and authentication. When you visit a website with HTTPS, you can be confident that your information is being securely transmitted.\n\nTo implement HTTPS, websites need to obtain an SSL/TLS certificate from a trusted Certificate Authority (CA). This certificate authenticates the website's identity and helps establish a secure connection between the client and server.\n\nIn Summary\n----------\n\nWhen browsing the internet, always look for the padlock icon in the address bar, which indicates a secure HTTPS connection. This helps protect your personal information from being intercepted by attackers. As a website owner or developer, it's crucial to prioritize implementing HTTPS, to provide a secure and trustworthy experience for your users.", + "links": [] + }, + "LKK1A5-xawA7yCIAWHS8P": { + "title": "SSL / TLS", + "description": "**Secure Socket Layer (SSL)** and **Transport Layer Security (TLS)** are cryptographic protocols designed to provide security and data integrity for communications over networks. These protocols are commonly used for securing web traffic and ensuring that sensitive information, such as credit card numbers and login credentials, are transmitted securely between clients (e.g., web browsers) and servers.\n\nSSL\n---\n\nSSL was developed by Netscape in the mid-1990s and has gone through several iterations. The last version, SSLv3, was released in 1996. SSL was deprecated in 2015 due to security concerns, and it is not recommended for use in modern applications.\n\nTLS\n---\n\nTLS is the successor to SSL and is continually evolving with new versions and updates. The most recent version, TLS 1.3, was released in 2018. 
TLS is widely used and considered the standard for securing web traffic.\n\nHow SSL/TLS Works\n-----------------\n\nSSL/TLS operates by encrypting the data transmitted between a client and a server, ensuring that the data cannot be easily intercepted or tampered with. The encryption is achieved using a combination of cryptographic algorithms, key exchanges, and digital certificates.\n\nHere are the key steps in setting up an SSL/TLS connection:\n\n* **Handshake:** The client and server will engage in a process called a \"handshake\" to establish a secure connection. During this process, the client and server agree on which version of SSL/TLS to use, and choose the cipher suites and cryptographic algorithms they will use to secure the communication.\n \n* **Key Exchange:** The client and server will perform a key exchange, a process by which they generate and securely share encryption keys. These keys will be used to encrypt and decrypt the data being transmitted between them.\n \n* **Certificate Verification:** The server will provide a digital certificate, which contains its public key and information about the server. The client checks the validity of the certificate by confirming that it was issued by a trusted Certificate Authority (CA) and has not expired.\n \n* **Secure Communication:** Once the handshake, key exchange, and certificate verification are complete, the client and server can begin securely transmitting data using the encryption keys they have shared.\n \n\nAdvantages of SSL/TLS\n---------------------\n\n* **Secure communication:** SSL/TLS provides a secure, encrypted tunnel for data to be transmitted between clients and servers, protecting sensitive information from eavesdropping, interception, and tampering.\n \n* **Authentication:** SSL/TLS uses digital certificates to authenticate the server and sometimes the client. 
This helps to ensure that the parties involved in the communication are who they claim to be.\n \n* **Data integrity:** SSL/TLS includes mechanisms to confirm that the data received has not been tampered with during transmission, maintaining the integrity of the information being sent.", + "links": [] + }, + "AjywuCZdBi9atGUbetlUL": { + "title": "VMware", + "description": "_VMware_ is a global leader in virtualization and cloud infrastructure solutions. Established in 1998, they have been at the forefront of transforming the IT landscape. VMware's virtualization platform can be applied to a wide range of areas such as data centers, desktops, and applications.\n\nVMware Products and Technologies\n--------------------------------\n\nSome of the popular VMware products include the following:\n\n* **VMware vSphere**: It is the most well-known VMware product, and it forms the foundation of the virtual infrastructure. vSphere enables you to create, manage and run multiple virtual machines on a single physical server. It essentially provides better utilization of hardware resources and enhanced server management.\n \n* **VMware Workstation**: This desktop virtualization product allows you to run multiple isolated operating systems on a single Windows or Linux PC. It enables you to create and manage virtual machines effortlessly and is primarily targeted at developers and IT professionals.\n \n* **VMware Fusion**: Similar to the Workstation but designed specifically for Mac users, Fusion allows you to run Windows and Linux applications on a Mac without requiring a reboot.\n \n* **VMware Horizon**: This product focuses on providing remote access to virtual desktops and applications. It helps organizations to securely deliver resources to users, improve desktop management, and reduce costs associated with maintaining traditional PCs.\n \n* **VMware NSX**: NSX is VMware's network virtualization and security platform. 
It is designed to work in tandem with VMware vSphere and other virtualization platforms, providing advanced networking and security features like micro-segmentation, distributed firewalling, and load balancing.\n \n* **VMware vSAN**: vSAN is a software-defined storage solution that allows you to decouple storage functions from the underlying hardware. With vSAN, you can pool together direct-attached storage devices across multiple vSphere servers and create a shared datastore that can be easily managed and scaled.\n \n\nBenefits of VMware Virtualization\n---------------------------------\n\nVMware's virtualization technologies offer various advantages, such as:\n\n* **Increased efficiency**: By consolidating multiple physical servers into virtual machines running on fewer physical servers, resource utilization is improved, which reduces energy and hardware costs.\n \n* **Flexibility**: Virtualization allows you to run multiple operating systems and applications simultaneously, which increases productivity and enables you to switch between tasks more quickly.\n \n* **Scalability**: VMware makes it easy to add or remove virtual machines and resources as needed, allowing you to scale your IT infrastructure efficiently.\n \n* **Business continuity**: Virtualization ensures high availability and disaster recovery by replicating your virtual machines and enabling automatic failover to other servers in case of any hardware failure.\n \n* **Simplified management**: Virtualized environments can be managed from a central location, reducing the time and effort required to maintain and monitor IT resources.\n \n\nIn conclusion, VMware is an industry-leading company providing various virtualization products and services that cater to different types of users and environments. 
As a user, you should evaluate your requirements and choose the right VMware product for your needs to fully reap the benefits of virtualization.", + "links": [] + }, + "vGVFhZXYOZOy4qFpLLbxp": { + "title": "VirtualBox", + "description": "VirtualBox is a powerful, open-source and feature-rich virtualization software created by Oracle Corporation. It allows users to set up and run multiple guest operating systems, referred to as \"virtual machines\" (VMs), within a single host computer. VirtualBox operates on a wide range of operating systems, including Windows, macOS, Linux, and Solaris, making it highly versatile for different users and environments.\n\nKey Features\n------------\n\n* **Cross-platform compatibility**: VirtualBox can be installed and used on a variety of host operating systems. This is beneficial for users who work with multiple platforms and require access to different applications or environments across them.\n \n* **Snapshot functionality**: This feature allows users to take a snapshot of their virtual machine, capturing its current state. This can be useful for testing updates or changes, as users can revert to their previous snapshot if conflicts or issues arise.\n \n* **USB device support**: VirtualBox allows users to access USB devices connected to their host computer, such as flash drives, printers, or webcams, from within their guest operating system.\n \n* **Shared folders**: Users can easily share files between their host system and virtual machines using a shared folder feature. 
This simplifies file transfers and resource sharing between your host computer and your virtual environments.\n \n\nSetting up VirtualBox\n---------------------\n\n* Download and install the latest version of VirtualBox from the [official website](https://www.virtualbox.org/).\n* Once installed, launch the VirtualBox application.\n* Click on \"New\" to create a new virtual machine and follow the wizard to configure the VM settings, such as the operating system, memory allocation, and virtual hard disk.\n* Once the VM is configured, click \"Start\" to launch the virtual machine.\n* Install your desired guest operating system within the virtual machine.\n\nAdvantages of VirtualBox\n------------------------\n\n* Open-source software: VirtualBox is free and its source code is available for users to modify and contribute to.\n \n* Simple user interface: VirtualBox has an intuitive and easy-to-use interface, making it user-friendly for beginners and professionals alike.\n \n* Regular updates and improvements: Oracle Corporation and the community behind VirtualBox regularly release updates, bug fixes, and new features, ensuring that the software remains up-to-date and dynamic.\n \n\nConsiderations\n--------------\n\nWhile VirtualBox has numerous benefits, there are certain performance limitations when compared to other, more advanced virtualization solutions, such as VMware or Hyper-V. Users working with resource-intensive operating systems or applications may experience some performance differences when utilizing VirtualBox as their choice of virtualization software.\n\n* * *\n\nIn conclusion, VirtualBox is a powerful and flexible tool for creating and managing virtual environments on a variety of host operating systems. 
With its open-source nature, cross-platform compatibility, and user-friendly interface, it is an excellent choice for cybersecurity enthusiasts and professionals looking to explore virtualization technologies.", + "links": [] + }, + "BisNooct1vJDKaBKsGR7_": { + "title": "ESXi", + "description": "VMware ESXi is a Type 1 hypervisor and the core building block for VMware's virtualization technology. It represents a bare-metal hypervisor, which means it is installed directly onto your physical server's hardware, without the need for a supporting operating system. This results in elevated performance, reduced overhead, and efficient resource allocation.\n\nKey features and benefits of ESXi include:\n\n* **Bare-metal performance**: ESXi can provide better performance by executing directly on the hardware, without the need for an additional operating system layer.\n \n* **Security**: ESXi has a smaller footprint and is more resistant to attacks due to its limited scope and stringent VMware policies.\n \n* **Resource allocation**: ESXi allows for efficient allocation of resources, such as memory and CPU time, as it directly controls hardware.\n \n* **Scalability**: ESXi provides a simple and efficient environment to run multiple virtual machines (VMs) on a single server, which can reduce the need for additional hardware.\n \n* **Centralized management**: VMware offers vSphere, a centralized management platform that integrates seamlessly with ESXi, making it easy to deploy, manage, and maintain large-scale virtual infrastructure.\n \n* **Compatibility**: ESXi is compatible with a wide variety of hardware, which makes deployment and implementation more flexible and cost-effective.\n \n\nTo get started with ESXi, you'll need to have compatible hardware and download the ESXi ISO from VMware's website. After installing it on your server, you can manage the virtual machines through VMware vSphere Client or other third-party tools. 
For more advanced management features, such as high availability, fault tolerance, and distributed resource scheduling, consider investing in VMware vSphere to fully leverage ESXi's potential.\n\nIn summary, VMware's ESXi enables organizations to create, run, and manage multiple virtual machines on a single physical server. With its bare-metal performance, robust security, and seamless integration with management tools, ESXi is a powerful solution for businesses looking to optimize their IT infrastructure through virtualization technologies.", + "links": [] + }, + "jqX1A5hFF3Qznqup4lfiF": { + "title": "Proxmox", + "description": "Proxmox is an open-source platform for enterprise-level virtualization. It is a complete server virtualization management solution that allows system administrators to create and manage virtual machines in a unified environment.\n\nKey Features\n------------\n\n* **Server Virtualization**: Proxmox enables you to turn your physical server into multiple virtual servers, each running its own operating system, applications, and services. This helps to maximize server usage and reduce operating costs.\n \n* **High Availability**: Proxmox VE supports high availability and failover. In case of hardware or software failure, automatic migration of virtual machines can prevent downtime for critical applications and services.\n \n* **Storage**: Proxmox offers a variety of storage solution options, including local (LVM, ZFS, directories), network (iSCSI, NFS, GlusterFS, Ceph), and distributed storage (Ceph RBD).\n \n* **Live Migration**: Live migration is a crucial feature that allows you to move running virtual machines from one host to another with minimal downtime.\n \n* **Operating System Support**: Proxmox VE supports a wide range of guest operating systems, including Linux, Windows, BSD, and others.\n \n* **Web Interface**: Proxmox offers a powerful and user-friendly web interface for managing your virtual environment. 
This allows you to create, start, stop or delete virtual machines, monitor their performance, manage their storage, and more from any web browser.\n \n* **Role-based Access Control**: Proxmox VE provides a role-based access control system, allowing you to create users with specific permissions and assign them to different parts of the Proxmox system.\n \n* **Backup and Restore**: Proxmox offers built-in backup and restore functionality, allowing you to easily create full, incremental, or differential backups of your virtual machines and easily restore them when needed.\n \n\nConclusion\n----------\n\nAs a powerful and feature-rich virtualization solution, Proxmox Virtual Environment enables administrators to manage their virtual infrastructure more efficiently and reliably. Boasting an easy-to-use web interface, comprehensive storage options, and support for multiple operating systems, Proxmox VE is an excellent choice for managing your virtual environment.", + "links": [] + }, + "CIoLaRv5I3sCr9tBnZHEi": { + "title": "Hypervisor", + "description": "A **hypervisor** is a software component that plays a vital role in virtualization technology. It enables multiple operating systems to run simultaneously on a single physical host. In the context of cybersecurity, using a hypervisor allows users to create and manage multiple isolated virtual environments, commonly known as **virtual machines (VMs)**, which can help protect sensitive data and applications from threats.\n\nThere are two primary types of hypervisors:\n\n* **Type 1 hypervisors** (_Bare-metal Hypervisors_) - These hypervisors run directly on the host's hardware, without the need for an underlying operating system, offering better performance and security. 
Examples of type 1 hypervisors include VMware ESXi, Microsoft Hyper-V, and Xen.\n \n* **Type 2 hypervisors** (_Hosted Hypervisors_) - These hypervisors run as an application on an existing operating system, which makes them less performant and potentially less secure. However, they are generally easier to set up and manage. Examples of type 2 hypervisors include Oracle VirtualBox, VMware Workstation, and Parallels Desktop.\n \n\nBenefits of using a Hypervisor\n------------------------------\n\nUtilizing a hypervisor in your cybersecurity strategy can provide several benefits, such as:\n\n* **Isolation:** Each VM operates in a separate environment, decreasing the chance that a security breach on one VM will affect the others.\n* **Flexibility:** VMs can be easily created, modified, or destroyed, allowing for easy management and reduced downtime.\n* **Resource Management:** Hypervisors can effectively manage resources among the various VMs, ensuring that no single VM monopolizes the available resources.\n* **Snapshotting:** Hypervisors can create snapshots of a VM's state, allowing for easy recovery and rollback in case of a security incident or system failure.\n\nHypervisor Security Considerations\n----------------------------------\n\nThough hypervisors can enhance your cybersecurity posture, it's essential to be aware of potential security risks and best practices. 
Some security considerations include:\n\n* **Secure configuration and patch management:** Ensure that the hypervisor is configured securely, and patches are applied promptly to protect against known vulnerabilities.\n* **Limiting hypervisor access:** Restrict access to the hypervisor by allowing only authorized users and implementing strong authentication and access controls.\n* **Monitoring:** Implement continuous monitoring and logging mechanisms to detect and respond to potential security threats in the virtual environment.\n* **Network Segmentation:** Isolate sensitive VMs on separate networks or virtual LANs (VLANs) to minimize the risk of unauthorized access or lateral movement within the virtualized environment.\n\nIn conclusion, a hypervisor is a powerful tool in cybersecurity and virtualization. By understanding its types, benefits, and security considerations, you can make informed decisions on how to best leverage hypervisor technology to protect your digital assets.", + "links": [] + }, + "251sxqoHggQ4sZ676iX5w": { + "title": "VM", + "description": "Virtualization technology enables the creation of multiple virtual environments, known as Virtual Machines (VMs), within a single physical computer. VMs function independently of each other, allowing users to run various operating systems and applications in a single hardware platform.\n\nWhat are Virtual Machines?\n--------------------------\n\nA virtual machine (VM) is a virtual environment that emulates a physical computer, allowing you to run an operating system and applications separately from the underlying hardware. VMs allow for efficient utilization of computer resources, as they enable multiple instances of a system to run on the same physical machine.\n\nKey Components of VMs\n---------------------\n\nHypervisor\n----------\n\nA hypervisor, also known as a virtual machine monitor (VMM), is the software responsible for creating, managing, and monitoring the virtual environments on a host machine. 
There are two types of hypervisors:\n\n* **Type 1 Hypervisors:** Also known as \"bare-metal\" or \"native\" hypervisors. They run directly on the hardware and manage the virtual machines without requiring an underlying operating system.\n* **Type 2 Hypervisors:** Known as \"hosted\" hypervisors. They are installed as an application on a host operating system, which then manages the virtual machines.\n\nGuest Operating System\n----------------------\n\nThe guest operating system, or guest OS, is the operating system installed on a virtual machine. Since VMs are independent of each other, you can run different operating systems and applications on each one without any conflicts.\n\nVirtual Hardware\n----------------\n\nVirtual hardware refers to the resources allocated to a virtual machine, such as CPU, RAM, storage, and networking. Virtual hardware is managed by the hypervisor and ensures that each VM has access to a required set of resources without interfering with other VMs on the host machine.\n\nBenefits of Virtual Machines\n----------------------------\n\n* **Resource Efficiency:** VMs optimize the use of hardware resources, reducing costs and enabling more efficient use of energy.\n* **Isolation:** VMs provide a secure and isolated environment for applications and operating systems, reducing the risk of conflicts and potential security threats.\n* **Flexibility:** VMs allow for the easy deployment, migration, and backup of operating systems and applications. 
This makes it simple to test new software, recover from failures, and scale resources as needed.\n* **Cost Savings:** With the ability to run multiple workloads on a single physical machine, organizations can save on hardware, maintenance, and operational expenses.\n\nPopular Virtualization Software\n-------------------------------\n\nThere is a wide range of virtualization software available, including:\n\n* VMware vSphere: A Type 1 hypervisor commonly used in enterprise environments for server virtualization.\n* Microsoft Hyper-V: A Type 1 hypervisor integrated into the Windows Server operating system.\n* Oracle VM VirtualBox: A Type 2 hypervisor that runs on Windows, macOS, and Linux hosts, popular for desktop virtualization.\n\nIn conclusion, virtual machines play a critical role in modern computing, providing a flexible and efficient method to optimize computing resources, isolate applications, and enhance security. Understanding VMs and virtualization technology is an essential part of any comprehensive cybersecurity guide.\n\n[Virtual Machines Part-1 by Abhishek Veeramalla](https://www.youtube.com/watch?v=lgUwYwBozow)", + "links": [] + }, + "LocGETHz6ANYinNd5ZLsS": { + "title": "GuestOS", + "description": "A Guest OS (Operating System) is an essential component in virtualization. It is an operating system that runs within a virtual machine (VM) created by a host operating system or a hypervisor. 
In this scenario, multiple guest operating systems can operate on a single physical host machine, sharing resources provided by the host.\n\nKey Features of Guest OS\n------------------------\n\n* **Resource Sharing**: The guest OS shares the host's resources, such as CPU, memory, and storage, while having a virtualized environment of its own.\n* **Isolation**: Each guest OS operates independently of others on the same host machine, ensuring that the performance or security of one system does not affect the others.\n* **Customization**: You can install and manage different types of guest operating systems on the same host, catering to specific requirements or user preferences.\n* **Portability**: The guest OS and its associated data can be easily moved to another host machine, simplifying the management of multiple systems for businesses and individuals.\n\nUse Cases for Guest OS\n----------------------\n\n* **Testing and Development**: By providing a separate environment to experiment with different applications, guest operating systems are appropriate for testing and development.\n* **Security**: Sandbox environments can be created within the guest OS for analyzing malware or executing potentially unsafe applications, without affecting the host machine's performance or security.\n* **Legacy Applications**: Some older applications may not be compatible with modern operating systems. Having a guest OS with an older OS version helps to run these legacy applications.\n* **Resource Optimization**: Virtualization enables businesses to make the most of their hardware investments, as multiple guest OS can share the resources of a single physical machine.\n\nGuest OS Management\n-------------------\n\nTo manage guest operating systems effectively, you must use virtualization software or a hypervisor. 
Some popular options include:\n\n* **VMware**: VMware provides tools like VMware Workstation and Fusion to create, manage, and run guest OS within virtual machines.\n* **Oracle VirtualBox**: Oracle's VirtualBox is an open-source hypervisor that supports the creation and management of guest operating systems across multiple host OS platforms.\n* **Microsoft Hyper-V**: Microsoft's free hypervisor solution, Hyper-V, is capable of creating and managing guest operating systems on Windows-based host machines.\n\nIn conclusion, a guest operating system plays a vital role in virtualization, allowing users to operate multiple OS within virtual machines on a single host, optimizing resources, and providing the flexibility to work with a variety of applications and environments.", + "links": [] + }, + "p7w3C94xjLwSMm5qA8XlL": { + "title": "HostOS", + "description": "A **Host Operating System (OS)** is the primary operating system installed on a computer that runs directly on the hardware. It serves as the base layer for virtualization, providing resources and an environment for virtual machines (also known as guest operating systems) to operate.\n\nIn virtualization, the host OS allows you to run multiple guest OSs on a single physical hardware system simultaneously, which share resources (such as memory, storage, and CPU) managed by the host OS.\n\nSome key points regarding Host OS in virtualization include:\n\n* _Responsibilities_: The host OS manages hardware resources, including the allocation of those resources to the guest operating systems. It is also responsible for running the virtualization software or hypervisor that creates, manages, and interacts with the virtual machines.\n \n* _Types of Virtualization_: Host OS can be used in two types of virtualization: full virtualization and paravirtualization. 
In full virtualization, guest operating systems run unmodified, while in paravirtualization, guest operating systems need to be modified to efficiently run on the host OS.\n \n* _Security Considerations_: Protecting the host OS is crucial since its vulnerability can potentially affect every virtual machine running on the host. To secure the host, ensure that it is regularly updated, uses strong authentication measures, follows strict access controls, and employs network security best practices.\n \n\nBy understanding host OS and its roles in virtualization, you can better manage your virtual environment and ensure optimal performance and security for your virtual machines.", + "links": [] + }, + "tk4iG5i1Ml9w9KRO1tGJU": { + "title": "nslookup", + "description": "**Nslookup** is a network administration command-line tool designed for retrieving information about Domain Name System (DNS) records. DNS is responsible for translating domain names into IP addresses, allowing users to access websites and resources by using human-readable names (e.g., [www.example.com](http://www.example.com)) instead of numerical IP addresses.\n\nUses\n----\n\n* Query DNS servers to verify the configuration of domain names\n* Find the IP address of a specific domain name\n* Troubleshoot DNS-related issues and errors\n* Identify the authoritative DNS servers for a domain\n\nHow to Use\n----------\n\n* **Open Command Prompt or Terminal**: Press `Windows key + R`, type `cmd`, and press Enter to open Command Prompt on Windows. On macOS or Linux, open Terminal.\n \n* **Running Nslookup**: To start using Nslookup, type `nslookup` and hit Enter. You'll now see the `>` prompt, indicating you are in Nslookup mode.\n \n* **Query DNS Records**: In Nslookup mode, you can query different types of DNS records by first setting the query type and then entering the domain name. For instance, to find the A (address) record of [www.example.com](http://www.example.com), type `set type=A`, press Enter, and then type `www.example.com`. 
To exit Nslookup mode, type `exit`.\n \n\nCommonly Used Record Types\n--------------------------\n\nBelow are some of the most commonly queried DNS record types:\n\n* **A**: Stands for 'Address'; returns the IPv4 address associated with a domain name\n* **AAAA**: Stands for 'Address', for IPv6; returns the IPv6 address associated with a domain name\n* **NS**: Stands for 'Name Server'; returns the authoritative DNS servers for a specific domain\n* **MX**: Stands for 'Mail Exchange'; returns the mail server(s) responsible for handling email for a specific domain\n* **CNAME**: Stands for 'Canonical Name'; returns the domain name that an alias is pointing to\n* **TXT**: Stands for 'Text'; returns additional text information that can be associated with a domain, like security policies (e.g., SPF)\n\nExample\n-------\n\nIf you want to find the A (IPv4) record for [example.com](http://example.com), follow these steps:\n\n* Open Command Prompt or Terminal\n* Type `nslookup` and hit Enter\n* Type `set type=A` and hit Enter\n* Type `example.com` and hit Enter\n\nThis will return the IPv4 address associated with the domain name [example.com](http://example.com).", + "links": [] + }, + "jr8JlyqmN3p7Ol3_kD9AH": { + "title": "iptables", + "description": "**IPTables** is a command-line utility for configuring and managing packet filtering rules within the Linux operating system. It allows the system administrator to define and manage the firewall rules that control the incoming and outgoing network traffic. IPTables is an essential tool for securing Linux systems and ensuring proper network traffic flow.\n\nHow IPTables Works\n------------------\n\nIPTables is built upon a framework called _Netfilter_, which is embedded in the Linux kernel. Netfilter provides various operations on packets, such as filtering, modifying, and redirecting. 
IPTables makes use of these operations by providing a user-friendly interface to define rules based on various criteria like source IP address, destination IP address, protocol, and port numbers.\n\nIPTables organizes rules into chains, where each chain consists of a list of rules. There are three default chains: INPUT, OUTPUT, and FORWARD. These chains represent the different stages a packet goes through in the network stack:\n\n* **INPUT**: Applied to incoming packets destined for the local system.\n* **OUTPUT**: Applied to outgoing packets originating from the local system.\n* **FORWARD**: Applied to packets being routed through the local system.\n\nBasic IPTables Usage\n--------------------\n\nTo list the current IPTables rules, use the following command:\n\n iptables -L\n \n\nTo add a new rule to a specific chain, use the `-A` flag followed by the chain name and the rule details:\n\n iptables -A INPUT -s 192.168.1.2 -j DROP\n \n\nThis command adds a rule to the INPUT chain that drops all packets coming from the IP address 192.168.1.2.\n\nTo delete a rule from a specific chain, use the `-D` flag followed by the chain name and the rule number:\n\n iptables -D INPUT 3\n \n\nThis command removes the third rule in the INPUT chain.\n\nTo insert a rule at a specific position in a chain, use the `-I` flag followed by the chain name, rule number, and the rule details:\n\n iptables -I INPUT 2 -s 192.168.1.3 -j DROP\n \n\nThis command inserts a rule at position 2 in the INPUT chain that drops all packets coming from the IP address 192.168.1.3.\n\nSaving and Restoring IPTables Rules\n-----------------------------------\n\nBy default, IPTables rules are temporary and will be lost upon a system reboot. 
To save the current rules and make them persistent, use the following command:\n\n iptables-save > /etc/iptables/rules.v4\n \n\nTo restore the rules from a saved file, use the following command:\n\n iptables-restore < /etc/iptables/rules.v4\n \n\nConclusion\n----------\n\nIPTables is a powerful tool for managing packet filtering rules in Linux systems. With proper configuration, it can greatly enhance your system's security and ensure smooth network traffic flow. Understanding IPTables can help you diagnose and resolve network-related issues while providing essential protection from cyber threats.", + "links": [] + }, + "k6UX0BJho5arjGD2RWPgH": { + "title": "Packet Sniffers", + "description": "Packet sniffers are essential network troubleshooting tools that capture and inspect data packets passing through a network. They're especially useful for detecting security vulnerabilities, monitoring network traffic, and diagnosing network-related issues.\n\nHow Packet Sniffers Work\n------------------------\n\nPacket sniffers work by actively listening to the network traffic and extracting data from the packets transmitted across the network. 
They can either capture all packets or filter them based on specific criteria, like IP addresses, protocols, or port numbers.\n\nCommon Features\n---------------\n\nSome of the main features offered by packet sniffers include:\n\n* **Capture and analysis**: Packet sniffers can capture and analyze individual data packets, providing detailed information about the packet's header, payload, and other relevant information.\n* **Filtering**: To make it easier for users to locate specific network traffic, packet sniffers often feature filtering options that can narrow down the data to a single protocol, port number, or IP address.\n* **Packet injection**: Some packet sniffers can inject data packets into the network, which is useful for testing security mechanisms or for simulating traffic in a network environment.\n* **Graphical representation**: Packet sniffers may also provide graphical representations for data, making it easier to visualize network traffic patterns and identify potential congestion points or other issues.\n\nPopular Packet Sniffers\n-----------------------\n\nThere are numerous packet sniffers available, both open-source and commercial. Some popular packet sniffers include:\n\n* [@article@Wireshark](https://www.wireshark.org/): A popular open-source packet analyzer with advanced features and support for various platforms.\n* [@article@tcpdump](https://www.tcpdump.org/): A command-line packet sniffer and analyzer primarily used in Unix-based systems.\n* [@article@Npcap](https://nmap.org/npcap/): A packet capture framework for Windows that supports Windows 10 and newer versions.\n\nCyber Security & Packet Sniffers\n--------------------------------\n\nPacket sniffers are valuable tools for cybersecurity professionals. They can help identify unauthorized or malicious network activity, track down the source of specific traffic patterns or attacks, and assist with the development of network security policies. 
When using packet sniffers, it's important to keep in mind that monitoring other users' network activity without their consent may raise legal and ethical issues.\n\nTo sum up, packet sniffers are powerful tools that can provide valuable insights into network traffic and security, ultimately helping to maintain and secure any given network environment.", + "links": [] + }, + "u-6xuZUyOrogh1bU4cwER": { + "title": "ipconfig", + "description": "**IPConfig** is a command-line tool that is available on Windows operating systems. It is used to display the current network configuration settings of a computer, such as IP address, subnet mask, and default gateway. This tool helps users diagnose and troubleshoot network connectivity issues by providing essential details about the system's network connections.\n\nUsing IPConfig\n--------------\n\nTo use IPConfig, open the Command Prompt or PowerShell and enter the following command:\n\n ipconfig\n \n\nThis command will display the network configuration details for all the active network connections on your system.\n\nIPConfig Options\n----------------\n\nIPConfig has several options that can provide more comprehensive information or perform different tasks, such as:\n\n* **/all**: This option displays the full configuration data for all the network connections, including DHCP (Dynamic Host Configuration Protocol) server and lease information.\n \n ipconfig /all\n \n \n* **/release**: This command releases the IP address obtained from the DHCP server for the specified network adapter or all network adapters if none is specified.\n \n ipconfig /release\n \n \n* **/renew**: This command requests a new IP address from the DHCP server for the specified network adapter or all network adapters if none is specified.\n \n ipconfig /renew\n \n \n* **/flushdns**: This option clears the DNS (Domain Name System) resolver cache, which stores the recent DNS queries and their corresponding IP addresses.\n \n ipconfig /flushdns\n \n \n* 
**/registerdns**: This command refreshes all DHCP leases and re-registers DNS names for your system.\n \n ipconfig /registerdns\n \n \n* **/displaydns**: This option displays the contents of the DNS resolver cache, allowing you to view recently resolved domain names and IP addresses.\n \n ipconfig /displaydns\n \n \n* **/setclassid**: This command allows you to modify the DHCP class ID for the specified network adapter.\n \n ipconfig /setclassid\n \n \n* **/showclassid**: This option displays the DHCP class ID for the specified network adapter.\n \n ipconfig /showclassid\n \n \n\nIn conclusion, IPConfig is a powerful and handy tool for managing and troubleshooting network connections on Windows systems. It allows you to view and modify network configuration settings, lease IP addresses, and interact with the DNS resolver cache easily.", + "links": [] + }, + "2M3PRbGzo14agbEPe32ww": { + "title": "netstat", + "description": "Netstat, short for 'network statistics', is a command-line tool that provides valuable information about the network connections, routing tables, and network interface statistics on a computer system. 
Netstat can help in diagnosing and troubleshooting network-related issues by displaying real-time data about network traffic, connections, routes, and more.\n\nKey Features\n------------\n\n* **Network Connections:** Netstat can show open and active network connections, including inbound and outbound, as well as display the ports on which your system is currently listening.\n* **Routing Tables:** Netstat provides information about your system's routing tables, which can help you identify the path a packet takes to reach its destination.\n* **Network Interface Statistics:** Netstat displays statistics for network interfaces, covering details such as packets transmitted, packets received, errors, and more.\n\nCommon Netstat Commands\n-----------------------\n\n* `netstat -a`: Displays all active connections and listening ports\n* `netstat -n`: Displays active connections without resolving hostnames (faster)\n* `netstat -r`: Displays the routing table\n* `netstat -i`: Displays network interfaces and their statistics\n* `netstat -s`: Displays network protocol statistics (TCP, UDP, ICMP)\n\nExample Use Cases\n-----------------\n\n* **Identify Open Ports:** You can use netstat to determine which ports are open and listening on your system, helping you identify potential security vulnerabilities.\n* **Monitor Network Connections:** Netstat allows you to monitor active connections to ensure that nothing unauthorized or suspicious is connecting to your system.\n* **Troubleshoot Network Issues:** By displaying routing table information, netstat can help you understand the pathways your system takes to reach various destinations, which can be crucial when diagnosing network problems.\n\nNetstat is a versatile and powerful tool for gaining insights into your system's network behavior. 
Armed with this knowledge, you'll be better equipped to address potential vulnerabilities and monitor your system's health in the context of cyber security.", + "links": [] + }, + "iJRQHzh5HXADuWpCouwxv": { + "title": "Port Scanners", + "description": "Port scanners are essential tools in the troubleshooting and cybersecurity landscape. They are designed to detect open or closed network ports on a target system. Network ports serve as communication endpoints for various applications and services running on a device, and knowing the status of these ports can help identify potential security vulnerabilities or confirm that specific services are running as intended.\n\nIn this section, we will explore the following aspects of port scanners:\n\n* **Why port scanners are important**\n* **Types of port scanners**\n* **Popular port scanning tools**\n\nWhy port scanners are important\n-------------------------------\n\nPort scanners can help in the following situations:\n\n* **Identifying open ports:** Open ports might expose your system to attacks if they are left unsecured. A port scanner can help you identify which network ports are open and need to be secured.\n* **Detecting unauthorized services:** Scanning for open ports can help you find if any unauthorized applications are running on your network, as these services might open ports that you are not aware of.\n* **Testing firewall rules:** Port scanners can also verify if your firewall rules are effective and configured correctly.\n* **Troubleshooting network issues:** By detecting open and closed ports, port scanners can help you diagnose network problems and ensure your applications and services are running smoothly.\n\nTypes of port scanners\n----------------------\n\nThere are three main types of port scanners:\n\n* **TCP Connect:** This scanner initiates a full TCP connection between the scanner and the target device. 
It goes through the entire process of establishing a TCP connection, including a three-way handshake. This type of scan is accurate but more easily detectable.\n* **TCP SYN or Half-Open scan:** This scanner only sends a SYN packet (a request to start a connection) to the target device. If the target device responds with a SYN/ACK packet, the port is considered open. This type of scan is faster and less detectable, as it doesn't establish a full connection.\n* **UDP Scan:** This scanner targets User Datagram Protocol (UDP) ports, which are typically used for streaming and real-time communication applications. It sends UDP packets to the target device, and if there's no response, the port is considered open. This type of scan can be less accurate, as some devices may not respond to UDP probes.\n\nPopular port scanning tools\n---------------------------\n\nHere are some popular and widely used port scanning tools:\n\n* **Nmap:** Nmap (Network Mapper) is a free, open-source tool that is highly versatile and powerful. It offers various types of scans, including TCP Connect, TCP SYN, and UDP scans.\n* **Masscan:** Masscan is a high-speed port scanner that is typically used for large-scale scanning, thanks to its ability to scan the entire internet within a few minutes.\n* **Angry IP Scanner:** It is a cross-platform port scanner that is very user-friendly and suitable for beginners. It supports both TCP and UDP scanning.\n\nRemember to always use port scanners responsibly and only on your own systems or where you have permission to perform a scan. Unauthorized port scanning can have legal and ethical implications.", + "links": [] + }, + "GuuY-Q6FZzfspB3wrH64r": { + "title": "ping", + "description": "**Ping** is a fundamental networking tool that helps users to check the connectivity between two devices, typically a source computer, and a remote device, such as a server or another computer. 
The name \"ping\" comes from the sonar terminology, where a signal is sent out and a response is expected to verify the presence of an object.\n\nThe ping command operates by sending Internet Control Message Protocol (ICMP) Echo Request packets to the target host and waiting for an ICMP Echo Reply. By sending multiple requests and calculating the time interval between sending the request and receiving a reply, the tool provides valuable information about the quality and reliability of the network connection.\n\nUsing Ping\n----------\n\nTo use the ping command, open a command prompt or terminal window, and type `ping` followed by the IP address or hostname of the target device. For example:\n\n ping example.com\n \n\nInterpreting Ping Results\n-------------------------\n\nThe output of the ping command will display the following information:\n\n* **Sent**: The number of packets sent to the target device.\n* **Received**: The number of packets received from the target device (if connectivity is successful).\n* **Lost**: The number of packets that did not reach the target device, indicating a problem in the connection.\n* **Minimum, Maximum, and Average Round Trip Time (RTT)**: Provides an estimate of the time it takes for a single packet to travel from the source device to the destination and back again.\n\nTroubleshooting with Ping\n-------------------------\n\nPing is particularly useful for diagnosing and troubleshooting network connectivity issues. 
Some common scenarios in which it can help include:\n\n* Verifying if a remote device is active and responding.\n* Identifying network latency or slow network connections.\n* Troubleshooting routing problems and packet loss.\n* Testing the resolution of domain names to IP addresses.\n\nBy understanding and utilizing the ping command, users can diagnose and resolve various network-related issues to ensure a stable and secure online experience.\n\nRemember that some devices or servers may be configured not to respond to ICMP requests, which might result in no response or a \"Request timed out\" message after using the ping command. This behavior is usually configured to prevent potential security risks or attacks, so don't panic if you encounter this while troubleshooting.", + "links": [] + }, + "D2YYv1iTRGken75sHO0Gt": { + "title": "dig", + "description": "`dig`, short for the Domain Information Groper, is a powerful and flexible command-line tool used to perform DNS queries and obtain valuable information about domains, IPs, and DNS records. This utility, available on UNIX-based systems like Linux and macOS, provides an essential function to help diagnose and resolve various issues related to domain name resolution and network connectivity. 
It is highly useful for network administrators and cybersecurity professionals when troubleshooting DNS-related problems.\n\nFeatures\n--------\n\n* **DNS Querying**: `dig` can retrieve various types of DNS records such as A, AAAA, MX, NS, CNAME, and many others.\n* **Flexibility**: With various command-line options, `dig` allows users to customize their queries easily.\n* **User-friendly Formatting**: `dig` provides readable and straightforward responses, simplifying the interpretation of DNS records and related information.\n* **Batch Mode**: The tool enables users to perform multiple DNS queries in a batch file, increasing efficiency.\n\nBasic Usage\n-----------\n\nHere's a basic example of how to use `dig` to perform a DNS query:\n\n dig example.com\n \n\nThis command will return the A (IPv4) record for `example.com`.\n\nTo perform a specific type of DNS query, such as fetching an AAAA (IPv6) record, use the following command:\n\n dig example.com AAAA\n \n\nCommon Options\n--------------\n\nSome common options to use with `dig` include:\n\n* `+short`: Condenses the output, providing only essential information.\n* `-t`: Specifies the type of DNS record to query (e.g., `A`, `AAAA`, `MX`, `NS`, etc.).\n* `+tcp`: Forces `dig` to use TCP instead of the default UDP for the DNS query.\n\nConclusion\n----------\n\nIn summary, `dig` is a valuable command-line tool for performing DNS queries and troubleshooting domain name resolution problems. Its power and flexibility make it an essential tool for any network administrator or cybersecurity professional.", + "links": [] + }, + "hkO3Ga6KctKODr4gos6qX": { + "title": "arp", + "description": "ARP is a crucial network protocol used to map IP addresses to their corresponding MAC (Media Access Control) addresses. This mapping is crucial, as devices on a network use MAC addresses to communicate with one another. 
As IP addresses are easier to remember and utilize for humans, ARP helps in converting these logical addresses to physical addresses that devices can understand.\n\nWhy ARP is important\n--------------------\n\nIn a network, when a device wants to send data to another device, it needs to know the recipient's MAC address. If the sender only knows the IP address, it can use ARP to determine the corresponding MAC address. The mapping is stored in the device's ARP cache, which holds a record of both the IP and MAC addresses. This allows devices to quickly identify and communicate with others on the network.\n\nARP Request and Reply\n---------------------\n\nHere are the basic steps involved in the ARP process:\n\n* The sender creates an ARP request packet with its own IP and MAC addresses, and the recipient's IP address. The packet is broadcast to all devices on the local network.\n* Each device on the network receives the ARP request, checks if the IP address is its own, and replies to the sender as needed.\n* The sender receives the ARP reply containing the recipient's MAC address and updates its ARP cache with the new information.\n* Finally, the sender uses the MAC address to transmit data packets to the intended recipient.\n\nTroubleshooting with ARP\n------------------------\n\nIf you're having issues with network communication or want to investigate your network, the ARP table can be a helpful tool. You can view your device's ARP cache using commands specific to your operating system:\n\n* **Windows**: Open Command Prompt and type `arp -a`\n* **Linux**: Open Terminal and type `arp`\n* **macOS**: Open Terminal and type `arp -a`\n\nThe output will display the IP and MAC addresses of devices on the network that the system has interacted with.\n\nARP Spoofing and Security Concerns\n----------------------------------\n\nAs crucial as ARP is, it can be exploited by attackers for malicious purposes. 
ARP spoofing, also known as ARP poisoning, is a form of cyberattack in which an attacker sends fake ARP requests to a network to link their MAC address with an IP address that legitimately belongs to another device. This enables the attacker to intercept and manipulate network traffic or launch denial-of-service (DoS) attacks.\n\nTo mitigate ARP spoofing, consider implementing security measures such as monitoring ARP traffic, using a static ARP table, or employing security solutions like intrusion detection and prevention systems. Additionally, maintaining a secure and up-to-date network infrastructure can help reduce potential vulnerabilities.", + "links": [] + }, + "K05mEAsjImyPge0hDtsU0": { + "title": "Protocol Analyzers", + "description": "Protocol analyzers, also known as packet analyzers or network analyzers, are tools used to capture and analyze the data packets transmitted across a network. These tools help in monitoring network traffic, identifying security vulnerabilities, troubleshooting network problems, and ensuring that the network is operating efficiently. By analyzing the packets on a network, you can gain insights into the performance of your network infrastructure and the behavior of various devices and applications on it.\n\nFeatures & Uses of Protocol Analyzers\n-------------------------------------\n\n* **Traffic Monitoring & Analysis**: Protocol analyzers allow you to monitor the traffic on your network in real-time, which helps identify bottlenecks, network congestion, and other performance issues.\n \n* **Security Analysis**: Analyzing network traffic can help identify unusual traffic patterns, potential security threats or breaches, and malicious activities. 
By studying the data packets, you can detect unauthorized access, malware infections, or other cyber attacks.\n \n* **Protocol Debugging**: These tools enable you to analyze different network protocols (such as HTTP, FTP, and SMTP) and their respective packets, which proves useful in troubleshooting issues related to application performance and communication.\n \n* **Bandwidth Utilization**: Protocol analyzers allow you to analyze the volume of network traffic and how the available bandwidth resources are being used, helping you optimize the network for better performance.\n \n* **Network Troubleshooting**: By capturing and analyzing packet data, you can identify network problems and take corrective measures to improve the overall performance and stability of the network.\n \n\nPopular Protocol Analyzers\n--------------------------\n\nHere's a list of some widely-used protocol analyzers:\n\n* **Wireshark**: Wireshark is an open-source packet analyzer with support for numerous protocols. 
It is one of the most popular and widely-used network troubleshooting tools available.\n \n* **TCPDump**: TCPDump is a command-line packet analyzer that allows you to capture network traffic and view it in a human-readable format, making it easy to analyze.\n \n* **Ethereal**: Ethereal is another open-source packet analyzer that provides a graphical user interface for capturing, filtering, and analyzing network traffic.\n \n* **Nmap**: Nmap is a popular network scanning tool that also includes packet capture and analysis capabilities, allowing you to analyze the network for vulnerabilities and other issues.\n \n* **Microsoft Message Analyzer**: Microsoft Message Analyzer is a versatile protocol analyzer developed by Microsoft that provides deep packet inspection and analysis of network traffic, including encrypted traffic.\n \n\nIn conclusion, protocol analyzers are essential tools for network administrators, security professionals, and developers alike to ensure the performance, security, and stability of their networks. By understanding how these tools work and using them effectively, you can take proactive measures to maintain and improve the health of your network.", + "links": [] + }, + "xqwIEyGfdZFxk6QqbPswe": { + "title": "nmap", + "description": "**Nmap** (Network Mapper) is an open-source network scanner that is widely used in cyber security for discovering hosts and services on a computer network. 
Nmap allows you to efficiently explore and scan networks to identify open ports, running services, and other security vulnerabilities.\n\nFeatures of Nmap\n----------------\n\n* **Host Discovery**: Nmap facilitates finding hosts on the network using various techniques such as ICMP echo requests, TCP SYN/ACK probes, and ARP scans.\n \n* **Port Scanning**: Nmap can identify open ports on target hosts, which can reveal potential security vulnerabilities and provide crucial information during a penetration test.\n \n* **Service and Version Detection**: Nmap can detect the name and version of the services running on target hosts. This information helps to identify software that might be outdated or have known security flaws.\n \n* **Operating System Detection**: Nmap can make intelligent guesses about the operating system of a target host, which can be useful for tuning your attack strategy based on the vulnerabilities of specific systems.\n \n* **Scriptable**: Nmap has a built-in scripting engine (NSE) that allows users to write custom scripts for automating and extending its functionality.\n \n\nHow to use Nmap\n---------------\n\nNmap can be installed on various platforms such as Windows, Linux, and macOS. After installation, Nmap can be used via the command line with different options and flags, depending on the desired scan type.\n\nFor example, to perform a simple host and port discovery, the following command can be used:\n\n nmap -p 80,443 192.168.0.0/24\n \n\nThis command will scan the specified IP range (`192.168.0.0/24`) and check for open ports 80 and 443. (For host discovery alone, use `nmap -sn 192.168.0.0/24`; the `-sn` \"ping scan\" option disables port scanning and cannot be combined with `-p`.)\n\nImportant Notes\n---------------\n\n* While Nmap is a valuable tool for cyber security professionals, it can also be used by malicious attackers to gather information about potential targets. 
It is essential to use Nmap responsibly and only on networks and systems that you have permission to scan.\n \n* Scanning large networks can generate considerable traffic and may impact the performance of the target hosts. It is important to configure your scans appropriately and be mindful of potential network disruptions.\n \n\nFor more information and usage examples, refer to the [official Nmap documentation](https://nmap.org/book/man.html).", + "links": [] + }, + "xFuWk7M-Vctk_xb7bHbWs": { + "title": "route", + "description": "`route` is a command-line utility that allows you to view and manipulate the IP routing table in your computer. The primary function of the routing table is to determine the best path for sending IP packets to their destination. Properly managing this table is crucial for network administrators, as it plays a direct role in your computer's ability to communicate with other devices on the network effectively.\n\nUsing the Route Command\n-----------------------\n\nThe syntax for the route command is as follows:\n\n route [COMMAND] [OPTIONS]\n \n\nHere are some basic commands that you can use with `route`:\n\n* **route add** - Adds a new route to the table\n* **route delete** - Removes a route from the table\n* **route change** - Modifies a specific route in the table\n* **route get** - Retrieves information about a specific route\n* **route show** - Displays the entire routing table\n\nPlease note that, to modify the routing table, administrative privileges may be needed.\n\nExamples of Route Usage\n-----------------------\n\n* **View the routing table**\n\n route -n\n \n\nThis command will display the current routing table in a numerical format, which includes the destination, gateway, and interface.\n\n* **Add a new route**\n\n sudo route add -net 192.168.2.0 netmask 255.255.255.0 gw 192.168.1.1\n \n\nThis command adds a new route to the destination network 192.168.2.0 with a netmask of 255.255.255.0 and a gateway of 192.168.1.1.\n\n* 
**Delete a route**\n\n sudo route delete -net 192.168.2.0 netmask 255.255.255.0\n \n\nThis command removes the route to the destination network 192.168.2.0 with a netmask of 255.255.255.0.\n\n* **Change an existing route**\n\n sudo route change -net 192.168.2.0 netmask 255.255.255.0 gw 192.168.1.2\n \n\nThis command modifies the existing route to the destination network 192.168.2.0 with a new gateway of 192.168.1.2.\n\nConclusion\n----------\n\nThe `route` command is an essential tool for network administrators and anyone involved in cyber security. Understanding and being able to manipulate the IP routing table can help ensure that your computer is able to communicate effectively with other devices on the network, thus contributing to a more secure and efficient network environment.", + "links": [] + }, + "y8GaUNpaCT1Ai88wPOk6d": { + "title": "tcpdump", + "description": "Tcpdump is a powerful command-line packet analyzer tool that allows you to monitor and intercept network traffic on your system. This utility is beneficial for troubleshooting network connectivity problems and analyzing network protocols. Tcpdump can capture and display the packet headers on a particular network interface or a specific port.\n\nKey Features\n------------\n\n* Capture packets in real-time\n* Display captured packets in a human-readable format\n* Write packets to a file and read saved packet files\n* Filter packets based on specific conditions such as IP addresses, protocol, or port\n\nBasic Usage\n-----------\n\nTo start using Tcpdump, open your terminal/command line and enter the following command:\n\n tcpdump -i any\n \n\nThis command will capture packets on all network interfaces. 
The output will display source and destination IP addresses, port numbers, and packet length.\n\nCommon Tcpdump Commands\n-----------------------\n\nHere are some essential tcpdump commands for different tasks:\n\n* **Monitor a specific interface**: To monitor a specific network interface, replace `interface_name` with the name of the interface you want to monitor:\n \n tcpdump -i interface_name\n \n \n* **Capture specific number of packets:** To capture a specific number of packets, use the `-c` option followed by the number of packets you want to capture:\n \n tcpdump -i any -c 10\n \n \n* **Save captured packets to a file:** Tcpdump can save the captured packets to a file for further analysis. To save the packets in a file, use the `-w` option followed by the file name:\n \n tcpdump -i any -w capture.pcap\n \n \n* **Filter captured packets**: You can filter the captured packets by various parameters such as IP addresses, protocol, or port numbers. Some examples of the filter are:\n \n * Capture packets from/to a specific IP address:\n \n tcpdump -i any host 192.168.1.1\n \n \n * Capture packets related to a specific port:\n \n tcpdump -i any port 80\n \n \n * Capture packets by protocol (e.g., icmp, tcp, or udp):\n \n tcpdump -i any icmp\n \n \n\nYou can learn more about tcpdump filters and advanced options from its official documentation or by typing `man tcpdump` in your terminal. Tcpdump is an invaluable tool for any network administrator and will help you get to the root of any network issues.", + "links": [] + }, + "cSz9Qx3PGwmhq3SSKYKfg": { + "title": "tracert", + "description": "Tracert, short for \"Trace Route\", is a command-line utility that helps in diagnosing network connectivity issues by displaying the route taken by data packets to reach a specific destination. It identifies each hop along the path and calculates the time it takes for the data packets to travel from one point to another. 
Tracert can be particularly useful in determining potential delays or interruptions in network communication.\n\nHow to Use Tracert\n------------------\n\n* Open `Command Prompt` on your Windows computer or `Terminal` on Linux or macOS.\n* Type `tracert` followed by the target destination, which can either be an IP address or a domain name. For example: `tracert example.com`\n\nThe output will show a list of hops in sequential order, with each line representing a single hop, its IP address, hostname, and the round-trip time (in milliseconds) for the data packets to reach that point.\n\nInterpreting Tracert Results\n----------------------------\n\nWhen analyzing the results of a tracert command, consider the following:\n\n* _Hops_: These are the individual steps the data packets take to reach the destination. If the route appears excessively long, there may be an issue with the network configuration or an inefficient routing path.\n* _Round-trip Time (RTT)_: This measures how long it takes for data packets to travel from the source to the destination and back. If the RTT is consistently high or increases significantly between specific hops, there could be a network delay, bottleneck, or congestion.\n* _Request Timed Out_: If you see this error, it means that a data packet failed to reach a specific hop within the given time. 
This could be an indication of a connection failure, firewall blocking, or packet loss.\n\nHowever, note that some routers may be configured to discard or de-prioritize ICMP echo requests (the packets used by tracert) due to security reasons or traffic management, which might result in incomplete or inaccurate tracert results.\n\nLimitations and Alternatives\n----------------------------\n\nWhile tracert is a handy troubleshooting tool, it has some limitations:\n\n* It relies on ICMP (Internet Control Message Protocol) packets, which may be filtered or blocked by firewalls or other network devices.\n* The results might be affected by short-lived network congestions or latency spikes which are not necessarily representative of the average performance.\n* It provides limited insight into the underlying causes of network issues (e.g., hardware failures, software misconfigurations).\n\nFor more advanced network troubleshooting and analysis, you may consider other tools such as:\n\n* `ping`: To test basic connectivity and latency towards a specific host or IP address.\n* `nslookup` or `dig`: To look up DNS records, diagnose DNS problems, or verify proper domain name resolution.\n* `mtr` (My Traceroute): Available on Linux and macOS, it combines the functionality of both \"traceroute\" and \"ping,\" providing real-time, continuous statistics on each hop's performance.", + "links": [] + }, + "lG6afUOx3jSQFxbH92otL": { + "title": "Kerberos", + "description": "Kerberos is a network authentication protocol designed to provide strong authentication for client/server applications. It was developed by MIT in the 1980s and is named after the three-headed dog from Greek mythology that guarded the gates of Hades, symbolizing the protocol's aim to provide secure authentication in a potentially hostile network environment.\n\nHow Kerberos works\n------------------\n\nKerberos relies on a trusted third party called the Key Distribution Center (KDC). 
The KDC maintains a database of secret keys for each user and service on the network. The protocol uses symmetric key cryptography, meaning that both the client and the server know the same shared encryption key.\n\nThe main goal of Kerberos is to prove the identity of both the client and the server to each other so that they can securely exchange information. To achieve this, the protocol uses tickets - encrypted messages containing information about the client's identity, the server's identity, and a shared session key.\n\nHere is a high-level summary of the Kerberos authentication process:\n\n* The client requests a ticket from the KDC by providing its username.\n* The KDC generates a ticket, encrypts it using the client's secret key, and sends it back to the client.\n* The client decrypts the ticket and obtains a session key that it will use to securely communicate with the server.\n* To access a specific service, the client requests a service ticket from the KDC. The request includes its ticket and the target server's identifier.\n* The KDC generates a service ticket, encrypts it using the server's secret key, and sends it back to the client.\n* The client sends the service ticket to the server along with a message, encrypted using the session key, to establish its identity.\n* The server decrypts the service ticket, extracts the session key, and uses it to decrypt the client's message.\n* After verifying the client's identity, the server allows access to the requested service and sends an encrypted message to confirm authentication.\n\nBenefits of Kerberos\n--------------------\n\n* **Secure**: Kerberos provides strong authentication using encrypted tickets, making it difficult for attackers to intercept and forge.\n* **Centralized**: The KDC centralizes authentication management, making it easier to control and maintain user access.\n* **Scalable**: The protocol is designed to support large networks, making it a popular choice for enterprise environments.\n* 
**Interoperable**: Kerberos is an open standard supported by many different platforms and vendors.\n\nLimitations\n-----------\n\n* **KDC reliance**: The KDC is a single point of failure. If it's compromised or goes offline, authentication on the network will be disrupted.\n* **Time-sensitive**: Kerberos is sensitive to time differences between servers and clients. Synchronized clocks are necessary to maintain accurate ticket lifetimes and prevent replay attacks.\n* **Complexity**: The protocol can be complex to set up and requires proper management of secret keys.\n\nIn summary, Kerberos is a robust and widely used authentication protocol that helps secure client/server communications. Its centralized management and strong security measures make it an excellent choice for organizations with demanding authentication requirements. However, it also has its limitations and complexities that must be carefully managed to maintain a secure and efficient authentication process.", + "links": [ + { + "title": "Explore top posts about Kerberos", + "url": "https://app.daily.dev/tags/kerberos?ref=roadmapsh", + "type": "article" + }, + { + "title": "Kerberos authentication process", + "url": "https://youtu.be/_44CHD3Vx-0", + "type": "video" + } + ] + }, + "lV3swvD6QGLmD9iVfbKIF": { + "title": "LDAP", + "description": "LDAP is a protocol used to access directory services, i.e., a hierarchical database that holds information about various objects, such as users, groups, computer accounts, and more. In the context of cybersecurity, it's essential in storing information related to authentication, authorization, and user profiles. 
LDAP is primarily utilized in enterprise environments as a centralized system for managing user accounts and their permissions.\n\n**How LDAP works**\n\n* It is based on a client-server model, where the client sends a request to the server (usually an LDAP directory server), and the server responds accordingly.\n* LDAP servers store directory entries in a hierarchical (tree-like) structure, starting from the root (known as the \"base DN\") and following a series of branches down to individual entries.\n* Each entry in the LDAP directory has a distinguished name (DN), which uniquely identifies the entry in the hierarchy.\n\n**LDAP in Cyber Security** In cybersecurity, LDAP servers are often used for the following purposes:\n\n* **Authentication**: LDAP stores user account and password information, which can be used to authenticate users to access specific applications or resources.\n* **Authorization**: Using LDAP directory groups, you can manage access controls for users and grant or deny permissions based on their role or membership.\n* **User Management**: LDAP provides a single, centralized repository for managing user account information, making it easier to maintain consistent user data across multiple systems or applications.\n\n**LDAP Security Best Practices** To enhance the security of your LDAP implementation, consider adopting these best practices:\n\n* Use secure protocols like LDAPS (LDAP over SSL) or StartTLS to encrypt the data transmitted between the client and the LDAP server.\n* Implement strong access control rules to ensure that only authorized clients can access the LDAP directory.\n* Regularly update and patch both client-side and server-side LDAP software to protect against known vulnerabilities.\n* Limit the searchable scope on the client-side, to minimize the risk of information disclosure.\n* Use strong authentication methods, such as multi-factor authentication (MFA), to secure access to the LDAP directory.\n\nIn conclusion, LDAP is a 
critical component in many enterprise-level cybersecurity architectures, as it plays a vital role in handling authentication and authorization processes. To ensure the security of your LDAP implementation, it's crucial to follow best practices and carefully manage access to directory services.", + "links": [] + }, + "xL32OqDKm6O043TYgVV1r": { + "title": "SSO", + "description": "Single Sign-On, or SSO, is an authentication mechanism that allows users to access multiple applications, systems, or websites by entering their login credentials only once. This means that a user can quickly and conveniently navigate between multiple platforms without the need to authenticate multiple times, providing both a seamless user experience and an added layer of security.\n\nKey Components of SSO\n---------------------\n\nThere are typically three main components involved in the Single Sign-On process:\n\n* **User:** The individual who wants to access multiple applications within an environment.\n* **Service Provider (SP):** The application or website the user is trying to access.\n* **Identity Provider (IdP):** The third-party platform that securely stores and manages user identities, ensuring only authorized users can access the applications.\n\nHow SSO Works\n-------------\n\nSSO operates by leveraging a centralized authentication system, usually provided by an Identity Provider (IdP). 
When a User attempts to access a Service Provider (SP), the following process occurs:\n\n* The User requests access to a Service Provider.\n \n* The Service Provider checks if the User is already authenticated to the Identity Provider.\n \n* If not, the User is redirected to the Identity Provider's login page.\n \n* The User submits their login credentials to the Identity Provider.\n \n* If the credentials are valid, the Identity Provider issues an encrypted token called a \"security assertion\".\n \n* The User presents this token to the Service Provider as proof of authentication.\n \n* The Service Provider validates the token and grants access to the User.\n \n\nBenefits of SSO\n---------------\n\n* **Improved User Experience:** Users spend less time logging in, allowing them to focus on their work without being repeatedly prompted for authentication.\n \n* **Reduced Password Fatigue:** Users only need to remember one set of login credentials, minimizing the need to write down or reuse passwords, which can be a security risk.\n \n* **Enhanced Security:** By limiting the number of times a user enters their login credentials, SSO reduces the risk of phishing attacks and potential password breaches.\n \n* **Simplified Identity Management:** Centralizing authentication through a single Identity Provider makes it easier for administrators to manage access rights and monitor user activity across multiple platforms.\n \n* **Reduced Help Desk Costs:** With fewer password-related issues to address, help desk teams can focus on more critical tasks, resulting in lower support costs.\n \n\nOverall, implementing Single Sign-On in your organization can dramatically improve both user experience and system security. 
However, it is essential to choose a reliable Identity Provider and ensure secure integration with all relevant Service Providers.", + "links": [] + }, + "tH3RLnJseqOzRIbZMklHD": { + "title": "RADIUS", + "description": "**RADIUS** (Remote Authentication Dial-In User Service) is a widely used client-server protocol that offers centralized authentication, authorization, and accounting (AAA) management for users connecting to a network. Developed in 1991, RADIUS allows the transfer of user authentication and configuration information between devices and servers on a network.\n\nHow RADIUS Works\n----------------\n\nRADIUS uses the User Datagram Protocol (UDP) for communication between the client and the server. When a user attempts to connect to a network, the client (like a VPN server or wireless access point) forwards the authentication request to the RADIUS server. The server then checks the user's credentials against its user database or forwards the request to another authentication server.\n\nUpon successful authentication, the RADIUS server sends back an **Access-Accept** message, as well as user-specific access policies (such as VLAN assignments or firewall rules). If the authentication fails, the server sends an **Access-Reject** message. Additionally, RADIUS tracks and reports user activity, making it responsible for the accounting aspect of AAA.\n\nBenefits of RADIUS\n------------------\n\n* **Centralized Management**: RADIUS allows administrators to manage user authentication and policies from a central location. This significantly simplifies the management of large and diverse networks.\n \n* **Scalability**: RADIUS servers can manage authentication for thousands of users and devices, making it well-suited for large organizations.\n \n* **Flexibility**: Being a widely adopted standard, RADIUS is compatible with various devices, such as routers, switches, VPN gateways, and wireless access points. 
It also allows for integration with other authentication services, like LDAP or Active Directory.\n \n* **Security**: RADIUS encrypts passwords during transmission, minimizing risks associated with data breaches. Additionally, it can enforce various access policies to further strengthen network security.\n \n\nRADIUS vs. TACACS+\n------------------\n\nAnother popular AAA protocol is Terminal Access Controller Access-Control System Plus (TACACS+). While both RADIUS and TACACS+ provide similar functionality, there are notable differences:\n\n* RADIUS combines authentication and authorization, while TACACS+ separates them, allowing for greater flexibility and more granular control.\n* RADIUS uses UDP for communication, whereas TACACS+ uses TCP, ensuring reliable and ordered delivery of packets.\n* TACACS+ encrypts the entire payload, while RADIUS only encrypts the password.\n\nOrganizations may choose between RADIUS and TACACS+ based on their specific requirements, network setup, and device compatibility.\n\nIn conclusion, RADIUS plays a crucial role in implementing a robust and efficient AAA framework, simplifying network administration while ensuring security and compliance.", + "links": [] + }, + "WXRaVCYwuGQsjJ5wyvbea": { + "title": "Certificates", + "description": "Certificates, also known as digital certificates or SSL/TLS certificates, play a crucial role in the world of cybersecurity. They help secure communications between clients and servers over the internet, ensuring that sensitive data remains confidential and protected from prying eyes.\n\nWhat is a Certificate?\n----------------------\n\nA digital certificate is an electronic document that uses a digital signature to bind a public key with a specific identity, such as a website domain or an organization. 
It contains information about the certificate holder, the certificate's validity period, and the public key of the entity that the certificate represents.\n\nCertificate Authorities (CAs)\n-----------------------------\n\nCertificates are issued and signed by trusted third-party organizations called Certificate Authorities (CAs). CAs are responsible for verifying the authenticity of organizations or individuals making the request and ensuring that they, indeed, own the domain for which the certificate is issued.\n\nSome well-known CAs include:\n\n* DigiCert\n* Let's Encrypt\n* GlobalSign\n* Sectigo (formerly Comodo)\n* Entrust\n\nTypes of Certificates\n---------------------\n\nDifferent types of certificates serve different purposes and offer varying levels of validation:\n\n* **Domain Validation (DV)**: These certificates validate the ownership of the domain but do not contain any information about the organization that owns it. DV certificates offer a basic level of security and are suitable for websites that don't process sensitive data, such as blogs or portfolio sites.\n* **Organization Validation (OV)**: OV certificates verify the ownership of the domain and contain information about the organization that owns it. This type of certificate provides an enhanced level of trust and is recommended for business websites where users need to know the identity of the organization they are dealing with.\n* **Extended Validation (EV)**: EV certificates provide the highest level of identity validation by conducting a rigorous verification process that involves checking the organization's legal status, physical presence, and domain ownership. 
Websites with an EV certificate display a green padlock or bar in the browser address bar, increasing user trust and confidence.\n\nImportance of Certificates\n--------------------------\n\nDigital certificates offer various benefits in the realm of cybersecurity, such as:\n\n* **Authentication**: Certificates help to establish the authenticity of a domain or an organization, allowing users to trust that they are communicating with a legitimate entity.\n* **Encryption**: By using public key encryption, certificates enable secure communication between clients and servers, protecting sensitive data from being intercepted by malicious actors.\n* **Integrity**: Certificates ensure that the data transferred between parties remains intact and unaltered during transmission, preventing tampering or manipulation by malicious actors.\n* **Trust**: With the assurance that a website has a valid certificate from a trusted CA, users are more likely to trust and engage with the site, leading to increased conversion rates and customer loyalty.\n\nConclusion\n----------\n\nDigital certificates provide a crucial layer of security and trust for online communications. Understanding their role in cybersecurity, the different types of certificates, and the importance of acquiring certificates from trusted CAs can greatly enhance your organization's online security posture and reputation.", + "links": [] + }, + "vYvFuz7lAJXZ1vK_4999a": { + "title": "Local Auth", + "description": "In this section, we will discuss local authentication, which is a crucial aspect of ensuring the security of your computer systems and networks.\n\nWhat is Local Authentication?\n-----------------------------\n\nLocal authentication is the process of verifying a user's identity on a single, isolated system, such as a computer or a server. 
It refers to the direct checking of user credentials (such as username and password) against a locally stored database, instead of relying on a centralized authentication service.\n\nHow Does Local Authentication Work?\n-----------------------------------\n\nIn a local authentication setup, user and password information is stored on the same system where authentication takes place. When a user attempts to log in, the system checks the provided credentials against the stored data. If they match, access is granted, otherwise, it is denied.\n\nHere is a high-level overview of how local authentication works:\n\n* User attempts to log in by entering their credentials, typically a username and password.\n* System checks the provided credentials against a local database.\n* If the credentials match an entry in the database, access is granted to the user.\n* If the credentials do not match any entries in the database, access is denied and an error message is displayed.\n\nAdvantages and Disadvantages of Local Authentication\n----------------------------------------------------\n\nAdvantages\n----------\n\n* **Simplicity**: Local authentication is simple to set up, as it doesn't require any external authentication services or additional infrastructure.\n* **No Dependency on Internet Connectivity**: Since user credentials are stored locally, users can still authenticate even if there is no internet connection.\n\nDisadvantages\n-------------\n\n* **Scalability**: Managing and maintaining user accounts on individual systems becomes difficult when the number of systems and users increases.\n* **Increased Risk**: Information about user accounts, including passwords, may be stored in plain text, making them vulnerable to unauthorized access.\n* **Incomplete Security**: Local authentication alone may not provide sufficient security to protect sensitive information, necessitating the use of additional security measures such as secure socket layer (SSL) and two-factor 
authentication (2FA).\n\nBest Practices for Local Authentication\n---------------------------------------\n\nTo ensure the security of your system while using local authentication:\n\n* Always use strong, unique passwords for each user account.\n* Regularly update and patch the system to keep it secure against known vulnerabilities.\n* Consider implementing additional security measures, such as encryption, to protect sensitive data.\n* Periodically review user accounts to ensure they have the appropriate access privileges, and remove accounts that are no longer needed.\n* Implement logs and monitoring to detect any suspicious activity on your system relating to user authentication.\n\nIn conclusion, local authentication can be an effective method for authenticating users on a single system. However, it is important to be aware of its limitations and make sure to implement additional security measures when necessary to keep your data safe.", + "links": [] + }, + "_hYN0gEi9BL24nptEtXWU": { + "title": "Security Skills and Knowledge", + "description": "In the constantly evolving world of cyber security, it is essential for professionals to stay updated with the latest skills and knowledge. This allows them to proactively defend against emerging threats, maintain secure systems, and create a robust security posture. Here's a brief summary of the essential security skills and knowledge you should possess:\n\nUnderstanding of Security Fundamentals\n--------------------------------------\n\nAn in-depth understanding of the fundamental concepts of cyber security is crucial, which includes:\n\n* Confidentiality, Integrity, and Availability (CIA) triad\n* Risk management\n* Security policies and best practices\n* Authentication, authorization, and access control\n* Cryptography\n\nNetworking\n----------\n\nA strong grasp of networking concepts is required to identify and prevent potential threats. 
Develop a comprehensive knowledge of:\n\n* Networking protocols, standards, and devices (e.g., switches, routers, and firewalls)\n* Network architecture and design\n* Virtual Private Networks (VPNs) and Virtual Local Area Networks (VLANs)\n\nOperating Systems and Application Security\n------------------------------------------\n\nWell-rounded knowledge of various operating systems (e.g., Windows, Linux, macOS) and applications, as well as:\n\n* Security configuration best practices\n* Patch management\n* Denial-of-service prevention\n* Privileged user management\n\nWeb Security\n------------\n\nWeb security expertise is necessary for maintaining a secure online presence. Key knowledge areas include:\n\n* Web application vulnerabilities (e.g., SQL injection, XSS)\n* Secure web protocols (e.g., HTTP Secure, Transport Layer Security)\n* Content Security Policy (CSP) and other defensive mechanisms\n\nSecurity Testing\n----------------\n\nFamiliarity with testing methodologies, tools, and frameworks is essential for identifying and mitigating vulnerabilities. Acquire competency in:\n\n* Vulnerability scanning and penetration testing\n* Security testing best practices (e.g., OWASP Top Ten)\n* Static and dynamic code analysis tools\n\nIncident Response and Forensic Analysis\n---------------------------------------\n\nLearn to handle security incidents and conduct investigations to minimize the impact of cyber threats. 
Enhance knowledge of:\n\n* Security incident containment and response strategies\n* Digital forensic tools and techniques\n* Regulatory requirements and legal implications of cyber incidents\n\nCloud Security\n--------------\n\nCloud platforms are becoming increasingly prevalent, making it necessary to understand cloud security best practices, including:\n\n* Cloud-specific risks and vulnerabilities\n* Implementing proper access control and identity management\n* Compliance in cloud environments\n\nSoft Skills\n-----------\n\nIn addition to technical skills, soft skills play an important role in effective communication and collaboration among cyber security teams. Develop:\n\n* Problem-solving ability\n* Adaptability and continuous learning\n* Teamwork and collaboration\n\nBy continually refining and updating your security skills and knowledge, you become an invaluable asset in the rapidly evolving field of cyber security, helping to protect critical systems and data from ever-increasing threats.", + "links": [] + }, + "rzY_QsvnC1shDTPQ-til0": { + "title": "Understand Common Hacking Tools", + "description": "Common Hacking Tools\n--------------------\n\nAs you journey into the world of cyber security, it is essential to be familiar with common hacking tools used by cyber criminals. These tools help hackers exploit vulnerabilities in systems and networks, but they can also be used ethically by security professionals to test their own networks and systems for vulnerabilities. Below is a brief overview of some common hacking tools:\n\nNmap (Network Mapper)\n---------------------\n\nNmap is a popular open-source network scanner used by cyber security professionals and hackers alike to discover hosts and services on a network. It helps identify hosts, open ports, running services, OS types, and many other details. 
It is particularly useful for network inventorying and security audits.\n\nWireshark\n---------\n\nWireshark is another open-source tool used for network analysis and troubleshooting. It allows the user to capture and analyze the traffic that is being transmitted through a network. It helps identify any suspicious activity, such as malware communication or unauthorized access attempts.\n\nMetasploit\n----------\n\nMetasploit is a powerful penetration testing framework that covers a wide range of exploits and vulnerabilities. With a customizable and extensible set of tools, Metasploit is particularly useful for simulating real-world cyber attacks and helps identify where your system is most vulnerable.\n\nJohn the Ripper\n---------------\n\nJohn the Ripper is a well-known password cracker tool, which can be used to identify weak passwords and test password security. It supports various encryption algorithms and can also be used for identifying hashes.\n\nBurp Suite\n----------\n\nBurp Suite is a web application security testing tool, mainly used to test for vulnerabilities in web applications. It includes tools for intercepting and modifying the requests, automating tests, scanning, and much more.\n\nAircrack-ng\n-----------\n\nAircrack-ng is a set of tools targeting Wi-Fi security. It includes tools for capturing and analyzing network packets, cracking Wi-Fi passwords, and testing the overall security of wireless networks.\n\nKali Linux\n----------\n\nKali Linux is a Linux distribution, specifically built for penetration testing and security auditing. It comes preinstalled with a wide range of hacking tools and is commonly used by ethical hackers and security professionals.\n\nKeep in mind that while these tools are commonly used by hackers, they can also be employed ethically by security professionals to understand and address vulnerabilities in their own systems. 
The key is to use them responsibly and always seek permission before testing any network or system that does not belong to you.", + "links": [] + }, + "Lg7mz4zeCToEzZBFxYuaU": { + "title": "Understand Common Exploit Frameworks", + "description": "Exploit frameworks are essential tools in the cybersecurity landscape, as they provide a systematic and efficient way to test vulnerabilities, develop exploits, and launch attacks. They automate many tasks and help security professionals and ethical hackers to identify weaknesses, simulate attacks, and strengthen defenses. In this section, we will discuss some of the most common exploit frameworks and their features.\n\nMetasploit\n----------\n\n[Metasploit](https://www.metasploit.com/) is probably the most widely used and well-known exploit framework. It is an open-source platform with a large and active user community, which constantly contributes to its development, vulnerability research, and exploit creation.\n\n* **Key Features:**\n * Supports more than 1,500 exploits and over 3,000 modules\n * Provides a command-line interface as well as a Graphical User Interface (GUI) called Armitage\n * Offers integration with other popular tools, such as Nmap and Nessus\n * Enables payload delivery, exploit execution, and post-exploitation tasks\n\nCanvas\n------\n\n[Canvas](https://www.immunityinc.com/products/canvas/) is a commercial exploit framework developed by Immunity Inc. It includes a wide range of modules that target various platforms, networking devices, and vulnerabilities.\n\n* **Key Features:**\n * Contains a collection of more than 450 exploits\n * Offers exploit development and fuzzing tools\n * Provides intuitive GUI for managing and executing attacks\n * Allows customization through Python scripting\n\nExploit Pack\n------------\n\n[Exploit Pack](https://exploitpack.com/) is another commercial exploit framework that focuses on ease of use and extensive exploit modules selection. 
It is frequently updated to include the latest exploits and vulnerabilities.\n\n* **Key Features:**\n * Offers over 38,000 exploits for Windows, Linux, macOS, and other platforms\n * Provides a GUI for managing and executing exploits\n * Allows exploit customization and development using JavaScript\n * Includes fuzzers, shellcode generators, and other advanced features\n\nSocial-Engineer Toolkit (SET)\n-----------------------------\n\n[SET](https://github.com/trustedsec/social-engineer-toolkit) is an open-source framework designed to perform social engineering attacks, such as phishing and spear-phishing. Developed by TrustedSec, it focuses on human interaction and targets user credentials, software vulnerabilities, and more.\n\n* **Key Features:**\n * Executes email-based attacks, SMS-based attacks, and URL shortening/exploitation\n * Provides template-based phishing email creation\n * Integrates with Metasploit for payloads and exploits\n * Offers USB-based exploitation for human-interface devices\n\nWhen using these exploit frameworks, it is important to remember that they are powerful tools that can cause significant damage if misused. Always ensure that you have explicit permission from the target organization before conducting any penetration testing activities.", + "links": [ + { + "title": "Metasploit Primer (TryHackMe)", + "url": "https://tryhackme.com/room/rpmetasploit", + "type": "article" + } + ] + }, + "Rae-f9DHDZuwIwW6eRtKF": { + "title": "Understand Concept of Defense in Depth", + "description": "Defense in depth, also known as layered security, is a comprehensive approach to cybersecurity that involves implementing multiple layers of protection to safeguard an organization's assets, networks, and systems. 
This strategy is based on the concept that no single security measure can guarantee complete protection; therefore, a series of defensive mechanisms are employed to ensure that even if one layer is breached, the remaining layers will continue to provide protection.\n\nIn this section, we'll explore some key aspects of defense in depth:\n\nMultiple Layers of Security\n---------------------------\n\nDefense in depth is built upon the integration of various security measures, which may include:\n\n* **Physical security**: Protecting the organization's facilities and hardware from unauthorized access or damage.\n* **Access control**: Managing permissions to limit users' access to specific resources or data.\n* **Antivirus software**: Detecting, removing, and preventing malware infections.\n* **Firewalls**: Filtering network traffic to block or permit data communication based on predefined rules.\n* **Intrusion Detection and Prevention Systems (IDPS)**: Monitoring and analyzing network traffic to detect and prevent intrusions and malicious activities.\n* **Data backup and recovery**: Ensuring the organization's data is regularly backed up and can be fully restored in case of loss or accidental deletion.\n* **Encryption**: Encoding sensitive data to protect it from unauthorized access or theft.\n\nImplementing these layers allows organizations to minimize the risk of cybersecurity breaches, and in the event of an incident, quickly and effectively respond and recover.\n\nContinuous Monitoring and Assessment\n------------------------------------\n\nEffective defense in depth requires continuous monitoring and assessment of an organization's overall security posture. 
This involves:\n\n* Regularly reviewing and updating security policies and procedures.\n* Conducting security awareness training to educate employees on potential threats and best practices.\n* Performing vulnerability assessments and penetration testing to identify weaknesses in systems and networks.\n* Implementing incident response plans to ensure swift action in the event of a security breach.\n\nCollaboration and Information Sharing\n-------------------------------------\n\nDefense in depth benefits greatly from collaboration between various stakeholders, such as IT departments, security teams, and business leaders, all working together to maintain and improve the organization's security posture.\n\nIn addition, sharing information about threats and vulnerabilities with other organizations, industry associations, and law enforcement agencies can help strengthen the collective security of all parties involved.\n\nIn summary, defense in depth involves the implementation of multiple layers of security measures, continuous monitoring, and collaboration to protect an organization's valuable assets from cyber threats. By adopting this approach, organizations can minimize the risk of a breach and improve their overall cybersecurity posture.", + "links": [] + }, + "Ec6EairjFJLCHc7b-1xxe": { + "title": "Understand Concept of Runbooks", + "description": "Runbooks are a type of written documentation that details a step-by-step procedure for addressing a specific cyber security issue or incident. They are essential resources that help IT professionals and security teams streamline their response and management of security incidents.\n\nImportance of Runbooks in Cyber Security\n----------------------------------------\n\nRunbooks play a vital role in fortifying an organization's security posture. 
Here are some reasons why they are important:\n\n* **Standardization**: Runbooks help standardize the process of responding to security incidents, ensuring that the organization follows best practices and avoids potential mistakes.\n* **Efficiency**: Well-prepared runbooks provide clear instructions, which save time and reduce confusion during high-pressure security events.\n* **Knowledge sharing**: They act as a centralized source of knowledge for security procedures that can be shared across teams and can be used for training purposes.\n* **Auditing and compliance**: Runbooks showcase an organization's commitment to robust security practices, which can be critical for meeting regulatory requirements and passing security audits.\n\nComponents of a Good Runbook\n----------------------------\n\nHere are key components that make up an effective runbook:\n\n* **Title**: Clearly state the purpose of the runbook (e.g., \"Responding to a Ransomware Attack\").\n* **Scope**: Define the types of incidents or situations the runbook should be used for and the intended audience (e.g., for all team members dealing with data breaches).\n* **Prerequisites**: List any required resources or tools needed to execute the runbook's instructions.\n* **Step-by-step Instructions**: Provide a clear, concise, and accurate set of tasks to be performed, starting from the detection of the incident to its resolution.\n* **Roles and Responsibilities**: Define the roles of each team member involved in executing the runbook, including their responsibilities during each step of the process.\n* **Escalation**: Include a predefined set of conditions for escalating the situation to higher authorities or external support.\n* **Communication and reporting**: Explain how to communicate the incident to the relevant stakeholders and what information needs to be reported.\n* **Post-incident review**: Outline the process for reviewing and improving the runbook and the overall incident response after an event 
has been resolved.\n\nUpdating and Maintaining Runbooks\n---------------------------------\n\nRunbooks should be periodically reviewed and updated to ensure their effectiveness. It is important to incorporate lessons learned from past incidents, emerging threats, and new technologies into the runbook to keep it relevant and effective.\n\nIn conclusion, runbooks play a crucial role in fostering a resilient cyber security posture. Organizations should invest time and effort in developing and maintaining comprehensive runbooks for dealing with a wide range of security incidents.", + "links": [] + }, + "7KLGFfco-hw7a62kXtS3d": { + "title": "Understand Basics of Forensics", + "description": "**Forensics** is a specialized area within cybersecurity that deals with the investigation of cyber incidents, the collection, preservation, and analysis of digital evidence, and the efforts to tie this evidence to specific cyber actors. The main goal of digital forensics is to identify the cause of an incident, determine the extent of the damage, and provide necessary information to recover and prevent future attacks. 
This discipline typically involves several key steps:\n\n* **Preparation**: Developing a forensic strategy, setting up a secure laboratory environment, and ensuring the forensics team has the necessary skills and tools.\n* **Identification**: Determining the scope of the investigation, locating and identifying the digital evidence, and documenting any relevant information.\n* **Preservation**: Ensuring the integrity of the digital evidence is maintained by creating backups, securing storage, and applying legal and ethical guidelines.\n* **Analysis**: Examining the digital evidence using specialized tools and techniques to extract relevant information, identify patterns, and uncover hidden details.\n* **Reporting**: Compiling the findings of the investigation into a report that provides actionable insights, including the identification of cyber actors, the methods used, and the damage caused.\n\nProfessionals working in digital forensics need a solid understanding of various technologies, as well as the ability to think critically, be detail-oriented, and maintain the integrity and confidentiality of data. Moreover, they should be well-versed in related laws and regulations to ensure compliance and admissibility of evidence in legal proceedings. 
Some of the key skills to master include:\n\n* Knowledge of digital evidence collection and preservation techniques\n* Familiarity with forensic tools and software, such as EnCase, FTK, or Autopsy\n* Understanding of file systems, operating systems, and network protocols\n* Knowledge of malware analysis and reverse engineering\n* Strong analytical and problem-solving skills\n* Effective communication abilities to convey technical findings to non-technical stakeholders\n\nOverall, digital forensics is a crucial component of cybersecurity as it helps organizations respond effectively to cyber attacks, identify vulnerabilities, and take appropriate steps to safeguard their digital assets.", + "links": [ + { + "title": "Introduction to Digital Forensics (TryHackMe)", + "url": "https://tryhackme.com/room/introdigitalforensics", + "type": "article" + } + ] + }, + "_x3BgX93N-Pt1_JK7wk0p": { + "title": "Basics and Concepts of Threat Hunting", + "description": "Threat hunting is the proactive process of identifying and mitigating potential threats and vulnerabilities within a network, before they can be exploited by an attacker. 
To perform effective threat hunting, security professionals must use their knowledge, skills, and the latest threat intelligence to actively search for previously undetected adversaries and suspicious activities within a network.\n\nKey Objectives of Threat Hunting\n--------------------------------\n\n* **Detect**: Identify unknown threats and suspicious behavior that traditional security tools may miss.\n* **Contain**: Quickly isolate and remediate threats before they can cause significant damage.\n* **Learn**: Gather valuable insights about the adversary, their techniques, and the effectiveness of existing security measures.\n\nThreat Hunting Techniques\n-------------------------\n\nThere are several practical approaches to threat hunting, such as:\n\n* **Hypothesis-driven hunting**: Develop hypotheses about potential threats and validate them through data analysis and investigation.\n* **Indicator of Compromise (IoC) hunting**: Leverage existing threat intelligence and IoCs to search for matches within your environment.\n* **Machine learning-driven hunting**: Utilize algorithms and advanced analytics tools to automatically detect anomalies and other suspicious patterns of behavior.\n* **Situational awareness hunting**: Understand the normal behavior and baseline of the environment and look for deviations that may indicate malicious activity.\n\nTools & Technologies for Threat Hunting\n---------------------------------------\n\nSome common tools and technologies used for threat hunting include:\n\n* **Security information and event management (SIEM) systems**: Provide a centralized platform for detecting, alerting, and investigating security incidents and events.\n* **Endpoint detection and response (EDR) solutions**: Deliver real-time monitoring, analysis, and remediation capabilities for endpoints.\n* **Threat intelligence platforms (TIPs)**: Aggregate and analyze global threat data and indicators of compromise (IoC) to provide actionable intelligence.\n* 
**User and entity behavior analytics (UEBA) tools**: Apply advanced analytics algorithms to detect potential threats by analyzing the behavior of users, devices, and applications.\n\nEssential Skills for Threat Hunters\n-----------------------------------\n\nSuccessful threat hunters should possess a strong combination of technical skills, critical thinking, and situational awareness. Some essential skills include:\n\n* **Understanding of networks and protocols**: Deep knowledge of network architecture, protocols, and communication patterns.\n* **Familiarity with operating systems**: Ability to navigate, investigate, and analyze various operating systems, including Windows, Linux, and macOS.\n* **Scripting and programming**: Proficiency in scripting languages (e.g., Python, PowerShell) and automation tools to streamline the threat hunting process.\n* **Knowledge of common attacker tactics, techniques, and procedures (TTPs)**: Awareness of the latest TTPs, ensuring that you stay ahead of potential threats.\n* **Critical thinking and problem-solving**: Ability to analyze complex scenarios and think creatively to identify potential threats and vulnerabilities.\n\nBy developing a strong foundation in threat hunting concepts and techniques, security professionals are better equipped to proactively identify and mitigate potential attacks, thereby strengthening their organization's overall cybersecurity posture.", + "links": [] + }, + "lcxAXtO6LoGd85nOFnLo8": { + "title": "Basics of Vulnerability Management", + "description": "Vulnerability management is a crucial aspect of cybersecurity, as it helps organizations to identify, prioritize, and remediate potential risks in their networks, systems, and applications. 
It involves continuous processes and practices designed to protect sensitive data by reducing the attack surface and minimizing the likelihood of a breach.\n\nImportance of Vulnerability Management\n--------------------------------------\n\n* **Prevent cyberattacks**: By addressing vulnerabilities before they can be exploited, organizations reduce the chances of successful attacks and protect their critical assets.\n* **Comply with regulations**: Organizations must adhere to various data protection standards and regulations, such as GDPR, HIPAA, or PCI DSS. A robust vulnerability management program can help meet these requirements.\n* **Maintain customer trust**: Frequent security breaches can lead to reputational damages, making it vital to prioritize vulnerability management as a means to safeguard customer data.\n* **Save costs**: Proactively identifying and mitigating vulnerabilities reduces the financial implications of dealing with a security breach, including the costs of incident response, legal liabilities, and penalties.\n\nComponents of Vulnerability Management\n--------------------------------------\n\n* **Vulnerability Assessment**: Regular vulnerability assessments are essential to identify security weaknesses. This includes scanning networks, system components, software, and applications to identify existing vulnerabilities.\n \n* **Risk Analysis**: After identifying vulnerabilities, it is essential to assess their potential risks. This involves determining the likelihood and impact of each vulnerability, prioritizing them based on severity, and deciding which vulnerabilities to address first.\n \n* **Remediation**: The remediation process involves implementing patches, updates, or configuration changes to address the identified vulnerabilities. 
It is crucial to regularly review and ensure that patches have been applied effectively to prevent further exploitation.\n \n* **Verification**: After remediation, organizations must verify that the implemented solutions have effectively eliminated the risk posed by the vulnerability. Verification processes may include re-scanning and penetration testing.\n \n* **Reporting**: Maintaining comprehensive and accurate records of vulnerability management activities is essential for regulatory compliance and informing key stakeholders about the organization's security posture. Regular reporting can also aid in identifying problem areas and trends, allowing decision-makers to allocate resources and plan accordingly.\n \n\nBy implementing a thorough vulnerability management program, organizations can significantly reduce their risk exposure and improve their overall cybersecurity posture. In today's digital landscape, proactively managing vulnerabilities is a critical step in safeguarding sensitive information and maintaining customer trust.", + "links": [] + }, + "uoGA4T_-c-2ip_zfEUcJJ": { + "title": "Basics of Reverse Engineering", + "description": "Reverse engineering is the process of analyzing a system, component, or software to understand how it works and deduce its design, architecture, or functionality. It is a critical skill in cybersecurity, as it helps security professionals uncover the potential attack vectors, hidden vulnerabilities, and underlying intentions of a piece of software or hardware.\n\nIn this section, we will cover the basic concepts and techniques of reverse engineering that every cybersecurity professional should be familiar with.\n\nStatic Analysis Vs. Dynamic Analysis\n------------------------------------\n\nThere are two main approaches to reverse engineering: static analysis and dynamic analysis. Static analysis involves examining the code and structure of a software without executing it. 
This includes analyzing the source code, if available, or examining the binary executable using disassemblers or decompilers.\n\nDynamic analysis, on the other hand, involves executing the software while observing and monitoring its behaviors and interactions with other components or systems. This analysis is typically performed in controlled environments, such as virtual machines or sandbox environments, to minimize potential risks.\n\nBoth approaches have their merits and limitations, and combining them is often the most effective way to gain a comprehensive understanding of the target system.\n\nDisassemblers and Decompilers\n-----------------------------\n\nDisassemblers and decompilers are essential tools in reverse engineering, as they help transform binary executables into a more human-readable format.\n\n* **Disassemblers** convert machine code (binary executable) into assembly language, a low-level programming language that is more human-readable than raw machine code. Assembly languages are specific to the CPU architectures, such as x86, ARM, or MIPS.\n* **Decompilers** attempt to reverse-engineer binary executables into high-level programming languages, such as C or C++, by interpreting the structures and patterns in the assembly code. Decompilation, however, is not always perfect and may generate code that is more difficult to understand than assembly.\n\nSome popular disassemblers and decompilers are:\n\n* [@article@IDA Pro](https://www.hex-rays.com/products/ida/)\n* [@article@Ghidra](https://ghidra-sre.org/)\n* [@article@Hopper](https://www.hopperapp.com/)\n\nDebuggers\n---------\n\nDebuggers are another essential tool for reverse engineering, as they allow you to execute a program and closely monitor its behavior during runtime. 
Debuggers provide features such as setting breakpoints, stepping through code, and examining memory contents.\n\nSome popular debuggers include OllyDbg, GDB, and x64dbg (see the resources below).\n\nCommon Reverse Engineering Techniques\n-------------------------------------\n\nHere are some basic reverse engineering techniques:\n\n* **Control flow analysis:** Understanding the execution flow of a program, such as loops, branches, and conditional statements, to determine how the program behaves under certain conditions.\n* **Data flow analysis:** Analyzing how data is passed between different parts of a program and tracing the origin and destination of data.\n* **System call analysis:** Examining system calls made by a program to understand how it interacts with the operating system, hardware, or external resources.\n* **Cryptographic analysis:** Identifying and analyzing encryption and decryption algorithms used within a program or analyzing any cryptographic keys or certificates that may be present.\n* **Pattern recognition:** Identifying common patterns, structures, or routines in code that may indicate the use of known algorithms or frameworks.\n\nRemember that mastering the art of reverse engineering takes time and practice. As you delve deeper into the world of reverse engineering, you will develop the ability to recognize patterns, understand complex systems, and ultimately, better defend against cyber threats.", + "links": [ + { + "title": "OllyDbg", + "url": "http://www.ollydbg.de/", + "type": "article" + }, + { + "title": "GDB", + "url": "https://www.gnu.org/software/gdb/", + "type": "article" + }, + { + "title": "x64dbg", + "url": "https://x64dbg.com/", + "type": "article" + } + ] + }, + "NkAAQikwH-A6vrF8fWpuB": { + "title": "Penetration Testing Rules of Engagement", + "description": "Penetration testing, also known as ethical hacking, is an essential component of a strong cybersecurity program. 
Rules of engagement (RoE) for penetration testing define the scope, boundaries, and guidelines for conducting a successful penetration test. These rules are crucial to ensure lawful, efficient, and safe testing.\n\nKey Components\n--------------\n\n* **Scope**: The primary objective of defining a scope is to reasonably limit the testing areas. It specifies the systems, networks, or applications to be tested (in-scope) and those to be excluded (out-of-scope). Additionally, the scope should indicate testing methodologies, objectives, and timeframes.\n \n* **Authorization**: Penetration testing must be authorized by the organization's management or the system owner. Proper authorization ensures the testing is legitimate, lawful, and compliant with organizational policies. Obtain written permission, detail authorization parameters, and report concerns or issues that may arise during the test.\n \n* **Communication**: Establish a clear communication plan to ensure timely and accurate information exchange between penetration testers and stakeholders. Designate primary contacts and a secondary point of contact for escalations, emergencies or incident handling. Document the preferred communication channels and establish reporting protocols.\n \n* **Testing Approach**: Select an appropriate testing approach, such as black-box, white-box, or grey-box testing, depending on the objectives and available information. Clarify which penetration testing methodologies will be utilized (e.g., OSSTMM, OWASP, PTES) and specify whether automated tools, manual techniques, or both will be used during the test.\n \n* **Legal & Regulatory Compliance**: Comply with applicable laws, regulations, and industry standards (e.g., GDPR, PCI-DSS, HIPAA) to prevent violations and potential penalties. 
Seek legal advice if necessary and ensure all parties involved are aware of the regulations governing their specific domain.\n \n* **Rules of Engagement Document**: Formalize all rules in a written document and have it signed by all relevant parties (e.g., system owner, penetration tester, legal advisor). This document should include information such as scope, approach, communication guidelines, and restrictions on testing techniques. Keep it as a reference for incident handling and accountability during the test.\n \n\nIn conclusion, robust penetration rules of engagement not only help identify potential security vulnerabilities in your organization but also ensure that the testing process is transparent and compliant. Establishing RoE is necessary to minimize the risk of legal issues, miscommunications, and disruptions to the organization's routine operations.", + "links": [] + }, + "PUgPgpKio4Npzs86qEXa7": { + "title": "Perimeter vs DMZ vs Segmentation", + "description": "Perimeter and DMZ (Demilitarized Zone) segmentation is a crucial aspect of network security that helps protect internal networks by isolating them from external threats. In this section, we will discuss the concepts of perimeter and DMZ segmentation, and how they can be used to enhance the security of your organization.\n\nPerimeter Segmentation\n----------------------\n\nPerimeter segmentation is a network security technique that involves isolating an organization's internal networks from the external, untrusted network (typically the internet). The goal is to create a protective barrier to limit the access of external attackers to the internal network, and minimize the risk of data breaches and other security threats.\n\nTo achieve this, perimeter segmentation typically involves the use of network security appliances such as firewalls, intrusion detection systems (IDS), and intrusion prevention systems (IPS). 
These devices act as gatekeepers, enforcing security policies and filtering network traffic to protect the internal network from malicious activity.\n\nDMZ Segmentation\n----------------\n\nThe DMZ is a specially isolated part of the network situated between the internal network and the untrusted external network. DMZ segmentation involves creating a separate, secure area for hosting public-facing services (such as web servers, mail servers, and application servers) that need to be accessible to external users.\n\nThe primary purpose of the DMZ is to provide an additional layer of protection for internal networks. By keeping public-facing services in the DMZ and isolated from the internal network, you can prevent external threats from directly targeting your organization's most sensitive assets.\n\nTo implement a DMZ in your network, you can use devices such as firewalls, routers, or dedicated network security appliances. Properly configured security policies and access controls help ensure that only authorized traffic flows between the DMZ and the internal network, while still allowing necessary external access to the DMZ services.\n\nKey Takeaways\n-------------\n\n* Perimeter and DMZ segmentation are crucial security techniques that help protect internal networks from external threats.\n* Perimeter segmentation involves isolating an organization's internal networks from the untrusted external network, typically using security appliances such as firewalls, IDS, and IPS.\n* DMZ segmentation involves creating a separate, secure area within the network for hosting public-facing services that need to be accessible to external users while maintaining additional security for internal assets.\n* Implementing proper network segmentation and security policies can significantly reduce the risk of data breaches and other security threats.", + "links": [] + }, + "HavEL0u65ZxHt92TfbLzk": { + "title": "Core Concepts of Zero Trust", + "description": "_Zero Trust_ is a modern 
security framework that addresses the ever-evolving threat landscape in the digital world. It emphasizes the idea of \"never trust, always verify\". This approach requires organizations to abandon the traditional perimeter-based security models and adopt a more comprehensive, holistic approach to protecting their data and assets.\n\nCore Principles\n---------------\n\n* **Deny trust by default**: Assume all network traffic, both inside and outside the organization, is potentially malicious. Do not trust any user, device, or application just because they are within the network perimeter.\n \n* **Verify every request**: Authenticate and authorize all requests (even for those from within the network) before granting access to any resource. Ensure that each user, device, or application is properly identified, and their access to resources is appropriate based on their role, rights, and privileges.\n \n* **Apply least privilege**: Limit users, applications, and devices to the minimum level of access required to perform their functions. This minimizes the risk of unauthorized access, and reduces the potential attack surface.\n \n* **Segment networks**: Isolate and segregate different parts of the network to limit the potential impact of a breach. If an attacker gains access to one segment, they should not be able to move laterally across the network and access other sensitive data.\n \n* **Inspect and log all traffic**: Actively monitor, analyze, and log network traffic to identify potential security incidents and perform forensic investigations. 
This provides valuable insights for security teams to continuously improve their security posture and detect early signs of malicious activities.\n \n\nBenefits\n--------\n\n* **Reduced attack surface**: Limiting access to sensitive resources and segmenting the network makes it more challenging for attackers to compromise systems and access valuable data.\n \n* **Enhanced visibility and monitoring**: By continuously inspecting and logging all traffic, security teams can gain unprecedented levels of visibility, helping them identify potential threats and attacks more effectively.\n \n* **Improved compliance and governance**: Implementing a Zero Trust model reinforces an organization's compliance and governance posture, ensuring access to sensitive data is only granted to authorized users.\n \n* **Adaptability**: A Zero Trust approach can be applied to a wide range of environments and can be tailored to meet the specific security needs and objectives of an organization.\n \n\nBy implementing a Zero Trust framework, an organization can strengthen its security posture, safeguard against internal and external threats, and maintain control over their critical assets in an increasingly interconnected world.", + "links": [] + }, + "kqT0FRLt9Ak9P8PhHldO-": { + "title": "Roles of Compliance and Auditors", + "description": "Compliance and auditors play a crucial role in maintaining the security and integrity of any organization's digital infrastructure. They ensure that organizations follow industry-specific regulations, international standards, and defined security policies to reduce the risk of security breaches and protect sensitive data.\n\nCompliance\n----------\n\nCompliance refers to adhering to a set of rules, regulations, and best practices defined by industry standards, government regulations, or an organization's internal security policies. 
These may include:\n\n* **Industry Standards**: Security standards specific to an industry, e.g., _Payment Card Industry Data Security Standard (PCI DSS)_ for companies handling credit card transactions.\n* **Government Regulations**: Rules defined at a national or regional level to ensure the protection of sensitive information, e.g., _General Data Protection Regulation (GDPR)_ in the European Union.\n* **Internal Security Policies**: Guidelines and procedures created by an organization to manage its digital infrastructure and data securely.\n\nAuditors\n--------\n\nAuditors, specifically cybersecurity auditors or information system auditors, are responsible for evaluating and verifying an organization's compliance with relevant regulations and standards. They perform rigorous assessments, suggest corrective actions, and prepare detailed reports highlighting discrepancies and vulnerabilities in the organization's information systems. Some key responsibilities of auditors include:\n\n* **Assessment**: Conduct comprehensive reviews of security policies, procedures, and controls in place. This may involve evaluating the effectiveness of firewalls, security software, and network configurations.\n* **Risk Management**: Identify and evaluate potential risks and vulnerabilities to an organization's digital infrastructure, such as data breaches, cyber-attacks, or human errors.\n* **Documentation**: Prepare detailed reports highlighting findings, recommendations, and corrective actions. This may include a list of vulnerabilities, compliance gaps, and improvement suggestions.\n* **Consultation**: Provide expert advice and technical guidance to management and IT teams to help organizations meet compliance requirements and improve their overall security posture.\n\nTo summarize, compliance and auditors are essential in maintaining an organization's cybersecurity stance. 
Effective coordination between security professionals, management, and IT teams is needed to ensure the safety and protection of sensitive data and systems from evolving cyber threats.", + "links": [] + }, + "ggAja18sBUUdCfVsT0vCv": { + "title": "Understand the Definition of Risk", + "description": "In the context of cybersecurity, risk can be defined as the possibility of damage, loss, or any negative occurrence that is caused by external or internal vulnerabilities, and that may be avoided through preemptive action. Risk is typically characterized by three main components:\n\n* **Threat:** A potential danger to the confidentiality, integrity, or availability of information in your system. Threats can be natural (e.g., floods, earthquakes), human-made (e.g., hackers, malicious software), or due to technical issues (e.g., hardware malfunction).\n \n* **Vulnerability:** A weakness or flaw in your system that can be exploited by a threat agent to compromise the security of the system. Vulnerabilities can exist in various aspects, such as physical access, network services, or security procedures.\n \n* **Impact:** The potential amount of damage or loss that can occur to your organization, system, or data due to the successful execution of a threat. Impacts can be financial, reputational, operational, or any other negative consequence that your organization faces as a result of a security breach.\n \n\nWhen evaluating the risk levels of a cybersecurity scenario, it is important to assess the likelihood of a specific threat exploiting a specific vulnerability, as well as the associated impact if such an event occurs. By understanding risks and their components, you can better prioritize your security resources and take appropriate steps to mitigate potential risks. 
Remember that risk cannot be entirely eliminated, but rather managed to an acceptable level through effective security measures and strategies.", + "links": [] + }, + "9asy3STW4oTYYHcUazaRj": { + "title": "Understand Backups and Resiliency", + "description": "Backups and resiliency are crucial components of an effective cyber security strategy. They help organizations maintain their operations and data integrity, even in the face of various threats such as data breaches, hardware failures, or natural disasters. In this section, we will discuss the importance of creating and maintaining regular data backups and developing a resilient infrastructure.\n\nData Backups\n------------\n\nData backups are simply copies of your valuable data that are stored in a secure location, separate from your primary storage. They provide a means to recover your data in case of any data loss incidents, such as accidental deletion, hardware failure, or cyber attacks like ransomware.\n\n**Best practices for data backups include:**\n\n* **Frequent and scheduled backups**: Schedule regular backups and automate the process to ensure consistency and reduce the risk of human error.\n \n* **Multiple copies**: Maintain multiple copies of your backups, preferably on different types of storage media (e.g., external hard drives, cloud storage, or tapes).\n \n* **Offsite storage**: Store at least one copy of your backups offsite. 
This will help protect against data loss due to onsite physical disasters or theft.\n \n* **Encryption**: Encrypt your backups to protect sensitive data from unauthorized access.\n \n* **Testing and verification**: Regularly test your backups to ensure they are functioning properly and can be restored when needed.\n \n\nInfrastructure Resiliency\n-------------------------\n\nInfrastructure resiliency refers to the ability of your organization's IT systems to maintain availability and functionality in the face of unexpected disruptions, such as power outages, hardware failures, or cyber attacks. A resilient infrastructure helps minimize downtime and data loss, ensuring that your organization can continue its operations during and after an incident.\n\n**Key components of a resilient infrastructure include:**\n\n* **Redundancy**: Design your infrastructure in a way that it includes redundant components (e.g., servers, power supplies, or network connections) to ensure uninterrupted operations in case of a failure.\n \n* **Disaster recovery planning**: Develop a comprehensive disaster recovery plan that outlines the steps and resources to restore your systems and data after an incident. 
This plan should include provisions for regular testing and updating.\n \n* **Incident response planning**: Establish a clear incident response process that defines roles, responsibilities, and procedures for identifying, investigating, and mitigating security incidents.\n \n* **Regular monitoring and maintenance**: Proactively monitor your infrastructure for signs of potential issues, and perform routine maintenance to minimize vulnerabilities and reduce the likelihood of failures.\n \n\nBy investing in robust data backups and building a resilient infrastructure, you will ensure that your organization is well-prepared to handle any unexpected disruptions and maintain the continuity of essential operations.", + "links": [] + }, + "H38Vb7xvuBJXVzgPBdRdT": { + "title": "Cyber Kill Chain", + "description": "The **Cyber Kill Chain** is a model that was developed by Lockheed Martin, a major aerospace, military support, and security company, to understand and prevent cyber intrusions in various networks and systems. It serves as a framework for breaking down the stages of a cyber attack, making it easier for security professionals to identify, mitigate, and prevent threats.\n\nThe concept is based on a military model, where the term \"kill chain\" represents a series of steps needed to successfully target and engage an adversary. In the context of cybersecurity, the model breaks down the stages of a cyber attack into seven distinct phases:\n\n* **Reconnaissance**: This initial phase involves gathering intelligence on the target, which may include researching public databases, performing network scans, or social engineering techniques.\n* **Weaponization**: In this stage, the attacker creates a weapon – such as a malware, virus, or exploit – and packages it with a delivery mechanism that can infiltrate the target's system.\n* **Delivery**: The attacker selects and deploys the delivery method to transmit the weapon to the target. 
Common methods include email attachments, malicious URLs, or infected software updates.\n* **Exploitation**: This is the phase where the weapon is activated, taking advantage of vulnerabilities in the target's systems or applications to execute the attacker's code.\n* **Installation**: Once the exploit is successful, the attacker installs the malware on the victim's system, setting the stage for further attacks or data exfiltration.\n* **Command and Control (C2)**: The attacker establishes a communication channel with the infected system, allowing them to remotely control the malware and conduct further actions.\n* **Actions on Objectives**: In this final phase, the attacker achieves their goal, which may involve stealing sensitive data, compromising systems, or disrupting services.\n\nUnderstanding and analyzing the Cyber Kill Chain helps organizations and individuals take a more proactive approach to cybersecurity. By recognizing the signs of an attack at each stage, appropriate countermeasures can be employed to either prevent or minimize the damage from the attack.\n\nBy staying informed and diligently employing security best practices, you can effectively protect your digital assets and contribute to a safer cyberspace.", + "links": [] + }, + "pnfVrOjDeG1uYAeqHxhJP": { + "title": "MFA & 2FA", + "description": "Introduction\n------------\n\nMulti-Factor Authentication (MFA) and Two-Factor Authentication (2FA) are security measures designed to enhance the protection of user accounts and sensitive information. These supplementary methods require the user to provide more than one form of verification to access an account, making it more difficult for unauthorized users to gain access. In this section, we'll discuss the basics of MFA and 2FA and why they are crucial to cybersecurity.\n\nTwo-Factor Authentication (2FA)\n-------------------------------\n\n2FA strengthens security by requiring two distinct forms of verification before granting access. 
This means that even if a malicious actor has your password, they will still need the second form of verification to access your account, reducing the risk of unauthorized access.\n\nTwo-Factor Authentication usually involves a combination of:\n\n* Something you know (e.g., passwords, PINs)\n* Something you have (e.g., physical tokens, mobile phones)\n* Something you are (e.g., biometrics, such as fingerprints or facial recognition)\n\nA common example of 2FA is when you receive a unique code via SMS when logging into a website or access sensitive information. You will need to provide that code along with your password to gain access, adding an extra layer of security.\n\nMulti-Factor Authentication (MFA)\n---------------------------------\n\nMFA enhances security even further by requiring more than two forms of verification, incorporating three or more factors from the categories mentioned earlier (knowledge, possession, and inherence). By incorporating additional authentication methods, MFA raises the bar for attackers, making it much more difficult for them to gain access.\n\nThe main advantage of using MFA over 2FA is that even if one factor is compromised, there are still additional hurdles for an attacker to overcome. For example, if someone intercepts your mobile phone as the second factor, they would still have to bypass a biometric authentication requirement.\n\nImportance in Cybersecurity\n---------------------------\n\nUsing MFA and 2FA lends more security to user accounts, lowering the chances of being compromised. They provide multiple layers of protection, making it significantly harder for cybercriminals to breach accounts or gain unauthorized access.\n\nImplementing 2FA and MFA should be a priority for businesses and individuals alike in order to maintain a high level of cybersecurity. 
By educating users on the benefits and importance of these forms of authentication and ensuring their widespread adoption, we can create a more secure online environment.", + "links": [] + }, + "_S25EOGS3P8647zLM5i-g": { + "title": "Operating System Hardening", + "description": "OS hardening, or Operating System hardening, is the process of strengthening your operating system's security settings to prevent unauthorized access, data breaches, and other malicious activities. This step is essential for enhancing the security posture of your device or network and to minimize potential cyber risks.\n\nThe Importance of OS Hardening\n------------------------------\n\nIn today's world of evolving cyber threats and vulnerabilities, default security configurations provided by operating systems are often insufficient. OS hardening is necessary to:\n\n* **Inhibit unauthorized access**: Limit the potential entry points for attackers.\n* **Close security gaps**: Reduce the risks of exploits and vulnerabilities in your system.\n* **Prevent data breaches**: Safeguard sensitive data from cybercriminals.\n* **Align with compliance requirements**: Ensure your system complies with industry regulations and standards.\n\nKey Principles of OS Hardening\n------------------------------\n\nHere are some fundamental principles that can help strengthen your operating system security:\n\n* **Least Privilege**: Limit user rights and permissions, only providing the minimum access required for essential tasks. Implement stringent access controls and separation of duties.\n* **Disable or remove unnecessary services**: Unnecessary software, programs, and services can introduce vulnerabilities. 
Turn them off or uninstall them when not needed.\n* **Patch Management**: Keep your system and applications up-to-date with the latest security patches and updates.\n* **Regular Monitoring**: Implement monitoring mechanisms to detect and respond to potential threats promptly.\n* **Authentication and Password Security**: Enforce strong, unique passwords and use Multi-Factor Authentication (MFA) for added protection.\n\nSteps for OS Hardening\n----------------------\n\nA comprehensive OS hardening process includes the following steps:\n\n* **Create a Standard Operating Environment (SOE)**: Develop a standardized and secure system configuration as a baseline for all company systems.\n* **Inventory**: Identify and track all the devices, software, and services in your environment and their respective configurations.\n* **Assess current security controls**: Evaluate the existing security settings to identify gaps requiring improvement.\n* **Apply required hardening measures**: Implement necessary changes, including applying patches, updating software, and configuring security settings.\n* **Monitor and review**: Continuously monitor your environment and update your hardening measures and policies as needed.\n\nBy incorporating OS hardening into your cybersecurity practices, you can significantly reduce the risks associated with cyber threats and protect your business's valuable assets.", + "links": [] + }, + "aDF7ZcOX9uR8l0W4aqhYn": { + "title": "Understand Concept of Isolation", + "description": "Isolation is a key principle in cyber security that helps to ensure the confidentiality, integrity, and availability of information systems and data. The main idea behind isolation is to separate different components or processes, such that if one is compromised, the others remain protected. Isolation can be applied at various levels, including hardware, software, and network layers. 
It is commonly used to protect sensitive data, critical systems, and to limit the potential damage caused by malicious activities.\n\nHardware Isolation\n------------------\n\nHardware isolation provides a physical separation between various components or systems, thereby preventing direct access or interference between them. This can be achieved through several mechanisms, including:\n\n* **Air-gapped systems**: A computer or network that has no direct connections to external networks or systems, ensuring that unauthorized access or data leakage is virtually impossible.\n \n* **Hardware security modules (HSMs)**: Dedicated physical devices that manage digital keys and cryptographic operations, ensuring that sensitive cryptographic material is separated from other system components and protected against tampering or unauthorized access.\n \n\nSoftware Isolation\n------------------\n\nSoftware isolation seeks to separate data and processes within the software environment itself. Some common methods include:\n\n* **Virtualization**: The creation of isolated virtual machines (VMs) within a single physical host, allowing multiple operating systems and applications to run in parallel without direct access to each other's resources.\n \n* **Containers**: Lightweight virtual environments that allow applications to run in isolation from one another, sharing the same operating system kernel, but having separate file systems, libraries, and namespaces.\n \n* **Sandboxing**: A security technique that confines an application's activities to a restricted environment, protecting the underlying system and other applications from potential harm.\n \n\nNetwork Isolation\n-----------------\n\nNetwork isolation aims to separate and control communication between different systems, devices, or networks. 
This can be implemented through several means, such as:\n\n* **Firewalls**: Devices or software that act as a barrier, filtering and controlling traffic between networks or devices based on predefined policies.\n \n* **Virtual Local Area Networks (VLANs)**: Logical partitions created within a physical network, segregating devices into separate groups with restricted communication between them.\n \n* **Virtual Private Networks (VPNs)**: Encrypted connections that securely tunnel network traffic over the public internet, protecting it from eavesdropping or tampering and ensuring the privacy of the communication.\n \n\nImplementing the concept of isolation within your cyber security strategy can significantly enhance your organization's security posture by limiting the attack surface, containing potential threats, and mitigating the impact of security breaches.", + "links": [] + }, + "FJsEBOFexbDyAj86XWBCc": { + "title": "Basics of IDS and IPS", + "description": "When it comes to cybersecurity, detecting and preventing intrusions is crucial for protecting valuable information systems and networks. In this section, we'll discuss the basics of Intrusion Detection Systems (IDS) and Intrusion Prevention Systems (IPS) to help you better understand their function and importance in your overall cybersecurity strategy.\n\nWhat is Intrusion Detection System (IDS)?\n-----------------------------------------\n\nAn Intrusion Detection System (IDS) is a critical security tool designed to monitor and analyze network traffic or host activities for any signs of malicious activity, policy violations, or unauthorized access attempts. 
Once a threat or anomaly is identified, the IDS raises an alert to the security administrator for further investigation and possible actions.\n\nThere are two types of IDS:\n\n* **Network-Based Intrusion Detection System (NIDS)**: This type of IDS is deployed on network devices such as routers, switches, or firewalls to monitor and analyze the traffic between hosts within the network.\n \n* **Host-Based Intrusion Detection System (HIDS)**: This type of IDS is installed on individual hosts, such as servers or workstations, to monitor and analyze the activities on that specific host.\n \n\nWhat is Intrusion Prevention System (IPS)?\n------------------------------------------\n\nAn Intrusion Prevention System (IPS) is an advanced security solution closely related to IDS. While an IDS mainly focuses on detecting and alerting about intrusions, an IPS takes it a step further and actively works to prevent the attacks. It monitors, analyzes, and takes pre-configured automatic actions based on suspicious activities, such as blocking malicious traffic, resetting connections, or dropping malicious packets.\n\nThere are two types of IPS:\n\n* **Network-Based Intrusion Prevention System (NIPS)**: This type of IPS is deployed in-line with network devices and closely monitors network traffic, making it possible to take actions in real-time.\n \n* **Host-Based Intrusion Prevention System (HIPS)**: This type of IPS is installed on individual hosts and actively prevents attacks by controlling inputs and outputs on the host, restricting access to resources, and making use of application-level controls.\n \n\nKey Takeaways\n-------------\n\n* IDS and IPS are essential components of a robust cybersecurity strategy.\n* IDS focuses on detecting and alerting about potential intrusions, while IPS takes it further by actively preventing and mitigating attacks.\n* Network-based systems protect networks, while host-based systems protect individual hosts within a network.\n* Regularly updating 
and configuring IDS/IPS is necessary to continually defend against evolving threats.\n\nBy understanding the basics of IDS and IPS, you can better evaluate your security needs and take the right steps to protect your network and hosts from potential intruders.", + "links": [] + }, + "bj5YX8zhlam0yoNckL8e4": { + "title": "Honeypots", + "description": "A **honeypot** is a security measure that is designed to lure and trap potential cyber attackers, usually by posing as a vulnerable system or network. Honeypots can be a valuable tool in understanding the various tactics used by malicious actors, which allows security professionals to develop better strategies for defending against these attacks. In this section, we will explore the different types of honeypots, their uses, and some important considerations when implementing them.\n\nTypes of Honeypots\n------------------\n\nThere are several different types of honeypots that can be implemented, each with unique features and capabilities. Some common types include:\n\n* **Low-Interaction Honeypots**: These honeypots simulate a limited set of services or vulnerabilities to lure attackers. They require minimal resources and are easier to set up than other types of honeypots. They are often used to gather basic information about attacker behavior and techniques.\n \n* **High-Interaction Honeypots**: These honeypots simulate a complete and realistic environment, often running full operating systems and services. They are resource-intensive but provide a more in-depth understanding of attacker behavior and can be used to identify more sophisticated threats.\n \n* **Research Honeypots**: These honeypots are designed specifically for the purpose of collecting detailed information about attacker methods and motives for further analysis. 
They often require advanced knowledge and resources to maintain but provide valuable intelligence.\n \n\nUses of Honeypots\n-----------------\n\nHoneypots have several uses in the cybersecurity landscape:\n\n* **Identify new threats**: Honeypots can help security professionals identify new attack methods, malware, or other threats before they affect real systems.\n \n* **Distract attackers**: By presenting a seemingly vulnerable target, honeypots can divert attackers' attention from actual critical systems, thus providing an additional layer of security.\n \n* **Collect attack data**: By carefully monitoring interactions with honeypots, security professionals can gather valuable information on attacker behavior, tactics, and techniques, further improving cyber defense strategies.\n \n\nImportant Considerations\n------------------------\n\nWhile honeypots can be powerful tools in a security professional's arsenal, there are some important factors to consider:\n\n* **Ethics and legality**: It's crucial to ensure that all honeypot activities are conducted ethically and within the boundaries of the law. In some jurisdictions, certain activities surrounding honeypots (such as trapping attackers) may be illegal or require specific permissions.\n \n* **Risk of compromise**: Honeypots can add another attack surface, which can be exploited by attackers if not adequately secured or maintained. 
If an attacker determines that a system is a honeypot, they may decide to attack the network further or launch more targeted attacks.\n \n* **Maintenance and resources**: Developing and maintaining honeypots can be resource-intensive, requiring dedicated systems or virtual machines, expertise in system administration, and ongoing monitoring.\n \n\nIt's important to carefully weigh the benefits and risks of implementing honeypots and ensure they are used responsibly and strategically within your cybersecurity plan.", + "links": [] + }, + "WG7DdsxESm31VcLFfkVTz": { + "title": "Authentication vs Authorization", + "description": "Authentication vs Authorization\n-------------------------------\n\n**Authentication** is the process of validating the identity of a user, device, or system. It confirms that the entity attempting to access the resource is who or what they claim to be. The most common form of authentication is the use of usernames and passwords. Other methods include:\n\n**Authorization** comes into play after the authentication process is complete. It involves granting or denying access to a resource, based on the authenticated user's privileges. 
Authorization determines what actions the authenticated user or entity is allowed to perform within a system or application.", + "links": [ + { + "title": "Two-factor authentication (2FA)", + "url": "https://authy.com/what-is-2fa/", + "type": "article" + }, + { + "title": "Biometrics (fingerprint, facial recognition, etc.)", + "url": "https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5428991/", + "type": "article" + }, + { + "title": "Security tokens or certificates", + "url": "https://www.comodo.com/e-commerce/ssl-certificates/certificate.php", + "type": "article" + }, + { + "title": "Role-based access control (RBAC)", + "url": "https://en.wikipedia.org/wiki/Role-based_access_control", + "type": "article" + }, + { + "title": "Access Control Lists (ACLs)", + "url": "https://en.wikipedia.org/wiki/Access-control_list", + "type": "article" + }, + { + "title": "Attribute-based access control (ABAC)", + "url": "https://en.wikipedia.org/wiki/Attribute-based_access_control", + "type": "article" + } + ] + }, + "7tDxTcKJNAUxbHLPCnPFO": { + "title": "Blue / Red / Purple Teams", + "description": "In the context of cybersecurity, Blue Team, Red Team, and Purple Team are terms used to describe different roles and methodologies employed to ensure the security of an organization or system. Let's explore each one in detail.\n\nBlue Team\n---------\n\nThe Blue Team is responsible for defending an organization's information systems, networks, and critical assets from security threats. 
They are tasked with the ongoing monitoring of systems, detecting and responding to potential security incidents, and implementing protective measures.\n\n**Key activities of the Blue Team:**\n\n* Develop and implement security policies and procedures\n* Perform vulnerability assessments and risk assessments\n* Deploy security tools and technologies (e.g., firewalls, intrusion detection systems, etc.)\n* Monitor logs and analyze security events for potential threats\n* Respond to and investigate security incidents\n* Conduct security awareness and training programs\n\nRed Team\n--------\n\nThe Red Team's primary goal is to simulate real-world attacks, identify vulnerabilities, and test the effectiveness of the Blue Team's defensive strategies. They are external or internal team members that act like adversaries, using creativity, and advanced techniques to test an organization's cybersecurity defenses.\n\n**Key activities of the Red Team:**\n\n* Perform regular penetration testing and security assessments\n* Use social engineering techniques to exploit human weaknesses\n* Analyze and exploit vulnerabilities in systems, networks, and applications\n* Emulate advanced persistent threats and attack scenarios\n* Provide actionable insights to improve the organization's security posture\n\nPurple Team\n-----------\n\nThe Purple Team bridges the gap between the Blue Team and Red Team, helping to create a more collaborative environment. 
They facilitate communication and information sharing between the two teams, ultimately aiming to improve the overall effectiveness of a security program.\n\n**Key activities of the Purple Team:**\n\n* Coordinate and plan joint exercises between Blue Team and Red Team\n* Share knowledge, techniques, and findings between the teams\n* Assist with the implementation of identified security improvements\n* Evaluate and measure the effectiveness of security controls\n* Foster a culture of continuous improvement and collaboration\n\nBy investing in Blue, Red, and Purple Team efforts, organizations can achieve a more robust and resilient security posture, capable of withstanding and adapting to ever-evolving threats.", + "links": [ + { + "title": "Red Team Fundamentals (TryHackMe)", + "url": "https://tryhackme.com/room/redteamfundamentals", + "type": "article" + } + ] + }, + "XwRCZf-yHJsXVjaRfb3R4": { + "title": "False Negative / False Positive", + "description": "A false positive happens when the security tool mistakenly identifies a non-threat as a threat. For example, it might raise an alarm for a legitimate user's activity, indicating a potential attack when there isn't any. A high number of false positives can cause unnecessary diverting of resources and time, investigating false alarms. Additionally, it could lead to user frustration if legitimate activities are being blocked.\n\nA false negative occurs when the security tool fails to detect an actual threat or attack. This could result in a real attack going unnoticed, causing damage to the system, data breaches, or other negative consequences. A high number of false negatives indicate that the security system needs to be improved to capture real threats effectively.\n\nTo have an effective cybersecurity system, security professionals aim to maximize true positives and true negatives, while minimizing false positives and false negatives. 
Balancing these aspects ensures that the security tools maintain their effectiveness without causing undue disruptions to a user's experience.", + "links": [] + }, + "M6uwyD4ibguxytf1od-og": { + "title": "True Negative / True Positive", + "description": "True Negative / True Positive\n-----------------------------\n\nA true positive is an instance when security tools correctly detect and identify a threat, such as a malware or intrusion attempt. A high number of true positives indicates that a security tool is working effectively and catching potential threats as required.\n\nA true negative occurs when the security tool correctly identifies that there is no threat or attack in a given situation. In other words, the system does not raise an alarm when there is no attack happening. A high number of true negatives show that the security tool is not overly sensitive, generating unnecessary alerts.", + "links": [] + }, + "wN5x5pY53B8d0yopa1z8F": { + "title": "Basics of Threat Intel, OSINT", + "description": "Open Source Intelligence (OSINT) is a crucial part of cyber threat intelligence (CTI). It refers to the collection and analysis of publicly available information from various sources to identify potential threats to an organization's information security.\n\nWhy is OSINT important for threat intelligence?\n-----------------------------------------------\n\nOSINT plays a significant role in achieving comprehensive threat intelligence by offering valuable insights into various threat actors, their tactics, techniques, and procedures (TTPs). By leveraging OSINT, security teams can:\n\n* Identify and track adversaries targeting their organization\n* Gain knowledge about the latest attack strategies and trends\n* Evaluate the effectiveness of existing security measures\n* Develop proactive defense strategies to mitigate potential threats\n\nKey OSINT Sources\n-----------------\n\nThere are numerous sources of OSINT data that can be valuable for threat intelligence. 
Some of the main sources include:\n\n* **Publicly accessible websites and blogs**: Security researchers, hackers, and threat actors frequently share information about their findings, tools, and techniques in their blogs and websites.\n \n* **Social media platforms**: Social media platforms like Twitter, Reddit, and LinkedIn offer a wealth of information about threat actors' activities and can act as a valuable resource for threat intelligence.\n \n* **Security-related conference materials**: Many industry conferences and workshops publish their research papers, video recordings, and presentations online, allowing you to gather valuable insights from experts in the field.\n \n* **Online forums and chat rooms**: Hacker forums, online chat rooms, and bulletin boards often contain discussions related to the latest vulnerabilities, exploits, and attack techniques.\n \n* **Pastebin and GitHub**: These platforms offer code snippets and repositories that may contain working hacking tools or proof-of-concept exploits, making them valuable sources of OSINT.\n \n\nBest Practices for OSINT Collection\n-----------------------------------\n\nCollecting and analyzing OSINT for threat intelligence may seem like a daunting task, but by following these best practices, you can effectively incorporate it into your cyber defense strategies:\n\n* **Set clear goals and objectives**: Define what you want to achieve with your OSINT collection efforts and how it contributes to your organization's threat intelligence initiatives.\n \n* **Establish a methodology**: Develop a structured approach and process for searching, collecting, and analyzing OSINT data.\n \n* **Filter your data**: As the volume of data available from OSINT sources can be overwhelming, it's essential to filter the data gathered effectively. 
Prioritize information that is relevant to your organizational context and specific intelligence requirements.\n \n* **Maintain up-to-date knowledge**: Regularly review newly available OSINT and stay current with the latest tactics, techniques, and procedures utilized by threat actors.\n \n* **Collaborate and share with peers**: The security community is known for collaboration and knowledge sharing. Engage with other security professionals to benefit from their knowledge and experience.\n \n\nIn conclusion, OSINT is a significant aspect of threat intelligence that helps organizations identify and mitigate potential security threats. By effectively collecting and analyzing OSINT, you can gain a better understanding of the ever-evolving threat landscape and develop more effective strategies to protect your organization.", + "links": [] + }, + "zQx_VUS1zRmF4zCGjJD5-": { + "title": "Understand Handshakes", + "description": "In the world of cyber security, a **handshake** refers to the process of establishing a connection between two parties or devices as part of a secure communication protocol. A handshake typically ensures that both parties are aware of the connection and also serves to initiate the setup of a secure communication channel.\n\nThere are two common types of handshakes in cyber security:\n\n* **Three-Way Handshake**\n* **Cryptographic Handshake**\n\nThree-Way Handshake (TCP Handshake)\n-----------------------------------\n\nIn the context of a Transmission Control Protocol (TCP) connection, a three-way handshake is used to establish a secure and reliable connection between two devices. 
This process involves three specific steps:\n\n* **SYN**: The initiating device sends a SYN (synchronize) packet to establish a connection with the receiving device.\n* **SYN-ACK**: The receiving device acknowledges the SYN packet by sending back a SYN-ACK (synchronize-acknowledge) packet.\n* **ACK**: The initiating device acknowledges the SYN-ACK packet by sending an ACK (acknowledge) packet.\n\nOnce these steps are completed, the connection is established, and data can be exchanged securely between the two devices.\n\nCryptographic Handshake (SSL/TLS Handshake)\n-------------------------------------------\n\nA cryptographic handshake is used to establish a secure connection using cryptographic protocols like Secure Sockets Layer (SSL) or Transport Layer Security (TLS). The SSL/TLS handshake involves several steps, some of which include:\n\n* **Client Hello**: The initiating party (client) sends a \"Client Hello\" message, which includes supported cipher suites, SSL/TLS version, and a random value.\n* **Server Hello**: The receiving party (server) replies with a \"Server Hello\" message, choosing the highest SSL/TLS version and a compatible cipher suite, along with its random value.\n* **Authentication**: The server shares its digital certificate, allowing the client to verify its identity using a trusted certificate authority (CA).\n* **Key Exchange**: Both parties exchange the necessary information (like public keys) to generate a shared secret key that will be used for encryption and decryption.\n\nOnce this process is successfully completed, a secure communication channel is established, and encrypted data can be shared between both parties.\n\nUnderstanding handshakes in cyber security is crucial for professionals, as it helps ensure secure communication and data exchange between devices and users. 
This knowledge can be useful in protecting sensitive information and preventing cyber attacks.", + "links": [] + }, + "uz6ELaLEu9U4fHVfnQiOa": { + "title": "Understand CIA Triad", + "description": "The **CIA Triad** is a foundational concept in cybersecurity that stands for **Confidentiality, Integrity, and Availability**. These three principles represent the core objectives that should be guaranteed in any secure system.\n\nConfidentiality\n---------------\n\nConfidentiality aims to protect sensitive information from unauthorized users or intruders. This can be achieved through various security mechanisms, such as encryption, authentication, and access control. Maintaining confidentiality ensures that only authorized individuals can access the information and systems.\n\nKey Points:\n-----------\n\n* Encryption: Converts data into an unreadable format for unauthorized users, but can be decrypted by authorized users.\n* Authentication: Ensures the identity of the users trying to access your system or data, typically through the use of credentials like a username/password or biometrics.\n* Access Control: Defines and regulates which resources or data can be accessed by particular users and under which conditions.\n\nIntegrity\n---------\n\nIntegrity ensures that information and systems are protected from modifications or tampering by unauthorized individuals. This aspect of the triad is crucial for maintaining accuracy, consistency, and reliability in your systems and data. 
Integrity controls include checksums, file permissions, and digital signatures.\n\nKey Points:\n-----------\n\n* Checksums: Mathematical calculations that can be used to verify the integrity of data by detecting any changes.\n* File Permissions: Ensure that only authorized users have the ability to modify or delete specific files.\n* Digital Signatures: A cryptographic technique that can be used to authenticate the source and integrity of data or messages.\n\nAvailability\n------------\n\nAvailability ensures that systems and information are accessible and functional when needed. This can be achieved by implementing redundancy, fault tolerance, and backup solutions. High availability translates to better overall reliability of your systems, which is essential for critical services.\n\nKey Points:\n-----------\n\n* Redundancy: Duplicate or backup components or systems that can be used in case of failure.\n* Fault Tolerance: The capacity of a system to continue functioning, even partially, in the presence of faults or failures.\n* Backups: Regularly saving copies of your data to prevent loss in case of a catastrophe, such as a hardware failure, malware attack, or natural disaster.\n\nIn summary, the CIA Triad is an essential aspect of cybersecurity, providing a clear framework to evaluate and implement security measures. By ensuring confidentiality, integrity, and availability, you create a robust and secure environment for your information and systems.", + "links": [ + { + "title": "The CIA Triad - Professor Messer", + "url": "https://www.youtube.com/watch?v=SBcDGb9l6yo", + "type": "video" + } + ] + }, + "cvI8-sxY5i8lpelW9iY_5": { + "title": "Privilege Escalation", + "description": "Privilege escalation attacks occur when an attacker gains unauthorized access to a system and then elevates their privileges to perform actions that they should not have been able to do. 
There are two main types of privilege escalation:\n\n* **Horizontal Privilege Escalation**: In this type of attack, an attacker gains unauthorized access to a user account with the same privilege level as their own, but is able to perform actions or access data that belongs to another user.\n \n* **Vertical Privilege Escalation**: Also known as \"Privilege Elevation,\" this type of attack involves an attacker gaining unauthorized access to a system and then elevating their privilege level from a regular user to an administrator, system owner, or root user. This provides the attacker with greater control over the system and its resources.\n \n\nTo protect your systems and data from privilege escalation attacks, consider implementing the following best practices:\n\n* **Principle of Least Privilege**: Assign the minimum necessary access and privileges to each user account, and regularly review and update access permissions as required.\n \n* **Regularly Update and Patch Software**: Keep your software and systems up-to-date with the latest security patches to address known vulnerabilities that could be exploited in privilege escalation attacks.\n \n* **Implement Strong Authentication and Authorization**: Use strong authentication methods (e.g., multi-factor authentication) and ensure proper access controls are in place to prevent unauthorized access to sensitive data or system resources.\n \n* **Conduct Security Audits**: Regularly check for any misconfigurations, vulnerabilities or outdated software that could be exploited in privilege escalation attacks.\n \n* **Monitor and Log System Activities**: Implement logging and monitoring systems to detect suspicious account activities or changes in user privileges that may indicate a privilege escalation attack.\n \n\nBy understanding the types of privilege escalation attacks and following these best practices, you can create a more secure environment for your data and systems, and reduce the risk of unauthorized users 
gaining unrestricted access.", + "links": [] + }, + "fyOYVqiBqyKC4aqc6-y0q": { + "title": "Web Based Attacks and OWASP10", + "description": "The Open Web Application Security Project (OWASP) is a non-profit organization focused on improving the security of software. One of their most well-known projects is the **OWASP Top 10**, which is a list of the most critical web application security risks. The Top 10 project aims to raise awareness and provide businesses, developers, and security teams with guidance on how to address these risks effectively.\n\nThe OWASP Top 10 is updated periodically, with the most recent version released in 2021. Here is a brief summary of the top 10 security risks from the 2017 edition:\n\n* **Injection**: Injection flaws, such as SQL, NoSQL, or OS command injection, occur when untrusted data is sent to an interpreter as part of a command or query, allowing an attacker to execute malicious commands or access unauthorized data.\n \n* **Broken Authentication**: Application functions related to authentication and session management are often implemented incorrectly, allowing attackers to compromise passwords, keys, or session tokens, or exploit other implementation flaws to assume users' identities.\n \n* **Sensitive Data Exposure**: Many web applications and APIs do not properly protect sensitive data, such as financial, healthcare, or personally identifiable information (PII). Attackers can steal or modify this data to conduct crimes like identity theft or credit card fraud.\n \n* **XML External Entities (XXE)**: Poorly configured XML parsers can be vulnerable to external entity attacks, allowing attackers to access unauthorized data, perform server-side request forgery (SSRF), or launch denial-of-service (DoS) attacks.\n \n* **Broken Access Control**: Restrictions on what authenticated users are allowed to do often fail to be properly enforced. 
Attackers can exploit these flaws to access unauthorized functionality or data, modify user access, or perform other unauthorized actions.\n \n* **Security Misconfiguration**: Insecure default configurations, incomplete or ad hoc configurations, misconfigured HTTP headers, and verbose error messages can provide attackers with valuable information to exploit vulnerabilities.\n \n* **Cross-Site Scripting (XSS)**: XSS flaws occur when an application includes untrusted data in a web page without proper validation or escaping. Attackers can execute malicious scripts in the context of the user's browser, leading to account takeover, defacement, or redirection to malicious sites.\n \n* **Insecure Deserialization**: Insecure deserialization flaws can enable an attacker to execute arbitrary code, conduct injection attacks, elevate privileges, or perform other malicious actions.\n \n* **Using Components with Known Vulnerabilities**: Applications and APIs using components with known vulnerabilities may compromise the system if those vulnerabilities are exploited.\n \n* **Insufficient Logging & Monitoring**: Insufficient logging and monitoring, coupled with inadequate integration with incident response, allow attackers to maintain their presence within a system, move laterally, and exfiltrate or tamper with data.\n \n\nTo mitigate these risks, the OWASP Top 10 project provides detailed information, including how to test for each risk, code examples for various programming languages, and specific steps to prevent or remediate the issues. By understanding and implementing the recommended practices, organizations can improve their web application security and protect their users' data.", + "links": [] + }, + "v7CD_sHqLWbm9ibXXESIK": { + "title": "Learn how Malware works and Types", + "description": "Malware, short for malicious software, refers to any software intentionally created to cause harm to a computer system, server, network, or user. 
It is a broad term that encompasses various types of harmful software created by cybercriminals for various purposes. In this guide, we will delve deeper into the major types of malware and their characteristics.\n\nVirus\n-----\n\nA computer virus is a type of malware that, much like a biological virus, attaches itself to a host (e.g., a file or software) and replicates when the host is executed. Viruses can corrupt, delete or modify data, and slow down system performance.\n\nWorm\n----\n\nWorms are self-replicating malware that spread through networks without human intervention. They exploit system vulnerabilities, consuming bandwidth and sometimes carrying a payload to infect target machines.\n\nTrojan Horse\n------------\n\nA trojan horse is a piece of software disguised as a legitimate program but contains harmful code. Users unknowingly download and install it, giving the attacker unauthorized access to the computer or network. Trojans can be used to steal data, create a backdoor, or launch additional malware attacks.\n\nRansomware\n----------\n\nRansomware is a type of malware that encrypts its victims' files and demands a ransom, typically in the form of cryptocurrency, for the decryption key. If the victim refuses or fails to pay within a specified time, the encrypted data may be lost forever.\n\nSpyware\n-------\n\nSpyware is a type of malware designed to collect and relay information about a user or organization without their consent. It can capture keystrokes, record browsing history, and access personal data such as usernames and passwords.\n\nAdware\n------\n\nAdware is advertising-supported software that automatically displays or downloads advertising materials, often in the form of pop-up ads, on a user's computer. 
While not always malicious, adware can be intrusive and open the door for other malware infections.\n\nRootkit\n-------\n\nA rootkit is a type of malware designed to hide or obscure the presence of other malicious programs on a computer system. This enables it to maintain persistent unauthorized access to the system and can make it difficult for users or security software to detect and remove infected files.\n\nKeylogger\n---------\n\nKeyloggers are a type of malware that monitor and record users' keystrokes, allowing attackers to capture sensitive information, such as login credentials or financial information entered on a keyboard.\n\nUnderstanding the different types of malware can help you better identify and protect against various cyber threats. As the cyber landscape continues to evolve, it's essential to stay informed about emerging malware and equip yourself with the necessary security skills and knowledge.", + "links": [] + }, + "Hoou7kWyfB2wx_yFHug_H": { + "title": "nmap", + "description": "Nmap\n----\n\nNmap, short for \"Network Mapper,\" is a powerful and widely used open-source tool for network discovery, scanning, and security auditing. Nmap was originally designed to rapidly scan large networks, but it also works well for scanning single hosts. 
Security professionals, network administrators, and cyber security enthusiasts alike use Nmap to identify available hosts and services on a network, reveal their version information, and explore network infrastructure.\n\nKey Features\n------------\n\nNmap offers a multitude of features that can help you gather information about your network:\n\n* **Host Discovery** - Locating active devices on a network.\n* **Port Scanning** - Identifying open network ports and associated services.\n* **Version Detection** - Determining the software and version running on network devices.\n* **Operating System Detection** - Identifying the operating systems of scanned devices.\n* **Scriptable Interaction with the Target** - Using Nmap Scripting Engine (NSE) to automate tasks and extend functionality.\n\nHow It Works\n------------\n\nNmap sends specially crafted packets to the target hosts and analyzes the received responses. Based on this information, it detects active hosts, their operating systems, and the services they are running. It can be used to scan for open ports, check for vulnerabilities, and gather valuable information about target devices.\n\nExample Usage\n-------------\n\nNmap is a command-line tool with several command options. Here is an example of a basic scan:\n\n nmap -v -A 192.168.1.1\n \n\nThis command performs a scan on the target IP address `192.168.1.1`, with `-v` for verbose output and `-A` for aggressive scan mode, which includes operating system and version detection, script scanning, and traceroute.\n\nGetting Started with Nmap\n-------------------------\n\nNmap is available for download on Windows, Linux, and macOS. You can download the appropriate binary or source package from the [official Nmap website](https://nmap.org/download.html). 
Extensive documentation, including installation instructions, usage guidelines, and specific features, can be found on the [Nmap reference guide](https://nmap.org/book/man.html).\n\nConclusion\n----------\n\nUnderstanding and using Nmap is an essential skill for any cyber security professional or network administrator. With its wide range of features and capabilities, it provides invaluable information about your network infrastructure, enabling you to detect vulnerabilities and improve overall security. Regularly monitoring your network with Nmap and other incident response and discovery tools is a critical aspect of maintaining a strong cyber security posture.", + "links": [] + }, + "jJtS0mgCYc0wbjuXssDRO": { + "title": "tracert", + "description": "`tracert` (Trace Route) is a network diagnostic tool that displays the route taken by packets across a network from the sender to the destination. This tool helps in identifying network latency issues and determining if there are any bottlenecks, outages, or misconfigurations in the network path. Available in most operating systems by default, `tracert` can be executed through a command-line interface (CLI) such as Command Prompt in Windows or Terminal in Linux and macOS.\n\nHow Tracert Works\n-----------------\n\nWhen you initiate a `tracert` command, it sends packets with varying Time-to-Live (TTL) values to the destination. Each router or hop in the network path decreases the original TTL value by 1. When the TTL reaches 0, the router sends an Internet Control Message Protocol (ICMP) \"Time Exceeded\" message back to the source. `tracert` records the time it took for the packet to reach each hop and presents the data in a readable format. 
The process continues until the destination is reached or the maximum TTL value is exceeded.\n\nUsing Tracert\n-------------\n\nTo use `tracert`, follow these simple steps:\n\n* Open the command prompt (Windows) or terminal (Linux/macOS).\n \n* Type `tracert` followed by the target's domain name or IP address, and press Enter. For example:\n \n\n tracert example.com\n \n\n* The trace will run, showing the details of each hop, latency, and hop's IP address or hostname in the output.\n\nInterpreting Tracert Results\n----------------------------\n\nThe output of `tracert` includes several columns of information:\n\n* Hop: The number of the router in the path from source to destination.\n* RTT1, RTT2, RTT3: Round-Trip Times measured in milliseconds, representing the time it took for a packet to travel from your machine to the hop and back. Three different times are displayed for each hop (each measuring a separate ICMP packet).\n* Hostname (optional) and IP Address: Domain name (if applicable) and IP address of the specific hop.\n\nUnderstanding the `tracert` output helps in identifying potential network issues such as high latency, routing loops, or unreachable destinations.\n\nLimitations and Considerations\n------------------------------\n\nSome limitations and considerations to keep in mind when using `tracert`:\n\n* Results may vary due to dynamic routing or load balancing on the network.\n* Firewalls or routers might be configured to block ICMP packets or not decrement the TTL value, potentially giving incomplete or misleading results.\n* `tracert` might not be able to discover every hop in certain network configurations.\n* On Linux/macOS systems, the equivalent command is called `traceroute`.\n\nUsing `tracert` in incident response and discovery helps security teams analyze network path issues, locate potential bottlenecks or problematic hops, and understand network infrastructure performance.", + "links": [] + }, + "OUarb1oS1-PX_3OXNR0rV": { + "title": 
"nslookup", + "description": "NSLookup, short for \"Name Server Lookup\", is a versatile network administration command-line tool used for querying the Domain Name System (DNS) to obtain information associated with domain names and IP addresses. This tool is available natively in most operating systems such as Windows, MacOS, and Linux distributions.\n\nUsing NSLookup\n--------------\n\nTo use NSLookup, open the command prompt or terminal on your device and enter the command `nslookup`, followed by the domain name or IP address you want to query. For example:\n\n nslookup example.com\n \n\nFeatures of NSLookup\n--------------------\n\n* **DNS Record Types**: NSLookup supports various DNS record types like A (IPv4 address), AAAA (IPv6 address), MX (Mail Exchange), NS (Name Servers), and more.\n \n* **Reverse DNS Lookup**: You can perform reverse DNS lookups to find the domain name associated with a specific IP address. For example:\n \n nslookup 192.0.2.1\n \n \n* **Non-interactive mode**: NSLookup can execute single queries without entering the interactive mode. To do this, simply execute the command as mentioned earlier.\n \n* **Interactive mode**: Interactive mode allows you to carry out multiple queries during a single session. 
To enter the interactive mode, type nslookup without any arguments in your terminal.\n \n\nLimitations\n-----------\n\nDespite being a useful tool, NSLookup has some limitations:\n\n* No support for DNSSEC (Domain Name System Security Extensions).\n* Obsolete or not maintained in some Unix-based systems, replaced with more modern utilities like `dig`.\n\nAlternatives\n------------\n\nSome alternatives to NSLookup include:\n\n* **dig**: \"Domain Information Groper\" is a flexible DNS utility that supports a wide range of DNS record types and provides more detailed information than NSLookup.\n \n* **host**: Another common DNS lookup tool that provides host-related information for both forward and reverse lookups.\n \n\nConclusion\n----------\n\nIn summary, NSLookup is a handy DNS query tool for network administrators and users alike. It offers the basic functionality for finding associated domain names, IP addresses, and other DNS data while being simple to use. However, for more advanced needs, you should consider using alternatives like dig or host.", + "links": [] + }, + "W7iQUCjODGYgE4PjC5TZI": { + "title": "curl", + "description": "Curl is a versatile command-line tool primarily used for transferring data using various network protocols. It is widely used in cybersecurity and development for the purpose of testing and interacting with web services, APIs, and scrutinizing web application security. Curl supports various protocols such as HTTP, HTTPS, FTP, SCP, SFTP, and many more.\n\n**Features of Curl:**\n\n* Provides support for numerous protocols.\n* Offers SSL/TLS certificates handling and authentication.\n* Customizable HTTP request headers and methods.\n* Proxies and redirections support.\n* IPv6 support.\n\nCommon Curl Use Cases in Cybersecurity:\n---------------------------------------\n\n* **HTTP Requests:** Curl can be used to test and troubleshoot web services by making GET or POST requests, specifying headers, or sending data. 
You can also use it to automate certain tasks.\n \n GET Request Example:\n \n curl https://example.com\n \n \n POST Request Example:\n \n curl -X POST -d \"data=sample\" https://example.com\n \n \n* **HTTPS with SSL/TLS:** Curl can be utilized to verify and test SSL/TLS configurations and certificates for web services.\n \n Test a site's SSL/TLS configuration:\n \n curl -Iv https://example.com\n \n \n* **File Transfers:** Curl can be used for transferring files using protocols like FTP, SCP, and SFTP.\n \n FTP Example:\n \n curl -u username:password ftp://example.com/path/to/file\n \n \n* **Web Application Testing:** Curl can help you find vulnerabilities in web applications by sending customized HTTP requests, injecting payloads or exploiting their features.\n \n Send Cookie Example:\n \n curl -H \"Cookie: session=12345\" https://example.com\n \n \n Detect Server Software Example:\n \n curl -I https://example.com\n \n \n\nCurl is a powerful tool in the arsenal of anyone working in cybersecurity. Understanding and mastering its usage can greatly enhance your capabilities when dealing with various network protocols, web services, and web applications.", + "links": [] + }, + "Cclbt4bNfkHwFwZOvJuLK": { + "title": "hping", + "description": "hping is a versatile and powerful command-line based packet crafting tool that allows network administrators, security professionals, and system auditors to manipulate and analyze network packets at a granular level. 
hping can be used to perform stress testing, firewall testing, scanning, and packet generation, among other functionalities.\n\nKey Features\n------------\n\n* **Flexible and powerful:** hping supports a wide array of protocols including TCP, UDP, ICMP, and RAW-IP, and can manipulate individual fields within network packets.\n \n* **Custom packet crafting:** Users can create custom packets to test specific firewall rules, for example by modifying flags, window size, or payload.\n \n* **Traceroute mode:** hping can perform traceroute-style scans through its specialized mode, enabling users to discover the network path between two systems.\n \n* **Scripting capability:** hping can be used in conjunction with scripts to automate packet crafting and analysis tasks, making it highly adaptable for diverse network testing use cases.\n \n\nSample Commands\n---------------\n\nHere are some example commands using hping (replace TARGET_IP and TARGET_PORT with the host and port you are testing):\n\n* Perform a traditional ping:\n \n hping3 -1 TARGET_IP\n \n \n* Perform a SYN flood attack:\n \n hping3 --flood -S -p TARGET_PORT TARGET_IP\n \n \n* Perform a traceroute using ICMP packets:\n \n hping3 --traceroute -V -1 TARGET_IP\n \n \n* Perform a UDP scan of the first 100 ports:\n \n hping3 --udp -p 1-100 TARGET_IP\n \n \n\nSummary\n-------\n\nIn summary, hping is an invaluable tool for anyone involved in network security, administration, or auditing. Its flexibility and power make it an essential part of any cybersecurity toolkit. By understanding how to use hping effectively, you can gain valuable insights into the behavior of networks, devices, and security mechanisms, leading to a more secure and resilient infrastructure.", + "links": [] + }, + "yfTpp-ePuDB931FnvNB-Y": { + "title": "ping", + "description": "Ping is a fundamental network utility that helps users determine the availability and response time of a target device, such as a computer, server, or network device, by sending small packets of data to it. 
It operates on the Internet Control Message Protocol (ICMP) and forms an essential part of the incident response and discovery toolkit in cyber security.\n\nHow Ping Works\n--------------\n\nWhen you issue a Ping command, your device sends out ICMP Echo Request packets to the target device. In response, the target device sends out ICMP Echo Reply packets. The round-trip time (RTT) between the request and reply is measured and reported, which is an indication of the network latency and helps identify network problems.\n\nUses of Ping in Cyber Security\n------------------------------\n\n* **Availability and Reachability:** Ping helps ensure that the target device is online and reachable in the network. A successful ping indicates that the target is available and responding to network requests.\n* **Response Time Measurements:** Ping provides the RTT measurements, which are useful for identifying network latency issues or bottlenecks. High RTTs indicate potential network congestion or other issues.\n* **Troubleshoot Connectivity Issues:** In case of network issues or cyber attacks, Ping can help isolate the problem by determining whether the issue is with the target device, the network infrastructure, or a security configuration.\n* **Confirming Access Control:** Ping can also be used to ensure that firewalls or intrusion detection systems (IDS) are properly configured by confirming if ICMP requests are allowed or blocked.\n\nPing Limitations\n----------------\n\n* **Blocking ICMP Traffic**: Some devices or firewalls may be configured to block ICMP traffic, making them unresponsive to Ping requests.\n* **False-Negative Results**: A poor network connection or heavy packet loss may result in a false-negative Ping result, incorrectly displaying the target device as unavailable.\n\nDespite these limitations, Ping remains a useful tool in the cyber security world for network diagnostics and incident response. 
However, it is essential to use Ping in conjunction with other discovery tools and network analysis techniques for comprehensive network assessments.", + "links": [] + }, + "fzdZF-nzIL69kaA7kwOCn": { + "title": "arp", + "description": "ARP is a protocol used by the Internet Protocol (IP) to map an IP address to a physical address, also known as a Media Access Control (MAC) address. ARP is essential for routing data between devices in a Local Area Network (LAN) as it allows for the translation of IP addresses to specific hardware on the network.\n\nHow It Works\n------------\n\nWhen a device wants to communicate with another device on the same LAN, it needs to determine the corresponding MAC address for the target IP address. ARP helps in this process by broadcasting an ARP request containing the target IP address. All devices within the broadcast domain receive this ARP request and compare the target IP address with their own IP address. If a match is found, the device with the matching IP address sends an ARP reply which contains its MAC address.\n\nThe device that initiated the ARP request can now update its ARP cache (a table that stores IP-to-MAC mappings) with the new information, and then proceed to send data to the target's MAC address.\n\nSecurity Concerns\n-----------------\n\nWhile ARP is crucial for the functioning of most networks, it also presents certain security risks. ARP poisoning, for example, occurs when an attacker sends fake ARP messages with the goal to associate their MAC address with the IP address of a target device. 
This can lead to Man-in-the-Middle (MITM) attacks where the attacker can intercept, modify, or block traffic intended for the target device.\n\nTo mitigate ARP poisoning attacks, organizations can implement security measures such as static ARP entries, dynamic ARP inspection, and ensuring that their network devices are updated with the latest security patches.\n\nBy understanding ARP and the potential security risks it presents, you can help protect your network by incorporating appropriate security solutions and staying vigilant against potential threats.", + "links": [ + { + "title": "ARP Explained - Address Resolution Protocol", + "url": "https://www.youtube.com/watch?v=cn8Zxh9bPio", + "type": "video" + } + ] + }, + "D2ptX6ja_HvFEafMIzWOy": { + "title": "cat", + "description": "`cat` is a widely used command-line utility in UNIX and UNIX-like systems. It stands for \"concatenate\" which, as the name suggests, can be used to concatenate files, display file contents, or combine files. In the context of incident response and discovery tools, `cat` plays an essential role in quickly accessing and assessing the contents of various files that inform on security incidents and help users understand system data as well as potential threats.\n\nUsage\n-----\n\nThe default syntax for `cat` is as follows:\n\n cat [options] [file(s)]\n \n\nwhere `options` are command flags to modify the behavior of `cat` and `file(s)` are the input file(s) to be processed. 
If no file is specified, `cat` reads input from the standard input, which allows it to interact with output from other utilities or commands.\n\nKey Features\n------------\n\nHere are some of the useful features of `cat` in incident response and discovery:\n\n* **Display file contents**: Quickly view file content, which is useful for examining logs and configuration files.\n \n cat file.txt\n \n \n* **Combine multiple files**: Combine contents of multiple files that can be useful while investigating related logs.\n \n cat file1.txt file2.txt > combined.txt\n \n \n* **Number lines while displaying**: Use the `-n` flag to show line numbers in the output, assisting in pinpointing specific entries in large files.\n \n cat -n file.txt\n \n \n* **Display non-printable characters**: The `-v` flag allows viewing non-printable characters that might be hidden in a file.\n \n cat -v file.txt\n \n \n* **Piping and Archiving**: The `cat` command can interface seamlessly with other command-line utilities, allowing complex operations to be performed with ease.\n \n cat logs.txt | grep 'ERROR' > error_logs.txt\n \n \n\nWrapping Up\n-----------\n\nIn summary, `cat` is a versatile and indispensable tool in cybersecurity for simplifying the process of navigating through files, logs, and data during an incident response. Its compatibility with various other Unix utilities and commands makes it a powerful tool in the hands of cyber professionals.", + "links": [] + }, + "9xbU_hrEOUtMm-Q09Fe6t": { + "title": "dd", + "description": "`dd` is a powerful data duplication and forensic imaging tool that is widely used in the realm of cybersecurity. As an incident responder, this utility can assist you in uncovering important evidence and preserving digital details to reconstruct the event timelines and ultimately prevent future attacks.\n\nThis command-line utility is available on Unix-based systems such as Linux, BSD, and macOS. 
It can perform tasks like data duplication, data conversion, and error correction. Most importantly, it's an invaluable tool for obtaining a bit-by-bit copy of a disk or file, which can then be analyzed using forensic tools.\n\nUse Cases:\n----------\n\nSome of the common use cases of `dd` in cybersecurity include:\n\n* Creating an exact copy of a disk or file for forensic analysis.\n* Retrieving deleted files from a disk image.\n* Performing data recovery on damaged disks.\n* Copying data between devices or files quickly and reliably.\n\nGeneral Syntax:\n---------------\n\n dd if=INPUT_FILE of=OUTPUT_FILE bs=BLOCK_SIZE count=BLOCK_COUNT skip=BLOCKS_TO_SKIP seek=BLOCKS_TO_SEEK\n \n\n* `if`: The input file or device to read from.\n* `of`: The output file or device to write to.\n* `bs`: The number of bytes to read and write at a time.\n* `count`: The number of blocks to copy.\n* `skip`: The number of input blocks to skip before starting to copy.\n* `seek`: The number of output blocks to skip before starting to copy.\n\nYou can simply skip the `count`, `skip`, and `seek` option for default behaviour.\n\nExample:\n--------\n\nLet's say you need to create a forensically sound image of a suspect's USB drive for analysis. You would typically use a command like this:\n\n dd if=/dev/sdb1 of=~/usb_drive_image.img bs=4096\n \n\nIn this example, `dd` creates an exact image of the USB drive (`/dev/sdb1`) and writes it to a new file in your home directory called `usb_drive_image.img`.\n\nBe cautious while using `dd` as it can overwrite and destroy data if used incorrectly. 
Always verify the input and output files and make sure to have backups of important data.\n\nBy mastering the `dd` utility, you'll have a powerful forensic imaging tool at your disposal which will undoubtedly enhance your cybersecurity incident response and discovery capabilities.", + "links": [] + }, + "VNmrb5Dm4UKUgL8JBfhnE": { + "title": "head", + "description": "Summary\n-------\n\n`head` is a versatile command-line utility that enables users to display the first few lines of a text file, by default it shows the first 10 lines. In case of incident response and cyber security, it is a useful tool to quickly analyze logs or configuration files while investigating potential security breaches or malware infections in a system.\n\nUsage\n-----\n\nThe basic syntax of `head` command is as follows:\n\n head [options] [file(s)]\n \n\nWhere `options` are flags that could be used to modify the output and `[file(s)]` are the input file(s) for which you want to display the first few lines.\n\nExamples\n--------\n\n* Display the first 10 lines of a file:\n\n head myfile.txt\n \n\n* You can change the number of lines to display using `-n` flag:\n\n head -n 20 myfile.txt\n \n\n* To display the first 5 lines of multiple files:\n\n head -n 5 file1.txt file2.txt\n \n\n* Another helpful flag is `-q` or `--quiet`, which avoids displaying file headers when viewing multiple files:\n\n head -q -n 5 file1.txt file2.txt\n \n\nApplication in Incident Response\n--------------------------------\n\nDuring an incident response, the `head` command helps to quickly analyze logs and files to identify potential malicious activity or errors. 
You can use `head` to peek into logs at the early stages of an investigation, and once you have gathered enough information, you can move on to more advanced tools to analyze the data in depth.\n\nFor example:\n\n* Check the first 5 lines of the system log for any potential issues:\n\n head -n 5 /var/log/syslog\n \n\n* Analyze the beginning of a large log file without loading the entire file:\n\n head -n 100 /var/log/large-log-file.log\n \n\nIn summary, the `head` command is a handy tool for preliminary analysis of log files that can save crucial time during an incident response. However, for more in-depth analysis, other tools and techniques should be employed.", + "links": [] + }, + "Dfz-6aug0juUpMmOJLCJ9": { + "title": "grep", + "description": "Grep is a powerful command-line tool used for searching and filtering text, primarily in Unix-based systems. Short for \"global regular expression print\", grep is widely used for its ability to search through files and directories, and find lines that match a given pattern. 
It is particularly useful for incident response and discovery tasks, as it helps you identify specific occurrences of potentially malicious activities within large amounts of log data.\n\nIn this section, we will cover the basics of grep and how to wield its power for efficient incident response.\n\nBasic Syntax\n------------\n\nThe basic syntax of grep is as follows:\n\n grep [options] pattern [files/directories]\n \n\n* `options`: Modify the behavior of grep (e.g., case-insensitive search, display line numbers)\n* `pattern`: The search pattern, which can be a fixed string, a regular expression, or a combination of both\n* `files/directories`: The target files or directories to search\n\nCommon Grep Options\n-------------------\n\nHere are some commonly used grep options:\n\n* `-i`: Perform a case-insensitive search\n* `-v`: Invert the search, returning lines that do not match the pattern\n* `-n`: Display line numbers for matching lines\n* `-r`: Recursively search directories\n* `-c`: Display the count of matching lines\n\nSample Use Cases\n----------------\n\n* Case-insensitive search for the word \"password\":\n\n grep -i \"password\" /var/log/syslog\n \n\n* Display line numbers for lines containing \"error\" in log files:\n\n grep -n \"error\" /var/log/*.log\n \n\n* Search for IP addresses in a web server access log:\n\n grep -E -o \"([0-9]{1,3}\\.){3}[0-9]{1,3}\" /var/log/apache2/access.log\n \n\nConclusion\n----------\n\nGrep is an indispensable tool for incident response and discovery tasks in cyber security. It allows you to quickly pinpoint specific patterns in large volumes of data, making it easier to identify potential threats and respond accordingly. 
As you become more proficient with grep and its wide array of options, you'll gain a valuable resource in your cyber security toolkit.", + "links": [] + }, + "Sm9bxKUElINHND8FdZ5f2": { + "title": "wireshark", + "description": "Wireshark is an open-source network protocol analyzer that allows you to monitor and analyze the packets of data transmitted through your network. This powerful tool helps to identify issues in network communication, troubleshoot application protocol problems, and keep a close eye on cyber security threats.\n\nKey Features of Wireshark\n-------------------------\n\n* **Packet Analysis:** Wireshark inspects each packet in real-time, allowing you to delve deep into the various layers of network protocols to gather valuable information about the source, destination, size, and type of data.\n \n* **Intuitive User Interface:** The graphical user interface (GUI) in Wireshark is easy to navigate, making it accessible for both new and experienced users. The main interface displays a summary of packet information that can be further examined in individual packet detail and hex views.\n \n* **Display Filters:** Wireshark supports a wide range of filtering options to focus on specific network traffic or packets. These display filters help in pinpointing the desired data more efficiently.\n \n* **Capture Filters:** In addition to display filters, Wireshark also allows the use of capture filters that limit the data captured based on specific criteria such as IP addresses or protocol types. This helps to mitigate the volume of irrelevant data and reduce storage requirements.\n \n* **Protocol Support:** Wireshark supports hundreds of network protocols, providing comprehensive insights into your network.\n \n\nHow to Use Wireshark\n--------------------\n\n* **Download and Install:** Visit the [Wireshark official website](https://www.wireshark.org/) and download the appropriate version for your operating system. 
Follow the installation prompts to complete the process.\n \n* **Capture Network Traffic:** Launch Wireshark and select the network interface you want to monitor (e.g., Wi-Fi, Ethernet). Click the \"Start\" button to begin capturing live packet data.\n \n* **Analyze and Filter Packets:** As packets are captured, they will be displayed in the main interface. You can apply display filters to narrow down the displayed data or search for specific packets using different parameters.\n \n* **Stop and Save Capture:** When you're done analyzing network traffic, click the \"Stop\" button to cease capturing packets. You may save the captured data for future analysis by selecting \"File\" > \"Save As\" and choosing a suitable file format.\n \n\nWireshark's capabilities make it an invaluable tool in incident response and discovery for cyber security professionals. Familiarize yourself with this tool to gain a deeper understanding of your network's security and prevent potential cyber threats.", + "links": [] + }, + "gNan93Mg9Ym2AF3Q2gqoi": { + "title": "winhex", + "description": "WinHex is a versatile forensic tool that every incident responder should have in their arsenal. In this section, we will provide you with a brief summary of WinHex and its capabilities in assisting in incident response and discovery tasks. WinHex is a popular hex and disk editor for computer forensics and data recovery purposes.\n\nKey Features of WinHex\n----------------------\n\nHere are some of the essential features of WinHex that make it an excellent tool for incident response:\n\n* **Hex Editing**: As a hex editor, WinHex allows you to analyze file structures and edit raw data. 
It supports files of any size and can search for hex values, strings, or data patterns, which is particularly helpful in forensic analysis.\n \n* **Disk Imaging and Cloning**: WinHex can be used to image and clone disks, which is helpful during incident response to acquire forensic copies of compromised systems for analysis. The imaging process can be customized to support different compression levels, block sizes, and error handling options.\n \n* **File Recovery**: With WinHex, you can recover lost, deleted, or damaged files from various file systems such as FAT, NTFS, and others. It can search for specific file types based on their headers and footers, making it easier to locate and recover pertinent files during an investigation.\n \n* **RAM Analysis**: WinHex provides the functionality to capture and analyze the contents of physical memory (RAM). This feature can help incident responders to identify and examine malware artifacts, running processes, and other valuable information residing in memory while responding to an incident.\n \n* **Slack Space and Unallocated Space Analysis**: WinHex can analyze and display the content in slack spaces and unallocated spaces on a drive. This capability enables a more thorough investigation as fragments of critical evidence might be residing in these areas.\n \n* **Scripting Support**: WinHex allows automation of common tasks with its scripting language (called WinHex Scripting or WHS). 
This feature enables efficient and consistent processing during forensic investigations.\n \n* **Integration with X-Ways Forensics**: WinHex is seamlessly integrated with X-Ways Forensics, providing access to an array of powerful forensic features, such as advanced data carving, timeline analysis, registry analysis, and more.\n \n\nUsing WinHex in Incident Response\n---------------------------------\n\nArmed with the knowledge of its essential features, you can utilize WinHex in several ways during incident response:\n\n* Conducting an initial assessment or triage of a compromised system by analyzing logs, file metadata, and relevant artifacts.\n* Acquiring disk images of affected systems for further analysis or preservation of evidence.\n* Analyzing and recovering files that might have been deleted, tampered with, or inadvertently lost during the incident.\n* Examining memory for traces of malware or remnants of an attacker's activities.\n* Crafting custom scripts to automate repetitive tasks, ensuring a more efficient and systematic investigation.\n\nIn conclusion, WinHex is an indispensable and powerful utility for incident responders. Its diverse set of features makes it suitable for various tasks, from initial triage to in-depth forensic investigations. By incorporating WinHex into your incident response toolkit, you can enhance your ability to analyze, understand, and respond to security incidents effectively.", + "links": [] + }, + "wspNQPmqWRjKoFm6x_bVw": { + "title": "memdump", + "description": "Memdump is a handy tool designed for forensic analysis of a system's memory. The main purpose of Memdump is to extract valuable information from the RAM of a computer during a cyber security incident or investigation. 
By analyzing the memory dump, cyber security professionals can gain insights into the attacker's methods, identify malicious processes, and uncover potential evidence for digital forensics purposes.\n\nKey Features\n------------\n\n* **Memory Dumping**: Memdump allows you to create an image of the RAM of a computer, capturing the memory contents for later analysis.\n* **File Extraction**: With Memdump, you can extract executable files or any other file types from the memory dump to investigate potential malware or data theft.\n* **String Analysis**: Memdump can help you identify suspicious strings within the memory dump, which may provide crucial information about an ongoing attack or malware's behavior.\n* **Compatibility**: Memdump is compatible with various operating systems, including Windows, Linux, and macOS.\n\nExample Usage\n-------------\n\nFor a Windows environment, you can use Memdump as follows:\n\n memdump.exe -O output_file_path\n \n\nThis command will create a memory dump of the entire RAM of the system and save it to the specified output file path. You can then analyze this memory dump using specialized forensic tools to uncover valuable information about any cyber security incidents.\n\nRemember that Memdump should always be executed with administrator privileges so that it can access the entire memory space.\n\nConclusion\n----------\n\nMemdump is a powerful forensic tool that can greatly assist you in conducting an incident response or discovery process. By capturing and analyzing a system's memory, you can identify threats, gather evidence, and ultimately enhance your overall cyber security posture.", + "links": [] + }, + "_jJhL1RtaqHJmlcWrd-Ak": { + "title": "FTK Imager", + "description": "[FTK Imager](https://accessdata.com/product-download/digital-forensics/ftk-imager-version-3.1.1) is a popular and widely used free imaging tool developed by AccessData. 
It allows forensic analysts and IT professionals to create forensic images of digital devices and storage media. It is ideal for incident response and discovery as it helps in preserving and investigating digital evidence that is crucial for handling cyber security incidents.\n\nFTK Imager provides users with a variety of essential features, such as:\n\n* **Creating forensic images**: FTK Imager can create a forensically sound image of a computer's disk or other storage device in various formats, including raw (dd), E01, and AFF formats.\n \n* **Previewing data**: It allows analysts to preview data stored on any imaging source, such as a hard drive, even before creating a forensic image so that they can determine if the source's data is relevant to the investigation.\n \n* **Acquiring live data**: FTK Imager can help capture memory (RAM) of a live system for further investigation, allowing you to analyze system information such as running processes, network connections, and file handles.\n \n* **Examining file systems**: It offers the ability to browse and examine file systems, identify file types, view, and export files and directories without needing to mount the disk image.\n \n* **Hashing support**: FTK Imager supports hashing files and capturing evident files, ensuring the integrity of data and confirming that the original data has not been tampered with during investigation and analysis.\n \n* **Mounting images**: Users can mount forensic images, enabling them to view and analyze disk images using various third-party tools.\n \n\nTo use FTK Imager effectively in incident response:\n\n* Download and install FTK Imager from the [official website](https://accessdata.com/product-download/digital-forensics/ftk-imager-version-3.1.1).\n* Launch FTK Imager to create forensic images of digital devices or storage media by following the [user guide](https://ad-pdf.s3.amazonaws.com/Imager%20Lite%204_2%20Users%20Guide.pdf) and best practices.\n* Preview, examine, and 
export data as needed for further investigation and analysis.\n* Use FTK Imager along with other forensic tools and techniques to perform comprehensive digital investigations during incident response and discovery scenarios.\n\nIn summary, FTK Imager is a versatile tool that plays a critical role in incident response and discovery efforts by providing secure and forensically sound digital imaging capabilities, enabling investigators to preserve, analyze, and present digital evidence for successful cyber security investigations.", + "links": [] + }, + "bIwpjIoxSUZloxDuQNpMu": { + "title": "autopsy", + "description": "Autopsy is a versatile and powerful open-source digital forensics platform that is primarily used for incident response, cyber security investigations, and data recovery. As an investigator, you can utilize Autopsy to quickly and efficiently analyze a compromised system, extract crucial artifacts, and generate comprehensive reports. Integrated with The Sleuth Kit and other plug-ins, Autopsy allows examiners to automate tasks and dig deep into a system's structure to discover the root cause of an incident.\n\nFeatures of Autopsy\n-------------------\n\n* **Central Repository**: Autopsy features a central repository that allows analysts to store and manage case data, ingest modules, and collaborate with other team members. This functionality streamlines the investigation process with effective communication, data sharing, and collaborative analysis.\n \n* **Intuitive Interface**: Autopsy's graphical user interface (GUI) is user-friendly and well organized. 
It presents the results in a structured and easy-to-navigate layout, showcasing file systems, metadata, and text strings from binary files.\n \n* **File System Support**: Autopsy natively supports multiple file systems like FAT12, FAT16, FAT32, NTFS, ext2, ext3, ext4, UFS1, UFS2, and more, making it an ideal solution for analyzing different storage devices.\n \n* **Timeline Analysis**: The Timeline feature in Autopsy allows analysts to visualize and explore the chronological sequence of file system events. This can be essential in understanding the chain of events during an incident and identifying suspicious activities or anomalies.\n \n* **Keyword Search**: Autopsy's keyword search function is an invaluable tool for locating artifacts of interest using keywords or regular expressions. Investigators can identify incriminating documents, emails or other files by searching for specific terms, phrases, or patterns.\n \n* **Integration with Other Tools**: Autopsy's modular design enables seamless integration with various digital forensics tools, facilitating the analysis with specialized features and functions, such as Volatility for memory analysis or PLASO for log parsing.\n \n\nInstallation and Usage\n----------------------\n\nAutopsy is available for download from its official website, [www.autopsy.com/download/](https://www.autopsy.com/download/), and can be installed on Windows, Linux, and macOS platforms.\n\nOnce installed, creating a new case is easy. 
Follow these basic steps:\n\n* Launch Autopsy.\n* Click on the \"New Case\" button.\n* Provide a case name, case number, examiner, and case directory.\n* Add a data source (e.g., a disk image, local folder, or cloud storage) to the case.\n* Configure data ingestion options and select specific modules of interest.\n* Click on \"Finish\" to begin the data analysis.\n\nAs Autopsy completes its analysis, it will generate a comprehensive report that can be utilized for internal reporting, maintaining case records, or presenting evidence in legal proceedings.\n\nConclusion\n----------\n\nIn conclusion, Autopsy is a valuable tool for incident response and digital forensics professionals. By mastering its functions and capabilities, you can enhance your capabilities in incident investigations, data recovery, and threat attribution.", + "links": [] + }, + "XyaWZZ45axJMKXoWwsyFj": { + "title": "dig", + "description": "Dig, short for Domain Information Groper, is a command-line tool used to query Domain Name System (DNS) servers to obtain valuable information about DNS records. Dig is available on most Unix-based systems, including Linux and macOS, and can also be installed on Windows.\n\nAs part of your incident response toolkit, dig helps you to discover essential domain details such as domain's IP addresses, mail server details, name servers, and more. This can be crucial when tracking down a cyberattack or monitoring the DNS health of your own organization.\n\nInstallation\n------------\n\nFor Linux and macOS systems, dig is usually pre-installed as part of the BIND (Berkeley Internet Name Domain) package. 
To check if dig is installed, execute the following command:\n\n dig -v\n \n\nIf the command is not found, install it using your system's package manager:\n\n* For Debian-based systems (Debian, Ubuntu, etc.):\n \n sudo apt-get install dnsutils\n \n \n* For Red Hat-based systems (RHEL, CentOS, Fedora, etc.):\n \n sudo yum install bind-utils\n \n \n* For macOS:\n \n brew install bind\n \n \n* For Windows, download the BIND package from the [official website](https://www.isc.org/download/) and follow the installation instructions.\n \n\nBasic Usage\n-----------\n\nThe basic syntax for using dig is:\n\n dig [options] [name] [record type]\n \n\nWhere `options` can be various command-line flags, `name` is the domain name you want to query, and `record type` is the type of DNS record you want to fetch (e.g., A, MX, NS, TXT, etc.).\n\nHere are a few examples:\n\n* To query the IP addresses (A records) of [example.com](http://example.com):\n \n dig example.com A\n \n \n* To query the mail servers (MX records) of [example.com](http://example.com):\n \n dig example.com MX\n \n \n* To query the name servers (NS records) of [example.com](http://example.com):\n \n dig example.com NS\n \n \n\nBy default, dig queries your system's configured DNS servers, but you can also specify a custom DNS server as follows:\n\n dig @8.8.8.8 example.com A\n \n\nWhere `8.8.8.8` is the IP address of the custom DNS server (e.g., Google's Public DNS).\n\nAdvanced Usage\n--------------\n\nDig offers a variety of options for specifying query behavior, controlling output, and troubleshooting DNS issues.\n\n* To display only the answer section of the response:\n \n dig example.com A +short\n \n \n* To control the number of retries and timeout:\n \n dig example.com A +tries=2 +time=1\n \n \n* To query a specific DNSSEC (DNS Security Extensions) record:\n \n dig example.com DNSKEY\n \n \n* To show traceroute-like output for following the DNS delegation path:\n \n dig example.com A +trace\n \n \n\nFor a 
comprehensive list of options, consult the [dig man page](https://manpages.debian.org/stretch/dnsutils/dig.1.en.html) and the [official BIND documentation](https://bind9.readthedocs.io/en/latest/reference.html#dig).\n\nConclusion\n----------\n\nDig is a powerful and flexible tool for querying DNS information, making it an essential part of any cyber security professional's toolkit. Whether you're investigating a breach, monitoring domain health, or troubleshooting DNS issues, dig can help you discover critical information about domain names and their associated records.", + "links": [] + }, + "762Wf_Eh-3zq69CZZiIjR": { + "title": "tail", + "description": "Summary\n-------\n\n`tail` is a versatile command-line utility that enables users to display the last few lines of a text file, by default it shows the last 10 lines. In case of incident response and cyber security, it is a useful tool to quickly analyze logs or configuration files while investigating potential security breaches or malware infections in a system.\n\nUsage\n-----\n\nThe basic syntax of `tail` command is as follows:\n\n tail [options] [file(s)]\n \n\nWhere `options` are flags that could be used to modify the output and `[file(s)]` are the input file(s) for which you want to display the last few lines.\n\nExamples\n--------\n\n* Display the last 10 lines of a file:\n\n tail myfile.txt\n \n\n* You can change the number of lines to display using `-n` flag:\n\n tail -n 20 myfile.txt\n \n\n* To display the last 5 lines of multiple files:\n\n tail -n 5 file1.txt file2.txt\n \n\n* Another helpful flag is `-f` or `--follow`, which keeps the file open and outputs new lines as they are appended, making it ideal for monitoring live log files:\n\n tail -f /var/log/syslog\n \n\nApplication in Incident Response\n--------------------------------\n\nDuring an incident response, the `tail` command helps to quickly analyze logs and files to identify potential malicious activity or errors. 
You can use `tail` to peek at the most recent log entries at the early stages of an investigation, and once you have gathered enough information, you can move on to more advanced tools to analyze the data in depth.\n\nFor example:\n\n* Check the last 5 lines of the system log for any potential issues:\n\n tail -n 5 /var/log/syslog\n \n\n* Analyze the end of a large log file without loading the entire file:\n\n tail -n 100 /var/log/large-log-file.log\n \n\nIn summary, the `tail` command is a handy tool for preliminary analysis of log files that can save crucial time during an incident response. However, for more in-depth analysis, other tools and techniques should be employed.", + "links": [] + }, + "IXNGFF4sOFbQ_aND-ELK0": { + "title": "ipconfig", + "description": "`ipconfig` is a widely-used command-line utility for Windows operating systems that provides valuable information regarding a computer's network configuration. It can be extremely helpful for incident response and discovery tasks when investigating network-related issues, extracting crucial network details, or when trying to ascertain a machine's IP address.\n\nHow to Use Ipconfig\n-------------------\n\nTo utilize `ipconfig`, open the Command Prompt (CMD) by pressing Windows Key + R, type `cmd`, and hit Enter. Once the CMD is open, type `ipconfig` and press Enter. 
The following information will be displayed:\n\n* **IPv4 Address:** The assigned IP address for the local machine.\n* **Subnet Mask:** The mask used to separate the host addresses from the network addresses.\n* **Default Gateway:** The IP address of the immediate network gateway that the local machine communicates with.\n\nAdditional Ipconfig Commands\n----------------------------\n\n`ipconfig` offers supplementary commands that can provide useful information:\n\n* **ipconfig /all:** Provides detailed information about network configurations, including Host Name, DNS Servers, and DHCP configuration status.\n* **ipconfig /renew:** Renews the DHCP lease, giving a new IP address (if possible) from the DHCP server.\n* **ipconfig /release:** Releases the assigned IP address, disconnecting the machine from network access.\n* **ipconfig /flushdns:** Clears the DNS cache, removing all stored DNS entries.\n\nBenefits of Ipconfig for Incident Response and Discovery\n--------------------------------------------------------\n\n`ipconfig` is an efficient tool for Incident Response (IR) teams and network administrators to troubleshoot and uncover vital network details during a cyber-security event. 
Some notable benefits include:\n\n* **Discovering IP Addresses:** Identify the local machine's IP, Gateway, and DNS server addresses, which might be relevant during an investigation, or while assessing network exposure or communication with rogue servers.\n* **Identifying Configuration Issues:** Uncover misconfigured network settings or discrepancies between IP, DNS, or default gateway addresses, which could be signs of malicious activity.\n* **DNS Cache Investigation:** Examine DNS cache entries as evidence of possible communication to malicious domains, or clear the DNS cache to alleviate malware behavior.\n* **Troubleshooting Connection Problems:** Validate network connectivity directly, from the local host or with remote hosts through tools like `ping` or `tracert`, utilizing IP addresses from `ipconfig`.\n\n`Ipconfig` is an essential and user-friendly utility for gathering network configuration details, allowing IT professionals to respond efficiently, ensure security, and maintain the health of their computer systems during investigations or discovery tasks.", + "links": [] + }, + "jqWhR6oTyX6yolUBv71VC": { + "title": "Salting", + "description": "Salting is a crucial concept within the realm of cryptography. It is a technique employed to enhance the security of passwords or equivalent sensitive data by adding an extra layer of protection to safeguard them against hacking attempts, such as brute-force attacks or dictionary attacks.", + "links": [] + }, + "0UZmAECMnfioi-VeXcvg8": { + "title": "Hashing", + "description": "In this section, we will discuss the concept of _hashing_, an important cryptographic primitive, and its multiple applications in the realm of cyber security.\n\n**What is Hashing?**\n\nA _hash function_ is a mathematical algorithm that takes an input (or 'message') and returns a fixed-size string of bytes, usually in the form of a hexadecimal number. The output is called the _hash value_ or simply, the _hash_. 
Some characteristics of a good hash function are:\n\n* _Deterministic_: The same input will always result in the same hash output.\n* _Efficient_: The time taken to compute the hash should be as quick as possible.\n* _Avalanche Effect_: A tiny change in the input should result in a drastically different hash output.\n* _One-way Function_: It should be computationally infeasible to reverse-engineer the input from its hash output.\n* _Collision Resistance_: It should be extremely unlikely to find two different inputs that produce the same hash output.\n\n**Common Hashing Algorithms**\n\nThere are several widely used hashing algorithms with different strengths and weaknesses. Some of the most common ones include:\n\n* MD5 (Message Digest 5): Produces a 128-bit hash value. It is no longer considered secure due to vulnerability to collision attacks.\n* SHA-1 (Secure Hash Algorithm 1): Generates a 160-bit hash value. Like MD5, it is no longer considered secure due to collision attacks and is being phased out.\n* SHA-256 and SHA-512: Part of the SHA-2 family, SHA-256 produces a 256-bit hash value, while SHA-512 generates a 512-bit hash value. Both are widely adopted and considered secure.\n\n**Applications of Hashing**\n\nHashing is a versatile mechanism and serves many purposes in cyber security, such as:\n\n* _Data Integrity_: Hashing can be used to ensure that a file or piece of data hasn't been altered or tampered with. 
Comparing the hash value of the original and received data can determine if they match.\n \n* _Password Storage_: Storing users' passwords as hashes makes it difficult for attackers to obtain the plain-text passwords even if they gain access to the stored hashes.\n \n* _Digital Signatures_: Digital signatures often rely on cryptographic hash functions to verify the integrity and authenticity of a message or piece of data.\n \n* _Proof of Work_: Hash functions are employed in consensus algorithms like the one used in Bitcoin mining, as they can solve computational challenges.\n \n\nIn conclusion, hashing is a crucial technique in ensuring data integrity and maintaining security in various areas of cyber security. Understanding and adopting secure hashing algorithms is an essential skill for any cyber security professional.", + "links": [] + }, + "rmR6HJqEhHDgX55Xy5BAW": { + "title": "Key Exchange", + "description": "Key exchange, also known as key establishment, is a process where two parties establish a shared secret key that can be used to encrypt and decrypt messages between them. This key ensures secure communication, preventing eavesdropping and tampering by third parties. There are various key exchange protocols and algorithms to choose from, and in this section, we will go over some of the most important ones.\n\nSymmetric vs Asymmetric Encryption\n----------------------------------\n\nBefore diving into key exchange methods, let's briefly differentiate between symmetric and asymmetric encryption:\n\n* **Symmetric encryption** uses the same key for encryption and decryption. Examples include the Advanced Encryption Standard (AES) and Triple Data Encryption Algorithm (3DES). The main challenge in symmetric encryption is securely sharing the key between the involved parties.\n \n* **Asymmetric encryption**, also known as public-key cryptography, uses two different keys - a private key and a public key. 
The private key is kept secret, while the public key is shared freely. You can encrypt a message using the recipient's public key, and only the corresponding private key can decrypt it. Examples of asymmetric encryption algorithms include RSA and Elliptic Curve Cryptography (ECC).\n \n\nDiffie-Hellman Key Exchange\n---------------------------\n\nDiffie-Hellman (DH) is a cryptographic protocol that enables two parties to agree on a shared secret key without prior knowledge of each other. The key exchange happens over a public channel and is based on the mathematical properties of modular arithmetic and exponentiation.\n\nHere's an outline of how the DH protocol works:\n\n* Both parties agree on a large prime number, `p`, and a base, `g`, which are publicly known and can be used by all users in the network.\n* Each party generates a private secret key: Alice generates `a`, and Bob generates `b`. These keys should remain confidential.\n* They compute public values: Alice calculates `A = g^a mod p`, and Bob calculates `B = g^b mod p`. Both `A` and `B` are sent over the public channel.\n* The shared secret key is calculated using public values: Alice computes `s = B^a mod p`, and Bob computes `s = A^b mod p`. Both calculations result in the same value `s`, which can be used as the shared key for symmetric encryption.\n\nThe security of DH relies on the difficulty of the Discrete Logarithm Problem (DLP). However, DH is susceptible to man-in-the-middle (MITM) attacks, where an attacker can intercept the public key exchange process and provide their public keys instead.\n\nElliptic Curve Diffie-Hellman (ECDH)\n------------------------------------\n\nElliptic Curve Diffie-Hellman (ECDH) is a variant of the DH protocol that uses elliptic curve cryptography instead of modular arithmetic. 
ECDH provides similar security to DH but with shorter key lengths, which results in faster computations and reduced resource consumption.\n\nECDH works similarly to the standard DH protocol, but with elliptic curve operations:\n\n* Both parties agree on an elliptic curve and a base point `G` on the curve.\n* Each party generates a private secret key: Alice generates `a`, and Bob generates `b`.\n* They compute public values: Alice calculates the point `A = aG`, and Bob calculates the point `B = bG`. Both `A` and `B` are sent over the public channel.\n* The shared secret key is calculated using public values: Alice computes `s = aB`, and Bob computes `s = bA`. These calculations result in the same point `s`, which can be used as the shared key for symmetric encryption.\n\nPublic-Key Infrastructure and Key Exchange\n------------------------------------------\n\nIn practice, secure key exchange often involves the use of public-key infrastructure (PKI). A PKI system consists of a hierarchy of trusted authorities, known as Certificate Authorities (CAs), which issue and verify digital certificates. Certificates are used to authenticate public keys and their ownership, helping mitigate man-in-the-middle attacks.\n\nDuring key exchange, parties exchange certificates to verify each other's public keys. This process is often followed by a secure key exchange protocol like DH or ECDH to establish a shared secret key for symmetric encryption.\n\nIn conclusion, key exchange protocols play a crucial role in ensuring secure communication. Understanding the fundamentals of key exchange and its various mechanisms can greatly help in achieving robust cybersecurity.", + "links": [] + }, + "fxyJxrf3mnFTa3wXk1MCW": { + "title": "PKI", + "description": "Public Key Infrastructure, or PKI, is a system used to manage the distribution and identification of public encryption keys. 
It provides a framework for the creation, storage, and distribution of digital certificates, allowing users to exchange data securely through the use of a public and private cryptographic key pair provided by a Certificate Authority (CA).\n\nKey Components of PKI\n---------------------\n\n* **Certificate Authority (CA):** A trusted third-party organization that issues and manages digital certificates. The CA verifies the identity of entities and issues digital certificates attesting to that identity.\n \n* **Registration Authority (RA):** A subordinate authority that assists the CA in validating entities' identity before issuing digital certificates. The RA may also be involved in revoking certificates or managing key recovery.\n \n* **Digital Certificates:** Electronic documents containing the public key and other identifying information about the entity, along with a digital signature from the CA.\n \n* **Private and Public Key Pair:** Unique cryptographic keys generated together, where the public key is shared with others and the private key is kept secret by the owner. 
The public key encrypts data, and only the corresponding private key can decrypt it.\n \n\nBenefits of PKI\n---------------\n\n* **Secure Communication:** PKI enables secure communication across networks by encrypting data transmitted between parties, ensuring that only the intended recipient can read it.\n \n* **Authentication:** Digital certificates issued by a CA validate the identity of entities and their public keys, enabling trust between parties.\n \n* **Non-repudiation:** PKI ensures that a sender cannot deny sending a message, as their digital signature is unique and verified by their digital certificate.\n \n* **Integrity:** PKI confirms the integrity of messages by ensuring that they have not been tampered with during transmission.\n \n\nCommon Uses of PKI\n------------------\n\n* Secure email communication\n* Secure file transfer\n* Secure remote access and VPNs\n* Secure web browsing (HTTPS)\n* Digital signatures\n* Internet of Things (IoT) security\n\nIn summary, PKI plays a crucial role in establishing trust and secure communication between entities in the digital world. By using a system of trusted CAs and digital certificates, PKI provides a secure means of exchanging data, authentication, and maintaining the integrity of digital assets.", + "links": [] + }, + "7svh9qaaPp0Hz23yinIye": { + "title": "Private vs Public Keys", + "description": "Cryptography plays a vital role in securing cyber systems from unauthorized access and protecting sensitive information. One of the most popular methods used for ensuring data privacy and authentication is the concept of **Public-Key Cryptography**. This type of cryptography relies on two distinct keys: **Private Key** and **Public Key**. 
This section provides a brief summary of Private Keys and Public Keys, and highlights the differences between the two.\n\nPrivate Key\n-----------\n\nA Private Key, also known as a Secret Key, is a confidential cryptographic key that is uniquely associated with an individual or an organization. It should be kept secret and not revealed to anyone, except the authorized person who owns it. The Private Key is used for decrypting data that was encrypted using the corresponding Public Key, or for signing digital documents, proving the identity of the signer.\n\nKey characteristics of Private Keys:\n\n* Confidential and not shared with others\n* Used for decryption or digital signing\n* Loss or theft of Private Key can lead to data breaches and compromise of sensitive information\n\nPublic Key\n----------\n\nA Public Key is an openly available cryptographic key that is paired with a Private Key. Anyone can use the Public Key to encrypt data or to verify signatures, but only the person/organization with the corresponding Private Key can decrypt the encrypted data or create signatures. 
The Public Key can be distributed freely without compromising the security of the underlying cryptographic system.\n\nKey characteristics of Public Keys:\n\n* Publicly available and can be shared with anyone\n* Used for encryption or verifying digital signatures\n* Loss or theft of Public Key does not compromise sensitive information or communication security\n\nKey Differences\n---------------\n\nThe main differences between Private and Public keys are as follows:\n\n* Ownership: The Private Key is confidential and owned by a specific individual/organization, while the Public Key is owned by the same individual/organization but can be publicly distributed.\n* Accessibility: The Private Key is never shared or revealed to anyone, whereas the Public Key can be shared freely.\n* Purpose: The Private Key is used for decrypting data and creating digital signatures, while the Public Key is used for encrypting data and verifying digital signatures.\n* Security: Loss or theft of the Private Key can lead to serious security breaches while losing a Public Key does not compromise the security of the system.\n\nUnderstanding the roles and differences between Private and Public Keys is essential for ensuring the effective application of Public-Key Cryptography in securing cyber systems and protecting sensitive information.", + "links": [] + }, + "kxlg6rpfqqoBfmMMg3EkJ": { + "title": "Obfuscation", + "description": "Obfuscation is the practice of making something difficult to understand or find by altering or hiding its appearance or content. 
In the context of cyber security and cryptography, obfuscation refers to the process of making data, code, or communication less readable and harder to interpret or reverse engineer.\n\nWhy Use Obfuscation?\n--------------------\n\nThe primary purpose of obfuscation is to enhance security by:\n\n* Concealing sensitive information from unauthorized access or misuse.\n* Protecting intellectual property (such as proprietary algorithms and code).\n* Preventing or impeding reverse engineering, tampering, or analysis of code or data structures.\n\nObfuscation can complement other security measures such as encryption, authentication, and access control, but it should not be relied upon as the sole line of defense.\n\nTechniques for Obfuscation\n--------------------------\n\nThere are several techniques for obfuscating data or code, including:\n\n* **Identifier renaming**: This technique involves changing the names of variables, functions, or objects in code to make it harder for an attacker to understand their purpose or behavior.\n \n _Example: Renaming `processPayment()` to `a1b2c3()`._\n \n* **Control flow alteration**: This involves modifying the structure of code to make it difficult to follow or analyze, without affecting its functionality. This can include techniques such as inserting dummy loops or conditionals, or changing the order of instructions.\n \n _Example: Changing a straightforward loop into a series of nested loops with added conditional statements._\n \n* **Data encoding**: Transforming or encoding data can make it less legible and harder to extract or manipulate. This can involve encoding strings or data structures, or splitting data across multiple variables or containers.\n \n _Example: Encoding a string as a series of character codes or a base64-encoded binary string._\n \n* **Code encryption**: Encrypting portions of code or entire programs can prevent reverse engineering, tampering, or analysis. 
The code is decrypted at runtime, either by an interpreter or within the application itself.\n \n _Example: Using a cryptographically secure encryption algorithm, such as AES, to encrypt the main logic of a program._\n \n\nLimitations and Considerations\n------------------------------\n\nWhile obfuscation can be an effective deterrent against casual or unskilled attackers, it's important to recognize its limitations:\n\n* It is not foolproof: Determined and skilled attackers can often reverse-engineer or deobfuscate code or data if they are motivated enough.\n* Obfuscation can impact performance and maintainability: The added complexity and overhead can make code slower to execute and harder to maintain or update.\n* Relying solely on obfuscation is not recommended: It should be used as one layer in a comprehensive security strategy that includes encryption, authentication, and access control.\n\nIn conclusion, obfuscation can be a useful tool to improve the security posture of a system, but it should not be relied upon as the only means of protection.", + "links": [] + }, + "auR7fNyd77W2UA-PjXeJS": { + "title": "ATT&CK", + "description": "MITRE ATT&CK® stands for Adversarial Tactics, Techniques & Common Knowledge.\n\nMITRE ATT&CK documents various strategies, methods, and processes employed by adversaries at every stage of a cybersecurity incident, from the reconnaissance and strategizing phase to the final implementation of the attack.\n\nThe insights provided by MITRE ATT&CK can empower security professionals and teams to enhance their defensive strategies and responses against potential threats.\n\nThis framework was created by the non-profit organization MITRE Corporation and is continuously updated with contributions from cybersecurity experts worldwide.\n\nLearn more from the following resources:", + "links": [ + { + "title": "MITRE ATT&CK®", + "url": "https://attack.mitre.org/", + "type": "article" + }, + { + "title": "MITRE ATT&CK Framework", + 
"url": "https://www.youtube.com/watch?v=Yxv1suJYMI8", + "type": "video" + }, + { + "title": "Introduction To The MITRE ATT&CK Framework", + "url": "https://www.youtube.com/watch?v=LCec9K0aAkM", + "type": "video" + } + ] + }, + "7Bmp4x6gbvWMuVDdGRUGj": { + "title": "Kill Chain", + "description": "", + "links": [] + }, + "AY-hoPGnAZSd1ExaYX8LR": { + "title": "Diamond Model", + "description": "", + "links": [] + }, + "oRssaVG-K-JwlL6TAHhXw": { + "title": "ISO", + "description": "The **International Organization for Standardization (ISO)** is an international standard-setting body composed of representatives from various national standards organizations. It promotes worldwide proprietary, industrial, and commercial standards. In the domain of cyber security, there are several important ISO standards that help organizations to protect their sensitive data and to be resilient against cyber threats. In this guide, we will discuss some of the most notable standards related to cyber security:\n\nISO/IEC 27001 - Information Security Management\n-----------------------------------------------\n\nISO/IEC 27001 is a globally recognized standard that sets out requirements for an **Information Security Management System (ISMS)**. It provides a systematic approach to manage and secure sensitive data pertaining to an organization. 
By implementing this standard, organizations can demonstrate their commitment to maintaining the highest level of information security and reassure their customers, partners, and stakeholders.\n\nKey aspects of ISO/IEC 27001 include:\n\n* Establishing an information security policy\n* Conducting a risk assessment and managing risk\n* Implementing appropriate information security controls\n* Monitoring and reviewing the effectiveness of the ISMS\n* Continuously improving the ISMS\n\nISO/IEC 27032 - Cyber Security\n------------------------------\n\nISO/IEC 27032 is a guidance on **cybersecurity** that provides a framework for establishing and maintaining a secure cyberspace. This standard addresses various aspects such as information privacy, data integrity, and availability in the context of cyber risk. It covers guidelines for information sharing, incident management & coordination, and collaboration among stakeholders in cyberspace.\n\nISO/IEC 27035 - Incident Management\n-----------------------------------\n\nISO/IEC 27035 is a standard for **Information Security Incident Management**. It assists organizations in preparing for, identifying, and handling information security incidents. This standard covers the entire lifecycle of an incident from preparedness to lessons learned. By effectively managing incidents, organizations can minimize the adverse impact of incidents and improve their overall security posture.\n\nISO/IEC 27701 - Privacy Information Management\n----------------------------------------------\n\nISO/IEC 27701 is an extension to ISO/IEC 27001 and ISO/IEC 27002 that provides a framework for managing the **privacy of personal information**. This standard helps organizations to comply with data protection laws and regulations, such as the General Data Protection Regulation (GDPR). 
Key elements include data minimization, data subject access, data breach notification, and third-party management.\n\nIn conclusion, the ISO has established several robust cyber security standards that organizations can adopt to protect their sensitive data and ensure business continuity. By implementing these standards, you can mitigate risks associated with cyber attacks and ensure the overall security and compliance in your organization.", + "links": [] + }, + "SOkJUTd1NUKSwYMIprv4m": { + "title": "NIST", + "description": "[NIST](https://www.nist.gov/) is an agency under the U.S. Department of Commerce that develops and promotes measurement, standards, and technology. One of their primary responsibilities is the development of cyber security standards and guidelines, which help organizations improve their security posture by following the best practices and recommendations laid out by NIST.\n\nSome important NIST publications related to cyber security are:\n\nNIST Cybersecurity Framework\n----------------------------\n\nThe [NIST Cybersecurity Framework](https://www.nist.gov/cyberframework) provides a structure for managing cyber risks and helps organizations understand, communicate, and manage their cyber risks. 
It outlines five core functions:\n\n* Identify – Develop understanding of risks to systems, assets, data, and capabilities\n* Protect – Implement safeguards to ensure delivery of critical infrastructure services\n* Detect – Identify occurrence of a cybersecurity event in a timely manner\n* Respond – Take action on detected cybersecurity events to contain the impact\n* Recover – Maintain plans for resilience and restore capabilities or services impaired due to a cybersecurity event\n\nNIST Special Publication 800-53 (SP 800-53)\n-------------------------------------------\n\n[NIST SP 800-53](https://csrc.nist.gov/publications/detail/sp/800-53/rev-5/final) provides guidelines for selecting security and privacy controls for federal information systems as well as for systems that process federal information. This publication defines specific security and privacy controls that can be applied to address various risk factors and offers guidance on tailoring these controls for the unique needs of an organization.\n\nNIST Special Publication 800-171 (SP 800-171)\n---------------------------------------------\n\n[NIST SP 800-171](https://csrc.nist.gov/publications/detail/sp/800-171/rev-2/final) addresses security requirements for protecting controlled unclassified information (CUI) in non-federal information systems and organizations. It is particularly relevant for entities that work with federal agencies, as they must meet these requirements in order to manage and safeguard CUI effectively.\n\nNIST Risk Management Framework (RMF)\n------------------------------------\n\nThe [NIST Risk Management Framework](https://csrc.nist.gov/projects/risk-management/) provides a structured process for organizations to manage security and privacy risks using NIST guidelines and standards. 
This framework consists of six steps:\n\n* Categorize Information Systems\n* Select Security Controls\n* Implement Security Controls\n* Assess Security Controls\n* Authorize Information Systems\n* Monitor Security Controls\n\nBy following NIST cyber security standards, organizations can reduce their vulnerability to cyber-attacks and enhance their overall security posture.", + "links": [] + }, + "fjEdufrZAfW4Rl6yDU8Hk": { + "title": "RMF", + "description": "The **Risk Management Framework (RMF)** is a comprehensive, flexible approach for managing cybersecurity risks in an organization. It provides a structured process to identify, assess, and manage risks associated with IT systems, networks, and data. Developed by the National Institute of Standards and Technology (NIST), the RMF is widely adopted by various government and private sector organizations.\n\nKey Components\n--------------\n\nThe RMF consists of six steps, which are continuously repeated to ensure the continuous monitoring and improvement of an organization's cybersecurity posture:\n\n* **Categorize** - Classify the information system and its information based on their impact levels (e.g., low, moderate, or high).\n* **Select** - Choose appropriate security controls from the NIST SP 800-53 catalog based on the system's categorization.\n* **Implement** - Apply the chosen security controls to the IT system and document the configuration settings and implementation methods.\n* **Assess** - Determine the effectiveness of the implemented security controls by testing and reviewing their performance against established baselines.\n* **Authorize** - Grant authorization to operate the IT system, based on the residual risks identified during the assessment phase, and document the accepted risks.\n* **Monitor** - Regularly review and update the security controls to address any changes in the IT system or environment or to respond to newly identified threats.\n\nBenefits of RMF\n---------------\n\n* **Clear and 
consistent process**: RMF provides a systematic and repeatable process for managing cybersecurity risks.\n* **Flexibility**: It can be tailored to an organization's unique requirements and risk tolerance levels.\n* **Standardization**: RMF facilitates the adoption of standardized security controls and risk management practices across the organization.\n* **Accountability**: It promotes transparency and clear assignment of responsibilities for managing risks.\n* **Continuous improvement**: By monitoring and revisiting the risks and security controls, organizations can ensure that their cybersecurity posture remains effective and up-to-date.\n\nIn summary, the Risk Management Framework (RMF) is a vital component of an organization's cybersecurity strategy. By following the structured and continuous process outlined in the RMF, organizations can effectively manage the cybersecurity risks they face and maintain a robust and resilient cybersecurity posture.", + "links": [] + }, + "sSihnptkoEqUsHjDpckhG": { + "title": "CIS", + "description": "The **Center for Internet Security (CIS)** is a non-profit organization that focuses on enhancing the cybersecurity posture of individuals, organizations, and governments around the world. CIS offers various tools, best practices, guidelines, and frameworks that help in defending against common cyber threats.\n\nCIS Critical Security Controls\n------------------------------\n\nOne of the most significant contributions of CIS is the **CIS Critical Security Controls (CSC)**, which are a set of prioritized actions that aim to improve cyber defense. 
These controls have been developed by a community of IT security experts and are regularly updated to remain relevant in the ever-evolving threat landscape.\n\nThe CIS Critical Security Controls are divided into three categories:\n\n* Basic Controls: Foundational security measures that every organization should implement.\n* Foundational Controls: Additional security measures providing a more robust defense.\n* Organizational Controls: Governance and management-related processes, ensuring the continuity and effectiveness of the security program.\n\nThe following are the key objectives of implementing CIS Critical Security Controls:\n\n* Strengthen the security posture of an organization.\n* Protect sensitive information and valuable assets.\n* Identify and prioritize the most critical vulnerabilities.\n* Reduce the attack surface and risks associated with cyber threats.\n\nCIS Benchmarks\n--------------\n\nCIS also provides **CIS Benchmarks**, which are a set of configuration guidelines for various technologies, including operating systems, cloud providers, and applications. These benchmarks offer practical guidance for securing systems and improving overall cybersecurity posture.\n\nCIS Benchmarks provide the following benefits:\n\n* Improve system security by reducing the attack surface.\n* Assist in meeting compliance requirements such as HIPAA, PCI DSS, and GDPR.\n* Enable organizations to adopt best practices in configuration management.\n* Facilitate audit preparation and maintaining system documentation.\n\nIn summary, the Center for Internet Security (CIS) offers valuable resources that can help organizations bolster their security posture. The CIS Critical Security Controls and CIS Benchmarks are practical tools that provide guidance on implementing security measures to mitigate cyber threats effectively. 
By following these guidelines, organizations can improve their resilience and better protect themselves in the rapidly evolving digital landscape.", + "links": [] + }, + "HjfgaSEZjW9BOXy_Ixzkk": { + "title": "CSF", + "description": "Cybersecurity Framework (CSF) Summary\n-------------------------------------\n\nThe Cybersecurity Framework (CSF) is a set of guidelines aimed at helping organizations better protect their critical infrastructure from cyber threats. Developed by the National Institute of Standards and Technology (NIST), this voluntary framework provides a flexible, risk-based approach to managing cybersecurity risks.\n\nKey Components of CSF\n---------------------\n\nCSF comprises three key components:\n\n* **Core** - Consists of five functions, each representing a high-level cybersecurity activity:\n \n * Identify: Understand the organization's cybersecurity risks.\n * Protect: Implement safeguards to protect the critical infrastructure.\n * Detect: Identify the occurrence of a potential cybersecurity event.\n * Respond: Develop and implement appropriate actions to address detected cybersecurity events.\n * Recover: Implement plans to restore systems and services after a cybersecurity incident.\n* **Tiers** - Provide context for organizations to consider the robustness of their cybersecurity program:\n \n * Tier 1: Partial – Minimal cybersecurity risk management practices.\n * Tier 2: Risk Informed – Risk management practices in place, but not consistently applied.\n * Tier 3: Repeatable – Risk management practices are consistent across the organization.\n * Tier 4: Adaptive – Proactive approach to managing cybersecurity risks.\n* **Profiles** - Organizations create profiles to align their cybersecurity activities with their organizational goals, risk tolerance, and resources. 
A target profile represents desired outcomes, whereas a current profile reflects the current state of cybersecurity programs.\n \n\nBenefits of Implementing CSF\n----------------------------\n\n* Enhanced understanding of cybersecurity risks and corresponding management strategies within an organization.\n* Improved ability to prioritize cybersecurity investments based on risk assessments.\n* Strengthened communication between different departments and stakeholders regarding cybersecurity expectations and progress.\n* Compliance with industry standards and guidelines, including support for organizations subject to regulatory requirements.\n\nCSF offers organizations a structured approach to improving their cybersecurity posture. By following this framework, organizations can manage their cybersecurity risks more effectively, create a stronger defense against cyberattacks, and maintain the resilience of their critical infrastructure.", + "links": [] + }, + "c2kY3wZVFKZYxMARhLIwO": { + "title": "SIEM", + "description": "SIEM, short for Security Information and Event Manager, is a term used to describe tools that greatly increases visibility into a network or system. It does this by monitoring, filtering, collecting, normalizing, and correlating vast amounts of data such as logs, and neatly presents it via an interface/dashboard.\n\nOrganizations leverage SIEMs to monitor and thus identify, protect, and respond to potential threats in their environment.\n\nFor hands-on experience, you should consider setting up a SIEM in your own environment. A common stack widely used for various purposes across the industry is the ELK-stack.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Security 101: What is a SIEM? 
- Microsoft", + "url": "https://www.microsoft.com/security/business/security-101/what-is-siem", + "type": "article" + }, + { + "title": "Using the ELK stack for SIEM", + "url": "https://logz.io/blog/elk-siem/", + "type": "article" + }, + { + "title": "Build a powerful home SIEM", + "url": "https://www.youtube.com/watch?v=2XLzMb9oZBI", + "type": "video" + } + ] + }, + "i0ulrA-GJrNhIVmzdWDrn": { + "title": "SOAR", + "description": "", + "links": [] + }, + "zR6djXnfTSFVEfvJonQjf": { + "title": "ParrotOS", + "description": "", + "links": [] + }, + "w6wXkoLrv0_d-Ah0txUHd": { + "title": "Kali Linux", + "description": "", + "links": [] + }, + "10qbxX8DCrfyH7tgYexxQ": { + "title": "LOLBAS", + "description": "**LoLBAS** stands for **Living off the Land Binaries and Scripts**. It is a collection of tools, utilities, and scripts, often built-in within an operating system, that attackers exploit for unintended purposes. These tools can assist the adversaries in achieving their objectives without the need to install any additional software, thus avoiding detection by many security solutions.\n\nIn this section, we will explore the concept and significance of LoLBAS, and the challenges they present in the context of cyber security.\n\nWhat is LoLBAS?\n---------------\n\nLoLBAS are legitimate tools, binaries, and scripts that are already present in a system. These may be default OS utilities, like PowerShell or Command Prompt, or commonly installed applications, such as Java or Python. 
Adversaries utilize these tools to perform malicious activities, as they blend into the environment and are less likely to raise any alarms.\n\nSome examples of LoLBAS include:\n\n* PowerShell: Used for executing commands and scripts for various administrative functions.\n* Cscript and Wscript: Used for executing VBScript and JScript files.\n* Certutil: Used for updating certificate store but can also be leveraged to download files from the internet.\n\nWhy LoLBAS are popular among adversaries?\n-----------------------------------------\n\nThere are several reasons why adversaries choose to use LoLBAS for their malicious purposes:\n\n* **No additional software required**: As these tools are already a part of the target system, there is no need to install new software that could potentially be detected.\n* **Ease of use**: Many LoLBAS provide powerful capabilities without requiring complex coding. As a result, adversaries can swiftly implement and execute tasks using them.\n* **Masquerading as legitimate actions**: Since LoLBAS are typically used for legitimate purposes, suspicious activities using these tools can blend in with regular traffic, making it difficult to identify and detect.\n\nChallenges posed by LoLBAS\n--------------------------\n\nUtilizing LoLBAS presents unique challenges in cyber security due to the following reasons:\n\n* **Difficulty in detection**: Identifying and differentiating between malicious and legitimate uses of these tools is a challenging task.\n* **False positives**: Blocking, limiting, or monitoring the usage of LoLBAS frequently leads to false positives, as legitimate users might also rely on these tools.\n\nSecuring against LoLBAS attacks\n-------------------------------\n\nTo protect against LoLBAS-based attacks, organizations should consider taking the following steps:\n\n* **Monitor behavior**: Establish baselines of normal system behavior and monitor for deviations, which could suggest malicious use of LoLBAS.\n* **Least 
privilege principle**: Apply the principle of least privilege by limiting user permissions, reducing the potential attack surface.\n* **Harden systems**: Remove or disable unnecessary tools and applications that could be exploited by adversaries.\n* **Educate users**: Train users on the risks and signs of LoLBAS usage and encourage them to report suspicious activity.\n* **Employ advanced security solutions**: Use technologies like Endpoint Detection and Response (EDR) and behavioral analytics to detect abnormal patterns that could be associated with LoLBAS abuse.\n\nConclusion\n----------\n\nLoLBAS present a significant challenge to cyber security, as they blend in with legitimate system activities. However, overcoming this challenge is possible through a combination of proactive monitoring, system hardening, and user education.\n\nEnsure you are well prepared to identify and mitigate LoLBAS attacks by following the recommendations provided in this guide. Stay vigilant and stay secure!", + "links": [ + { + "title": "LOLBAS project", + "url": "https://lolbas-project.github.io/#", + "type": "article" + } + ] + }, + "KbFwL--xF-eYjGy8PZdrM": { + "title": "Event Logs", + "description": "", + "links": [] + }, + "7oFwRkmoZom8exMDtMslX": { + "title": "syslogs", + "description": "", + "links": [] + }, + "xXz-SwvXA2cLfdCd-hLtW": { + "title": "netflow", + "description": "", + "links": [] + }, + "TIxEkfBrN6EXQ3IKP1B7u": { + "title": "Packet Captures", + "description": "", + "links": [] + }, + "np0PwKy-EvIa_f_LC6Eem": { + "title": "Firewall Logs", + "description": "", + "links": [] + }, + "OAukNfV5T0KTnIF9jKYRF": { + "title": "MAC-based", + "description": "_Mandatory Access Control (MAC)_ is a robust security model when it comes to hardening, as it enforces strict policies on operating systems and applications regarding system access. 
In MAC-based hardening, the end-users are not allowed to modify access controls on your system.\n\nHow MAC-based Hardening Works\n-----------------------------\n\nTypical MAC mechanisms work based on predefined security attributes or labels. These labels determine access permissions and are integrated within the system to classify data, resources, and users. Once these labels are in place, the operating system or a trusted security kernel rigorously enforces the constraints on how they access data.\n\nBenefits of MAC-Based Hardening\n-------------------------------\n\nMAC-based hardening offers numerous benefits for organizations seeking to improve their cybersecurity posture:\n\n* **Enforced Security Policies**: MAC policies can be pre-configured in accordance with your organization's security requirements, ensuring consistency on all systems.\n* **Limited Access**: Users have limited access to resources, which reduces the potential for insider threats and accidental leaks of sensitive data.\n* **Protection of Sensitive Data**: By preventing unauthorized users from accessing sensitive data, MAC-based hardening helps protect against data breaches and other cybersecurity risks.\n* **Auditing and Compliance**: MAC-based hardening mechanisms help facilitate audits and compliance with industry regulations.\n\nPopular MAC-based Models\n------------------------\n\nThere are various MAC models implemented in modern software systems. 
Some of the most popular models include:\n\n* **Bell-LaPadula (BLP) Model**: Designed for confidentiality, the BLP Model enforces the \"no read up, no write down\" rule, meaning that users may only read data at the same or lower levels of sensitivity, while only allowing data to be written to the same or higher levels of sensitivity.\n* **Biba Model**: Focusing on integrity, the Biba Model enforces the \"no write up, no read down\" rule, which works opposite to BLP Model.\n* **Clark-Wilson Model**: The Clark-Wilson Model emphasizes well-formed transactions, separation of duties, and certification processes to maintain data integrity and confidentiality.\n\nImplementing MAC-Based Hardening\n--------------------------------\n\nTo implement MAC-based hardening, it's important to follow these general steps:\n\n* **Establish Security Policies**: Define clear policies and guidelines, including security labels, for the various data classifications, users, and resources.\n* **Select an Appropriate MAC Model**: Choose a MAC model suitable for your organization's needs and implement it across your systems.\n* **Train Staff**: Provide training to your staff to ensure understanding and adherence to your organization's MAC-based policies.\n* **Monitor and Audit**: Continually monitor the system for deviations from the MAC policies and perform periodic audits to verify their enforcement.\n\nIn summary, MAC-based hardening offers robust access controls by enforcing strict policies in accordance with your organization's security requirements. In doing so, it reduces the potential for unauthorized access to data and resources, ultimately enhancing your cybersecurity posture.", + "links": [] + }, + "6oAzYfwsHQYNVbi7c2Tly": { + "title": "NAC-based", + "description": "Network Access Control (NAC) based hardening is a crucial component in enhancing the security of your network infrastructure. 
NAC provides organizations with the ability to control and manage access to the network resources, ensuring that only authorized users and devices can connect to the network. It plays a vital role in reducing the attack surface and preventing unauthorized access to sensitive data and resources.\n\nKey Features of NAC-Based Hardening\n-----------------------------------\n\n* **Authentication and Authorization:** NAC-based hardening ensures that users and devices connecting to the network are properly authenticated and have been granted appropriate access permissions. This includes the use of strong passwords, multi-factor authentication (MFA), and enforcing access control policies.\n \n* **Endpoint Health Checks:** NAC solutions continuously monitor the health and compliance of endpoints, such as whether anti-virus software and security patches are up to date. If a device is found to be non-compliant, it can be automatically quarantined or disconnected from the network, thus preventing the spread of threats.\n \n* **Real-Time Visibility and Control:** NAC provides real-time visibility into the devices connected to your network, allowing you to identify and control risks proactively. This includes monitoring for unauthorized devices, unusual behavior, or known security gaps.\n \n* **Device Profiling:** NAC-based hardening can automatically identify and classify devices connected to the network, making it easier to enforce access control policies based on device type and ownership.\n \n* **Policy Enforcement:** NAC solutions enforce granular access policies for users and devices, reducing the attack surface and limiting the potential damage of a security breach. 
Policies can be based on factors such as user role, device type, and location.\n \n\nNAC Best Practices\n------------------\n\nTo get the most out of a NAC-based hardening approach, here are some best practices to consider:\n\n* **Develop a Comprehensive Access Control Policy:** Clearly define the roles, responsibilities, and access permissions within your organization, ensuring that users have the least privilege required to perform their job functions.\n* **Regularly Review and Update Policies:** As your organization evolves, so should your NAC policies. Regularly review and update policies to maintain alignment with organizational changes.\n* **Educate Users:** Educate end-users about the importance of security and their role in maintaining a secure network. Offer training on topics such as password management, avoiding phishing attacks, and identifying social engineering attempts.\n* **Ensure Comprehensive Coverage:** Ensure that your NAC solution covers all entry points to your network, including remote access, wireless networks, and guest access.\n* **Monitor and Respond to NAC Alerts:** NAC solutions generate alerts when suspicious activity is detected, such as an unauthorized device trying to connect to the network. Make sure you have a process in place to respond to these alerts in a timely manner.\n\nBy implementing NAC-based hardening in your cybersecurity strategy, you protect your organization from threats and maintain secure access to critical resources.", + "links": [] + }, + "W7bcydXdwlubXF2PHKOuq": { + "title": "Port Blocking", + "description": "Port blocking is an essential practice in hardening the security of your network and devices. It involves restricting, filtering, or entirely denying access to specific network ports to minimize exposure to potential cyber threats. 
By limiting access to certain ports, you can effectively safeguard your systems against unauthorized access and reduce the likelihood of security breaches.\n\nWhy is Port Blocking Important?\n-------------------------------\n\n* **Reducing attack surface**: Every open port presents a potential entry point for attackers. By blocking unused or unnecessary ports, you shrink the attack surface of your network.\n* **Securing sensitive data**: Limiting access to specific ports can help protect sensitive data by ensuring that only authorized individuals can access certain network services.\n* **Compliance with regulations**: Various regulations such as PCI DSS, HIPAA, and GDPR require organizations to have a secure data protection infrastructure, which includes controlling access to your network.\n\nHow to Implement Port Blocking\n------------------------------\n\nTo implement port blocking, consider the following steps:\n\n* **Identifying necessary ports**: Analyze your network to determine which ports need to remain open for key services and functions, and which can be safely blocked.\n* **Creating a port blocking policy**: Develop a policy that defines which ports should be blocked and why, along with the rationale behind permitting access to specific ports.\n* **Using firewall rules**: Configure the firewall on your devices and network infrastructure to block the ports deemed appropriate by your policy.\n* **Testing**: Test your configuration to ensure that only the necessary ports are accessible, and the blocked ports are indeed blocked.\n* **Monitoring and maintaining**: Regularly monitor and review open ports for any possible changes, and update your port blocking policy and configurations as needed.\n\nRemember, implementing port blocking is just one piece of a comprehensive cybersecurity strategy. 
Be sure to consider additional hardening concepts and best practices to ensure your network remains secure.", + "links": [] + }, + "FxuMJmDoDkIsPFp2iocFg": { + "title": "Group Policy", + "description": "_Group Policy_ is a feature in Windows operating systems that enables administrators to define and manage configurations, settings, and security policies for various aspects of the users and devices in a network. This capability helps you to establish and maintain a consistent and secure environment, which is crucial for organizations of all sizes.\n\nHow Group Policy Works\n----------------------\n\nGroup Policy works by maintaining a hierarchy of _Group Policy Objects_ (GPOs), which contain multiple policy settings. GPOs can be linked to different levels of the Active Directory (AD) structure, such as domain, site, and organizational unit (OU) levels. By linking GPOs to specific levels, you can create an environment in which different settings are applied to different groups of users and computers, depending on their location in the AD structure.\n\nWhen a user logs in or a computer starts up, the relevant GPOs from the AD structure get evaluated to determine the final policy settings. GPOs are processed in a specific order — local, site, domain, and OUs, with the latter having the highest priority. This order ensures that you can have a baseline set of policies at the domain level, with more specific policies applied at the OU level, as needed.\n\nCommon Group Policy Scenarios\n-----------------------------\n\nHere are some typical scenarios in which Group Policy can be utilized to enforce security policies and settings:\n\n* **Password Policies**: You can use Group Policy to define minimum password length, complexity requirements, password history, and maximum password age for all users within the domain. 
This ensures a consistent level of password security across the organization.\n \n* **Account Lockout Policies**: Group Policy allows you to specify conditions under which user accounts will be locked out, such as after a specific number of failed login attempts. This helps to thwart brute-force attacks.\n \n* **Software Deployment**: Deploy and manage the installation of software packages and security updates across the entire network. Ensure that all devices are running the latest, most secure software versions.\n \n* **Device Security**: Apply configurations to enforce encryption, firewall settings, and other security-related device settings to protect your organization's network and sensitive data.\n \n* **User Rights Assignment**: Control various user rights, such as the ability to log in locally or remotely, access this computer from the network, or shut down the system.\n \n* **Restricted Groups**: Manage group memberships, including local administrator groups, to ensure that only authorized users have elevated privileges on targeted devices.\n \n\nBy understanding and leveraging the capabilities of Group Policy, you can establish a robust and secure environment that meets your organization's specific requirements. 
Keep in mind that maintaining a well-documented, granular, and least-privileged approach to Group Policy settings will help ensure a manageable and resilient security posture.", + "links": [] + }, + "8JM95sonFUhZCdaynUA_M": { + "title": "ACLs", + "description": "Access Control Lists (ACLs) act as an essential part of an organization's security infrastructure by helping to manage access rights to resources and maintain security between users, groups, and systems.\n\nIn this section, we will discuss the following:\n\n* What are Access Control Lists\n* Types of ACLs\n* How to implement and administer ACLs\n\nWhat are Access Control Lists\n-----------------------------\n\nAccess Control Lists are rule sets that define which user, group, or system has access to specific resources and determine what type of access they have (e.g., read or write). ACLs act as a barrier to prevent unauthorized access to sensitive data and systems; this can help maintain confidentiality, integrity, and availability of your organization's critical assets.\n\nTypes of ACLs\n-------------\n\nThere are two primary types of ACLs: Discretionary and Mandatory.\n\n* **Discretionary Access Control Lists (DACLs)** \n DACLs allow the owner of a resource to determine who can gain access to the resource, and the level of access they can have. For example, a user or a group of users may have read access rights to a particular file, whereas another group may have full control over the file.\n \n* **Mandatory Access Control Lists (MACLs)** \n MACLs rely on predefined security labels or classifications to enforce access control. In this case, resources are assigned security labels, and users or systems are given security clearances. 
Access is granted only if the user's security clearance level matches the resource label.\n \n\nImplementing and Administering ACLs\n-----------------------------------\n\nHere are some best practices you can follow when implementing and administering Access Control Lists:\n\n* **Define clear access policies**: Establish clear rules and guidelines for accessing resources, such as who can access specific resources and what type of access they can have.\n \n* **Use Role-Based Access Control (RBAC)**: Assign permissions to roles instead of individual users. This will help simplify the ACL management process.\n \n* **Regular audits and reviews**: Periodically review and update the ACLs to ensure that access permissions are aligned with business requirements and security policies.\n \n* **Apply the principle of least privilege**: Grant users the minimum privileges they need to perform their tasks.\n \n* **Maintain a change management process**: Document all changes to ACLs, including the date of change, the reason for the change, and the individual responsible for executing the change.\n \n\nRemember that a well-implemented and maintained ACL system can significantly reduce the risks associated with unauthorized access to your organization's critical assets.", + "links": [] + }, + "oFgyQYL3Ws-l7B5AF-bTR": { + "title": "Sinkholes", + "description": "A **sinkhole** is a security mechanism employed in cybersecurity to redirect and isolate malicious traffic, primarily aimed at protecting networks from Distributed Denial of Service (DDoS) attacks and botnets. The main principle behind sinkholes is to create a \"black hole\" where malicious traffic is directed and monitored, allowing other network operations to run unaffected.\n\nHow Sinkholes Work\n------------------\n\n* **Network redirection:** When an attacker attempts to target a network, they often rely on multiple sources of traffic or requests. 
Sinkholes work by redirecting this incoming malicious traffic to a separate, isolated server or IP address, known as the sinkhole server.\n \n* **Traffic analysis:** Once the malicious traffic has been redirected, the sinkhole provides an opportunity for cybersecurity professionals to analyze the incoming data. This analysis can help determine the nature of the attack and potentially trace it back to its origin.\n \n* **Prevention and mitigation:** By redirecting malicious traffic away from the original target, sinkholes prevent or minimize the effects of DDoS attacks or botnet activities on a network. Additionally, information gathered from the sinkhole can aid in the development of new security measures to prevent future attacks.\n \n\nTypes of Sinkholes\n------------------\n\nThere are mainly two types of sinkholes used in cybersecurity: Passive Sinkholes and Active Sinkholes.\n\n* **Passive Sinkholes:** In a passive sinkhole, the sinkhole server is configured to passively intercept and log any malicious traffic directed towards it. 
This allows for analysis of attack patterns, data payloads, and other useful information without taking any direct action.\n \n* **Active Sinkholes:** An active sinkhole, on the other hand, goes one step further by not only intercepting and logging malicious traffic but also responding to the source, potentially disrupting the attacker's operations.\n \n\nBenefits of Sinkholes\n---------------------\n\n* **DDoS prevention:** By redirecting and isolating malicious traffic, sinkholes can effectively prevent or reduce the impact of DDoS attacks on a network.\n* **Attack analysis:** The isolated environment provided by sinkholes enables security professionals to study attack patterns and develop strategies to counter them.\n* **Botnet disruption:** Sinkholes can disrupt the communication between botnets and their command and control (C&C) servers, limiting their ability to carry out coordinated attacks.\n\nLimitations of Sinkholes\n------------------------\n\n* **Resource-intensive:** Sinkhole servers require dedicated resources to handle the influx of traffic and may need regular updating and maintenance.\n* **Possibility of collateral damage:** In some cases, sinkhole servers may inadvertently redirect or block legitimate traffic, leading to disruptions in network operations.\n\nConclusion\n----------\n\nSinkholes are valuable tools in the cybersecurity arsenal, helping to prevent and mitigate the effects of DDoS attacks and botnets. By isolating malicious traffic, they not only minimize the impact of attacks on networks but also provide valuable insights into attack patterns, contributing to the development of more robust cybersecurity measures.", + "links": [] + }, + "e-MDyUR3GEv-e4Qsx_5vV": { + "title": "Patching", + "description": "Patching is the process of updating, modifying, or repairing software or systems by applying fixes, also known as patches. Patches are designed to address vulnerabilities, fix bugs, or improve the overall security of a system. 
Regular patching is an essential component of any cyber security strategy.\n\nImportance of Patching\n----------------------\n\n* **Fix security vulnerabilities** - Attackers are constantly on the lookout for unpatched systems, which makes patching a critical step in securing your environment. Patches help fix any security weaknesses that the software developers have identified.\n \n* **Enhance system stability** - Patches often include improvements to the software's codebase or configuration, enhancing the overall performance and stability of the system.\n \n* **Improve software functionality** - Patches can add new features and update existing ones, ensuring that your software remains up-to-date with the latest technology advancements.\n \n\nPatch Management\n----------------\n\nTo make patching effective, organizations need to establish a well-structured patch management process. A good patch management process includes:\n\n* **Inventory** - Maintaining a comprehensive inventory of all devices and software within your organization allows you to detect the need for patches and implement them in a timely manner.\n \n* **Risk assessment** - Evaluate the risk associated with the vulnerabilities addressed by a patch. This will help prioritize which patches should be applied first.\n \n* **Patch testing** - Always test patches in a controlled environment before deploying them to your production systems. 
This will help identify any potential compatibility or performance issues that the patch might cause.\n \n* **Deployment** - Ensure that patches are deployed across your organization's systems in a timely and consistent manner, following a predefined schedule.\n \n* **Monitoring and reporting** - Establishing a mechanism for monitoring and reporting on the status of patching activities ensures that your organization remains compliant with relevant regulations and best practices.\n \n* **Patch rollback** - In case a patch causes unexpected issues or conflicts, having a plan for rolling back patches is essential. This may include creating backups and having a process for quickly restoring systems to their pre-patch state.\n \n\nBy integrating patching into your organization's cyber security strategy, you can significantly reduce the attack surface and protect your critical assets from cyber threats. Regular patching, combined with other hardening concepts and best practices, ensures a strong and resilient cyber security posture.", + "links": [] + }, + "UF3BV1sEEOrqh5ilnfM1B": { + "title": "Jump Server", + "description": "A **jump server**, also known as a **bastion host** or **jump host**, is a critical security component in many network architectures. It is a dedicated, locked-down, and secure server that sits within a protected network, and provides a controlled access point for users and administrators to access specific components within the system. This intermediate server acts as a bridge between untrusted networks and the internal privileged systems, thereby reducing the attack surface and securing the environment.\n\nKey Features\n------------\n\n* **Isolation**: The primary function of the jump server is to provide a level of isolation between the outside world and critical network infrastructure. 
Users must first authenticate on the jump server before accessing the target systems.\n* **Access Control**: Jump servers enforce strict access control policies by allowing only authorized users and administrators to access the privileged systems.\n* **Monitoring**: All activities on the jump server are logged and monitored, creating an audit trail for any suspicious activity or attempts at unauthorized access.\n* **Patching and Updating**: Jump servers are kept up-to-date with the latest security patches and updates, ensuring that they are resilient to new vulnerabilities and attacks.\n\nBest Practices for Implementing a Jump Server\n---------------------------------------------\n\n* **Implement Multi-Factor Authentication (MFA)**: Require multiple forms of authentication to access the jump server. This reduces the risk of unauthorized access through stolen or weak credentials.\n* **Restrict User Privileges**: Limit user privileges on the jump server to minimize the potential for unauthorized actions. Users should only be granted the minimum permissions needed to perform their tasks.\n* **Harden the Operating System**: Configure the jump server's operating system with security best practices in mind. This includes disabling unnecessary services, applying least privilege principles, and regularly updating the system with the latest patches.\n* **Employ Network Segmentation**: Deploy the jump server in a separate network segment from the rest of the environment. Implement strong firewall rules and access control lists (ACLs) to control traffic between the segments.\n* **Monitor and Audit**: Regularly monitor and review the logs and activity on the jump server to detect and investigate security incidents. Enable security alerts and notifications for suspicious activities.\n\nIn summary, a jump server is a crucial security component that helps protect sensitive network environments by providing isolation, access control, and monitoring. 
By properly configuring and managing a jump server, organizations can significantly reduce the risk of unauthorized access and potential security breaches.", + "links": [] + }, + "LEgJtu1GZKOtoAXyOGWLE": { + "title": "Endpoint Security", + "description": "Endpoint security refers to the practice of protecting individual devices, or \"endpoints\", that connect to your organization's network from potential cyber threats. These devices include desktop computers, laptops, smartphones, tablets, and servers. With the increase in remote working and the widespread use of personal devices in the workplace, endpoint security has become a critical aspect of a strong cybersecurity strategy.\n\nWhy is Endpoint Security Important?\n-----------------------------------\n\nEndpoint devices serve as potential entry points for cybercriminals to access sensitive data and launch attacks against your organization's network. By securing these devices, you can prevent unauthorized access, reduce the risk of data breaches, and maintain the integrity of your network.\n\nKey Components of Endpoint Security\n-----------------------------------\n\nTo effectively secure your endpoints, consider implementing the following measures:\n\n* **Antivirus and Malware Protection**: Make sure every endpoint device has up-to-date antivirus and anti-malware software installed. This will help to detect and remove malicious files, preventing them from causing harm to your network.\n \n* **Patch Management**: Stay up to date with the latest security patches for your operating systems and third-party applications. Regularly updating your software can help protect against vulnerabilities that cybercriminals may exploit.\n \n* **Device Management**: Implement a centralized device management solution that allows administrators to monitor, manage, and secure endpoints. 
This includes enforcing security policies, tracking device inventory, and remotely wiping lost or stolen devices.\n \n* **Access Control**: Limit access to sensitive data by implementing a strict access control policy. Only grant necessary permissions to those who require it, and use authentication methods such as multi-factor authentication (MFA) to verify the identity of users.\n \n* **Encryption**: Encrypt sensitive data stored on endpoint devices to prevent unauthorized access to the data in case of device theft or loss.\n \n* **Firewall and Intrusion Prevention**: Deploy firewall and intrusion prevention systems to block external threats and alert administrators of potential attacks.\n \n* **User Training**: Educate users about the importance of endpoint security and the best practices for maintaining it. This includes topics like creating strong passwords, avoiding phishing scams, and following safe browsing practices.\n \n\nBy taking a comprehensive approach to endpoint security, you can protect your organization's network and sensitive data from the growing threat of cyberattacks.", + "links": [] + }, + "9Z6HPHPj4escSVDWftFEx": { + "title": "FTP vs SFTP", + "description": "", + "links": [] + }, + "6ILPXeUDDmmYRiA_gNTSr": { + "title": "SSL vs TLS", + "description": "", + "links": [] + }, + "gNFVtBxSYP5Uw3o3tlJ0M": { + "title": "IPSEC", + "description": "", + "links": [] + }, + "LLGXONul7JfZGUahnK0AZ": { + "title": "DNSSEC", + "description": "DNS Security Extensions (DNSSEC) is a protocol designed to address security vulnerabilities in the Domain Name System (DNS). Here are the key points:\n\n* **Digital Signatures:** DNSSEC protects against attacks by digitally signing DNS data. These signatures ensure data validity and prevent tampering.\n \n* **Hierarchical Signing:** DNSSEC signs data at every level of the DNS lookup process. 
For instance, when looking up ‘[google.com](http://google.com),’ the root DNS server signs a key for the .COM nameserver, which then signs a key for [google.com](http://google.com)’s authoritative nameserver.\n \n* **Backwards Compatibility:** DNSSEC doesn’t disrupt traditional DNS lookups; it adds security without breaking existing functionality. It complements other security measures like SSL/TLS.\n \n* **Chain of Trust:** DNSSEC establishes a parent-child trust chain from the root zone down to specific domains. Any compromise in this chain exposes requests to on-path attacks.\n \n\nLearn more from the following resources:", + "links": [ + { + "title": "DNSSEC: What Is It and Why Is It Important? - ICANN", + "url": "https://www.icann.org/resources/pages/dnssec-what-is-it-why-important-2019-03-05-en", + "type": "article" + }, + { + "title": "How DNSSEC Works - Cloudflare", + "url": "https://www.cloudflare.com/dns/dnssec/how-dnssec-works/", + "type": "article" + }, + { + "title": "What is DNS security? - Cloudflare", + "url": "https://www.cloudflare.com/learning/dns/dns-security/", + "type": "article" + }, + { + "title": "What is DNSSEC? 
- IBM", + "url": "https://www.youtube.com/watch?v=Fk2oejzgSVQ", + "type": "video" + }, + { + "title": "(DNS) 101 Miniseries", + "url": "https://www.youtube.com/playlist?list=PLTk5ZYSbd9MhMmOiPhfRJNW7bhxHo4q-K", + "type": "video" + } + ] + }, + "z_fDvTgKw51Uepo6eMQd9": { + "title": "LDAPS", + "description": "", + "links": [] + }, + "_9lQSG6fn69Yd9rs1pQdL": { + "title": "SRTP", + "description": "", + "links": [] + }, + "9rmDvycXFcsGOq3v-_ziD": { + "title": "S/MIME", + "description": "", + "links": [] + }, + "3140n5prZYySsuBHjqGOJ": { + "title": "Antivirus", + "description": "", + "links": [] + }, + "9QtY1hMJ7NKLFztYK-mHY": { + "title": "Antimalware", + "description": "", + "links": [] + }, + "QvHWrmMzO8IvNQ234E_wf": { + "title": "EDR", + "description": "", + "links": [] + }, + "iolsTC-63d_1wzKGul-cT": { + "title": "DLP", + "description": "", + "links": [] + }, + "35oCRzhzpVfitQPL4K9KC": { + "title": "ACL", + "description": "", + "links": [] + }, + "tWDo5R3KU5KOjDdtv801x": { + "title": "Firewall & Nextgen Firewall", + "description": "", + "links": [] + }, + "l5EnhOCnkN-RKvgrS9ylH": { + "title": "HIPS", + "description": "", + "links": [] + }, + "LIPtxl_oKZRcbvXT4EdNf": { + "title": "NIDS", + "description": "", + "links": [] + }, + "7w9qj16OD4pUzq-ItdxeK": { + "title": "NIPS", + "description": "", + "links": [] + }, + "jWl1VWkZn3n1G2eHq6EnX": { + "title": "Host Based Firewall", + "description": "", + "links": [] + }, + "SLKwuLHHpC7D1FqrpPRAe": { + "title": "Sandboxing", + "description": "", + "links": [] + }, + "1jwtExZzR9ABKvD_S9zFG": { + "title": "EAP vs PEAP", + "description": "", + "links": [] + }, + "HSCGbM2-aTnJWUX6jGaDP": { + "title": "WPS", + "description": "", + "links": [] + }, + "MBnDE0VyVh2u2p-r90jVk": { + "title": "WPA vs WPA2 vs WPA3 vs WEP", + "description": "", + "links": [] + }, + "w6V4JOtXKCMPAkKIQxvMg": { + "title": "Preparation", + "description": "The **preparation** stage of the incident response process is crucial to ensure the organization's 
readiness to effectively deal with any type of security incidents. This stage revolves around establishing and maintaining an incident response plan, creating an incident response team, and providing proper training and awareness sessions for the employees. Below, we'll highlight some key aspects of the preparation stage.\n\nIncident Response Plan\n----------------------\n\nAn _Incident Response Plan_ is a documented set of guidelines and procedures for identifying, investigating, and responding to security incidents. It should include the following components:\n\n* **Roles and Responsibilities**: Define the roles within the incident response team and the responsibilities of each member.\n* **Incident Classification**: Establish criteria to classify incidents based on their severity, impact, and type.\n* **Escalation Procedures**: Define a clear path for escalating incidents depending on their classification, involving relevant stakeholders when necessary.\n* **Communication Guidelines**: Set up procedures to communicate about incidents internally within the organization, as well as externally with partners, law enforcement, and the media.\n* **Response Procedures**: Outline the steps to be taken for each incident classification, from identification to resolution.\n\nIncident Response Team\n----------------------\n\nAn _Incident Response Team_ is a group of individuals within an organization that have been appointed to manage security incidents. The team should be comprised of members with diverse skillsets and backgrounds, including but not limited to:\n\n* Security Analysts\n* Network Engineers\n* IT Managers\n* Legal Counsel\n* Public Relations Representatives\n\nTraining and Awareness\n----------------------\n\nEmployee training and awareness is a crucial component of the preparation stage. 
This includes providing regular training sessions on security best practices and the incident response process, as well as conducting simulated incident exercises to evaluate the efficiency of the response plan and the team's readiness.\n\nContinuous Improvement\n----------------------\n\nThe preparation phase is not a one-time activity; it should be regularly revisited, evaluated, and updated based on lessons learned from previous incidents, changes in the organization's structure, and emerging threats in the cybersecurity landscape.\n\nIn summary, the preparation stage is the foundation of an effective incident response process. By establishing a comprehensive plan, assembling a skilled team, and ensuring ongoing employee training and awareness, organizations can minimize the potential damage of cybersecurity incidents and respond to them quickly and effectively.", + "links": [] + }, + "XsRoldaBXUSiGbvY1TjQd": { + "title": "Identification", + "description": "The _Identification_ step in the incident response process is the initial phase where an organization detects and confirms that a security incident has occurred. As the cornerstone of effective incident response, it is crucial to identify potential threats as quickly as possible. 
In this section, we will explore various aspects of the identification phase and discuss how to effectively recognize security incidents.\n\nKey Elements of Identification\n------------------------------\n\n* **Monitoring:** Implement robust monitoring systems, which include security information and event management (SIEM) solutions, intrusion detection systems (IDS), antivirus software, and firewalls, to consistently track and scrutinize IT environment activities.\n \n* **Alerts and Indicators:** Establish clear and meaningful alerts and indicators of compromise (IoCs) to quickly identify and respond to anomalous behavior or potential threats.\n \n* **Threat Intelligence:** Leverage threat intelligence from various sources, such as reputable security vendors, industry partners, and government agencies, to stay informed about emerging threats and vulnerabilities.\n \n* **Incident Triage:** Implement an incident triage process, which includes the evaluation of potential incidents and the categorization of real incidents based on their severity, to ensure timely and efficient allocation of resources.\n \n* **User Reporting Mechanisms:** Encourage employees to report suspicions of cyber incidents and educate them on their role in recognizing abnormal activity. Setting up a reporting mechanism such as a dedicated email address or hotline can facilitate this.\n \n\nIdentifying Security Incidents\n------------------------------\n\nDetecting cyber incidents is an ongoing process which requires continuous refinement and improvement. 
Begin by focusing on early detection and quick containment, as incidents tend to become costlier the longer they remain undetected.\n\nSome key aspects to keep in mind when identifying security incidents are:\n\n* **Analyze and prioritize alerts:** Use a risk-based approach to prioritize incidents according to their potential impact on the organization's critical infrastructure, sensitive data, and business continuity.\n \n* **Leverage analytics:** Use advanced analytics and machine learning tools to detect anomalous behavior and identify advanced attacks that could bypass traditional signature-based detection solutions.\n \n* **Regularly review and update detection tools:** Keep detection tools up to date and ensure they are properly calibrated to minimize false positives and negatives.\n \n\nAs the author of this guide, I suggest you invest time and resources into developing a solid identification process. By putting in place effective detection measures, you are building the foundation for a successful incident response capability, empowering your organization to respond efficiently to cyber threats and minimize potential damages.", + "links": [] + }, + "l7WnKuR2HTD4Vf9U2TxkK": { + "title": "Containment", + "description": "In the Incident Response Process, containment is the step where the identified threat is controlled to prevent any further damage to the system and organization, while maintaining the integrity of the collected incident data. 
The primary goal of containment is to limit the attack's scope and prevent any further compromises.\n\nShort-term and Long-term Containment\n------------------------------------\n\nThere are two main types of containment measures that need to be applied depending on the nature of the incident: short-term and long-term containment.\n\nShort-term Containment\n----------------------\n\nThese measures are focused on stopping the immediate threat by disconnecting affected systems, blocking harmful IP addresses, or temporarily disabling the vulnerable service. However, these steps might result in the loss of valuable incident data, so it is essential to balance these actions against preserving evidence necessary for further investigation.\n\nLong-term Containment\n---------------------\n\nLong-term containment focuses on implementing more sustainable solutions to address the root cause of the incident, such as updating security patches, configuring firewalls, and implementing access control measures. 
These actions are taken to prevent reoccurrence and must be performed in parallel with the recovery phase to ensure a comprehensive Incident Response Process.\n\nKey Steps in Containment\n------------------------\n\nThe following are some key steps that you should follow during the containment phase:\n\n* **Isolate** - Segregate the affected systems from the rest of the network to stop the spread of the threat.\n* **Preserve Evidence** - Securely capture relevant logs and data for future analysis and investigation.\n* **Implement Temporary Measures** - Take immediate actions to block the attacker and secure the environment while minimizing disruption.\n* **Update Containment Strategy** - Integrate lessons learned from previous incidents and external resources to continuously improve your containment process.\n\nBy properly executing the containment phase of the Incident Response Process, you will be well-prepared to eradicate the root cause of the cyber security threat and recover your affected systems with minimal damage to your organization.", + "links": [] + }, + "N17xAIo7sgbB0nrIDMWju": { + "title": "Eradication", + "description": "Eradication is a crucial step in the incident response process where the primary goal is to eliminate any malicious activity from the infected system(s) and halt the attacker's foothold in the network. This step usually follows the detailed analysis and identification of the nature and scope of the incident. Below are some key aspects of the eradication process:\n\nDelete Malware & Vulnerability Patching\n---------------------------------------\n\nOnce the incident has been identified and understood, teams must remove any malicious software, including viruses, worms, and Trojans from the affected systems. 
Simultaneously, patch any vulnerabilities that were exploited to ensure the effectiveness of the eradication process.\n\nEnhance Security Measures\n-------------------------\n\nAfter vulnerabilities have been patched, it's essential to boost the organization's security posture. This may involve updating and strengthening passwords, tightening access controls, or employing advanced security mechanisms like multi-factor authentication (MFA).\n\nSystem Restoration\n------------------\n\nIn some cases, it may be necessary to restore compromised systems from known backups or clean images to eliminate any lingering threats. Before restoring, verify the integrity and safety of the backups and ensure the security vulnerability is patched to avoid reinfection.\n\nRetain Evidentiary Data\n-----------------------\n\nBe sure to retain any critical artifacts, logs, and other evidence associated with the incident. This information may be needed later for legal or insurance purposes, audit requirements, or continuous improvement of the organization's incident response capabilities.\n\nRemember that each incident is unique, and the eradication strategy must be customized according to the given incident's specifics. Proper documentation and communication should be maintained throughout the process to ensure smooth execution and avoid overlooking critical aspects. After eradication has been completed, it is essential to move forward and strengthen the overall cybersecurity posture to prevent future incidents.", + "links": [] + }, + "vFjbZAJq8OfLb3_tsc7oT": { + "title": "Recovery", + "description": "The recovery phase of the incident response process is a critical step in regaining normalcy after a cyber security incident. This phase focuses on restoring the affected systems and data, implementing necessary improvements to prevent future occurrences, and getting back to normal operations. 
In this section, we will discuss the key components and best practices for the recovery phase.\n\nRestoring Systems and Data\n--------------------------\n\nThe primary objective of the recovery phase is to restore affected systems and data to their pre-incident status. This process may involve:\n\n* Cleaning and repairing infected systems\n* Restoring data from backups\n* Reinstalling compromised software and applications\n* Updating system configurations and patching vulnerabilities\n\nPost-Incident Analysis\n----------------------\n\nOnce systems are back in operation, it is vital to analyze the incident thoroughly to understand the root cause, impact, and lessons learned. This analysis will assess the effectiveness of your incident response process and identify areas for improvement. Post-incident analysis may include:\n\n* Reviewing logs, incident reports, and other evidence collected during the investigation\n* Interviewing staff involved in the response\n* Examining the attacker's tools, tactics, and procedures\n* Evaluating any potential legal or regulatory implications of the incident\n\nImplementing Improvements\n-------------------------\n\nBased on the findings of the post-incident analysis, take proactive measures to strengthen your security posture and harden your defenses. These improvements may involve:\n\n* Updating policies, procedures, and security controls\n* Enhancing monitoring and detection capabilities\n* Conducting security training and awareness programs for employees\n* Engaging external cyber security experts for consultation and guidance\n\nDocumenting and Communicating\n-----------------------------\n\nThorough documentation of the incident, response actions, and post-incident analysis is essential for internal and external communication, legal and regulatory compliance, and continued improvement. Documentation should be concise, accurate, and easily accessible. 
It may include:\n\n* Incident response reports and action items\n* Updated policies, procedures, and guidelines\n* Security awareness materials for employees\n* Executive summaries for senior management\n\nContinuous Review and Improvement\n---------------------------------\n\nLastly, it is important to never consider the recovery process as \"finished.\" Just as the threat landscape evolves, your organization should maintain a proactive approach to cyber security by regularly reviewing, updating, and enhancing your incident response process.\n\nIn summary, the recovery phase of the incident response process involves the restoration of affected systems and data, post-incident analysis, implementing improvements, documenting the incident, and maintaining a continuous improvement mindset. By following these steps, you will be better equipped to handle and recover from future cyber security incidents.", + "links": [] + }, + "ErRol7AT02HTn3umsPD_0": { + "title": "Lessons Learned", + "description": "The final and vital step of the incident response process is reviewing and documenting the \"lessons learned\" after a cybersecurity incident. In this phase, the incident response team conducts a thorough analysis of the incident, identifies key points to be learned, and evaluates the effectiveness of the response plan. These lessons allow organizations to improve their security posture, making them more resilient to future threats. Below, we discuss the main aspects of the lessons learned phase:\n\nPost-Incident Review\n--------------------\n\nOnce the incident has been resolved, the incident response team gathers to discuss and evaluate each stage of the response. This involves examining the actions taken, any issues encountered, and the efficiency of communication channels. 
This stage helps in identifying areas for improvement in the future.\n\nRoot Cause Analysis\n-------------------\n\nUnderstanding the root cause of the security incident is essential to prevent similar attacks in the future. The incident response team should analyze and determine the exact cause of the incident, how the attacker gained access, and what vulnerabilities were exploited. This will guide organizations in implementing proper security measures and strategies to minimize risks of a reoccurrence.\n\nUpdate Policies and Procedures\n------------------------------\n\nBased on the findings of the post-incident review and root cause analysis, the organization should update its security policies, procedures, and incident response plan accordingly. This may involve making changes to access controls, network segmentation, vulnerability management, and employee training programs.\n\nConduct Employee Training\n-------------------------\n\nSharing the lessons learned with employees raises awareness and ensures that they have proper knowledge and understanding of the organization's security policies and procedures. Regular training sessions and awareness campaigns should be carried out to enhance employee cybersecurity skills and reinforce best practices.\n\nDocument the Incident\n---------------------\n\nIt's crucial to maintain accurate and detailed records of security incidents, including the measures taken by the organization to address them. This documentation serves as evidence of the existence of an effective incident response plan, which may be required for legal, regulatory, and compliance purposes. 
Furthermore, documenting incidents helps organizations to learn from their experience, assess trends and patterns, and refine their security processes.\n\nIn conclusion, the lessons learned phase aims to identify opportunities to strengthen an organization's cybersecurity framework, prevent similar incidents from happening again, and continuously improve the incident response plan. Regular reviews of cybersecurity incidents contribute to building a robust and resilient security posture, mitigating risks and reducing the impact of cyber threats on the organization's assets and operations.", + "links": [] + }, + "zqRaMmqcLfx400kJ-h0LO": { + "title": "Zero Day", + "description": "A **zero-day** refers to a vulnerability in software, hardware, or firmware that is unknown to the parties responsible for fixing or patching it. Cybercriminals can exploit these vulnerabilities to gain unauthorized access to systems, steal sensitive data, or perform other malicious activities. Zero-day vulnerabilities are particularly dangerous because they are difficult to detect and prevent, given that there are no existing fixes or defenses against them.\n\nZero-Day Exploits\n-----------------\n\nAttackers can create **zero-day exploits** by writing malicious code that takes advantage of the discovered zero-day vulnerability. These exploits can be delivered through various methods such as spear phishing emails or drive-by downloads from compromised websites.\n\nZero-Day Detection & Response\n-----------------------------\n\nDue to the unknown nature of zero-day vulnerabilities, traditional security measures such as signature-based antivirus programs and firewalls may not be effective in detecting them. 
However, organizations can take several steps to protect themselves from zero-day attacks:\n\n* **Patch management**: Regularly update and patch all software, hardware, and firmware to minimize entry points for potential attacks.\n* **Monitor network traffic**: Use network monitoring tools to analyze network traffic continually and look for any unusual or suspicious activities, which may indicate a zero-day exploit attempt.\n* **Behavior-based detection**: Implement security solutions that focus on monitoring the behavior of applications and network traffic for any signs of malicious activities, rather than relying solely on signature-based detection methods.\n* **Use threat intelligence**: Subscribe to threat intelligence feeds that provide information on the latest security vulnerabilities and emerging threats, so you can stay informed about possible zero-day attacks.\n* **Implement strong access control**: Control access to critical systems and data, limit the number of privileged accounts, and enforce least privilege policies wherever possible, making it harder for attackers to exploit zero-day vulnerabilities.\n* **Educate employees**: Train employees to recognize and avoid common attack vectors such as phishing emails or downloading suspicious files, as they can often be the initial entry point for zero-day exploits.\n\nIn conclusion, while it is impossible to predict and prevent zero-day vulnerabilities completely, organizations can improve their cyber resilience by taking a proactive approach and using a combination of security methods and best practices.", + "links": [] + }, + "HPlPGKs7NLqmBidHJkOZg": { + "title": "Known vs Unknown", + "description": "In the realm of cyber security, threats can be classified as known or unknown based on their familiarity and the level of awareness about them. 
Understanding the difference between these two types of threats is essential for effectively implementing security measures and mitigating potential risks.\n\nKnown Threats\n-------------\n\nKnown threats are those that have been identified, studied, and documented by the security community. They are the types of threats that security vendors have had the opportunity to analyze and develop protective measures against. These threats include:\n\n* Malware: Such as viruses, worms, and Trojans that have known signatures and behavior patterns.\n* Phishing: Social engineering attacks using deceptive emails, texts, or websites to trick users into providing sensitive information or downloading harmful files.\n* Exploits: Taking advantage of known vulnerabilities in software and hardware.\n* Common Attack Patterns: Recognizable attack techniques, such as SQL injection, that have well-documented solutions and mitigation strategies.\n\nTo defend against known threats, organizations should keep their security software, operating systems, and applications up-to-date. Regularly patching vulnerabilities, training employees to recognize phishing scams, and following best practices for secure configurations can help protect against these known risks.\n\nUnknown Threats\n---------------\n\nUnknown threats are those that have not yet been identified or documented by the security community. They represent a greater challenge to organizations due to their unpredictable nature and the lack of available defense mechanisms. 
Examples of unknown threats include:\n\n* Zero-Day Vulnerabilities: Security flaws that are unknown to the software or hardware vendor and for which security patches do not yet exist.\n* Advanced Persistent Threats (APTs): Highly skilled, persistent adversaries that operate stealthily, often using custom-developed tools, to compromise a target's network over an extended period.\n* Novel Malware Types: New or significantly altered forms of malware that do not have known signatures, making them difficult to detect with traditional security tools.\n\nDefending against unknown threats requires a proactive approach. Incorporating threat intelligence, network monitoring, and behavior-based anomaly detection can help organizations identify potential threats before they cause damage. Additionally, following the principle of least privilege, segmenting networks, and maintaining strong data encryption can reduce the impact of unknown threats when they are discovered.\n\nIn conclusion, understanding the difference between known and unknown threats is crucial for implementing effective cyber security measures. By staying informed about the latest threats and investing in the right security tools and practices to tackle both known and unknown risks, organizations can better protect their networks, systems, and data from cyber attacks.", + "links": [] + }, + "l0BvDtwWoRSEjm6O0WDPy": { + "title": "APT", + "description": "Advanced Persistent Threats, or APTs, are a class of cyber threats characterized by their persistence over a long period, extensive resources, and high level of sophistication. Often associated with nation-state actors, organized cybercrime groups, and well-funded hackers, APTs are primarily focused on targeting high-value assets, such as critical infrastructure, financial systems, and government agencies.\n\nKey Aspects of APT\n------------------\n\n* **Persistence**: APTs are designed to maintain a low profile and operate under the radar for extended periods. 
Hackers use advanced techniques to maintain access and control over their targets, and continually adapt and evolve in order to avoid being discovered.\n \n* **Sophistication**: APTs are known for employing a wide range of techniques and tactics to infiltrate and exploit their targets, including zero-day vulnerabilities, spear-phishing, social engineering, and advanced malware. The level of expertise behind APTs is typically higher than your average cybercriminal.\n \n* **Motivation**: APTs often have significant resources behind them, which allows for sustained cyber campaigns against specific targets. The motivation can be monetary gain, espionage, or even maintaining a competitive edge in the marketplace. APTs can also be used to sow chaos and destabilize geopolitical rivals.\n \n\nDetecting and Mitigating APTs\n-----------------------------\n\nDue to the sophisticated and persistent nature of APTs, they can be challenging to detect and protect against. However, implementing several best practices can help organizations mitigate the risk and impact of APTs:\n\n* Adopt a proactive approach to cyber security, including continuous network monitoring, threat hunting, and regular assessments.\n* Implement a robust set of defense-in-depth security measures, including intrusion detection systems (IDS), firewalls, and access controls.\n* Train employees on cybersecurity awareness and how to spot and respond to cyber threats.\n* Keep systems updated and patched to prevent exploitation of known vulnerabilities.\n* Employ advanced threat intelligence solutions to identify and anticipate potential APT campaigns.\n\nAPT attacks can be damaging and disruptive to organizations, but understanding the nature of these threats and implementing a comprehensive security strategy can help minimize the risk and protect valuable assets. Remember, APTs are not just a concern for large enterprises and governments; organizations of all sizes can be targeted. 
Staying vigilant and proactive is key to staying safe from these advanced threats.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What Are Advanced Persistent Threats? - IBM", + "url": "https://www.ibm.com/topics/advanced-persistent-threats", + "type": "article" + } + ] + }, + "rxzcAzHjzIc9lkWSw0fef": { + "title": "VirusTotal", + "description": "", + "links": [] + }, + "h__KxKa0Q74_egY7GOe-L": { + "title": "Joe Sandbox", + "description": "", + "links": [] + }, + "GZHFR43UzN0WIIxGKZOdX": { + "title": "any.run", + "description": "", + "links": [] + }, + "lFt1k1Q-NlWWqyDA3gWD1": { + "title": "urlvoid", + "description": "", + "links": [] + }, + "lMiW2q-b72KUl-2S7M6Vb": { + "title": "urlscan", + "description": "", + "links": [] + }, + "-RnlvUltJ9IDtH0HEnMbN": { + "title": "WHOIS", + "description": "Whois is a protocol that allows querying databases to obtain information about the owner of a domain name, an IP address, or an autonomous system number on the Internet.\n\nIn the field of cyber security, Whois data is one of several components in passive reconnaissance and open-source intelligence(OSINT) gathering.", + "links": [ + { + "title": "How to use the whois command on Linux", + "url": "https://www.howtogeek.com/680086/how-to-use-the-whois-command-on-linux/", + "type": "article" + }, + { + "title": "Whois lookup", + "url": "https://www.whois.com/whois/", + "type": "article" + } + ] + }, + "7obusm5UtHwWMcMMEB3lt": { + "title": "Phishing", + "description": "", + "links": [] + }, + "M65fCl72qlF0VTbGNT6du": { + "title": "Vishing", + "description": "Social engineering attack involving voice, such as a phone call to trick a victim to do something to the benefit of the attacker.\n\nDerived from voice-phishing, or \"vishing\".", + "links": [] + }, + "KSwl6sX2W47vUmytpm8LH": { + "title": "Whaling", + "description": "", + "links": [] + }, + "d4U6Jq-CUB1nNN2OCFoum": { + "title": "Smishing", + "description": "SMS-phishing, or \"smishing\", is a type 
of social-engineering attack based on SMS, or text messages, to trick a victim into doing something to the benefit of the attacker, such as clicking on a malicious link or providing sensitive information.", + "links": [] + }, + "cbEMUyg_btIPjdx-XqIM5": { + "title": "Spam vs Spim", + "description": "", + "links": [] + }, + "FD0bkmxNpPXiUB_NevEUf": { + "title": "Shoulder Surfing", + "description": "", + "links": [] + }, + "Iu0Qtk13RjrhHpSlm0uyh": { + "title": "Dumpster Diving", + "description": "", + "links": [] + }, + "o-keJgF9hmifQ_hUD91iN": { + "title": "Tailgating", + "description": "Tailgating is the act of getting access to a restricted area by simply following an authorized person. This is a common social engineering technique used by attackers to gain physical access to a building or a restricted area. The attacker waits for an authorized person to open the door and then follows them inside. This technique is effective because it is based on trust and the assumption that the attacker is an authorized person.", + "links": [] + }, + "v9njgIxZyabJZ5iND3JGc": { + "title": "Zero day", + "description": "", + "links": [] + }, + "O1VceThdxRlgQ6DcGyY7Y": { + "title": "Social Engineering", + "description": "", + "links": [] + }, + "UU_inxa8Y2lLP2BRhdLDT": { + "title": "Reconnaissance", + "description": "", + "links": [] + }, + "ZEgxmvjWPp5NofLFz_FTJ": { + "title": "Impersonation", + "description": "", + "links": [] + }, + "dcvuKHq0nHgHLcLwtl4IJ": { + "title": "Watering Hole Attack", + "description": "", + "links": [] + }, + "cO70zHvHgBAH29khF-hBW": { + "title": "Drive by Attack", + "description": "", + "links": [] + }, + "0LeDwj_tMaXjQBBOUJ5CL": { + "title": "Typo Squatting", + "description": "", + "links": [] + }, + "Q0i-plPQkb_NIvOQBVaDd": { + "title": "Brute Force vs Password Spray", + "description": "", + "links": [] + }, + "IF5H0ZJ72XnqXti3jRWYF": { + "title": "DoS vs DDoS", + "description": "", + "links": [] + }, + "ODlVT6MhV-RVUbRMG0mHi": { + "title": "MITM", + 
"description": "", + "links": [] + }, + "LteSouUtAj3JWWOzcjQPl": { + "title": "Spoofing", + "description": "", + "links": [] + }, + "O1fY2n40yjZtJUEeoItKr": { + "title": "Evil Twin", + "description": "", + "links": [] + }, + "urtsyYWViEzbqYLoNfQAh": { + "title": "DNS Poisoning", + "description": "", + "links": [] + }, + "LfWJJaT3fv0p6fUeS8b84": { + "title": "Deauth Attack", + "description": "", + "links": [] + }, + "u4hySof6if5hiONSaW-Uf": { + "title": "VLAN Hopping", + "description": "", + "links": [] + }, + "Ee7LfbhwJbiWjJ3b_bbni": { + "title": "Rogue Access Point", + "description": "", + "links": [] + }, + "n8ZOZxNhlnw7DpzoXe_f_": { + "title": "Buffer Overflow", + "description": "", + "links": [] + }, + "nOND14t7ISgSH3zNpV3F8": { + "title": "Memory Leak", + "description": "", + "links": [] + }, + "2jo1r9O_rCnDwRv1_4Wo-": { + "title": "XSS", + "description": "", + "links": [] + }, + "P-Am25WJV8cFd_KsX7cdj": { + "title": "SQL Injection", + "description": "", + "links": [] + }, + "pK2iRArULlK-B3iSVo4-n": { + "title": "CSRF", + "description": "", + "links": [] + }, + "mIX8PsIGuwgPCGQZ6ok2H": { + "title": "Replay Attack", + "description": "", + "links": [] + }, + "sMuKqf27y4iG0GrCdF5DN": { + "title": "Pass the Hash", + "description": "", + "links": [] + }, + "L0ROYh2DNlkybNDO2ezJY": { + "title": "Directory Traversal", + "description": "", + "links": [] + }, + "lv6fI3WeJawuCbwKtMRIh": { + "title": "Stakeholders", + "description": "", + "links": [] + }, + "05tH6WhToC615JTFN-TPc": { + "title": "HR", + "description": "", + "links": [] + }, + "C5bCIdPi0gGkY_r4qqoXZ": { + "title": "Legal", + "description": "", + "links": [] + }, + "05Gbgy6aawYlYIx38u8DE": { + "title": "Compliance", + "description": "", + "links": [] + }, + "s9tHpzYRj2HCImwQhnjFM": { + "title": "Management", + "description": "", + "links": [] + }, + "vVaBQ5VtsE_ZeXbCOF8ux": { + "title": "Cloud Skills and Knowledge", + "description": "In the realm of cyber security, cloud skills and knowledge are 
indispensable for professionals who work with cloud-based infrastructure and services. As more organizations migrate to the cloud, the demand for cloud security expertise continues to rise. This chapter focuses on the essential cloud skills and knowledge a cyber security specialist should possess.\n\nUnderstanding Cloud Models\n--------------------------\n\nIt is fundamental for a cyber security professional to be acquainted with the different cloud service models, including:\n\n* **IaaS (Infrastructure as a Service):** Offers virtualized computing resources over the Internet (e.g., Amazon Web Services, Microsoft Azure).\n* **PaaS (Platform as a Service):** Provides a platform for developers to build, test, and deploy applications (e.g., Google App Engine, Heroku).\n* **SaaS (Software as a Service):** Offers on-demand access to software applications over the Internet (e.g., Salesforce, Microsoft 365).\n\nFamiliarity with Cloud Security Architecture\n--------------------------------------------\n\nA comprehensive understanding of cloud security architecture enables professionals to design and implement secure cloud environments. 
Key aspects include:\n\n* Identifying and managing risks in cloud deployments\n* Configuring and managing cloud security services\n* Applying best practices for data storage, access control, and encryption in the cloud\n\nCompliance and Legal Issues\n---------------------------\n\nCloud security specialists must be aware of various compliance and legal requirements related to cloud data storage and processing, such as GDPR, HIPAA, and PCI-DSS.\n\nCloud Security Tools and Technologies\n-------------------------------------\n\nCyber security professionals should be proficient in using various security tools and technologies specifically designed for the cloud, including:\n\n* Cloud security monitoring and management tools (e.g., AWS Security Hub, Azure Security Center)\n* Cloud-native security platforms (e.g., Palo Alto Networks Prisma, Check Point CloudGuard)\n* API security and management tools (e.g., Postman, Swagger)\n\nCloud Identity and Access Management\n------------------------------------\n\nA strong grasp of identity and access management (IAM) concepts in the cloud is crucial. 
This entails understanding:\n\n* How to create and manage user identities and permissions\n* Implementing multi-factor authentication (MFA)\n* Understanding the differences between cloud-based and traditional IAM systems\n\nSecuring Cloud Networks\n-----------------------\n\nProfessionals should know the fundamentals of securing cloud networks, including:\n\n* Implementing network security features such as firewalls, virtual private networks (VPNs), and intrusion detection systems\n* Segmenting cloud networks for better security\n\nOverall, possessing cloud skills and knowledge prepares cyber security professionals to effectively protect and manage cloud infrastructure and applications in today's fast-paced digital landscape.", + "links": [] + }, + "ThLsXkqLw--uddHz0spCH": { + "title": "Understand the Concept of Security in the Cloud", + "description": "In this section, we will explore some key security concepts in the cloud to help you better understand and apply best practices for securing your cloud environment. This knowledge will enable you to maintain the confidentiality, integrity, and availability of your data and applications, while ensuring compliance with industry standards and regulations.\n\nShared Responsibility Model\n---------------------------\n\nOne of the fundamental concepts to grasp when dealing with cloud security is the _Shared Responsibility Model_. This means that securing the cloud environment is a joint effort between the cloud service provider (CSP) and the customer.\n\n* **CSP Responsibilities**: The cloud service provider is responsible for securing the underlying infrastructure that supports the cloud services, including data centers, networks, hardware, and software.\n* **Customer Responsibilities**: Customers are responsible for securing their data, applications, and user access within the cloud environment. 
This includes data encryption, patch management, and access control.\n\nIdentity and Access Management (IAM)\n------------------------------------\n\nIAM is an essential security concept in the cloud, as it helps enforce the principle of least privilege by only granting the necessary permissions to users, applications, and services.\n\n* **User Management**: Creation and management of user accounts, roles, and groups to ensure that only authorized personnel can access and manage the cloud environment.\n* **Access Control**: Implementing policies and rules to control access to cloud resources, such as virtual machines, storage accounts, and databases.\n\nData Protection\n---------------\n\nKeeping your data secure in the cloud is crucial, and multiple methods can be employed to achieve this goal.\n\n* **Encryption**: Encrypting data at rest (stored in the cloud) and in transit (transmitted over the internet) to protect it from unauthorized access.\n* **Backup and Recovery**: Regularly creating backups of your data to ensure its availability in case of data loss or corruption, and implementing a disaster recovery plan to quickly restore lost or compromised data.\n\nNetwork Security\n----------------\n\nNetwork security in the cloud encompasses various strategies aimed at protecting the integrity and availability of the network.\n\n* **Firewalls**: Deploying firewalls to protect against unauthorized access to your cloud environment, using both standard and next-generation firewall features.\n* **Intrusion Detection and Prevention Systems (IDPS)**: Implementing IDPS solutions to monitor network traffic for malicious activity and automatically block suspected threats.\n* **VPC and Network Segmentation**: Creating virtual private clouds (VPCs) and segmenting networks to isolate resources, limiting the potential blast radius in case of a security incident.\n\nSecurity Monitoring and Incident Response\n-----------------------------------------\n\nContinuously monitoring 
your cloud environment helps identify and respond to security incidents in a timely manner.\n\n* **Security Information and Event Management (SIEM)**: Deploying SIEM solutions to collect, analyze, and correlate security events and logs in real-time, enabling the detection of suspicious activities.\n* **Incident Response Plan**: Developing and maintaining a well-documented incident response plan to guide your organization through the process of identifying, containing, and remediating security incidents.\n\nBy understanding and implementing these cloud security concepts, you will be better equipped to protect your cloud environment and ensure the safety of your data and applications.", + "links": [] + }, + "XL3FVeGFDhAl_gSol6Tjt": { + "title": "Understand the basics and general flow of deploying in the cloud", + "description": "Cloud deployment flow refers to the process of deploying applications, data, and services onto the cloud infrastructure. It is a critical aspect of cloud computing, as it ensures that resources are utilized efficiently, and applications and services run seamlessly on the cloud environment. 
In this section, we will discuss the key aspects of cloud deployment flow, including the types of cloud deployment models and the steps involved in the process.\n\nTypes of Cloud Deployment Models\n--------------------------------\n\nThere are four main types of cloud deployment models, which are:\n\n* **Public Cloud**: The resources are owned, managed, and operated by a third-party service provider and are made available to the general public.\n* **Private Cloud**: The cloud infrastructure is owned, managed, and operated for a single organization, and resources are allocated based on the organization's needs.\n* **Hybrid Cloud**: A combination of private and public clouds that allows for data and application portability between the two environments.\n* **Community Cloud**: The cloud infrastructure is shared by multiple organizations with similar requirements and goals.\n\nCloud Deployment Process\n------------------------\n\n* **Select a Cloud Deployment Model**: Choose the type of cloud deployment model that best meets your organization's needs and requirements.\n \n* **Define Your Infrastructure**: Identify the cloud services you need, such as computing resources, storage, networking, and other applications or services.\n \n* **Choose a Cloud Service Provider**: Research and compare different cloud service providers to determine which one best aligns with your organization's needs, budget, and goals.\n \n* **Configure and Migrate**: Set up and configure your cloud environment, including network configuration, security settings, and user access levels. Additionally, migrate your data and applications to the cloud.\n \n* **Test and Optimize**: Test your cloud deployment to ensure that it meets your performance and functionality requirements. 
Monitor and optimize your cloud environment to ensure that resources are being used efficiently and cost-effectively.\n \n* **Monitor, Manage, and Maintain**: Regularly monitor your cloud environment to check for performance issues, security risks, and other potential concerns. Perform regular maintenance tasks, such as updating software and patching security vulnerabilities, to ensure the continuous, reliable operation of your cloud deployment.\n \n\nBy understanding the cloud deployment flow and following the steps mentioned above, you can seamlessly deploy your applications, data, and services on the cloud infrastructure, improving the overall efficiency and performance of your organization's IT systems.", + "links": [] + }, + "KGjYM4Onr5GQf1Yv9IabI": { + "title": "Understand the differences between cloud and on-premises", + "description": "When it comes to managing your organization's data and applications, there are mainly two options: **Cloud** and **On-premises**. Choosing between these two options can be crucial for the way your organization handles its cyber security. In this section, we will discuss the key differences and advantages of both options.\n\nCloud\n-----\n\nCloud computing allows you to store and access data and applications over the internet, rather than housing them within your own organization's infrastructure. 
Some key advantages of cloud computing include:\n\n* **Scalability:** Cloud service providers can easily scale resources up or down based on your organization's needs.\n* **Cost savings:** You only pay for what you actually use, and you can avoid high upfront costs associated with building and maintaining your own infrastructure.\n* **Flexibility:** Cloud services enable users to access data and applications from any device and location with an internet connection\n\nHowever, cloud-based solutions also come with their own set of challenges:\n\n* **Security and privacy:** When your data is stored with a third-party provider, you may have concerns about how your information is being protected and who has access to it.\n* **Data control and sovereignty:** Cloud service providers may store your data in servers located in various countries, which might raise concerns about data privacy and legal compliance.\n* **Performance:** Some applications might suffer from network latency when hosted in the cloud, impacting their responsiveness and efficiency.\n\nOn-premises\n-----------\n\nOn-premises solutions are those that are deployed within your own organization's infrastructure. 
Key advantages of on-premises solutions include:\n\n* **Control:** With an on-premises solution, your organization maintains full control over its data and the infrastructure it resides on.\n* **Data protection:** On-premises environments may offer increased data security due to physical access restrictions and the ability to implement stringent security policies.\n* **Customization:** On-premises solutions can be tailored to the specific needs and resources of your organization.\n\nHowever, on-premises solutions are not without their own set of challenges:\n\n* **Upfront costs:** Building and maintaining an on-premises infrastructure can be expensive and might require significant capital investments.\n* **Maintenance:** Your organization will be responsible for regularly updating hardware and software components, which can be time-consuming and costly.\n* **Limited scalability:** Scaling an on-premises infrastructure can be a complex and expensive process, and it may take more time compared to the flexibility provided by cloud solutions.\n\nConclusion\n----------\n\nIn conclusion, both cloud and on-premises solutions have their own set of advantages and challenges. The choice between the two depends on factors such as cost, security, control, and performance requirements. As an organization's cyber security expert, you must thoroughly evaluate these factors to make an informed decision that best suits your organization's needs.", + "links": [] + }, + "RJctUpvlUJGAdwBNtDSXw": { + "title": "Understand the concept of Infrastructure as Code", + "description": "Infrastructure as Code (IaC) is a key concept in the world of cloud computing and cybersecurity. It refers to the practice of defining, provisioning, and managing IT infrastructure through code rather than manual processes. 
IaC is a fundamental shift in the way we manage and operate infrastructure resources, introducing automation, consistency, and scalability benefits.\n\nKey Benefits of Infrastructure as Code\n--------------------------------------\n\n* **Consistency**: IaC ensures that your infrastructure is consistent across different environments (development, staging, and production). This eliminates manual errors and guarantees that the infrastructure is provisioned in the same way every time.\n \n* **Version Control**: By managing your infrastructure as code, it allows you to track changes to the infrastructure, just like you would with application code. This makes it easier to identify issues and rollback to a previous state if needed.\n \n* **Collaboration**: IaC allows multiple members of your team to collaborate on defining and managing the infrastructure, enabling better communication and visibility into the state of the infrastructure.\n \n* **Automation**: IaC enables you to automate the provisioning, configuration, and management of infrastructure resources. This reduces the time and effort required to provision resources and enables you to quickly scale your infrastructure to meet demand.\n \n\nCommon IaC Tools\n----------------\n\nThere are several popular IaC tools available today, each with their strengths and weaknesses. Some of the most widely used include:\n\n* **Terraform**: An open-source IaC tool developed by HashiCorp that allows you to define and provide data center infrastructure using a declarative configuration language. Terraform is platform-agnostic and can be used with various cloud providers.\n \n* **AWS CloudFormation**: A service by Amazon Web Services (AWS) that enables you to manage and provision infrastructure resources using JSON or YAML templates. 
CloudFormation is specifically designed for use with AWS resources.\n \n* **Azure Resource Manager (ARM) Templates**: A native IaC solution provided by Microsoft Azure that enables you to define, deploy, and manage Azure infrastructure using JSON templates.\n \n* **Google Cloud Deployment Manager**: A service offered by Google Cloud Platform (GCP) that allows you to create and manage cloud resources using YAML configuration files.\n \n\nBest Practices for Implementing Infrastructure as Code\n------------------------------------------------------\n\n* **Use Version Control**: Keep your IaC files in a version control system (e.g., Git) to track changes and enable collaboration among team members.\n \n* **Modularize Your Code**: Break down your infrastructure code into smaller, reusable modules that can be shared and combined to create more complex infrastructure configurations.\n \n* **Validate and Test**: Use tools and practices such as unit tests and static analysis to verify the correctness and security of your infrastructure code before deploying it.\n \n* **Continuously Monitor and Update**: Keep your IaC code up-to-date with the latest security patches and best practices, and constantly monitor the state of your infrastructure to detect and remediate potential issues.", + "links": [] + }, + "-83ltMEl3le3yD68OFnTM": { + "title": "Understand the Concept of Serverless", + "description": "Serverless computing is an innovative approach to application development that has changed the way developers build and deploy applications. In traditional application development, developers have to spend valuable time setting up, maintaining, and scaling servers to run their applications. 
Serverless computing removes this additional infrastructure overhead, allowing developers to focus solely on the application logic while the cloud provider takes care of the underlying infrastructure.\n\nHow does serverless work?\n-------------------------\n\nServerless computing works by executing your application code in short-lived stateless compute containers that are automatically provisioned and scaled by the cloud provider. In simple terms, it means that you only pay for the actual compute resources consumed when your application is running, rather than paying for pre-allocated or reserved resources. This ensures high flexibility, cost-effectiveness, and scalability.\n\nSome common characteristics of serverless computing include:\n\n* _No server management:_ Developers don't need to manage any servers, taking the burden of infrastructure management off their shoulders.\n* _Auto-scaling:_ The cloud provider automatically scales the compute resources as per the incoming requests or events.\n* _Cost optimization:_ Pay-as-you-go pricing model ensures that you only pay for the compute resources consumed by your application.\n* _Event-driven:_ Serverless applications are often designed to be triggered by events, such as API calls or data updates, ensuring efficient use of resources.\n\nPopular Serverless platforms\n----------------------------\n\nMany cloud providers offer serverless computing services, with the most popular options being:\n\n* **AWS Lambda:** Amazon Web Services (AWS) offers one of the most popular serverless computing services called Lambda. 
Developers can build and deploy applications using various programming languages, with AWS taking care of the infrastructure requirements.\n* **Google Cloud Functions:** Google Cloud Platform (GCP) offers Cloud Functions, a serverless computing service for executing your application code in response to events.\n* **Azure Functions:** Microsoft's Azure Functions allow you to run stateless applications in a fully managed environment, complete with auto-scaling capabilities and numerous integrations with other Azure services.\n\nAdvantages of Serverless Computing\n----------------------------------\n\nAdopting serverless computing can benefit organizations in several ways, such as:\n\n* **Reduced operational costs:** With serverless, you only pay for what you use, reducing the overall infrastructure costs.\n* **Faster deployment:** Serverless applications can be deployed quickly, allowing businesses to reach the market faster and respond to changes more effectively.\n* **Scalability:** The automatic scaling provided by the serverless platform ensures high availability and performance of your application.\n* **Focus on business logic:** Developers can concentrate exclusively on writing application code without worrying about infrastructure management.\n\nIt's important to note that serverless computing isn't a one-size-fits-all solution. There are times when traditional server-based architectures might be more suitable, depending on the use case and requirements. However, understanding the concept of serverless computing and leveraging its benefits can go a long way in enhancing cloud skills and knowledge in the ever-evolving cyber security domain.", + "links": [] + }, + "sVw5KVNxPEatBRKb2ZbS_": { + "title": "SaaS", + "description": "**Software as a Service**, often abbreviated as **SaaS**, is a cloud-based software delivery model where applications are provided over the internet. 
Instead of installing and maintaining software locally on individual computers or servers, users can access the software and its features through a web browser.\n\nFeatures\n--------\n\nSaaS offers various benefits and features that make it an attractive option for individuals and businesses alike. Some key features include:\n\n* **Accessibility**: SaaS applications can be accessed from anywhere with an internet connection.\n* **Lower Costs**: As a user, you only pay for what you use, reducing upfront costs such as licenses and infrastructure investments.\n* **Automatic Updates**: The SaaS provider is responsible for software updates, bug fixes, and patches. This means the latest version of the software is available to users without any manual intervention.\n* **Scalability**: SaaS applications can easily scale to accommodate a growing user base, making it an ideal choice for businesses of all sizes.\n* **Customization**: SaaS applications often come with various modules or add-ons that offer additional functionality and professional services for customization.\n\nSecurity Considerations\n-----------------------\n\nWhile SaaS offers numerous benefits, there are some potential concerns related to data security and privacy. Here are some key security considerations:\n\n* **Data Storage**: In a SaaS environment, your data is stored in the cloud, which means you need to trust the provider to properly secure it. Make sure the provider complies with relevant industry standards and regulations.\n* **Data Transmission**: It is crucial to verify that your data is encrypted when transmitted between your systems and the SaaS application. 
This can help protect your information from unauthorized access during transmission.\n* **Access Control**: Establish strong access control policies and procedures to ensure that only authorized users can access sensitive data within the SaaS application.\n* **Service Availability**: In case of a SaaS provider experiencing downtime or going out of business, make sure to have contingency plans in place, such as regular data backups and alternative software options.\n\nChoosing a SaaS Provider\n------------------------\n\nBefore committing to a SaaS provider, it is essential to undertake a thorough evaluation to ensure that it can meet your security and business requirements. Some aspects to consider include:\n\n* **Compliance**: Check if the provider adheres to legal and regulatory requirements in your industry.\n* **Service Level Agreements (SLAs)**: Review the provider's SLAs to understand their uptime guarantees, performance standards and penalties in case of SLA breaches.\n* **Data Management**: Make sure the provider offers tools and features to manage your data, such as importing, exporting, and data backup/restoration capabilities.\n* **Support**: Verify if the provider offers adequate support resources, like a 24/7 help desk and comprehensive documentation.\n\nBy keeping these aspects in mind, you can make an informed decision about whether SaaS is the right solution for your business, and select the best SaaS provider to meet your unique needs.", + "links": [] + }, + "PQ_np6O-4PK2V-r5lywQg": { + "title": "PaaS", + "description": "Platform as a Service, or **PaaS**, is a type of cloud computing service that provides a platform for developers to create, deploy, and maintain software applications. PaaS combines the software development platform and the underlying infrastructure, such as servers, storage, and networking resources. 
This enables developers to focus on writing and managing their applications, without worrying about the underlying infrastructure's setup, maintenance, and scalability.\n\nKey Features of PaaS\n--------------------\n\n* **Scalability:** PaaS allows for easily scaling applications to handle increased load and demand, without the need for manual intervention.\n* **Development Tools:** PaaS providers offer a collection of integrated development tools, such as programming languages, libraries, and APIs (Application Programming Interfaces) that enable developers to build and deploy applications.\n* **Automated Management:** PaaS platforms automate the management of underlying resources and provide seamless updates to ensure the applications are always running on the latest and most secure software versions.\n* **Cost-Effective:** PaaS can be more cost-effective than managing an on-premises infrastructure, since the provider manages the underlying resources, thus reducing the need for dedicated IT staff.\n\nCommon Use Cases for PaaS\n-------------------------\n\n* **Application Development:** Developers can use PaaS platforms to develop, test, and launch applications quickly and efficiently.\n* **Web Hosting:** PaaS platforms often include tools for hosting and managing web applications, reducing the effort needed to configure and maintain web servers.\n* **Data Analytics:** PaaS platforms typically offer data processing and analytics tools, making it easy for organizations to analyze and gain insights from their data.\n* **IoT Development:** PaaS platforms may include IoT (Internet of Things) services, simplifying the development and management of IoT applications and devices.\n\nIn conclusion, PaaS simplifies the application development and deployment process by providing a platform and its associated tools, saving developers time and resources. 
By leveraging PaaS, organizations can focus on their core competencies and build innovative applications without worrying about infrastructure management.", + "links": [] + }, + "1nPifNUm-udLChIqLC_uK": { + "title": "IaaS", + "description": "Infrastructure as a Service (IaaS) is a type of cloud computing service that offers virtualized computing resources over the internet. Essentially, it enables you to rent IT infrastructure—such as virtual machines (VMs), storage, and networking—on a pay-as-you-go basis instead of buying and maintaining your own physical hardware.\n\nKey Features\n------------\n\nIaaS provides a wide range of services and resources, including:\n\n* **Scalable Virtual Machines**: Quickly provision and scale virtual machines based on your requirements, with various configurations for CPU cores, RAM, and storage.\n \n* **Managed Storage**: Access various storage options such as block storage, object storage, and file storage to suit your application and data needs.\n \n* **Flexible Networking**: Create virtual networks, configure subnets, manage IPs, and set up VPNs to connect your cloud environments.\n \n* **Security**: Implement security measures like firewalls, access control policies, and encryption to protect your infrastructure and data.\n \n* **Automation & Integration**: Utilize APIs and other tools to automate tasks and integrate with third-party services.\n \n\nBenefits\n--------\n\nUsing IaaS offers several advantages, such as:\n\n* **Cost Efficiency**: Eliminate the need to invest in and maintain physical hardware, while only paying for the resources you actually use.\n \n* **Scalability & Flexibility**: Rapidly adjust and scale your resources to meet changing demand, without the constraints of limited physical hardware capacity.\n \n* **Faster Deployment**: Deploy and configure your infrastructure much faster compared to setting up traditional hardware.\n \n* **Reliability**: Leverage the redundancy and reliability of the cloud 
provider's infrastructure to ensure high availability and minimize downtime.\n \n* **Focus on Core Business**: Free up time and resources that would have been spent on managing and maintaining infrastructure, allowing you to focus on your core business operations.\n \n\nUse Cases\n---------\n\nIaaS is a popular solution for various scenarios, including:\n\n* **Web Apps**: Host and scale web applications, ensuring they can handle sudden traffic spikes or expanding user bases.\n \n* **Development & Testing**: Quickly set up testing and development environments to iterate and validate new features.\n \n* **Data Storage & Backup**: Store large volumes of data, from business-critical databases to offsite backups.\n \n* **Big Data & Analytics**: Process and analyze large data sets with high-performance computing clusters, without the need to invest in specialized hardware.\n \n\nPopular IaaS Providers\n----------------------\n\nThere are several IaaS providers in the market, some of the most popular include:\n\n* Amazon Web Services (AWS)\n* Microsoft Azure\n* Google Cloud Platform (GCP)\n\nEach provider offers a range of services and tools that cater to different needs and requirements. It's essential to evaluate the features, cost structure, and support offered by each platform to make the most suitable choice for your organization.", + "links": [] + }, + "ecpMKP1cQXXsfKETDUrSf": { + "title": "Private", + "description": "A **Private Cloud** is a cloud computing model that is solely dedicated to a single organization. In this model, the organization's data and applications are hosted and managed either within the organization's premises or in a privately-owned data center. 
This cloud model provides enhanced security and control, as the resources are not shared with other organizations, ensuring that your data remains private and secure.\n\nBenefits of Private Cloud\n-------------------------\n\n* **Enhanced Security:** As the resources and infrastructure are dedicated to one organization, the risk of unauthorized access, data leaks, or security breaches is minimal.\n \n* **Customization and Control:** The organization has complete control over their cloud environment, enabling them to customize their infrastructure and applications according to their specific needs.\n \n* **Compliance:** Private clouds can be tailored to meet strict regulatory and compliance requirements, ensuring that sensitive data is protected.\n \n* **Dedicated Resources:** Organizations have access to dedicated resources, ensuring high performance and availability for their applications.\n \n\nDrawbacks of Private Cloud\n--------------------------\n\n* **Higher Costs:** Building and maintaining a private cloud can be expensive, as organizations are responsible for purchasing and managing their own hardware, software, and infrastructure.\n \n* **Limited Scalability:** As resources are dedicated to one organization, private clouds may have limited scalability, requiring additional investments in infrastructure upgrades to accommodate growth.\n \n* **Responsibility for Management and Maintenance:** Unlike public clouds, where the cloud provider handles management and maintenance, the organization is responsible for these tasks in a private cloud, which can be time-consuming and resource-intensive.\n \n\nIn summary, a private cloud model is ideal for organizations that require a high level of security, control, and customization. It is especially suitable for organizations with strict compliance requirements or sensitive data to protect. 
However, this model comes with higher costs and management responsibilities, which should be considered when choosing a cloud model for your organization.", + "links": [] + }, + "ZDj7KBuyZsKyEMZViMoXW": { + "title": "Public", + "description": "A **public cloud** is a cloud service that is available for use by the general public. In this cloud model, a cloud service provider owns and manages the cloud infrastructure, which is shared among multiple users or organizations. These users can access the cloud services via the internet and pay as they use, taking advantage of economies of scale.\n\nKey Features\n------------\n\n* **Shared Infrastructure**: The public cloud is built on a shared infrastructure, where multiple users or organizations leverage the same hardware and resources to store their data or run their applications.\n* **Scalability**: Public clouds offer greater scalability than private clouds, as they can quickly allocate additional resources to users who need them.\n* **Cost-effective**: Since public clouds operate on a pay-as-you-go model, users only pay for the resources they consume, making it more cost-effective for organizations with fluctuating resource requirements.\n\nBenefits of Public Cloud\n------------------------\n\n* **Lower costs**: There is no need to invest in on-premises hardware, and ongoing costs are usually lower due to economies of scale and the pay-as-you-go model.\n* **Ease of access**: Users can access the cloud services from anywhere using an internet connection.\n* **Updates and maintenance**: The cloud service provider is responsible for maintaining and updating the cloud infrastructure, ensuring that the latest security patches and features are applied.\n* **Reliability**: Public cloud providers have multiple data centers and robust redundancy measures, which can lead to improved service reliability and uptime.\n\nDrawbacks and Concerns\n----------------------\n\n* **Security**: Since public clouds are shared by multiple 
users, there is an increased risk of threats and vulnerabilities, especially if the cloud provider does not have stringent security measures in place.\n* **Privacy and Compliance**: Organizations with strict data privacy and regulatory compliance requirements may find it difficult to use public cloud services, as data may be shared or stored in locations based on the provider's data center locations.\n* **Control**: Users have less direct control over the management and configuration of the cloud infrastructure compared to a private cloud.\n\nDespite these concerns, many businesses and organizations successfully use public clouds to host non-sensitive data or run applications that do not require stringent compliance requirements.\n\nExamples of popular public cloud service providers include Amazon Web Services (AWS), Microsoft Azure, and Google Cloud Platform (GCP).", + "links": [] + }, + "ywRlTuTfh5-NHnv4ZyW1t": { + "title": "Hybrid", + "description": "The hybrid cloud model is a type of cloud computing deployment that combines the features of both private and public cloud models. In this model, organizations can capitalize on the advantages of both models by seamlessly integrating and sharing resources between the two. 
Below, we delve into the key characteristics, benefits, and challenges associated with the hybrid cloud model.\n\nCharacteristics\n---------------\n\n* **Integration**: Hybrid cloud environments rely on a strong connection between private and public clouds, allowing for the secure sharing of data and applications.\n \n* **Scalability**: Organizations can easily scale resources up or down depending on their needs, taking advantage of the flexibility offered by the public cloud while maintaining the security of a private cloud.\n \n* **Cost-Optimization**: Enterprises using the hybrid cloud model can optimize costs by selectively allocating workloads to either public or private cloud environments based on their specific needs.\n \n\nBenefits\n--------\n\n* **Security**: Hybrid clouds offer better security by allowing organizations to store sensitive data in their private cloud while using the public cloud for less-sensitive data and applications.\n \n* **Greater Flexibility**: By combining public and private clouds, organizations can enjoy more flexibility when managing resources and can react quickly to varying workloads and changing requirements.\n \n* **Cost Savings**: In a hybrid cloud model, organizations can take advantage of the pay-as-you-go pricing of public clouds, reducing the overall TCO (Total Cost of Ownership) of their IT infrastructure.\n \n\nChallenges\n----------\n\n* **Complex Management**: Managing a hybrid cloud environment can be more complex compared to a single cloud solution, as organizations must carefully balance resources and maintain data consistency/bandwidth between private and public cloud environments.\n \n* **Security Concerns**: While hybrid clouds offer improved security compared to a purely public cloud solution, organizations must still implement proper security measures and governance policies, such as encryption and access controls, to protect sensitive data.\n \n\nOverall, the hybrid cloud model is an effective solution for 
organizations looking to leverage the best features of both private and public cloud environments to achieve a balance between cost-efficiency, security, and flexibility.", + "links": [] + }, + "0LztOTc3NG3OujCVwlcVU": { + "title": "AWS", + "description": "Amazon Web Services (AWS) is a leading cloud computing platform provided by Amazon. Launched in 2006, AWS offers an extensive range of on-demand IT services, such as computing power, storage, databases, networking, and security, which enable organizations to develop, deploy, and scale applications and infrastructure quickly and cost-effectively.\n\nKey AWS Services\n----------------\n\nAWS provides over 200 different services, with new ones being added regularly. Some of the most important and commonly used services include:\n\nCompute\n-------\n\n* **EC2 (Elastic Compute Cloud):** A virtual server that can be customized to suit various workloads and applications. Instances can be scaled up or down as needed.\n \n* **Lambda:** A serverless computing service that enables you to run your code in response to events or HTTP requests without provisioning or managing servers.\n \n\nStorage\n-------\n\n* **S3 (Simple Storage Service):** A scalable object storage service that allows you to store and retrieve files, such as documents, images, and videos.\n \n* **EBS (Elastic Block Store):** A block storage solution used with EC2 instances for persistent storage.\n \n* **Glacier:** A low-cost archiving solution used for long-term storage and data backup.\n \n\nDatabases\n---------\n\n* **RDS (Relational Database Service):** A managed service for hosting, scaling, and backing up relational databases, such as MySQL, PostgreSQL, and Oracle.\n \n* **DynamoDB:** A managed NoSQL database service, designed for applications that need fast, consistent performance at any scale.\n \n\nNetworking\n----------\n\n* **VPC (Virtual Private Cloud):** Provides a virtual network for your AWS resources, enabling you to control and isolate 
your cloud environment.\n \n* **Route 53:** A Domain Name System (DNS) web service that allows you to manage domain registration and routing policies.\n \n\nSecurity, Identity, and Compliance\n----------------------------------\n\n* **IAM (Identity and Access Management):** Provides centralized control over AWS resource access and user permissions, enabling secure access management for your resources.\n \n* **Cognito:** A user identity and data synchronization service that allows you to authenticate and manage users in your applications.\n \n\nBenefits of AWS\n---------------\n\nThere are several reasons why AWS is widely used and trusted:\n\n* **Scalability:** AWS services are designed to scale with the growing needs of your business. You can adjust resources as needed without any upfront investment.\n \n* **Flexibility:** AWS supports a wide array of operating systems, programming languages, and tools, making it easy to migrate existing applications or develop new ones.\n \n* **Cost-effective:** AWS follows a pay-as-you-go model, allowing you to pay only for the services and resources you use, eliminating upfront expenses.\n \n* **Security:** AWS has robust security features, such as data encryption, multi-factor authentication, and infrastructure security measures, ensuring that your data and applications remain secure.\n \n* **Global Presence:** With data centers across the globe, AWS enables you to serve your customers with low latency and maintain business continuity.\n \n\nAs a part of your cybersecurity strategy, it’s crucial to understand and securely configure your AWS environment. 
Secure your cloud infrastructure by adhering to AWS best practices, implementing access controls, and regularly monitoring for vulnerabilities.\n\nFor more information on securing your AWS environment, refer to the [AWS Well-Architected Framework](https://aws.amazon.com/architecture/well-architected/) and the [AWS Security Best Practices](https://d1.awsstatic.com/whitepapers/Security/AWS_Security_Best_Practices.pdf) whitepapers.", + "links": [] + }, + "tOLA5QPKi6LHl1ljsOMwX": { + "title": "GCP", + "description": "Google Cloud Platform (GCP) is a collection of cloud computing services offered by Google, which provides infrastructure and platform services to businesses or individuals. It enables users to either build their own applications or services on the provided resources, or utilize ready-to-use services provided by Google. GCP covers a wide range of services, including (but not limited to) compute, storage, databases, networking, and many more.\n\nKey Features\n------------\n\n* **Global Infrastructure**: GCP is built on Google's global infrastructure, which ensures high performance, availability, and low latency for applications and services hosted on their platform.\n \n* **Scalability**: The platform can easily scale up or down based on the user's needs. 
It allows users to run applications and services on one, tens, or even thousands of virtual machines simultaneously.\n \n* **Security**: GCP provides robust security measures that include data encryption at rest and in transit by default, as well as compliance with various certifications and regulations.\n \n* **Easy Integration**: GCP services can be easily integrated with other Google services, such as Google Drive or Google Analytics, to provide more insights and functionality to your applications.\n \n* **Cost-Effectiveness**: The pay-as-you-go pricing model lets users pay for only the resources they use, without any upfront costs or long-term commitments.\n \n\nCommon GCP Services\n-------------------\n\n* **Compute Engine**: Provides virtual machines (VMs) that can be customized in terms of CPU, memory, storage, etc. You have full control over the VM and can install any software you need.\n* **App Engine**: A fully managed platform for building, deploying, and scaling applications without worrying about infrastructure management. Ideal for web applications or mobile app backends.\n* **Cloud Functions**: Offers event-driven computing, allowing you to run small pieces of code (functions) in response to specific events (triggers such as HTTP requests or file uploads).\n* **Cloud Storage**: A highly scalable and durable object storage solution for unstructured data.\n* **Bigtable**: A highly scalable, fully managed NoSQL database suitable for real-time analytics and large-scale data processing.\n* **Cloud SQL**: A fully managed relational database service for MySQL, PostgreSQL, or SQL Server databases.\n* **Cloud Spanner**: A fully managed, globally distributed relational database service that combines strong consistency, horizontal scaling, and transaction support.\n* **Cloud Pub/Sub**: A messaging service that allows you to send and receive messages between independent applications.\n\nThese are just a few of the many services offered by GCP. 
Leveraging these services can help businesses build and deploy applications in the cloud with ease, while also ensuring that their data and applications are secure and scalable.", + "links": [] + }, + "GklBi7Qx1akN_cS9UMrha": { + "title": "Azure", + "description": "Microsoft Azure, often referred to simply as \"Azure\", is a cloud computing platform and service offered by Microsoft. Azure provides a wide range of cloud services, tools, and resources for organizations and developers to build, deploy, and manage applications on a global scale. With support for multiple programming languages and frameworks, Azure makes it easier to move existing applications or create new ones for the cloud environment.\n\nKey Features\n------------\n\n* **Compute Power**: Azure offers a variety of virtual machines, containers, and serverless computing options to execute and scale applications.\n \n* **Storage**: Azure provides several storage options - Blob Storage for unstructured data, File Storage for file shares, and Disk Storage for block storage.\n \n* **Databases**: Azure offers managed relational databases, NoSQL databases, and in-memory databases for different needs and workloads.\n \n* **Analytics**: Azure provides tools and services for big data and advanced analytics, including Azure Data Lake, Azure Machine Learning, and Power BI.\n \n* **Networking**: Azure supports various networking services, such as Virtual Networks, Load Balancers, and Content Delivery Networks, to ensure secure and reliable connectivity to applications.\n \n* **Security**: Azure provides a range of security services and features to help protect your applications and data, including Advanced Threat Protection, Azure Active Directory, and Azure Firewall.\n \n* **Identity & Access Management**: Azure Active Directory (AD) provides identity and access management services, enabling secure sign-on and multi-factor authentication for applications and users.\n \n* **Hybrid Cloud**: Azure supports hybrid 
cloud deployment, meaning you can run some parts of your infrastructure on-premises and some on Azure.\n \n\nPros and Cons\n-------------\n\n**Pros**:\n\n* Wide range of services and features\n* Integration with other Microsoft products\n* Strong support for hybrid cloud\n* Good for large enterprises already using Microsoft technologies\n\n**Cons**:\n\n* Can be complex to navigate and manage\n* Potentially costly depending on usage and services\n\nAzure is an excellent choice for those looking to leverage a vast array of cloud services, particularly if you're already invested in the Microsoft ecosystem. It's important to keep in mind, though, that the platform's complexity can lead to a steeper learning curve, and managing costs can be challenging as usage scales.", + "links": [] + }, + "2jsTgT7k8MeaDtx6RJhOP": { + "title": "S3", + "description": "Amazon Simple Storage Service (S3) is a scalable, high-speed, low-latency object storage service designed and managed by Amazon Web Services (AWS). It offers a simple web service interface that allows developers and businesses to store and retrieve almost any amount or type of data, from anywhere on the internet.\n\nKey Features\n------------\n\n* **Scalable Storage**: Amazon S3 offers virtually unlimited storage capacity, making it perfect for applications that require large amounts of data storage or rapid scaling.\n \n* **High Durability**: S3 automatically stores your data redundantly across multiple devices in multiple geographically dispersed data centers, ensuring 99.999999999% durability of your data.\n \n* **Easy Data Management**: With S3's simple web interface, you can easily create, delete, and manage buckets (storage containers) and objects (files). You can also configure fine-tuned access controls to grant specific permissions to users or groups.\n \n* **Data Transfer**: Amazon S3 supports seamless data transfer using various methods like the AWS Management Console, AWS SDKs, and the REST API. 
You can also enable data transfers between S3 and other AWS services.\n \n* **Object Versioning**: S3 supports versioning of objects, allowing you to preserve, retrieve, and restore every version of an object in a bucket.\n \n* **Security**: S3 provides secure access to your data by integrating with AWS Identity and Access Management (IAM) and supporting encryption in transit and at rest.\n \n\nUse cases\n---------\n\n* _Backup and Archiving_: Amazon S3 is an ideal solution for backing up and archiving your critical data, ensuring it's durably stored and immediately available when needed.\n \n* _Big Data Analytics_: With its scalable and data-agnostic design, S3 can support big data applications by consistently delivering low latency and high throughput access to vast amounts of data.\n \n* _Content Distribution_: S3 can be easily integrated with Amazon CloudFront, a content delivery network (CDN), to distribute large files, like videos or software packages, quickly and efficiently.\n \n* _Static Website Hosting_: You can host an entire static website on Amazon S3 by simply enabling the website hosting feature on your bucket and uploading the static files.\n \n\nIn summary, Amazon S3 is an essential component of the AWS ecosystem that offers a reliable, scalable, and secure storage solution for businesses and applications of all sizes. By leveraging its powerful features and integrations, you can implement a robust cybersecurity strategy for your cloud storage needs.", + "links": [] + }, + "9OastXVfiG1YRMm68ecnn": { + "title": "Dropbox", + "description": "Dropbox is a widely used cloud storage service that allows you to store, access, and share files, documents, and media with ease across various devices. Launched in 2007, Dropbox has become one of the most popular cloud storage solutions, catering to both individual users and businesses. 
The service is available on multiple platforms, including Windows, macOS, Linux, iOS, and Android.\n\nKey features\n------------\n\n* **File synchronization**: Sync the same files across all your devices and have instant access to updated files from anywhere.\n* **File sharing**: Easily share files or folders by sending a link or inviting other users to a shared folder.\n* **Collaboration**: Dropbox allows real-time collaboration on documents with multiple users using integrations with other tools like Google Workspace and Microsoft Office 365.\n* **Version history**: Retrieve previous versions of a file for up to 30 days, allowing you to recover deleted files or reverse changes.\n\nPlans and pricing\n-----------------\n\nDropbox offers various plans for individual users and businesses with different storage capacities and features:\n\n* **Basic**: Free plan with 2 GB storage and core features like file synchronization and sharing.\n* **Plus**: Priced at $9.99/month for 2 TB storage, additional features like Smart Sync, remote device wipe, and a longer (30-day) version history.\n* **Professional**: Priced at $19.99/month for 3 TB storage and added features like advanced sharing controls and full-text search.\n* **Business plans**: Starting from $12.50/user/month for a minimum of 3 users, with 5 TB storage per user, priority support, and additional file controls.\n\nSecurity and privacy\n--------------------\n\nDropbox takes security and privacy seriously, with features like:\n\n* **Encryption**: Files are encrypted both when they are stored on Dropbox servers and during transmission (using SSL/TLS).\n* **Two-factor authentication**: You can enable two-factor authentication (2FA) to add an extra layer of security to your account.\n* **Selective sync**: Choose which files and folders to sync on each device, allowing you to keep sensitive data off certain computers or devices.\n* **GDPR compliance**: Dropbox is compliant with the General Data Protection Regulation 
(GDPR), which ensures better data protection and privacy for users.\n\nDrawbacks\n---------\n\nThere are a few downsides to using Dropbox as your cloud storage solution:\n\n* Limited storage on the free plan.\n* The need for a third-party app to encrypt files before uploading to add an extra layer of security.\n* Other alternatives offer additional features like built-in document editing.\n\nConclusion\n----------\n\nDropbox is a simple and user-friendly cloud storage service that offers seamless integration with various platforms and efficient file sharing options. While its free plan may be limited compared to other alternatives, the ease of use and robust feature set make it a popular choice for both personal and professional use.", + "links": [] + }, + "4Man3Bd-ySLFlAdxbLOHw": { + "title": "Box", + "description": "[Box](https://www.box.com/) is a popular cloud storage service that provides individuals and businesses with a platform to securely store, share, and access files and documents from any device. Box is known for its emphasis on security and collaboration features, making it an ideal choice for businesses who want a secure way to share and collaborate on files with their teams.\n\nFeatures\n--------\n\n* **Security:** Box ensures the data stored within their platform is secure by implementing various security measures, such as encryption (in-transit and at-rest), multi-factor authentication, and granular access controls.\n* **Collaboration:** Users can easily invite collaborators, assign permissions, and share files via secure links within Box. 
It also features real-time document editing and file version history.\n* **Integrations:** Box integrates with several other applications and services, such as Microsoft Office 365, Google Workspace, Salesforce, Slack, and more.\n* **Box Drive:** With Box Drive, users can access and work on their files directly from the desktop, without downloading them locally, making it easy to keep files up-to-date.\n\nPricing\n-------\n\nBox offers a [variety of pricing plans](https://www.box.com/pricing), catering to different user requirements. These include:\n\n* **Individual Plan:** Free, with limited storage and features.\n* **Personal Pro Plan:** $10/month, includes 100GB storage, larger file size support, and additional features.\n* **Business Plans:** Starting at $5/user/month, tailored to meet the needs of small to enterprise-level businesses, with increased storage, advanced security, and much more.\n\nPrivacy & Compliance\n--------------------\n\nBox is compliant with various international privacy laws and regulations, such as GDPR, HIPAA, and FedRAMP. It also undergoes third-party audits and assessments to verify the efficacy of their security measures.\n\nIn conclusion, Box is a highly secure and feature-rich cloud storage service that is specifically designed for businesses and individuals who require advanced security and collaboration functionality.", + "links": [] + }, + "MWqnhDKm9jXvDDjkeVNxm": { + "title": "OneDrive", + "description": "OneDrive is a popular cloud storage service provided by Microsoft. Part of the Microsoft 365 suite, OneDrive offers a seamless and secure solution for storing and accessing your files from any device, anytime, and anywhere. Below, we'll discuss some of its features and why it's important to consider for your cloud storage needs.\n\nFeatures\n--------\n\n* **Ease of Access**: OneDrive can be accessed through a web browser, or by using its desktop and mobile apps. 
It comes integrated with Windows 10 and can also be used on Mac, Android, and iOS devices.\n \n* **Storage Space**: OneDrive offers 5GB free storage for new users, and additional storage can be purchased through its subscription plans. Microsoft 365 subscribers receive 1TB of OneDrive storage with their plan.\n \n* **File Syncing**: OneDrive allows you to sync your files across different devices using the same account. This makes it easier to access your files and work on the same document from different locations.\n \n* **Security and Privacy**: Microsoft ensures that your data is encrypted both at rest and in transit. OneDrive also offers security measures such as two-factor authentication and the ability to recover files from the recycle bin.\n \n* **Collaboration**: OneDrive is integrated with Microsoft Office. This enables you to collaborate on Word, Excel, and PowerPoint files in real-time, and also view and edit files using Office Online.\n \n* **Automatic Backup**: OneDrive offers built-in automatic backup features. It can be configured to backup your files, including documents, pictures, and other files on your computer or device.\n \n* **Version History**: OneDrive keeps version history for your files, allowing you to restore previous versions if needed. This is useful, especially when working on collaborative documents, to ensure no work is lost.\n \n\nImportance\n----------\n\nOneDrive is an excellent cloud storage solution, fitting the needs of individuals and businesses alike. It offers various features, such as syncing across devices, real-time collaboration, and robust security measures. 
Whether you need a personal or professional cloud storage solution, OneDrive is worth considering for its versatility and integration with Microsoft's suite of productivity tools.", + "links": [] + }, + "fTZ4PqH-AMhYA_65w4wFO": { + "title": "Google Drive", + "description": "Google Drive is a cloud-based storage solution provided by Google, which offers users the ability to store, share, and collaborate on files and documents across different platforms and devices. It is integrated with Google's productivity suite, including Google Docs, Sheets, Slides, and Forms, allowing seamless collaboration with team members in real-time.\n\nKey Features\n------------\n\n* **Storage Capacity:** Google Drive offers 15 GB of free storage for individual users, with the option to upgrade to additional storage plans with a subscription.\n* **File Sharing and Collaboration:** You can share files, folders, or your entire drive with others, allowing them to view, edit, or comment on your documents. Collaboration features include real-time editing and support for multiple users.\n* **Data Security:** Google Drive encrypts data in transit and at rest, ensuring that your files are protected from unauthorized access. 
Additionally, you can manage user permissions and expiration dates for shared files.\n* **Version History:** Drive keeps track of changes made to your documents, allowing you to view or revert to previous versions any time.\n* **Multi-platform Support:** Drive can be accessed through the web, as well as through desktop and mobile apps for Windows, macOS, Android, and iOS devices.\n* **Integration with Google Workspace:** Google Drive is seamlessly integrated with other Google Workspace applications like Google Docs, Sheets, Slides, and Forms for a fully integrated, cloud-based productivity suite.\n\nTips for Using Google Drive Securely\n------------------------------------\n\n* **Enable Two-Factor Authentication (2FA):** Implement 2FA on your Google account to add an extra layer of security during login.\n* **Regularly Review Permissions:** Periodically review file and folder sharing permissions to ensure that access is granted only to necessary parties.\n* **Be Cautious with External Sharing:** Avoid sharing sensitive information with external users, and consider using expiring links or password protection for sensitive files.\n* **Employ Strong Passwords:** Utilize unique and complex passwords for your Google account to mitigate the risk of unauthorized access.\n* **Monitor Activity:** Leverage built-in Google Drive tools to audit user activity and identify potential security threats.", + "links": [] + }, + "Wqy6ki13hP5c0VhGYEhHj": { + "title": "iCloud", + "description": "[iCloud](https://www.icloud.com/) is a cloud storage service offered by Apple Inc. that provides secure and seamless storage, backup, and synchronization of data across all of your Apple devices. 
It allows you to store documents, photos, music, contacts, calendars, and more, enabling you to access this information from your iPhone, iPad, iPod touch, Mac, or PC.\n\nKey Features\n------------\n\n* **iCloud Drive**: A secure space in the cloud where you can store your files and access them from any compatible device. You can also share files or entire folders with others.\n* **Photos**: Automatically stores and organizes all your photos and videos in iCloud. You can access them from any of your devices and even create shared photo albums for specific moments or events.\n* **Backup**: iCloud automatically backs up your iOS and iPadOS devices daily, ensuring that your data is safe and up-to-date. If you ever need to restore a device, iCloud Backup can help you get your data back quickly and easily.\n* **Find My**: This feature helps you locate your lost or stolen Apple devices by displaying their location on a map. Additionally, it allows you to remotely lock, erase, or play a sound on your lost device to protect your data.\n* **iCloud Keychain**: Securely stores and syncs your passwords and credit card information across all your Apple devices. It helps you generate strong passwords and autofill them when needed, making your online experience simple and more secure.\n* **Family Sharing**: Allows you to share various Apple services, like iCloud storage, Apple Music, and App Store purchases, with up to five family members. It also includes a shared family calendar and photo album.\n\nPricing and Storage Plans\n-------------------------\n\niCloud offers 5 GB of free storage. However, if you need more space, you can choose from the following paid storage plans:\n\n* 50 GB for $0.99 per month\n* 200 GB for $2.99 per month\n* 2 TB for $9.99 per month\n\nPricing may vary based on your location.\n\nTo manage and upgrade your storage plan, go to the Settings app on your iOS or iPadOS device, then tap on your name, and then select iCloud. 
On a Mac, open System Preferences, click on Apple ID, and then select iCloud.\n\nIn summary, iCloud is a convenient and secure cloud storage solution that allows you to effortlessly store and access your data across all of your Apple devices. With its wide range of features, like iCloud Drive, Photos, Backup, and Find My, iCloud helps you stay connected and protect your valuable information.", + "links": [] + }, + "_RnuQ7952N8GWZfPD60sJ": { + "title": "Programming Skills", + "description": "Programming knowledge is a fundamental skill for professionals in the cybersecurity field, as it enables them to build, assess, and defend computer systems, networks, and applications. Having a strong foundation in programming languages, concepts, and techniques is essential for identifying potential security threats, writing secure code, and implementing robust security measures.\n\nKey Programming Languages\n-------------------------\n\nIt's important to learn multiple programming languages relevant to cybersecurity, as different languages cater to different types of tasks and environments. Here are some of the most widely used programming languages in the cybersecurity field:\n\n* **Python**: As an easy-to-learn high-level language, Python is commonly used for tasks like automation, scripting, and data analysis. It also contains a plethora of libraries and frameworks for cybersecurity, making it highly valuable for security professionals.\n* **C/C++**: These two languages are foundational for understanding system and application-level vulnerabilities since most operating systems are written in C and C++. Knowledge of these languages allows cybersecurity experts to analyze source code, identify potential exploits, and create secure software.\n* **Java**: As a popular and versatile programming language, Java is often used in web applications and enterprise environments. 
Java knowledge equips cybersecurity professionals to understand and mitigate potential security flaws in Java-based applications.\n* **JavaScript**: With its ubiquity in modern web browsers, JavaScript is crucial for understanding and protecting against web security vulnerabilities, such as Cross-Site Scripting (XSS) and Cross-Site Request Forgery (CSRF) attacks.\n* **Ruby**: Ruby has a strong foothold in web application development and is utilized for scripting and automation, just like Python. Familiarity with Ruby may give cybersecurity professionals an edge in certain environments.\n\nConcepts and Techniques\n-----------------------\n\nTo apply programming knowledge effectively in cybersecurity, you should ground yourself in key concepts and techniques, such as:\n\n* **Cryptography**: Learn about encryption, decryption, encoding, and hashing techniques, as well as fundamental cryptographic algorithms and protocols used to secure data transmission and storage.\n* **Secure coding practices**: Understand concepts like input validation, output encoding, and error handling, which help prevent security vulnerabilities in programs.\n* **Reverse engineering**: Master the art of deconstructing software and analyzing it without access to the original source code, which is crucial for dissecting malware, identifying vulnerabilities, and developing security patches.\n* **Scripting and automation**: Develop skills in writing scripts and automating tasks, as it can save time and enhance efficiency in cybersecurity workflows.\n* **Data analysis**: Learn to analyze and visualize data relevant to cybersecurity, such as network traffic logs, patterns, and trends, to make informed decisions and implement appropriate defense strategies.\n\nAcquiring programming knowledge in cybersecurity can help you stay on top of the latest threats, develop secure software, and implement effective countermeasures. 
As you progress in your cybersecurity career, you'll find that your programming skills will continually evolve and your understanding of various languages, concepts, and techniques will expand.", + "links": [] + }, + "XiHvGy--OkPFfJeKA6-LP": { + "title": "Python", + "description": "Python is a versatile, high-level programming language that is widely used in various fields, such as web development, data analysis, artificial intelligence, and cyber security. It is known for its simplicity, readability, and extensive library support, making it a popular choice for beginners as well as experts.\n\nKey Features:\n-------------\n\n* **Easy to learn and read**: Python features a clean and simple syntax, which makes it easy for beginners to start coding quickly and minimizes the chance of errors.\n* **Platform independent**: Python can run on any platform, including Windows, Linux, and macOS, making it suitable for cross-platform development.\n* **Large ecosystem**: Python has a vast ecosystem of libraries and frameworks, including popular ones like Django, Flask, and Scikit-learn, which can help speed up the development process.\n* **Strong community support**: Python has a large and active community, which provides a wealth of resources, such as tutorials, sample code, and expert assistance when needed.\n\nPython in Cyber Security:\n-------------------------\n\nPython is particularly valuable in the field of cyber security for several reasons:\n\n* **Scripting and Automation**: Python is excellent for creating scripts and automating tasks, which is useful for managing security tasks such as log analysis, scanning networks, and penetration testing.\n* **Exploit Development**: Python's readability and simplicity make it suitable for developing exploits and writing proof-of-concept code, essential tasks in cyber security.\n* **Analysis and Visualization**: With powerful libraries like Pandas, NumPy, and Matplotlib, Python can help security analysts process, analyze, and 
visualize large data sets, making it easier to identify patterns and detect security threats.\n\nLearning Python:\n----------------\n\nTo start learning Python, here are some useful resources:\n\nRemember, practice is key, and the more you work with Python, the more you'll appreciate its utility in the world of cyber security.", + "links": [ + { + "title": "Python.org", + "url": "https://www.python.org/", + "type": "article" + }, + { + "title": "Real Python", + "url": "https://realpython.com/", + "type": "article" + }, + { + "title": "Automate the Boring Stuff with Python", + "url": "https://automatetheboringstuff.com/", + "type": "article" + }, + { + "title": "Explore top posts about Python", + "url": "https://app.daily.dev/tags/python?ref=roadmapsh", + "type": "article" + } + ] + }, + "jehVvdz8BnruKjqHMKu5v": { + "title": "Go", + "description": "Go, also known as Golang, is an open-source programming language created by Google. Launched in 2009, it was designed to overcome issues present in other languages and offer a more secure, robust, and efficient development experience.\n\nKey Features of Go\n------------------\n\n* **Performance**: Go is a statically-typed compiled language, which means that it offers greater performance compared to interpreted programming languages like Python or JavaScript.\n* **Concurrency**: One of the strengths of Go is its support for concurrent programming. 
It uses goroutines to handle multiple tasks simultaneously and efficiently.\n* **Simplicity & Readability**: The syntax of Go is straightforward and easy to understand, making it an excellent choice for the development of secure applications.\n* **Static Typing & Strong Type Safety**: Go enforces static typing, which helps to detect errors at the development stage and minimize security risks.\n* **Standard Library & Collaboration**: Go has a rich standard library, which provides numerous packages for various tasks, such as cryptography, data handling, and communication protocols.\n\nGo In Cyber Security\n--------------------\n\nGo is increasingly becoming popular in the field of cyber security due to its unique features:\n\n* **Secure Web Development**: Go offers built-in support for handling sensitive data, secure communication protocols like HTTPS, and secure cryptographic methods, which help in developing secure web applications.\n* **Network Security**: With its efficient concurrency model, Go is suitable for building network security tools like scanners, proxies, intrusion detection systems, and more.\n* **Malware Analysis**: Go's performance and ease of use make it suitable for developing tools to detect, analyze, and reverse engineer malware.\n* **Cryptographic Tools & Utility**: Go's standard library covers a wide range of cryptography methods, making it convenient to build secure tools and utilities.\n* **Open-Source Software Security**: As an open-source language, Go attracts a large community of developers who collaborate and continuously improve its security features.\n\nGo Resources\n------------\n\nTo get started with Go, consider leveraging the following resources:\n\nAs you learn and incorporate Go into your cyber security toolkit, you will find it to be a versatile and valuable language in building secure, efficient, and reliable tools and applications.", + "links": [ + { + "title": "Golang Courses on Udemy, Coursera, and Pluralsight", + "url": 
"https://www.udemy.com/topic/go/", + "type": "course" + }, + { + "title": "Official Go Documentation", + "url": "https://golang.org/doc/", + "type": "article" + }, + { + "title": "Go by Example", + "url": "https://gobyexample.com/", + "type": "article" + }, + { + "title": "A Tour of Go", + "url": "https://tour.golang.org/", + "type": "article" + }, + { + "title": "The Go Programming Language book", + "url": "http://www.gopl.io/", + "type": "article" + }, + { + "title": "Explore top posts about Golang", + "url": "https://app.daily.dev/tags/golang?ref=roadmapsh", + "type": "article" + } + ] + }, + "2SThr6mHpX6rpW-gmsqxG": { + "title": "JavaScript", + "description": "JavaScript (often abbreviated as JS) is a widely-used, high-level programming language. It is predominantly used for creating and enhancing the interactive elements of web pages, making it an integral part of the web development space. JavaScript was initially known as LiveScript and was created by Brendan Eich in 1995, but it later got renamed to JavaScript.\n\nFeatures of JavaScript:\n-----------------------\n\n* **Interpreted Language:** JavaScript does not need to be compiled before it is run which makes it easier to find errors in the code.\n* **Object-Oriented Programming:** JavaScript supports object-oriented programming (OOP) concepts, making it easier for developers to work with complex data structures and code.\n* **Event-driven:** JavaScript supports event-driven programming, allowing developers to create interactive elements and respond to user actions like clicks and keypress events on the web page.\n* **Cross-platform Compatibility:** JavaScript can be run on any browser, platform, or operating system, making it a highly versatile language.\n\nJavaScript in Web Development\n-----------------------------\n\nJavaScript is an essential part of web development primarily due to its ability to manipulate and interact with HTML and CSS elements on a web page.\n\nSome common uses for JavaScript in 
web development:\n\n* **Form Validation:** Validating user inputs in contact forms, registrations forms, and other user input scenarios.\n* **Image Sliders and Galleries:** Creating dynamic image sliders and galleries on websites to enhance user experience.\n* **Interactive Maps:** Integrating interactive maps into websites for display or directions.\n* **Animation:** Adding animations to elements on a webpage for a more engaging experience.\n\nJavaScript Libraries and Frameworks\n-----------------------------------\n\nJavaScript has many libraries and frameworks to help developers work more efficiently and to attain better results. Some popular libraries and frameworks include:\n\n_jQuery:_ A highly popular JavaScript library that simplifies DOM manipulation, event handling, and animations.\n\n_React:_ Developed by Facebook, it is a JavaScript library for building interactive user interfaces (UI).\n\n_Angular:_ A powerful, Google-developed JavaScript framework used for developing dynamic web applications.\n\n_Vue.js:_ A lightweight, easy-to-learn JavaScript framework for building interactive user interfaces.\n\n_Node.js:_ A JavaScript runtime environment built on Chrome's V8 JavaScript engine, allowing developers to run JavaScript on the server-side.\n\nLearning JavaScript\n-------------------\n\nHere are some resources to sharpen your JavaScript programming skills:\n\nBy mastering JavaScript, you'll be better equipped to build more interactive and dynamic web applications, thus enhancing your overall cyber security skills.", + "links": [ + { + "title": "Mozilla Developer Network (MDN) JavaScript Guide", + "url": "https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide", + "type": "article" + }, + { + "title": "W3Schools JavaScript Tutorial", + "url": "https://www.w3schools.com/js/", + "type": "article" + }, + { + "title": "Eloquent JavaScript: A Modern Introduction to Programming", + "url": "https://eloquentjavascript.net/", + "type": "article" + }, + { + 
"title": "Explore top posts about JavaScript", + "url": "https://app.daily.dev/tags/javascript?ref=roadmapsh", + "type": "article" + } + ] + }, + "8jj9hpe9jQIgCc8Txyw3O": { + "title": "C++", + "description": "C++ is a widely-used, high-level programming language that evolved from the earlier C programming language. Developed by Bjarne Stroustrup in 1985 at Bell Labs, C++ provides object-oriented features and low-level memory manipulation, making it an essential language for many fields, including game development, high-performance systems, and cybersecurity.\n\nKey Features of C++:\n--------------------\n\nObject-Oriented Programming (OOP)\n---------------------------------\n\nC++ is one of the first programming languages to support Object-Oriented Programming (OOP). It allows code to be modular and reusable through the use of classes and objects.\n\nPerformance\n-----------\n\nC++ provides high performance, as it allows low-level access to memory and fine-grained control over system resources. This makes C++ suitable for performance-critical applications like network security systems and firewalls.\n\nCompatibility\n-------------\n\nC++ is highly compatible with the C programming language, which makes it easier for programmers to transition from C to C++. Many system-level libraries and applications written in C can be easily extended or integrated with C++ code.\n\nStandard Template Library (STL)\n-------------------------------\n\nC++ comes with a rich library called the Standard Template Library (STL). The STL contains efficient templated data structures and algorithms, which can improve development speed and code quality.\n\nImportance of C++ in Cybersecurity\n----------------------------------\n\nC++ is widely used in the development of cybersecurity tools and applications due to its efficiency, low-level access, and compatibility with existing systems. 
Some reasons for its importance in cybersecurity include:\n\n* **Developing Security Software:** C++ is commonly used in developing antivirus software, firewalls, intrusion detection systems, and other security tools due to its strong performance capabilities.\n \n* **Reverse Engineering and Exploit Development:** Cybersecurity professionals often use C++ to reverse-engineer malware, study their behavior, and develop countermeasures to stop them.\n \n* **Vulnerability Analysis:** Since many applications are developed in C++, understanding the language helps cybersecurity professionals assess the code for vulnerabilities and potential exploits.\n \n* **Secure Code Development:** Developing secure applications is vital to prevent security breaches. With its powerful features, C++ enables developers to write efficient, maintainable, and secure code.\n \n\nResources for Learning C++\n--------------------------\n\nTo advance your programming skills in C++ and leverage its power for cybersecurity tasks, consider the following resources:\n\nBy mastering C++, you'll be well-equipped to develop and secure applications, analyze cybersecurity threats, and effectively contribute to the broader cybersecurity community.", + "links": [ + { + "title": "Coursera: C++ For C Programmers", + "url": "https://www.coursera.org/specializations/c-plus-plus-programming", + "type": "course" + }, + { + "title": "Cplusplus.com", + "url": "http://www.cplusplus.com/", + "type": "article" + }, + { + "title": "CPPReference.com", + "url": "https://en.cppreference.com/", + "type": "article" + }, + { + "title": "A Tour of C++", + "url": "https://www.amazon.com/Tour-C-Depth/dp/0134997832", + "type": "article" + }, + { + "title": "Explore top posts about C++", + "url": "https://app.daily.dev/tags/c++?ref=roadmapsh", + "type": "article" + } + ] + }, + "tao0Bb_JR0Ubl62HO8plp": { + "title": "Bash", + "description": "Bash (**B**ourne **A**gain **Sh**ell) is a widely-used Unix shell and scripting language 
that acts as a command-line interface for executing commands and organizing files on your computer. It allows users to interact with the system's operating system by typing text commands, serving as an alternative to the graphical user interface (GUI). Bash, created as a free and improved version of the original Bourne Shell (`sh`), is the default shell in many Unix-based systems, including Linux, macOS, and the Windows Subsystem for Linux (WSL).\n\nBash Scripting\n--------------\n\nBash scripting is an essential skill for anyone engaged in cyber security. It allows you to automate simple tasks, monitor system activities, and manage multiple files and directories with ease. With Bash scripts, you can develop tools, automate repetitive tasks, or even develop security testing tools.\n\nKey Features\n------------\n\n* **Variables**: Variables can store data in the form of strings or numbers, which can be used and manipulated throughout your script.\n \n* **Control Structures**: Bash supports loops (`for`, `while`) and conditional statements (`if`, `case`) to build more robust scripts with decision-making capabilities.\n \n* **Functions**: Create reusable code blocks that can be called with specified parameters, making your script more modular and easier to maintain.\n \n* **User Input**: Bash scripts allow you to interact with the user by accepting input or choosing options.\n \n* **File Management**: Create, modify, or analyze files using built-in commands such as `ls`, `cp`, `mkdir`, and `grep`.\n \n\nLearning Bash\n-------------\n\nAs a cyber security expert, having a strong foundation in Bash can save you time and help you better understand the inner workings of a system. 
Invest time in learning Bash essentials, such as basic commands, file manipulation, scripting, and processing text data.\n\n* Basic Commands: Start by learning some of the most commonly used Bash commands: `cd`, `mv`, `cp`, `rm`, `grep`, `find`, `sort`, etc.\n \n* File and Directory Management: Explore the use of commands, like `mkdir`, `rmdir`, `touch`, `chmod`, `chown`, and `ln`, to create, modify, and delete files and directories.\n \n* Text Processing: Learn to use commands like `cat`, `less`, `head`, `tail`, and `awk` to analyze and manipulate text data.\n \n* Scripting: Start by understanding the syntax and structure of Bash scripts, and learn how to create, debug, and execute scripts.\n \n\nSome resources to begin your journey with Bash are:\n\nBash scripting is a versatile tool in the cybersecurity toolkit, and mastering it will provide you with greater control over the systems you protect.", + "links": [ + { + "title": "GNU Bash Manual", + "url": "https://www.gnu.org/software/bash/manual/bash.html", + "type": "article" + }, + { + "title": "Bash Beginner's Guide", + "url": "http://www.tldp.org/LDP/Bash-Beginners-Guide/html/", + "type": "article" + }, + { + "title": "Bash Academy", + "url": "https://www.bash.academy/", + "type": "article" + }, + { + "title": "Learn Shell", + "url": "https://www.learnshell.org/", + "type": "article" + }, + { + "title": "Explore top posts about Bash", + "url": "https://app.daily.dev/tags/bash?ref=roadmapsh", + "type": "article" + } + ] + }, + "paY9x2VJA98FNGBFGRXp2": { + "title": "Power Shell", + "description": "PowerShell is a powerful command-line shell and scripting language developed by Microsoft primarily for the purpose of automating tasks and managing system configuration. 
PowerShell is designed specifically for Windows but has been made available for other platforms as well, such as macOS and Linux.\n\nWhy PowerShell?\n---------------\n\n* **Automation:** PowerShell scripts allow users to automate tasks, helping to save time and reduce the likelihood of introducing errors during manual processes.\n \n* **Command discovery:** PowerShell's built-in `Get-Command` cmdlet allows users to easily find and learn about the commands available to them.\n \n* **Consistency:** The consistency of the PowerShell syntax makes it easy to learn and use the scripting language, allowing users to create complex scripts with minimal investment in time and effort.\n \n* **Cross-platform compatibility:** PowerShell is now available across various platforms, making it even more valuable to learn and implement in your daily work.\n \n\nBasic Concepts\n--------------\n\nHere are some essential concepts to understand while working with PowerShell:\n\n* **Cmdlet:** A cmdlet is a lightweight command that performs a specific action, such as creating a new folder or listing the files in a directory. Cmdlets follow the 'Verb-Noun' syntax (e.g., `Get-Process`, `New-Item`).\n \n* **Pipeline:** A pipeline is a method of passing the output of one cmdlet as input to another cmdlet. It's represented using the '|' symbol. (e.g., `Get-Process | Stop-Process`)\n \n* **Aliases:** Aliases are alternate names for cmdlets, created to provide a more intuitive, shorthand way to call the original cmdlet (e.g., `ls` is an alias for `Get-ChildItem`).\n \n* **Variables:** Variables in PowerShell use the `$` symbol for storing values. 
(e.g., `$myVariable = \"Hello, World!\"`)\n \n* **Operators:** PowerShell supports various operators, such as arithmetic operators, comparison operators, logical operators, etc., for performing calculations, comparisons, and transformations on variables and values.\n \n* **Scripting:** PowerShell scripts are saved as `.ps1` files and executed using command line or Integrated Scripting Environment (ISE).\n \n\nLearning PowerShell\n-------------------\n\nTo get started with PowerShell, begin by learning about the available cmdlets, syntax, and features. Useful resources for learning PowerShell include:\n\nIn conclusion, PowerShell is an essential tool for anyone working with Windows systems and can greatly benefit those in the cybersecurity field. The ability to automate tasks and manage configurations using PowerShell will provide a significant advantage, allowing for more efficient and accurate work.", + "links": [ + { + "title": "Learning PowerShell GitHub Repository", + "url": "https://github.com/PowerShell/PowerShell/tree/master/docs/learning-powershell", + "type": "opensource" + }, + { + "title": "Microsoft's Official PowerShell Documentation", + "url": "https://docs.microsoft.com/en-us/powershell/", + "type": "article" + }, + { + "title": "PowerShell.org", + "url": "https://powershell.org/", + "type": "article" + }, + { + "title": "Stack Overflow", + "url": "https://stackoverflow.com/questions/tagged/powershell", + "type": "article" + }, + { + "title": "Reddit's r/PowerShell", + "url": "https://www.reddit.com/r/PowerShell/", + "type": "article" + } + ] + }, + "Jd9t8e9r29dHRsN40dDOk": { + "title": "GTFOBINS", + "description": "GTFOBins (GTFOBINS) is a curated list of Unix binaries that can be exploited by attackers to bypass local security restrictions on a misconfigured system. 
It provides a detailed index of commands and scripts, demonstrating how certain binaries, when used improperly, can enable privilege escalation, file manipulation, and other unauthorized activities, thus serving as a resource for both security professionals to understand potential vulnerabilities and for attackers to identify and exploit weaknesses.\n\nLearn more from the following resources:", + "links": [ + { + "title": "GTFOBins/GTFOBins.github.io", + "url": "https://gtfobins.github.io/", + "type": "opensource" + }, + { + "title": "Mastering Privilege Escalation: A Comprehensive Guide on GTFOBins", + "url": "https://www.youtube.com/watch?v=gx6CTtWohLQ", + "type": "video" + } + ] + }, + "Rnpx7VkhrBkSQTni6UuTR": { + "title": "WADCOMS", + "description": "WADComs is an interactive cheat sheet containing a curated list of offensive security tools and their respective commands, to be used against Windows and Active Directory environments. Inspired by projects like GTFOBins, it lets security professionals filter by the access or credentials they have already obtained and quickly look up the right tool and syntax for enumeration, exploitation, and lateral movement during penetration tests and red team engagements.\n\nLearn more from the following resources:", + "links": [ + { + "title": "WADComs/WADComs.github.io", + "url": "https://wadcoms.github.io/", + "type": "opensource" + }, + { + "title": "WADComs: Windows/Active Directory Interactive Cheat Sheet", + "url": "https://john-woodman.com/research/wadcoms/", + "type": "article" + } + ] + } +} \ No newline at end of file diff --git a/public/roadmap-content/data-analyst.json b/public/roadmap-content/data-analyst.json new file mode 100644 index 000000000..81af5d119 --- /dev/null +++ b/public/roadmap-content/data-analyst.json @@ -0,0 +1,1454 @@ +{ + "3xp2fogAVmwXQhdzhZDWR": { + "title": "Introduction", + "description": "Data Analysis plays a crucial role in today's data-centric world.
It involves the practice of inspecting, cleansing, transforming, and modeling data to extract valuable insights for decision-making. A **Data Analyst** is a professional primarily tasked with collecting, processing, and performing statistical analysis on large datasets. They discover how data can be used to answer questions and solve problems. With the rapid expansion of data in modern firms, the role of a data analyst has been evolving greatly, making them a significant asset in business strategy and decision-making processes.\n\nLearn more from the following resources:", + "links": [] + }, + "yCnn-NfSxIybUQ2iTuUGq": { + "title": "What is Data Analytics", + "description": "Data Analytics is a core component of a Data Analyst's role. The field involves extracting meaningful insights from raw data to drive decision-making processes. It includes a wide range of techniques and disciplines ranging from the simple data compilation to advanced algorithms and statistical analysis. As a data analyst, you are expected to understand and interpret complex digital data, such as the usage statistics of a website, the sales figures of a company, or client engagement over social media, etc. This knowledge enables data analysts to support businesses in identifying trends, making informed decisions, predicting potential outcomes - hence playing a crucial role in shaping business strategies.", + "links": [] + }, + "Lsapbmg-eMIYJAHpV97nO": { + "title": "Types of Data Analytics", + "description": "Data Analytics has proven to be a critical part of decision-making in modern business ventures. It is responsible for discovering, interpreting, and transforming data into valuable information. 
Different types of data analytics look at past, present, or predictive views of business operations.\n\nData Analysts, as ambassadors of this domain, employ these types, which are namely Descriptive Analytics, Diagnostic Analytics, Predictive Analytics and Prescriptive Analytics, to answer various questions — What happened? Why did it happen? What could happen? And what should we do next? Understanding these types gives data analysts the power to transform raw datasets into strategic insights.", + "links": [] + }, + "hWDh0ooidbqZb000ENVok": { + "title": "Descriptive Analytics", + "description": "Descriptive Analytics is one of the fundamental types of Data Analytics that provides insight into the past. As a Data Analyst, utilizing Descriptive Analytics involves the technique of using historical data to understand changes that have occurred in a business over time. Primarily concerned with the “what has happened” aspect, it analyzes raw data from the past to draw inferences and identify patterns and trends. This helps companies understand their strengths, weaknesses and pinpoint operational problems, setting the stage for accurate Business Intelligence and decision-making processes.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Descriptive Analytics: What They Are and Related Terms", + "url": "https://www.investopedia.com/terms/d/descriptive-analytics.asp", + "type": "article" + }, + { + "title": "What are Descriptive Analytics?", + "url": "https://www.youtube.com/watch?v=DlFqQy10aCs", + "type": "video" + } + ] + }, + "j7DA2J3speSaBsZAV4M0v": { + "title": "Diagnostic Analytics", + "description": "Diagnostic analytics, as a crucial type of data analytics, is focused on studying past performance to understand why something happened. This is an integral part of the work done by data analysts. 
Through techniques such as drill-down, data discovery, correlations, and cause-effect analysis, data analysts utilizing diagnostic analytics can look beyond general trends and identify the root cause of changes observed in the data. Consequently, this enables businesses to address operational and strategic issues effectively, by allowing them to grasp the reasons behind such issues. For every data analyst, the skill of performing diagnostic data analytics is a must-have asset that enhances their analysis capability.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is Diagnostic Analytics?", + "url": "https://amplitude.com/explore/analytics/what-diagnostic-analytics", + "type": "article" + }, + { + "title": "What is Diagnostic Analytics? | Understanding Data-Driven Decision Making", + "url": "https://www.youtube.com/watch?v=ikZjeAC1yJ0", + "type": "video" + } + ] + }, + "3WZORRCwme3HsaKew23Z5": { + "title": "Predictive Analytics", + "description": "Predictive analysis is a crucial type of data analytics that any competent data analyst should comprehend. It refers to the practice of extracting information from existing data sets in order to determine patterns and forecast future outcomes and trends. Data analysts apply statistical algorithms, machine learning techniques, and artificial intelligence to the data to anticipate future results. Predictive analysis enables organizations to be proactive, forward-thinking, and strategic by providing them valuable insights on future occurrences. It's a powerful tool that gives companies a significant competitive edge by enabling risk management, opportunity identification, and strategic decision-making.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is predictive analytics? 
- Google", + "url": "https://cloud.google.com/learn/what-is-predictive-analytics", + "type": "article" + }, + { + "title": "What is predictive analytics?", + "url": "https://www.youtube.com/watch?v=cVibCHRSxB0", + "type": "video" + } + ] + }, + "DFMR-0MbmVCCrJu0I9JWG": { + "title": "Prescriptive Analytics", + "description": "Prescriptive analytics, a crucial type of data analytics, is essential for making data-driven decisions in business and organizational contexts. As a data analyst, the goal of prescriptive analytics is to recommend various actions using predictions on the basis of known parameters to help decision makers understand likely outcomes. Prescriptive analytics employs a blend of techniques and tools such as algorithms, machine learning, computational modelling procedures, and decision-tree structures to enable automated decision making. Therefore, prescriptive analytics not only anticipates what will happen and when it will happen, but also explains why it will happen, contributing to the significance of a data analyst’s role in an organization.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is Prescriptive Analysis?", + "url": "https://www.investopedia.com/terms/p/prescriptive-analytics.asp", + "type": "article" + }, + { + "title": "Examples of Prescriptive Analysis", + "url": "https://www.youtube.com/watch?v=NOo8Nc9zG20", + "type": "video" + } + ] + }, + "R12sArWVpbIs_PHxBqVaR": { + "title": "Key Concepts of Data", + "description": "In the realm of data analysis, understanding some key concepts is essential. Data analysis is the process of inspecting, cleansing, transforming, and modeling data to discover useful information and support decision-making. In the broadest sense, data can be classified into various types like nominal, ordinal, interval and ratio, each with a specific role and analysis technique. Higher-dimensional data types like time-series, panel data, and multi-dimensional arrays are also critical.
On the other hand, data quality and data management are key concepts to ensure clean and reliable datasets. With an understanding of these fundamental concepts, a data analyst can transform raw data into meaningful insights.", + "links": [] + }, + "mDUSpexdq3ITV6qokwSJl": { + "title": "Collection", + "description": "In the realm of data analysis, the concept of collection holds immense importance. As the term suggests, collection refers to the process of gathering and measuring information on targeted variables in an established systematic fashion that enables a data analyst to answer relevant questions and evaluate outcomes. This step is foundational to any data analysis scheme, as it is the first line of interaction with the raw data that later transforms into viable insights. The effectiveness of data analysis is heavily reliant on the quality and quantity of data collected. Different methodologies and tools are employed for data collection depending on the nature of the data needed, such as surveys, observations, experiments, or scraping online data stores. This process should be carried out with clear objectives and careful consideration to ensure accuracy and relevance in the later stages of analysis and decision-making.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Data Collection Methods", + "url": "https://www.questionpro.com/blog/data-collection-methods/", + "type": "article" + }, + { + "title": "What is data collection?", + "url": "https://www.simplilearn.com/what-is-data-collection-article", + "type": "article" + } + ] + }, + "nC7tViln4UyQFYP_-fyjB": { + "title": "Cleanup", + "description": "The Cleanup of Data is a critical component of a Data Analyst's role. It involves the process of inspecting, cleaning, transforming, and modeling data to discover useful information, inform conclusions, and support decision making. 
This process is crucial for Data Analysts to generate accurate and significant insights from data, ultimately resulting in better and more informed business decisions. A solid understanding of data cleanup procedures and techniques is a fundamental skill for any Data Analyst. Hence, it is necessary to hold a high emphasis on maintaining data quality by managing data integrity, accuracy, and consistency during the data cleanup process.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Top 10 ways to clean your data", + "url": "https://support.microsoft.com/en-gb/office/top-ten-ways-to-clean-your-data-2844b620-677c-47a7-ac3e-c2e157d1db19", + "type": "article" + }, + { + "title": "Master Data Cleaning Essentials on Excel in Just 10 Minutes", + "url": "https://www.youtube.com/watch?v=jxq4-KSB_OA", + "type": "video" + } + ] + }, + "XFnw4_dQYSzm96-srWa7X": { + "title": "Exploration", + "description": "In the realm of data analytics, exploration of data is a key concept that data analysts leverage to understand and interpret data effectively. Typically, this exploration process involves discerning patterns, identifying anomalies, examining underlying structures, and testing hypothesis, which often gets accomplished via descriptive statistics, visual methods, or sophisticated algorithms. It's a fundamental stepping-stone for any data analyst, ultimately guiding them in shaping the direction of further analysis or modeling. 
This concept serves as a foundation for dealing with complexities and uncertainties in data, hence improving decision-making in various fields ranging from business and finance to healthcare and social sciences.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is data exploration", + "url": "https://www.heavy.ai/learn/data-exploration", + "type": "article" + }, + { + "title": "How to do Data Exploration", + "url": "https://www.youtube.com/watch?v=OY4eQrekQvs", + "type": "video" + } + ] + }, + "jowh4CFLQiFzKaaElyCuQ": { + "title": "Visualisation", + "description": "The visualization of data is an essential skill in the toolkit of every data analyst. This practice is about transforming complex raw data into a graphical format that allows for an easier understanding of large data sets, trends, outliers, and important patterns. Whether pie charts, line graphs, bar graphs, or heat maps, data visualization techniques not only streamline data analysis, but also facilitate a more effective communication of the findings to others. This key concept underscores the importance of presenting data in a digestible and visually appealing manner to drive data-informed decision making in an organization.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Data visualisation beginner's guide", + "url": "https://www.tableau.com/en-gb/learn/articles/data-visualization", + "type": "article" + }, + { + "title": "Data Visualisation in 2024", + "url": "https://www.youtube.com/watch?v=loYuxWSsLNc", + "type": "video" + } + ] + }, + "2pUq-7hu5EjGpowz98YoV": { + "title": "Statistical Analysis", + "description": "Statistical analysis plays a critical role in the daily functions of a data analyst. It encompasses collecting, examining, interpreting, and present data, enabling data analysts to uncover patterns, trends and relationships, deduce insights and support decision-making in various fields. 
By applying statistical concepts, data analysts can transform complex data sets into understandable information that organizations can leverage for actionable insights. This cornerstone of data analysis enables analysts to deliver predictive models, trend analysis, and valuable business insights, making it indispensable in the world of data analytics. It is vital for data analysts to grasp such statistical methodologies to effectively decipher large data volumes they handle.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Understanding Statistical Analysis", + "url": "https://www.simplilearn.com/what-is-statistical-analysis-article", + "type": "article" + }, + { + "title": "Statistical Analysis", + "url": "https://www.youtube.com/watch?v=XjMBZE1DuBY", + "type": "video" + } + ] + }, + "U55hwR0HRCIz2cveVR8FZ": { + "title": "Machine Learning", + "description": "Machine learning, a subset of artificial intelligence, is an indispensable tool in the hands of a data analyst. It provides the ability to automatically learn, improve from experience and make decisions without being explicitly programmed. In the context of a data analyst, machine learning contributes significantly in uncovering hidden insights, recognising patterns or making predictions based on large amounts of data. 
Through the use of varying algorithms and models, data analysts are able to leverage machine learning to convert raw data into meaningful information, making it a critical concept in data analysis.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is Machine Learning (ML)?", + "url": "https://www.ibm.com/topics/machine-learning", + "type": "article" + }, + { + "title": "What is Machine Learning?", + "url": "https://www.youtube.com/watch?v=9gGnTQTYNaE", + "type": "video" + } + ] + }, + "sgXIjVTbwdwdYoaxN3XBM": { + "title": "Analysis / Reporting with Excel", + "description": "Excel is a powerful tool utilized by data analysts worldwide to store, manipulate, and analyze data. It offers a vast array of features such as pivot tables, graphs and a powerful suite of formulas and functions to help sift through large sets of data. A data analyst uses Excel to perform a wide range of tasks, from simple data entry and cleaning, to more complex statistical analysis and predictive modeling. Proficiency in Excel is often a key requirement for a data analyst, as its versatility and ubiquity make it an indispensable tool in the field of data analysis.", + "links": [ + { + "title": "W3Schools - Excel", + "url": "https://www.w3schools.com/excel/index.php", + "type": "article" + } + ] + }, + "wME4MSldOWlMB54ekpReS": { + "title": "IF", + "description": "The IF function in Excel is a crucial tool for data analysts, enabling them to create conditional statements, clean and validate data, perform calculations based on specific conditions, create custom metrics, apply conditional formatting, automate tasks, and generate dynamic reports.
Data analysts use IF to categorize data, handle missing values, calculate bonuses or custom metrics, highlight trends, and enhance visualizations, ultimately facilitating informed decision-making through data analysis.\n\nLearn more from the following resources:", + "links": [ + { + "title": "IF Function", + "url": "https://support.microsoft.com/en-gb/office/if-function-69aed7c9-4e8a-4755-a9bc-aa8bbff73be2", + "type": "article" + }, + { + "title": "Excel IF Function", + "url": "https://exceljet.net/functions/if-function", + "type": "article" + } + ] + }, + "yBlJrNo9eO470dLp6OaQZ": { + "title": "DATEDIF", + "description": "The `DATEDIF` function is an incredibly valuable tool for a Data Analyst in Excel or Google Sheets, by providing the ability to calculate the difference between two dates. This function takes in three parameters: start date, end date and the type of difference required (measured in years, months, days, etc.). In Data Analysis, particularly when dealing with time-series data or when you need to uncover trends over specific periods, the `DATEDIF` function is a necessary asset. Recognizing its functionality will enable a data analyst to manipulate or shape data progressively and efficiently.\n\n* `DATEDIF` is technically still supported, but won't show as an option. For additional information, see Excel \"Help\" page.\n\nLearn more from the following resources:", + "links": [ + { + "title": "DATEDIF function", + "url": "https://support.microsoft.com/en-gb/office/datedif-function-25dba1a4-2812-480b-84dd-8b32a451b35c", + "type": "article" + } + ] + }, + "9sIP-jpNjtA1JPCBjTf-H": { + "title": "VLOOKUP / HLOOKUP", + "description": "Data Analysts often deal with large and complex datasets that require efficient tools for data manipulation and extraction. This is where basic functions like vlookup and hlookup in Excel become extremely useful.
These functions are versatile lookup and reference functions that can find specified data in a vast array, providing ease and convenience in data retrieval tasks.\n\nThe Vertical Lookup (vlookup) is used to find data in a table sorted vertically, while the Horizontal Lookup (hlookup) is used on data organized horizontally. Mastering these functions is crucial for any data analyst's toolbox, as they can dramatically speed up data access, reduce errors in data extraction, and simplify the overall process of analysis. In essence, these two functions are not just basic functions; they serve as essential tools for efficient data analysis.\n\nLearn more from the following resources:", + "links": [ + { + "title": "VLOOKUP Function", + "url": "https://support.microsoft.com/en-gb/office/vlookup-function-0bbc8083-26fe-4963-8ab8-93a18ad188a1", + "type": "article" + }, + { + "title": "HLOOKUP Function", + "url": "https://support.microsoft.com/en-gb/office/hlookup-function-a3034eec-b719-4ba3-bb65-e1ad662ed95f", + "type": "article" + } + ] + }, + "dke_pySrqYZZ7K3rprnIT": { + "title": "REPLACE / SUBSTITUTE", + "description": "", + "links": [] + }, + "YReKRRgE_2dWfGGdBQqbf": { + "title": "UPPER / LOWER / PROPER", + "description": "In the field of data analysis, the Upper, Lower, and Proper functions serve as fundamental tools for manipulating and transforming text data. A data analyst often works with a vast array of datasets, where the text data may not always adhere to a consistent format. To tackle such issues, the Upper, Lower, and Proper functions are used. 'Upper' converts all the text to uppercase, while 'Lower' does the opposite, transforming all text to lowercase. The 'Proper' function is used to capitalize the first letter of each word, making it proper case. 
These functions are indispensable when it comes to cleaning and preparing data, a major part of a data analyst's role.\n\nLearn more from the following resources:", + "links": [ + { + "title": "UPPER Function", + "url": "https://support.microsoft.com/en-gb/office/upper-function-c11f29b3-d1a3-4537-8df6-04d0049963d6", + "type": "article" + }, + { + "title": "LOWER Function", + "url": "https://support.microsoft.com/en-gb/office/lower-function-3f21df02-a80c-44b2-afaf-81358f9fdeb4", + "type": "article" + }, + { + "title": "PROPER Function", + "url": "https://support.microsoft.com/en-gb/office/proper-function-52a5a283-e8b2-49be-8506-b2887b889f94", + "type": "article" + } + ] + }, + "SIiIfcy-jeiRoDlxw385V": { + "title": "CONCAT", + "description": "The term 'Concat' or ‘Concatenation’ refers to the operation of combining two or more data structures, be it strings, arrays, or datasets, end-to-end in a sequence. In the context of data analysis, a Data Analyst uses concatenation as a basic function to merge or bind data sets along an axis - either vertically or horizontally. This function is commonly used in data wrangling or preprocessing to combine data from multiple sources, handle missing values, and shape data into a form that fits better with analysis tools. An understanding of 'Concat' plays a crucial role in managing the complex, large data sets that data analysts often work with.\n\nLearn more from the following resources:", + "links": [ + { + "title": "CONCAT Function", + "url": "https://support.microsoft.com/en-gb/office/concat-function-9b1a9a3f-94ff-41af-9736-694cbd6b4ca2", + "type": "article" + }, + { + "title": "Excel CONCAT Function", + "url": "https://www.w3schools.com/excel/excel_concat.php", + "type": "article" + } + ] + }, + "D5Q6o6ydr1j51kB0ASFPE": { + "title": "TRIM", + "description": "Trim is considered a basic yet vital function within the scope of data analysis. 
It plays an integral role in preparing and cleansing the dataset, which is key to analytical accuracy. Trim allows data analysts to streamline dataset by removing extra spaces, enhancing the data quality. Furthermore, Trim functions can help in reducing the errors, enhancing the efficiency of data modelling and ensuring reliable data insight generation. Understanding Trim function is thus an essential part of a data analyst's toolbox.\n\nLearn more from the following resources:", + "links": [ + { + "title": "TRIM Function", + "url": "https://corporatefinanceinstitute.com/resources/excel/trim-function/", + "type": "article" + }, + { + "title": "Excel TRIM Function", + "url": "https://support.microsoft.com/en-gb/office/trim-function-410388fa-c5df-49c6-b16c-9e5630b479f9", + "type": "article" + } + ] + }, + "FDYunL9KJkR_tHEcUV2iC": { + "title": "AVERAGE", + "description": "The average, also often referred to as the mean, is one of the most commonly used mathematical calculations in data analysis. It provides a simple, useful measure of a set of data. For a data analyst, understanding how to calculate and interpret averages is fundamental. Basic functions, including the average, are integral components in data analysis that are used to summarize and understand complex data sets. 
Though conceptually simple, the power of average lies in its utility in a range of analyses - from forecasting models to understanding trends and patterns in the dataset.\n\nLearn more from the following resources:", + "links": [ + { + "title": "AVERAGE Function", + "url": "https://support.microsoft.com/en-gb/office/average-function-047bac88-d466-426c-a32b-8f33eb960cf6", + "type": "article" + }, + { + "title": "Excel AVERAGE function", + "url": "https://www.w3schools.com/excel/excel_average.php", + "type": "article" + } + ] + }, + "uAdxP1ZkgzcU8JcWjvw6d": { + "title": "COUNT", + "description": "The Count function in data analysis is one of the most fundamental tasks that a Data Analyst gets to handle. This function is a simple yet powerful tool that aids in understanding the underlying data by providing the count or frequency of occurrences of unique elements in data sets. The relevance of count comes into play in various scenarios – from understanding the popularity of a certain category to analyzing customer activity, and much more. This basic function offers crucial insights into data, making it an essential skill in the toolkit of any data analyst.\n\nLearn more from the following resources:", + "links": [ + { + "title": "COUNT Function", + "url": "https://support.microsoft.com/en-gb/office/count-function-a59cd7fc-b623-4d93-87a4-d23bf411294c", + "type": "article" + }, + { + "title": "How to Count Cells in Microsoft Excel (COUNT, COUNTA, COUNTIF, COUNTIFS Functions)", + "url": "https://www.youtube.com/watch?v=5RFLncJuMng", + "type": "video" + } + ] + }, + "_FO80Cm2iAD_bThmnsEgp": { + "title": "SUM", + "description": "Sum is one of the most fundamental operations in data analysis. As a data analyst, the ability to quickly and accurately summarize numerical data is key to draw meaningful insights from large data sets. 
The operation can be performed using various software and programming languages such as Excel, SQL, Python, R etc., each providing distinct methods to compute sums. Understanding the 'sum' operation is critical for tasks such as trend analysis, forecasting, budgeting, and essentially any operation involving quantitative data.\n\nLearn more from the following resources:", + "links": [ + { + "title": "SUM Function", + "url": "https://support.microsoft.com/en-gb/office/sum-function-043e1c7d-7726-4e80-8f32-07b23e057f89", + "type": "article" + }, + { + "title": "How to use the SUM function in excel", + "url": "https://www.youtube.com/watch?v=-u-9f3QrdAQ", + "type": "video" + } + ] + }, + "F3-cr5onkWqKKPRdxu8lV": { + "title": "MIN / MAX", + "description": "Understanding the minimum and maximum values in your dataset is critical in data analysis. These basic functions, often referred to as Min-Max functions, are statistical tools that data analysts use to inspect the distribution of a particular dataset. By identifying the lowest and highest values, data analysts can gain insight into the range of the dataset, identify possible outliers, and understand the data's variability. Beyond their use in descriptive statistics, Min-Max functions also play a vital role in data normalization, shaping the accuracy of predictive models in Machine Learning and AI fields.\n\nLearn more from the following resources:", + "links": [ + { + "title": "MIN Function", + "url": "https://support.microsoft.com/en-gb/office/min-function-61635d12-920f-4ce2-a70f-96f202dcc152", + "type": "article" + }, + { + "title": "MAX Function", + "url": "https://support.microsoft.com/en-gb/office/max-function-e0012414-9ac8-4b34-9a47-73e662c08098", + "type": "article" + } + ] + }, + "Vk3JErqxpnPY44iyfkLMl": { + "title": "Charting", + "description": "Excel serves as a powerful tool for data analysts when it comes to data organization, manipulation, recovery, and visualization. 
One of the incredible features it offers is 'Charting'. Charting essentially means creating visual representations of data, which aids data analysts to easily understand complex data and showcase compelling stories of data trends, correlations, and statistical analysis. These charts vary from simple bar graphs to more complex 3D surface and stock charts. As a data analyst, mastering charting under Excel substantially enhances data interpretation, making it easier to extract meaningful insights from substantial data sets.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Create a chart from start to finish", + "url": "https://support.microsoft.com/en-gb/office/create-a-chart-from-start-to-finish-0baf399e-dd61-4e18-8a73-b3fd5d5680c2", + "type": "article" + }, + { + "title": "Excel Charts and Graphs Tutorial", + "url": "https://www.youtube.com/watch?v=eHtZrIb0oWY", + "type": "video" + } + ] + }, + "2DDJUFr0AJTVR2Whj8zub": { + "title": "Pivot Tables", + "description": "Data Analysts recurrently find the need to summarize, investigate, and analyze their data to make meaningful and insightful decisions. One of the most powerful tools to accomplish this in Microsoft Excel is the Pivot Table. Pivot Tables allow analysts to organize and summarize large quantities of data in a concise, tabular format. The strength of pivot tables comes from their ability to manipulate data dynamically, leading to quicker analysis and richer insights. 
Understanding and employing Pivot Tables efficiently is a fundamental skill for any data analyst, as it directly impacts their ability to derive significant information from raw datasets.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Create a pivot table", + "url": "https://support.microsoft.com/en-gb/office/create-a-pivottable-to-analyze-worksheet-data-a9a84538-bfe9-40a9-a8e9-f99134456576", + "type": "article" + }, + { + "title": "Pivot tables in excel", + "url": "https://www.excel-easy.com/data-analysis/pivot-tables.html", + "type": "article" + }, + { + "title": "How to create a pivot table in excel", + "url": "https://www.youtube.com/watch?v=PdJzy956wo4", + "type": "video" + } + ] + }, + "i4VCwFm-wc9cqE73i-BIb": { + "title": "Learn SQL", + "description": "Structured Query Language, or SQL, is an essential tool for every data analyst. As a domain-specific language used in programming and designed for managing data held in relational database management systems, SQL allows analysts to manipulate and analyse large volumes of data efficiently. Understanding SQL allows a data analyst to extract insights from data stored in databases, conduct complex queries, and create elaborate data reports. SQL is recognized for its effectiveness in data manipulation and its compatibility with other coding languages, making it a fundamental competency in the data analytics field.", + "links": [] + }, + "i2uEcaO4bJhcZ5ayRs2CQ": { + "title": "Learn a Programming Lang.", + "description": "We have two main programming languages when it comes to data analysis: Python and R. Both have extensive libraries to help with decision-making processes in various situations, assisting in manipulating, modeling, and visualizing data. Python is a versatile language, used not only for data analysis but also for web development, automation, artificial intelligence, and more. 
R, on the other hand, was specifically created for statistical analysis and data visualization, making it an excellent choice for statisticians and researchers. It is known for its advanced visualization capabilities, allowing the creation of highly customizable and sophisticated graphs and plots.\n\nWith potential doubts about which language to choose to advance in a data career, it is ideal to consider your goals and/or the current market needs and choose which language to learn. If you are more interested in a career that combines data analysis with software development, automation, or artificial intelligence, Python may be the best choice. If your focus is purely on statistics and data visualization, R might be more suitable.", + "links": [] + }, + "g_EBQizZsIe-vn8ir6FTv": { + "title": "R", + "description": "R is a powerful language profoundly used by data analysts and statisticians across the globe. Offering a wide array of statistical and graphical techniques, R proves to be an excellent tool for data manipulation, statistical modeling and visualization. With its comprehensive collection of packages and built-in functions for data analysis, R allows data analysts to perform complex exploratory data analysis, build sophisticated models and create stunning visualizations. Moreover, given its open-source nature, R consistently advances with contributions from the worldwide statistical community.\n\nLearn more from the following resources:", + "links": [ + { + "title": "R Website", + "url": "https://www.r-project.org/about.html", + "type": "article" + }, + { + "title": "R vs Python | Which is Better for Data Analysis?", + "url": "https://www.youtube.com/watch?v=1gdKC5O0Pwc", + "type": "video" + } + ] + }, + "M1QtGTLyygIjePoCfvjve": { + "title": "Data Manipulation Libraries", + "description": "Data manipulation libraries are essential tools in data science and analytics, enabling efficient handling, transformation, and analysis of large datasets. 
Python, a popular language for data science, offers several powerful libraries for this purpose. Pandas is a highly versatile library that provides data structures like DataFrames, which allow for easy manipulation and analysis of tabular data. NumPy, another fundamental library, offers support for large, multi-dimensional arrays and matrices, along with a collection of mathematical functions to operate on these arrays. Together, Pandas and NumPy form the backbone of data manipulation in Python, facilitating tasks such as data cleaning, merging, reshaping, and statistical analysis, thus streamlining the data preparation process for machine learning and other data-driven applications.", + "links": [] + }, + "8OXmF2Gn6TYJotBRvDjqA": { + "title": "Pandas", + "description": "Pandas is a widely acknowledged and highly useful data manipulation library in the world of data analysis. Known for its robust features like data cleaning, wrangling and analysis, pandas has become one of the go-to tools for data analysts. Built on NumPy, it provides high-performance, easy-to-use data structures and data analysis tools. In essence, its flexibility and versatility make it a critical part of the data analyst's toolkit, as it holds the capability to cater to virtually every data manipulation task.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Pandas Website", + "url": "https://pandas.pydata.org/", + "type": "article" + }, + { + "title": "NumPy vs Pandas", + "url": "https://www.youtube.com/watch?v=KHoEbRH46Zk", + "type": "video" + } + ] + }, + "l1SnPc4EMqGdaIAhIQfrT": { + "title": "Data Visualisation Libraries", + "description": "Data visualization libraries are crucial in data science for transforming complex datasets into clear and interpretable visual representations, facilitating better understanding and communication of data insights. In Python, several libraries are widely used for this purpose. 
Matplotlib is a foundational library that offers comprehensive tools for creating static, animated, and interactive plots. Seaborn, built on top of Matplotlib, provides a high-level interface for drawing attractive and informative statistical graphics with minimal code. Plotly is another powerful library that allows for the creation of interactive and dynamic visualizations, which can be easily embedded in web applications. Additionally, libraries like Bokeh and Altair offer capabilities for creating interactive plots and dashboards, enhancing exploratory data analysis and the presentation of data findings. Together, these libraries enable data scientists to effectively visualize trends, patterns, and outliers in their data, making the analysis more accessible and actionable.", + "links": [] + }, + "uGkXxdMXUMY-3fQFS1jK8": { + "title": "Matplotlib", + "description": "Matplotlib is a paramount data visualization library used extensively by data analysts for generating a wide array of plots and graphs. Through Matplotlib, data analysts can convey results clearly and effectively, driving insights from complex data sets. It offers a hierarchical environment which is very natural for a data scientist to work with. Providing an object-oriented API, it allows for extensive customization and integration into larger applications. From histograms, bar charts, scatter plots to 3D graphs, the versatility of Matplotlib assists data analysts in the better comprehension and compelling representation of data.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Matplotlib Website", + "url": "https://matplotlib.org/", + "type": "article" + }, + { + "title": "Learn Matplotlib in 6 minutes", + "url": "https://www.youtube.com/watch?v=nzKy9GY12yo", + "type": "video" + } + ] + }, + "y__UHXe2DD-IB7bvMF1-X": { + "title": "Dplyr", + "description": "Dplyr is a powerful and popular toolkit for data manipulation in R. 
As a data analyst, this library provides integral functions to manipulate, clean, and process data efficiently. It has been designed to be easy and intuitive, ensuring a robust and consistent syntax. Dplyr ensures data reliability and fast processing, essential for analysts dealing with large datasets. With a strong focus on efficiency, dplyr functions like select, filter, arrange, mutate, summarise, and group\\_by optimise data analysis operations, making data manipulation a smoother and hassle-free procedure for data analysts.\n\nLearn more from the following resources:", + "links": [ + { + "title": "dplyr website", + "url": "https://dplyr.tidyverse.org/", + "type": "article" + }, + { + "title": "Dplyr Essentials", + "url": "https://www.youtube.com/watch?v=Gvhkp-Yw65U", + "type": "video" + } + ] + }, + "E0hIgQEeZlEidr4HtUFrL": { + "title": "Ggplot2", + "description": "When it comes to data visualization in R programming, ggplot2 stands tall as one of the primary tools for data analysts. This data visualization library, which forms part of the tidyverse suite of packages, facilitates the creation of complex and sophisticated visual narratives. With its grammar of graphics philosophy, ggplot2 enables analysts to build graphs and charts layer by layer, thereby offering detailed control over graphical features and design. Its versatility in creating tailored and aesthetically pleasing graphics is a vital asset for any data analyst tackling exploratory data analysis, reporting, or dashboard building.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Make beautiful graphs in R", + "url": "https://www.youtube.com/watch?v=qnw1xDnt_Ec", + "type": "video" + } + ] + }, + "_sjXCLHHTbZromJYn6fnu": { + "title": "Data Collection", + "description": "In the context of the Data Analyst role, data collection is a foundational process that entails gathering relevant data from various sources. 
This data can be quantitative or qualitative and may be sourced from databases, online platforms, customer feedback, among others. The gathered information is then cleaned, processed, and interpreted to extract meaningful insights. A data analyst performs this whole process carefully, as the quality of data is paramount to ensuring accurate analysis, which in turn informs business decisions and strategies. This highlights the importance of an excellent understanding, proper tools, and precise techniques when it comes to data collection in data analysis.", + "links": [] + }, + "tYPeLCxbqvMFlTkCGjdHg": { + "title": "Databases", + "description": "Behind every strong data analyst, there's not just a rich assortment of data, but a set of robust databases that enable effective data collection. Databases are a fundamental aspect of data collection in a world where the capability to manage, organize, and evaluate large volumes of data is critical. As a data analyst, the understanding and use of databases is instrumental in capturing the necessary data for conducting qualitative and quantitative analysis, forecasting trends and making data-driven decisions. Thorough knowledge of databases, therefore, can be considered a key component of a data analyst's arsenal. These databases can vary from relational databases like SQL to NoSQL databases like MongoDB, each serving a unique role in the data collection process.\n\nLearn more from the following resources:", + "links": [ + { + "title": "PostgreSQL Roadmap", + "url": "https://roadmap.sh/postgresql-dba", + "type": "article" + }, + { + "title": "MongoDB Roadmap", + "url": "https://roadmap.sh/mongodb", + "type": "article" + } + ] + }, + "iWOK0mRY-hAGxMnHYJ0tt": { + "title": "CSV Files", + "description": "CSV or Comma Separated Values files play an integral role in data collection for data analysts. 
These file types allow the efficient storage of data and are commonly generated by spreadsheet software like Microsoft Excel or Google Sheets, but their simplicity makes them compatible with a variety of applications that deal with data. In the context of data analysis, CSV files are extensively used to import and export large datasets, making them essential for any data analyst's toolkit. They allow analysts to organize vast amounts of information into a structured format, which is fundamental in extracting useful insights from raw data.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is a CSV file: A comprehensive guide", + "url": "https://flatfile.com/blog/what-is-a-csv-file-guide-to-uses-and-benefits/", + "type": "article" + }, + { + "title": "Understanding CSV Files", + "url": "https://www.youtube.com/watch?v=UofTplCVkYI", + "type": "video" + } + ] + }, + "4DFcXSSHxg5wv0uXLIRij": { + "title": "APIs", + "description": "Application Programming Interfaces, better known as APIs, play a fundamental role in the work of data analysts, particularly in the process of data collection. APIs are sets of protocols, routines, and tools that enable different software applications to communicate with each other. In data analysis, APIs are used extensively to collect, exchange, and manipulate data from different sources in a secure and efficient manner. This data collection process is paramount in shaping the insights derived by the analysts.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is an API?", + "url": "https://aws.amazon.com/what-is/api/", + "type": "article" + }, + { + "title": "A beginners guide to APIs", + "url": "https://www.postman.com/what-is-an-api/", + "type": "article" + } + ] + }, + "qQ64ZhSlbbWu9pP8KTE67": { + "title": "Web Scraping", + "description": "Web scraping plays a significant role in collecting unique datasets for data analysis. 
In the realm of a data analyst's tasks, web scraping refers to the method of extracting information from websites and converting it into a structured usable format like a CSV, Excel spreadsheet, or even into databases. This technique allows data analysts to gather large sets of data from the internet, which otherwise could be time-consuming if done manually. The capability of web scraping and parsing data effectively can give data analysts a competitive edge in their data analysis process, from unlocking in-depth, insightful information to making data-driven decisions.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is web scraping and what is it used for?", + "url": "https://www.parsehub.com/blog/what-is-web-scraping/", + "type": "article" + }, + { + "title": "What is web scraping?", + "url": "https://www.youtube.com/watch?v=dlj_QL-ENJM", + "type": "video" + } + ] + }, + "E6cpb6kvluJM8OGuDcFBT": { + "title": "Data Cleanup", + "description": "Data cleaning, which is often referred to as data cleansing or data scrubbing, is one of the most important and initial steps in the data analysis process. As a data analyst, the bulk of your work often revolves around understanding, cleaning, and standardizing raw data before analysis. Data cleaning involves identifying, correcting or removing any errors or inconsistencies in datasets in order to improve their quality. The process is crucial because it directly determines the accuracy of the insights you generate - garbage in, garbage out. Even the most sophisticated models and visualizations would not be of much use if they're based on dirty data. Therefore, mastering data cleaning techniques is essential for any data analyst.", + "links": [] + }, + "X9WmfHOks82BIAzs6abqO": { + "title": "Handling Missing Data", + "description": "When working with real-world data as a Data Analyst, encountering missing or null values is quite prevalent. 
This phenomenon is referred to as \"Missing Data\" in the field of data analysis. Missing data can severely impact the results of a data analysis process since it reduces the statistical power, which can distort the reliability and robustness of outcomes.\n\nMissing data is a part of the 'Data Cleaning' step which is a crucial part of the Preprocessing in Data Analytics. It involves identifying incomplete, incorrect or irrelevant data and then replacing, modifying or deleting this dirty data. Successful data cleaning of missing values can significantly augment the overall quality of the data, therefore offering valuable and reliable insights. It is essential for a Data Analyst to understand the different techniques for dealing with missing data, such as different types of imputations based on the nature of the data and research question.\n\nLearn more from the following resources:", + "links": [ + { + "title": "How to Handle Missing Data Values While Data Cleaning", + "url": "https://insightsoftware.com/blog/how-to-handle-missing-data-values-while-data-cleaning/", + "type": "article" + }, + { + "title": "Handling Missing Data in Data Preprocessing and Cleaning", + "url": "https://medium.com/@yennhi95zz/handling-missing-data-in-data-preprocessing-and-cleaning-methods-and-examples-19a893336b2a", + "type": "article" + } + ] + }, + "Xltm7B-8TZ1-igLjVJXoR": { + "title": "Removing Duplicates", + "description": "In the world of data analysis, a critical step is data cleaning, that includes an important sub-task: removing duplicate entries. Duplicate data can distort the results of data analysis by giving extra weight to duplicate instances and leading to biased or incorrect conclusions. Despite the quality of data collection, there's a high probability that datasets may contain duplicate records due to various factors like human error, merging datasets, etc. 
Therefore, data analysts must master the skill of identifying and removing duplicates to ensure that their analysis is based on a unique, accurate, and diverse set of data. This process contributes to more accurate predictions and inferences, thus maximizing the insights gained from the data.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Handling Duplicate Values and Outliers in a dataset", + "url": "https://medium.com/@ayushmandurgapal/handling-duplicate-values-and-outliers-in-a-dataset-b00ce130818e", + "type": "article" + }, + { + "title": "How To Remove Duplicates in a Dataset and Find Unique Values", + "url": "https://www.youtube.com/watch?v=KBzYrvjUsps", + "type": "video" + } + ] + }, + "-rQ8h_6NFxEOhxXgo7LHo": { + "title": "Finding Outliers", + "description": "In the field of data analysis, data cleaning is an essential and preliminary step. This process involves correcting or removing any errors, inaccuracy, or irrelevance present in the obtained raw data, making it more suitable for analysis. One crucial aspect of this process is \"finding outliers\". Outliers are unusual or surprising data points that deviate significantly from the rest of the data. While they may be the result of mere variability or error, they will often pull the aggregate data towards them, skewing the results and impeding the accuracy of data analysis. Therefore, identifying and appropriately handling these outliers is crucial to ensure the reliability of subsequent data analysis tasks.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Outliers", + "url": "https://www.mathsisfun.com/data/outliers.html", + "type": "article" + } + ] + }, + "t_BRtEharsrOZxoyX0OzV": { + "title": "Data Transformation", + "description": "Data Transformation, also known as Data Wrangling, is an essential part of a Data Analyst's role. 
This process involves the conversion of data from a raw format into another format to make it more appropriate and valuable for a variety of downstream purposes such as analytics. Data Analysts transform data to make the data more suitable for analysis, ensure accuracy, and to improve data quality. The right transformation techniques can give the data a structure, multiply its value, and enhance the accuracy of the analytics performed by serving meaningful results.", + "links": [ + { + "title": "Explore top posts about Data Analysis", + "url": "https://app.daily.dev/tags/data-analysis?ref=roadmapsh", + "type": "article" + } + ] + }, + "TucngXKNptbeo3PtdJHX8": { + "title": "Pandas", + "description": "In the realms of data analysis, data cleaning is a crucial preliminary process, this is where `pandas` - a popular python library - shines. Primarily used for data manipulation and analysis, pandas adopts a flexible and powerful data structure (DataFrames and Series) that greatly simplifies the process of cleaning raw, messy datasets. Data analysts often work with large volumes of data, some of which may contain missing or inconsistent data that can negatively impact the results of their analysis. By utilizing pandas, data analysts can quickly identify, manage and fill these missing values, drop unnecessary columns, rename column headings, filter specific data, apply functions for more complex data transformations and much more. 
Thus, making pandas an invaluable tool for effective data cleaning in data analysis.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Pandas Website", + "url": "https://pandas.pydata.org/", + "type": "article" + }, + { + "title": "NumPy vs Pandas", + "url": "https://www.youtube.com/watch?v=KHoEbRH46Zk", + "type": "video" + } + ] + }, + "v8TfY-b4W5ygOv7r-syHq": { + "title": "Dplyr", + "description": "Data cleaning plays a crucial role in the data analysis pipeline, where it rectifies and enhances the quality of data to increase the efficiency and authenticity of the analytical process. The `dplyr` package, an integral part of the `tidyverse` suite in R, has become a staple in the toolkit of data analysts dealing with data cleaning. `dplyr` offers a coherent set of verbs that significantly simplifies the process of manipulating data structures, such as dataframes and databases. This involves selecting, sorting, filtering, creating or modifying variables, and aggregating records, among other operations. Incorporating `dplyr` into the data cleaning phase enables data analysts to perform operations more effectively, improve code readability, and handle large and complex data with ease.\n\nLearn more from the following resources:", + "links": [ + { + "title": "dplyr website", + "url": "https://dplyr.tidyverse.org/", + "type": "article" + }, + { + "title": "Dplyr Essentials", + "url": "https://www.youtube.com/watch?v=Gvhkp-Yw65U", + "type": "video" + } + ] + }, + "il6KQXVPGBza5libN38ib": { + "title": "Descriptive Analysis", + "description": "In the realm of data analytics, descriptive analysis plays an imperative role as a fundamental step in data interpretation. Essentially, descriptive analysis encompasses the process of summarizing, organizing, and simplifying complex data into understandable and interpretable forms. This method entails the use of various statistical tools to depict patterns, correlations, and trends in a data set. 
For data analysts, it serves as the cornerstone for in-depth data exploration, providing the groundwork upon which further analysis techniques such as predictive and prescriptive analysis are built.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Descriptive Analytics: What They Are and Related Terms", + "url": "https://www.investopedia.com/terms/d/descriptive-analytics.asp", + "type": "article" + }, + { + "title": "What are Descriptive Analytics?", + "url": "https://www.youtube.com/watch?v=DlFqQy10aCs", + "type": "video" + } + ] + }, + "2ldO-_ZnIg364Eo8Jyfgr": { + "title": "Dispersion", + "description": "Dispersion in descriptive analysis, specifically for a data analyst, offers a crucial way to understand the variability or spread in a set of data. Descriptive analysis focuses on describing and summarizing data to find patterns, relationships, or trends. Distinct measures of dispersion such as range, variance, standard deviation, and interquartile range give data analysts insight into how spread out data points are, and how reliable any patterns detected may be. This understanding of dispersion helps data analysts in identifying outliers, drawing meaningful conclusions, and making informed predictions.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Standard Deviation and Variance", + "url": "https://www.mathsisfun.com/data/standard-deviation.html", + "type": "article" + } + ] + }, + "Fyqg3MRcwY7g-Q3gjiqwK": { + "title": "Distribution Shape", + "description": "In the realm of Data Analysis, the distribution shape is considered an essential component under descriptive analysis. A data analyst uses the shape of the distribution to understand the spread and trend of the data set. It aids in identifying the skewness (asymmetry) and kurtosis (the 'tailedness') of the data and helps to reveal meaningful patterns that standard statistical measures like mean or median might not capture. 
The distribution shape can provide insights into data’s normality and variability, informing decisions about which statistical methods are appropriate for further analysis.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Shapes of distributions", + "url": "https://online.stat.psu.edu/stat414/lesson/13/13.5", + "type": "course" + }, + { + "title": "Shapes of Distributions: Definitions, Examples", + "url": "https://www.statisticshowto.com/shapes-of-distributions/", + "type": "article" + } + ] + }, + "923KGEUG9ofBm7dYkkrxz": { + "title": "Mean", + "description": "Central tendency refers to the statistical measure that identifies a single value as representative of an entire distribution. The mean or average is one of the most popular and widely used measures of central tendency. For a data analyst, calculating the mean is a routine task. This single value provides an analyst with a quick snapshot of the data and could be useful for further data manipulation or statistical analysis. Mean is particularly helpful in predicting trends and patterns within voluminous data sets or adjusting influencing factors that may distort the 'true' representation of the data. It is the arithmetic average of a range of values or quantities, computed as the total sum of all the values divided by the total number of values.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Measures of Central Tendency", + "url": "https://statistics.laerd.com/statistical-guides/measures-central-tendency-mean-mode-median.php", + "type": "article" + }, + { + "title": "Central Tendency | Understanding the Mean, Median & Mode", + "url": "https://www.scribbr.co.uk/stats/measures-of-central-tendency/", + "type": "article" + } + ] + }, + "qdXoikVh2xkJkOQNKcyvb": { + "title": "Median", + "description": "Median signifies the middle value in a data set when arranged in ascending or descending order. 
As a data analyst, understanding, calculating, and interpreting the median is crucial. It is especially helpful when dealing with outliers in a dataset as the median is less sensitive to extreme values. Thus, providing a more realistic 'central' value for skewed distributions. This measure is a reliable reflection of the dataset and is widely used in fields like real estate, economics, and finance for data interpretation and decision-making.\n\nLearn more from the following resources:", + "links": [ + { + "title": "How to find the median value", + "url": "https://www.mathsisfun.com/median.html", + "type": "article" + }, + { + "title": "Median: What It Is and How to Calculate It", + "url": "https://www.investopedia.com/terms/m/median.asp", + "type": "article" + } + ] + }, + "fY8zVG2tVbmtx5OhY7hj-": { + "title": "Mode", + "description": "The concept of central tendency is fundamental in statistics and has numerous applications in data analysis. From a data analyst's perspective, the central tendencies like mean, median, and mode can be highly informative about the nature of data. Among these, the \"Mode\" is often underappreciated, yet it plays an essential role in interpreting datasets.\n\nThe mode, in essence, represents the most frequently occurring value in a dataset. While it may appear simplistic, the mode's ability to identify the most common value can be instrumental in a wide range of scenarios, like market research, customer behavior analysis, or trend identification. For instance, a data analyst can use the mode to determine the most popular product in a sales dataset or identify the most commonly reported bug in a software bug log.\n\nBeyond these, utilizing the mode along with the other measures of central tendency (mean and median) can provide a more rounded view of your data. This approach personifies the diversity that's often required in data analytic strategies to account for different data distributions and outliers. 
The mode, therefore, forms an integral part of the data analyst's toolkit for statistical data interpretation.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Mode: What is it and how to calculate it", + "url": "https://www.investopedia.com/terms/m/mode.asp", + "type": "article" + }, + { + "title": "Mean Median Mode Formula", + "url": "https://www.cuemath.com/mean-median-mode-formula/", + "type": "article" + } + ] + }, + "yn1sstYMO9du3rpfQqNs9": { + "title": "Average", + "description": "When focusing on data analysis, understanding key statistical concepts is crucial. Amongst these, central tendency is a foundational element. Central Tendency refers to the measure that determines the center of a distribution. The average is a commonly used statistical tool by which data analysts discern trends and patterns. As one of the most recognized forms of central tendency, figuring out the \"average\" involves summing all values in a data set and dividing by the number of values. This provides analysts with a 'typical' value, around which the remaining data tends to cluster, facilitating better decision-making based on existing data.", + "links": [] + }, + "tSxtyJhL5wjU0XJcjsJmm": { + "title": "Range", + "description": "The concept of Range refers to the spread of a dataset, primarily in the realm of statistics and data analysis. This measure is crucial for a data analyst as it provides an understanding of the variability amongst the numbers within a dataset. Specifically in a role such as Data Analyst, understanding the range and dispersion aids in making more precise analyses and predictions. Understanding the dispersion within a range can highlight anomalies, identify standard norms, and form the foundation for statistical conclusions like the standard deviation, variance, and interquartile range. It allows for the comprehension of the reliability and stability of particular datasets, which can help guide strategic decisions in many industries. 
Therefore, range is a key concept that every data analyst must master.\n\nLearn more from the following resources:", + "links": [ + { + "title": "How to find the range of a data set", + "url": "https://www.scribbr.co.uk/stats/range-statistics/", + "type": "article" + } + ] + }, + "ict4JkoVM-AzPbp9bDztg": { + "title": "Variance", + "description": "Data analysts heavily rely on statistical concepts to analyze and interpret data, and one such fundamental concept is variance. Variance, an essential measure of dispersion, quantifies the spread of data, providing insight into the level of variability within the dataset. Understanding variance is crucial for data analysts as the reliability of many statistical models depends on the assumption of constant variance across observations. In other words, it helps analysts determine how much data points diverge from the expected value or mean, which can be pivotal in identifying outliers, understanding data distribution, and driving decision-making processes. However, variance can't be interpreted in the original units of measurement due to its squared nature, which is why it is often used in conjunction with its square root, the standard deviation.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Variance: Definition, Formula, and Example", + "url": "https://www.investopedia.com/terms/v/variance.asp", + "type": "article" + }, + { + "title": "What is Variance? | Definition, Examples & Formulas", + "url": "https://www.scribbr.co.uk/stats/variance-meaning/", + "type": "article" + } + ] + }, + "MXIP8ekH4YtczODKM_G_l": { + "title": "Standard Deviation", + "description": "In the realm of data analysis, the concept of dispersion plays a critical role in understanding and interpreting data. One of the key measures of dispersion is the Standard Deviation. As a data analyst, understanding the standard deviation is crucial as it gives insight into how much variation or dispersion exists from the average (mean), or expected value. 
A low standard deviation indicates that the data points are generally close to the mean, while a high standard deviation implies that the data points are spread out over a wider range. By mastering the concept of standard deviation and other statistical tools related to dispersion, data analysts are better equipped to provide meaningful analyses and insights from the available data.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Standard Deviation Formula and Uses vs. Variance", + "url": "https://www.investopedia.com/terms/s/standarddeviation.asp", + "type": "article" + }, + { + "title": "Standard Deviation", + "url": "https://www.youtube.com/watch?v=esskJJF8pCc", + "type": "video" + } + ] + }, + "VfcCRRqwLxkYpIX0ZKNNX": { + "title": "Skewness", + "description": "Skewness is a crucial statistical concept driven by data analysis and is a significant parameter in understanding the distribution shape of a dataset. In essence, skewness provides a measure to define the extent and direction of asymmetry in data. A positive skewness indicates a distribution with an asymmetric tail extending towards more positive values, while a negative skew indicates a distribution with an asymmetric tail extending towards more negative values. For a data analyst, recognizing and analyzing skewness is essential as it can greatly influence model selection, prediction accuracy, and interpretation of results.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Skewed Data", + "url": "https://www.mathsisfun.com/data/skewness.html", + "type": "article" + } + ] + }, + "PqGO8AU1zE2ZdtqrIrOkZ": { + "title": "Kurtosis", + "description": "Understanding distribution shapes is an integral part of a Data Analyst's daily responsibilities. When they inspect statistical data, one key feature they consider is the kurtosis of the distribution. In statistics, kurtosis identifies the heaviness of the distribution tails and the sharpness of the peak. 
A proper understanding of kurtosis can assist Analysts in risk management, outlier detection, and provides deeper insight into variations. Therefore, being proficient in interpreting kurtosis measurements of a distribution shape is a significant skill that every data analyst should master.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Kurtosis: Definition, Types, and Importance", + "url": "https://www.investopedia.com/terms/k/kurtosis.asp", + "type": "article" + } + ] + }, + "BJTVa4ur_bJB7mMtD2-hQ": { + "title": "Central Tendency", + "description": "Descriptive analysis is a significant branch in the field of data analytics, and under this, the concept of Central Tendency plays a vital role. As data analysts, understanding central tendency is of paramount importance as it offers a quick summary of the data. It provides information about the center point around which the numerical data is distributed. The three major types of the central tendency include the Mean, Median, and Mode. These measures are used by data analysts to identify trends, make comparisons, or draw conclusions. Therefore, an understanding of central tendency equips data analysts with essential tools for interpreting and making sense of statistical data.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Measures of central tendency", + "url": "https://www.abs.gov.au/statistics/understanding-statistics/statistical-terms-and-concepts/measures-central-tendency", + "type": "article" + }, + { + "title": "Understanding Central Tendency", + "url": "https://www.youtube.com/watch?v=n_sSVhHBdj4", + "type": "video" + } + ] + }, + "Sz2Y8HLbSmDjSKAJztDql": { + "title": "Tableau", + "description": "Tableau is a powerful data visualization tool utilized extensively by data analysts worldwide. Its primary role is to transform raw, unprocessed data into an understandable format without any technical skills or coding. 
Data analysts use Tableau to create data visualizations, reports, and dashboards that help businesses make more informed, data-driven decisions. They also use it to perform tasks like trend analysis, pattern identification, and forecasts, all within a user-friendly interface. Moreover, Tableau's data visualization capabilities make it easier for stakeholders to understand complex data and act on insights quickly.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Tableau Website", + "url": "https://www.tableau.com/en-gb", + "type": "article" + }, + { + "title": "What is Tableau?", + "url": "https://www.youtube.com/watch?v=NLCzpPRCc7U", + "type": "video" + } + ] + }, + "SJLeose5vZU8w_18C8_t0": { + "title": "Power BI", + "description": "PowerBI, an interactive data visualization and business analytics tool developed by Microsoft, plays a crucial role in the field of a data analyst's work. It helps data analysts to convert raw data into meaningful insights through its easy-to-use dashboards and reports function. This tool provides a unified view of business data, allowing analysts to track and visualize key performance metrics and make better-informed business decisions. With PowerBI, data analysts also have the ability to manipulate and produce visualizations of large data sets that can be shared across an organization, making complex statistical information more digestible.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Power BI Website", + "url": "https://www.microsoft.com/en-us/power-platform/products/power-bi", + "type": "article" + }, + { + "title": "Power BI for beginners", + "url": "https://www.youtube.com/watch?v=NNSHu0rkew8", + "type": "video" + } + ] + }, + "tvDdXwaRPsUSTqJGaLS3P": { + "title": "Matplotlib", + "description": "For a Data Analyst, understanding data and being able to represent it in a visually insightful form is a crucial part of effective decision-making in any organization. 
Matplotlib, a plotting library for the Python programming language, is an extremely useful tool for this purpose. It presents a versatile framework for generating line plots, scatter plots, histograms, bar charts and much more in a very straightforward manner. This library also allows for comprehensive customizations, offering a high level of control over the look and feel of the graphics it produces, which ultimately enhances the quality of data interpretation and communication.", + "links": [] + }, + "-cJb8gEBvdVFf7FlgG3Ud": { + "title": "Seaborn", + "description": "Seaborn is a robust, comprehensive Python library focused on the creation of informative and attractive statistical graphics. As a data analyst, seaborn plays an essential role in elaborating complex visual stories with the data. It aids in understanding the data by providing an interface for drawing attractive and informative statistical graphics. Seaborn is built on top of Python's core visualization library Matplotlib, and is integrated with data structures from Pandas. This makes seaborn an integral tool for data visualization in the data analyst's toolkit, making the exploration and understanding of data easier and more intuitive.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Seaborn Website", + "url": "https://seaborn.pydata.org/", + "type": "article" + }, + { + "title": "Seaborn Tutorial : Seaborn Full Course", + "url": "https://www.youtube.com/watch?v=6GUZXDef2U0", + "type": "video" + } + ] + }, + "n3M49lgNPn28hm7kzki-a": { + "title": "ggplot2", + "description": "ggplot2 is an important and powerful tool in the data analyst's toolkit, especially for visualizing and understanding complex datasets. Built within the R programming language, it provides a flexible, cohesive environment for creating graphs. The main strength of ggplot2 lies in its ability to produce sophisticated and tailored visualizations. 
This allows data analysts to communicate data-driven findings in an efficient and effective manner, enabling clear communication to stakeholders about relevant insights and patterns identified within the data.", + "links": [] + }, + "EVk1H-QLtTlpG7lVEenDt": { + "title": "Bar Charts", + "description": "As a vital tool in the data analyst's arsenal, bar charts are essential for analyzing and interpreting complex data. Bar charts, otherwise known as bar graphs, are frequently used graphical displays for dealing with categorical data groups or discrete variables. With their stark visual contrast and definitive measurements, they provide a simple yet effective means of identifying trends, understanding data distribution, and making data-driven decisions. By analyzing the lengths or heights of different bars, data analysts can effectively compare categories or variables against each other and derive meaningful insights effectively. Simplicity, readability, and easy interpretation are key features that make bar charts a favorite in the world of data analytics.\n\nLearn more from the following resources:", + "links": [ + { + "title": "A complete guide to bar charts", + "url": "https://www.atlassian.com/data/charts/bar-chart-complete-guide", + "type": "article" + }, + { + "title": "What is a bar chart?", + "url": "https://www.youtube.com/watch?v=WTVdncVCvKo", + "type": "video" + } + ] + }, + "v9T0DX56jFNhUz9nzubzS": { + "title": "Line Chart", + "description": "Data visualization is a crucial skill for every Data Analyst and the Line Chart is one of the most commonly used chart types in this field. Line charts act as powerful tools for summarizing and interpreting complex datasets. Through attractive and interactive design, these charts allow for clear and efficient communication of patterns, trends, and outliers in the data. 
This makes them valuable for data analysts when presenting data spanning over a period of time, forecasting trends or demonstrating relationships between different data sets.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Line Graph: Definition, Types, Parts, Uses, and Examples", + "url": "https://www.investopedia.com/terms/l/line-graph.asp", + "type": "article" + }, + { + "title": "What is a line graph?", + "url": "https://www.youtube.com/watch?v=rw-MxkzymEw", + "type": "video" + } + ] + }, + "A5YQv7D4qRcskdZ64XldH": { + "title": "Scatter Plot", + "description": "A scatter plot, a crucial aspect of data visualization, is a mathematical diagram using Cartesian coordinates to represent values from two different variables. As a data analyst, understanding and interpreting scatter plots can be instrumental in identifying correlations and trends within a dataset, drawing meaningful insights, and showcasing these findings in a clear, visual manner. In addition, scatter plots are paramount in predictive analytics as they reveal patterns which can be used to predict future occurrences.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Mastering scatter plots", + "url": "https://www.atlassian.com/data/charts/what-is-a-scatter-plot", + "type": "article" + }, + { + "title": "Scatter Graphs: What are they and how to plot them", + "url": "https://www.youtube.com/watch?v=Vyg9qmBsgAc", + "type": "video" + } + ] + }, + "muFy-C2agCX9vR_NU7UZF": { + "title": "Funnel Charts", + "description": "A funnel chart is an important tool for Data Analysts. It is a part of data visualization, the creation and study of the visual representation of data. A funnel chart displays values as progressively diminishing amounts, allowing data analysts to understand the stages that contribute to the output of a process or system. 
It is often used in sales, marketing or any field that involves a multi-step process, to evaluate efficiency or identify potential problem areas. The 'funnel' shape is symbolic of a typical customer conversion process, going from initial engagement to close of sale. As Data Analysts, understanding and interpreting funnel charts can provide significant insights to drive optimal decision making.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is a Funnel Chart?", + "url": "https://www.atlassian.com/data/charts/funnel-chart-complete-guide", + "type": "article" + }, + { + "title": "Explain your data with a funnel chart", + "url": "https://www.youtube.com/watch?v=AwFB9Qg96Ek", + "type": "video" + } + ] + }, + "FEyBFZLaGJqTC-IUEcOVS": { + "title": "Histograms", + "description": "As a Data Analyst, understanding and representing complex data in a simplified and comprehensible form is of paramount importance. This is where the concept of data visualization comes into play, specifically the use of histograms. A histogram is a graphical representation that organizes a group of data points into a specified range. It provides a visual interpretation of numerical data by indicating the number of data points that fall within a specified range of values, known as bins. This highly effective tool allows data analysts to view data distribution over a continuous interval or a certain time period, which can further aid in identifying trends, outliers, patterns, or anomalies present in the data. 
Consequently, histograms are instrumental in making informed business decisions based on these data interpretations.\n\nLearn more from the following resources:", + "links": [ + { + "title": "How a histogram works to display data", + "url": "https://www.investopedia.com/terms/h/histogram.asp", + "type": "article" + }, + { + "title": "What is a histogram", + "url": "https://www.mathsisfun.com/data/histograms.html", + "type": "article" + } + ] + }, + "329BrtmXjXNLfi1SFfdeo": { + "title": "Stacked Charts", + "description": "A stacked chart is an essential tool for a data analyst in the field of data visualization. This type of chart presents quantitative data in a visually appealing manner and allows users to easily compare different categories while still being able to compare the total sizes. These charts are highly effective when trying to measure part-to-whole relationships, displaying accumulated totals over time or when presenting data with multiple variables. Data analysts often use stacked charts to detect patterns, trends and anomalies which can aid in strategic decision making.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is a stacked chart?", + "url": "https://www.spotfire.com/glossary/what-is-a-stacked-chart", + "type": "article" + }, + { + "title": "A Complete Guide to Stacked Bar Charts", + "url": "https://www.atlassian.com/data/charts/stacked-bar-chart-complete-guide", + "type": "article" + } + ] + }, + "G8resQXEVEHCaQfDlt3nj": { + "title": "Heatmap", + "description": "Heatmaps are a crucial component of data visualization that Data Analysts regularly employ in their analyses. As one of many possible graphical representations of data, heatmaps show the correlation or scale of variation between two or more variables in a dataset, making them extremely useful for pattern recognition and outlier detection. 
Individual values within a matrix are represented in a heatmap as colors, with differing intensities indicating the degree or strength of an occurrence. In short, a Data Analyst would use a heatmap to decode complex multivariate data and turn it into an easily understandable visual that aids in decision making.\n\nLearn more from the following resources:", + "links": [ + { + "title": "A complete guide to heatmaps", + "url": "https://www.hotjar.com/heatmaps/", + "type": "article" + }, + { + "title": "What is a heatmap?", + "url": "https://www.atlassian.com/data/charts/heatmap-complete-guide", + "type": "article" + } + ] + }, + "K9xwm_Vpdup9ujYqlD9F3": { + "title": "Pie Charts", + "description": "As a data analyst, understanding and efficiently using various forms of data visualization is crucial. Among these, Pie Charts represent a significant tool. Essentially, pie charts are circular statistical graphics divided into slices to illustrate numerical proportions. Each slice of the pie corresponds to a particular category. The pie chart's beauty lies in its simplicity and visual appeal, making it an effective way to convey relative proportions or percentages at a glance. For a data analyst, it's particularly useful when you want to show a simple distribution of categorical data. Like any tool, though, it's important to use pie charts wisely—ideally, when your data set has fewer than seven categories, and the proportions between categories are distinct.\n\nLearn more from the following resources:", + "links": [ + { + "title": "A complete guide to pie charts", + "url": "https://www.atlassian.com/data/charts/pie-chart-complete-guide", + "type": "article" + }, + { + "title": "What is a pie chart", + "url": "https://www.youtube.com/watch?v=GjJdZaQrItg", + "type": "video" + } + ] + }, + "2g19zjEASJw2ve57hxpr0": { + "title": "Data Visualisation", + "description": "Data Visualization is a fundamental fragment of the responsibilities of a data analyst. 
It involves the presentation of data in a graphical or pictorial format which allows decision-makers to see analytics visually. This practice can help them comprehend difficult concepts or establish new patterns. With interactive visualization, data analysts can take the data analysis process to a whole new level — drill down into charts and graphs for more detail, and interactively changing what data is presented or how it’s processed. Thereby it forms a crucial link in the chain of converting raw data to actionable insights which is one of the primary roles of a Data Analyst.", + "links": [] + }, + "TeewVruErSsD4VLXcaDxp": { + "title": "Statistical Analysis", + "description": "Statistical analysis is a core component of a data analyst's toolkit. As professionals dealing with vast amount of structured and unstructured data, data analysts often turn to statistical methods to extract insights and make informed decisions. The role of statistical analysis in data analytics involves gathering, reviewing, and interpreting data for various applications, enabling businesses to understand their performance, trends, and growth potential. Data analysts use a range of statistical techniques from modeling, machine learning, and data mining, to convey vital information that supports strategic company actions.\n\nLearn more from the following resources:", + "links": [] + }, + "Xygwu0m5TeYT6S_8FKKXh": { + "title": "Hypothesis Testing", + "description": "In the context of a Data Analyst, hypothesis testing plays an essential role to make inferences or predictions based on data. Hypothesis testing is an approach used to test a claim or theory about a parameter in a population, using data measured in a sample. This method allows Data Analysts to determine whether the observed data deviates significantly from the status quo or not. 
Essentially, it provides a probability-based mechanism to quantify and deal with the uncertainty inherent in conclusions drawn from not completely reliable data.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Hypothesis Testing", + "url": "https://latrobe.libguides.com/maths/hypothesis-testing", + "type": "article" + }, + { + "title": "Hypothesis Testing - 4 Step", + "url": "https://www.investopedia.com/terms/h/hypothesistesting.asp", + "type": "article" + } + ] + }, + "murioZ0NdrTix_lqSGz-8": { + "title": "Correlation Analysis", + "description": "Correlation Analysis is a quantitative method that data analysts widely employ to determine if there is a significant relationship between two variables, and if so, how strong or weak, positive or negative that relationship might be. This form of analysis helps data analysts identify patterns and trends within datasets, and is often represented visually through scatter plots. By using correlation analysis, data analysts can derive valuable insights to inform decision-making processes within a wide range of fields, from marketing to finance. The implementation of correlation analysis is crucial to forecast future outcomes, develop strategies and drive business growth.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Correlation", + "url": "https://www.mathsisfun.com/data/correlation.html", + "type": "article" + } + ] + }, + "lTycWscyFPi-BtkNg9cdm": { + "title": "Regression", + "description": "As a data analyst, understanding regression is of paramount importance. Regression analysis is a form of predictive modelling technique which investigates the relationship between dependent and independent variables. It is used for forecast, time series modelling and finding the causal effect relationship between variables. 
In essence, Regression techniques are used by data analysts to predict a continuous outcome variable (dependent variable) based on one or more predictor variables (independent variables). The main goal is to understand how the typical value of the dependent variable changes when any one of the independent variables is varied, while the other independent variables are held fixed. This understanding of regression takes data analysis from a reactive position to a more powerful, predictive one, equipping data analysts with an integral tool in their work.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Regression: Definition, Analysis, Calculation, and Example", + "url": "https://www.investopedia.com/terms/r/regression.asp", + "type": "article" + }, + { + "title": "A Refresher on Regression Analysis - Harvard", + "url": "https://hbr.org/2015/11/a-refresher-on-regression-analysis", + "type": "article" + } + ] + }, + "mCUW07rx74_dUNi7OGVlj": { + "title": "Visualizing Distributions", + "description": "Visualising Distributions, from a data analyst's perspective, plays a key role in understanding the overall distribution and identifying patterns within data. It aids in summarising, structuring, and plotting structured data graphically to provide essential insights. This includes using different chart types like bar graphs, histograms, and scatter plots for interval data, and pie or bar graphs for categorical data. Ultimately, the aim is to provide a straightforward and effective manner to comprehend the data's characteristics and underlying structure. 
A data analyst uses these visualisation techniques to make initial conclusions, detect anomalies, and decide on further analysis paths.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Data Visualizations that Capture Distributions", + "url": "https://www.datacamp.com/blog/data-demystified-data-visualizations-that-capture-distributions", + "type": "article" + }, + { + "title": "Visualising Distributions in Power BI", + "url": "https://www.youtube.com/watch?v=rOemr3sz2vw", + "type": "video" + } + ] + }, + "f4GuzeokP9w_gwtOquP0g": { + "title": "Machine Learning", + "description": "Data analysts are becoming increasingly involved in the realm of machine learning. This emerging technology harnesses algorithms, statistical models, and other tools to teach machines to perform tasks that would normally require human intelligence. This includes activities such as making predictions based on data, recognizing patterns, and making decisions. Understanding the basics of machine learning is therefore not only beneficial, but essential, to modern data analysts who wish to stay competitive in their field.", + "links": [] + }, + "XdBwqLoYYiLJNlWPBEDUj": { + "title": "Reinforcement Learning", + "description": "Reinforcement learning is a key topic within the broader realm of machine learning. Data analysts and other professionals dealing with data often utilize reinforcement learning techniques. In simple, it can be considered as a type of algorithm that uses trial and error to come up with solutions to problems. Notably, these algorithms learn the ideal behaviour within a specific context, with the intention of maximizing performance. 
As a data analyst, understanding reinforcement learning provides a crucial expertise, especially when dealing with complex data structures and making strategic decisions based on that data.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is reinforcement learning", + "url": "https://aws.amazon.com/what-is/reinforcement-learning/#:~:text=Reinforcement%20learning%20(RL)%20is%20a,use%20to%20achieve%20their%20goals.", + "type": "article" + }, + { + "title": "What is reinforcement learning - IBM", + "url": "https://www.ibm.com/topics/reinforcement-learning", + "type": "article" + } + ] + }, + "FntL9E2yVAYwIrlANDNKE": { + "title": "Unsupervised Learning", + "description": "Unsupervised learning, as a fundamental aspect of Machine Learning, holds great implications in the realm of data analytics. It is an approach where a model learns to identify patterns and relationships within a dataset that isn't labelled or classified. It is especially useful for a Data Analyst as it can assist in recognizing unforeseen trends, providing new insights or preparing data for other machine learning tasks. This ability to infer without direct supervision allows a vast potential for latent structure discovery and new knowledge derivation from raw data.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is unsupervised learning?", + "url": "https://cloud.google.com/discover/what-is-unsupervised-learning", + "type": "article" + }, + { + "title": "Introduction to unsupervised learning", + "url": "https://www.datacamp.com/blog/introduction-to-unsupervised-learning", + "type": "article" + } + ] + }, + "FIYCkGXofKMsXmsqHSMh9": { + "title": "Supervised Learning", + "description": "Supervised machine learning forms an integral part of the toolset for a Data Analyst. 
With a direct focus on building predictive models from labeled datasets, it involves training an algorithm based on these known inputs and outputs, helping Data Analysts establish correlations and make reliable predictions. Fortifying a Data Analyst's role, supervised machine learning enables the accurate interpretation of complex data, enhancing decision-making processes.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is supervised learning?", + "url": "https://cloud.google.com/discover/what-is-supervised-learning", + "type": "article" + }, + { + "title": "Supervised Machine Learning", + "url": "https://www.datacamp.com/blog/supervised-machine-learning", + "type": "article" + } + ] + }, + "vHDm_9Lh4EqvXNYSOBaqZ": { + "title": "Decision Trees", + "description": "As a data analyst, understanding machine learning topics like decision trees is crucial. Decision trees are a fundamental aspect in the field of machine learning and artificial intelligence. They present a simple yet effective method of data analysis. They have applications in several areas including customer relationship management, fraud detection, financial analysis, healthcare and more. In simpler terms, a decision tree can be considered as a method of breaking down complex decisions and estimating likely outcomes. 
This introduction would help data analysts understand the logic behind decision trees and how they are constructed for the purpose of predictive modeling.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is machine learning for analytics?", + "url": "https://www.oracle.com/business-analytics/what-is-machine-learning-for-analytics/", + "type": "article" + }, + { + "title": "The Role of Machine Learning in Data Analysis", + "url": "https://www.ironhack.com/gb/blog/the-role-of-machine-learning-in-data-analysis", + "type": "article" + } + ] + }, + "zbUw5PyVsdccbgDTRHg7d": { + "title": "Naive Bayes", + "description": "As a data analyst, understanding various machine learning algorithms is crucial. Naive Bayes is one of such basic yet powerful algorithms used for predictive modeling and data classification. This algorithm applies the principles of probability and statistics, specifically Bayes' theorem, with a 'naive' assumption of independence among the predictors. Ideal for dealing with large volumes of data, Naive Bayes is a competitive algorithm for text classification, spam filtering, recommendation systems, and more. Understanding Naive Bayes can significantly improve the ability of a data analyst to create more effective models and deliver superior analytical results.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What are Naïve Bayes classifiers?", + "url": "https://www.ibm.com/topics/naive-bayes", + "type": "article" + }, + { + "title": "Naive Bayes", + "url": "https://scikit-learn.org/stable/modules/naive_bayes.html", + "type": "article" + } + ] + }, + "h2xF5bZeUByDgsAi4dga2": { + "title": "K-Means Clustering", + "description": "Kmeans is a fundamentally important method in data analysis and falls under the broad umbrella of machine learning basics. A data analyst using Kmeans clusters large data sets into subgroups or clusters based upon specific characteristics or parameters. 
The primary purpose is to derive insights from similarities/dissimilarities within the dataset, which can then be used for understanding patterns, trends, and predictive modeling. Accurate use of Kmeans can lead to enhanced decision-making, forecasting and strategic planning based on the data.\n\nLearn more from the following resources:", + "links": [ + { + "title": "K-Means Clustering", + "url": "https://en.wikipedia.org/wiki/K-means_clustering", + "type": "article" + }, + { + "title": "K-Means", + "url": "https://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html", + "type": "article" + } + ] + }, + "7ikA373qH88HBx5irCgIH": { + "title": "Model Evaluation Techniques", + "description": "As a data analyst, it's crucial to understand various model evaluation techniques. These techniques involve different methods to measure the performance or accuracy of machine learning models. For instance, using confusion matrix, precision, recall, F1 score, ROC curves or Root Mean Squared Error (RMSE) among others. Knowing how to apply these techniques effectively not only helps in selecting the best model for a specific problem but also guides in tuning the performance of the models for optimal results. Understanding these model evaluation techniques also allows data analysts to interpret evaluation results and determine the effectiveness and applicability of a model.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is model evaluation", + "url": "https://domino.ai/data-science-dictionary/model-evaluation", + "type": "article" + }, + { + "title": "Model evaluation metrics", + "url": "https://www.markovml.com/blog/model-evaluation-metrics", + "type": "article" + } + ] + }, + "_aUQZWUhFRvNu0MZ8CPit": { + "title": "Big Data Technologies", + "description": "In the modern digitized world, Big Data refers to extremely large datasets that are challenging to manage and analyze using traditional data processing applications. 
These datasets often come from numerous different sources and are not only voluminous but also diverse in nature, including structured and unstructured data. The role of a data analyst in the context of big data is crucial. Data analysts are responsible for inspecting, cleaning, transforming, and modeling big data to discover useful information, conclude and support decision-making. They leverage their analytical skills and various big data tools and technologies to extract insights that can benefit the organization and drive strategic business initiatives.", + "links": [] + }, + "m1IfG2sEedUxMXrv_B8GW": { + "title": "Big Data Concepts", + "description": "Big data refers to extremely large and complex data sets that traditional data processing systems are unable to manage effectively. For data analysts, understanding the big data concepts is crucial as it helps them gain insights, make decisions, and create meaningful presentations using these data sets. The key concepts include volume, velocity, and variety - collectively known as the 3Vs. Volume refers to the amount of data, velocity is the speed at which data is processed, and variety indicates the different types of data being dealt with. Other advanced concepts include variability and veracity. These concepts provide a framework for understanding and working with big data for data analysts. 
With the growing importance of big data in various industries and sectors, a comprehensive grasp of these concepts equips a data analyst to more effectively and efficiently analyze and interpret complex data sets.\n\nLearn more from the following resources:", + "links": [ + { + "title": "An Introduction to Big Data Concepts and Terminology", + "url": "https://www.digitalocean.com/community/tutorials/an-introduction-to-big-data-concepts-and-terminology", + "type": "article" + }, + { + "title": "An Introduction to Big Data Concepts", + "url": "https://www.suse.com/c/rancher_blog/an-introduction-to-big-data-concepts/", + "type": "article" + } + ] + }, + "SStzU_iXSvI_9QWbvGNou": { + "title": "KNN", + "description": "K-Nearest Neighbors (KNN) is a simple yet powerful algorithm used in the field of machine learning, which a Data Analyst might employ for tasks such as classification or regression. It works based on the principle of proximity, where the prediction of new instance's category depends upon the category of its nearest neighbors. For a Data Analyst working with complex data sets, it's crucial to understand how the KNN algorithm operates, its applicability, pros, and cons. This will facilitate making well-informed decisions about when to utilize it for the best possible outcome in data analysis.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is the KNN algorithm? - IBM", + "url": "https://www.ibm.com/topics/knn#:~:text=The%20k%2Dnearest%20neighbors%20(KNN,used%20in%20machine%20learning%20today.)", + "type": "article" + }, + { + "title": "Nearest Neighbors", + "url": "https://scikit-learn.org/stable/modules/neighbors.html", + "type": "article" + } + ] + }, + "ofOQKAIu4rezWmgZeE-KT": { + "title": "Logistic Regression", + "description": "Logistic Regression is one of the foundational techniques that a data analyst must understand in machine learning. 
This method is a predictive analysis algorithm based on the concept of probability. It’s used for categorizing data into distinct classes, making it particularly useful for binary classification problems. It should be understood that despite its name, logistic regression is used in classification problems, not regression tasks. Data analysts use this algorithm to build machine learning models to solve various real-world problems such as email spam, credibility of loan applicants, development of marketing strategies and so on.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Everything you need to know about Logistic Regression", + "url": "https://www.spiceworks.com/tech/artificial-intelligence/articles/what-is-logistic-regression/", + "type": "article" + }, + { + "title": "Logistic Regression for Machine Learning", + "url": "https://machinelearningmastery.com/logistic-regression-for-machine-learning/", + "type": "article" + } + ] + }, + "Fwwve0j-KDfc605IIgmil": { + "title": "Parallel Processing", + "description": "Parallel processing is an efficient form of data processing that allows Data Analysts to deal with larger volumes of data at a faster pace. It is a computational method that allows multiple tasks to be performed concurrently, instead of sequentially, thus, speeding up data processing. Parallel processing proves to be invaluable for Data Analysts, as they are often tasked with analyzing huge data sets and compiling reports in real-time. 
As the demand for rapid data processing and quick analytics is on the rise, the technique of parallel processing forms a critical element in the versatile toolkit of a Data Analyst.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is parallel processing?", + "url": "https://www.spiceworks.com/tech/iot/articles/what-is-parallel-processing/", + "type": "article" + }, + { + "title": "How parallel computing works?", + "url": "https://computer.howstuffworks.com/parallel-processing.htm", + "type": "article" + } + ] + }, + "wECWIRMlWNoTxz5eKwaSf": { + "title": "Hadoop", + "description": "Hadoop is a critical element in the realm of data processing frameworks, offering an effective solution for storing, managing, and analyzing massive amounts of data. Unraveling meaningful insights from a large deluge of data is a challenging pursuit faced by many data analysts. Regular data processing tools fail to handle large-scale data, paving the way for advanced frameworks like Hadoop. This open-source platform by Apache Software Foundation excels at storing and processing vast data across clusters of computers. Notably, Hadoop comprises two key modules - the Hadoop Distributed File System (HDFS) for storage and MapReduce for processing. Hadoop’s ability to handle both structured and unstructured data further broadens its capacity. For any data analyst, a thorough understanding of Hadoop can unlock powerful ways to manage data effectively and construct meaningful analytics.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Apache Hadoop Website", + "url": "https://hadoop.apache.org/", + "type": "article" + }, + { + "title": "What Is Hadoop?", + "url": "https://www.databricks.com/glossary/hadoop", + "type": "article" + } + ] + }, + "vaiigToDh4522rtWamuSM": { + "title": "Spark", + "description": "As a big data processing framework, Apache Spark showcases immense importance in the field of data analysis. 
Abreast with the ability to handle both batch and real-time analytics, Spark offers an interface for programming entire clusters with implicit data parallelism and fault tolerance. As a data analyst, mastery over Spark becomes essential in order to efficiently process and analyze complex and high-volume data. This powerful open-source tool can simplify the daunting task of gleaning actionable insights from massive, disparate data sets.\n\nLearn more from the following resources:", + "links": [ + { + "title": "apache/spark", + "url": "https://github.com/apache/spark", + "type": "opensource" + }, + { + "title": "Apache Spark Website", + "url": "https://spark.apache.org/", + "type": "article" + } + ] + }, + "fqCAH4Enc4AAJHoSkk2Pe": { + "title": "MPI", + "description": "Message Passing Interface (MPI) is a pioneering technique in the broader realm of data processing strategies. As a data analyst, understanding and implementing MPI is pivotal for managing massive data sets. MPI is an authorized standard for performing parallel computing, which allows concurrent data processing, maintaining a highly efficient and time-saving operation. This system exchanges data between separate tasks and aids in solving complex problems related to computations and data analysis. By leveraging MPI in data processing, analysts can expect to optimize their work and contribute to faster decision-making, thereby enhancing the overall organizational efficiency.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Message Passing Interface Forum", + "url": "https://www.mpi-forum.org/", + "type": "article" + }, + { + "title": "Microsoft MPI", + "url": "https://learn.microsoft.com/en-us/message-passing-interface/microsoft-mpi", + "type": "article" + } + ] + }, + "eAM9orCNwzqh34uvGBVm8": { + "title": "MapReduce", + "description": "MapReduce is a prominent data processing technique used by Data Analysts around the world. 
It allows them to handle large data sets with complex, unstructured data efficiently. MapReduce breaks down a big data problem into smaller sub-tasks (Map) and then takes those results to create an output in a more usable format (Reduce). This technique is particularly useful in conducting exploratory analysis, as well as in handling big data operations such as text processing, graph processing, or more complicated machine learning algorithms.\n\nLearn more from the following resources:", + "links": [ + { + "title": "MapReduce", + "url": "https://www.databricks.com/glossary/mapreduce", + "type": "article" + }, + { + "title": "What is Apache MapReduce?", + "url": "https://www.ibm.com/topics/mapreduce", + "type": "article" + } + ] + }, + "SiYUdtYMDImRPmV2_XPkH": { + "title": "Deep Learning (Optional)", + "description": "Deep learning, a subset of machine learning technique, is increasingly becoming a critical tool for data analysts. Deep learning algorithms utilize multiple layers of neural networks to understand and interpret intricate structures in large data, a skill that is integral to the daily functions of a data analyst. With the ability to learn from unstructured or unlabeled data, deep learning opens a whole new range of possibilities for data analysts in terms of data processing, prediction, and categorization. It has applications in a variety of industries from healthcare to finance to e-commerce and beyond. A deeper understanding of deep learning methodologies can augment a data analyst's capability to evaluate and interpret complex datasets and provide valuable insights for decision making.", + "links": [] + }, + "gGHsKcS92StK5FolzmVvm": { + "title": "Neural Networks", + "description": "Neural Networks play a pivotal role in the landscape of deep learning, offering a plethora of benefits and applications for data analysts. 
They are computational models that emulate the way human brain processes information, enabling machines to make intelligent decisions. As a data analyst, understanding and utilizing neural networks can greatly enhance decision-making process as it allows to quickly and effectively analyze large datasets, recognize patterns, and forecast future trends. In deep learning, these networks are used for creating advanced models that can tackle complex tasks such as image recognition, natural language processing, and speech recognition, to name but a few. Therefore, an in-depth knowledge of neural networks is a significant asset for any aspiring or professional data analyst.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is a neural network?", + "url": "https://aws.amazon.com/what-is/neural-network/", + "type": "article" + }, + { + "title": "Explained: Neural networks", + "url": "https://news.mit.edu/2017/explained-neural-networks-deep-learning-0414", + "type": "article" + } + ] + }, + "XlCv2OirEfuBFa4ranyyE": { + "title": "CNNs", + "description": "Convolutional Neural Networks (CNNs) form an integral part of deep learning frameworks, particularly within the realm of image processing. Data analysts with a focus on deep learning applications often turn to CNNs for their capacity to efficiently process high-dimensional data, such as images, and extract critical features relevant to the problem at hand. As a powerful tool for modeling patterns in data, CNNs are frequently employed in applications ranging from image recognition to natural language processing (NLP). 
Understanding CNNs, therefore, provides a robust foundation for data analysts aspiring to harness the potential of deep learning techniques.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What are convolutional neural networks?", + "url": "https://www.ibm.com/topics/convolutional-neural-networks", + "type": "article" + }, + { + "title": "What are Convolutional Neural Networks (CNNs)?", + "url": "https://www.youtube.com/watch?v=QzY57FaENXg", + "type": "video" + } + ] + }, + "Gocm98_tRg5BGxKcP-7zg": { + "title": "RNN", + "description": "Recurrent Neural Networks(RNNs) are a type of Artificial Neural Networks(ANNs) which introduces us to the realm of Deep Learning, an aspect that has been significantly contributing to the evolution of Data Analysis. RNNs are specifically designed to recognize patterns in sequences of data, such as text, genomes, handwriting, or the spoken word. This inherent feature of RNNs makes them extremely useful and versatile for a data analyst.\n\nA data analyst leveraging RNNs can effectively charter the intrinsic complexity of data sequences, classify them, and make accurate predictions. With the fundamental understanding of deep learning, data analysts can unlock the full potential of RNNs in delivering insightful data analysis that goes beyond traditional statistical methods. 
Modern research and applications of RNNs extend to multiple domains including natural language processing, speech recognition, and even in the financial sphere for stock price prediction making this a key tool in a data analyst’s arsenal.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is a recurrent neural network (RNN)?", + "url": "https://www.ibm.com/topics/recurrent-neural-networks", + "type": "article" + }, + { + "title": "Recurrent Neural Networks cheatsheet", + "url": "https://stanford.edu/~shervine/teaching/cs-230/cheatsheet-recurrent-neural-networks", + "type": "article" + } + ] + }, + "FJ4Sx477FWxyDsQr0R8rl": { + "title": "Tensorflow", + "description": "TensorFlow, developed by Google Brain Team, has become a crucial tool in the realm of data analytics, particularly within the field of deep learning. It's an open-source platform for machine learning, offering a comprehensive and flexible ecosystem of tools, libraries, and community resources. As a data analyst, understanding and implementing TensorFlow for deep learning models allows us to identify complex patterns and make insightful predictions which standard analysis could miss. It's in-demand skill that enhances our ability to generate accurate insights from colossal and complicated structured or unstructured data sets.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Tensorflow Website", + "url": "https://www.tensorflow.org/", + "type": "article" + }, + { + "title": "Tensorflow in 100 seconds", + "url": "https://www.youtube.com/watch?v=i8NETqtGHms", + "type": "video" + } + ] + }, + "LJSqfz6aYJbCe_bK8EWI1": { + "title": "Pytorch", + "description": "PyTorch, an open-source machine learning library, has gained considerable popularity among data analysts due to its simplicity and high performance in tasks such as natural language processing and artificial intelligence. 
Specifically, in the domain of deep learning, PyTorch stands out due to its dynamic computational graph, allowing for a highly intuitive and flexible platform for building complex models. For data analysts, mastering PyTorch can open up a broad range of opportunities for data model development, data processing, and integration of machine learning algorithms.\n\nLearn more from the following resources:", + "links": [ + { + "title": "PyTorch Website", + "url": "https://pytorch.org/", + "type": "article" + }, + { + "title": "PyTorch in 100 seconds", + "url": "https://www.youtube.com/watch?v=ORMx45xqWkA", + "type": "video" + } + ] + }, + "bHPJ6yOHtUq5EjJBSrJUE": { + "title": "Image Recognition", + "description": "Image Recognition has become a significant domain because of its diverse applications, including facial recognition, object detection, character recognition, and much more. As a Data Analyst, understanding Image Recognition under Deep Learning becomes crucial. The data analyst's role in this context involves deciphering complex patterns and extracting valuable information from image data. This area of machine learning combines knowledge of data analysis, image processing, and deep neural networks to provide accurate results, contributing significantly to the progression of fields like autonomous vehicles, medical imaging, surveillance, among others. 
Therefore, proficiency in this field paves the way for proficient data analysis, leading to innovative solutions and improved decision-making.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is image recognition?", + "url": "https://www.techtarget.com/searchenterpriseai/definition/image-recognition", + "type": "article" + }, + { + "title": "Image Recognition: Definition, Algorithms & Uses", + "url": "https://www.v7labs.com/blog/image-recognition-guide", + "type": "article" + } + ] + }, + "DFv-eiYDicF4cA4jgVk9_": { + "title": "Natural Language Processing", + "description": "In the sphere of data analysis, Natural Language Processing (NLP) has emerged as a critical aspect. NLP is a branch of artificial intelligence that involves the interaction between computers and human languages. It allows computers to understand, interpret, and generate human languages with meaning and context. This capability opens up potent avenues for data analysts, who often have to handle unstructured data such as customer reviews, comments, and other textual content.\n\nDeep Learning, a subset of machine learning based on artificial neural networks, is particularly effective for NLP tasks, enabling computers to learn from vast amounts of data. For data analysts, understanding and utilizing the potentials of NLP can greatly improve the efficiency of data processing and extraction of meaningful insights, especially when dealing with large or complex data sets. 
This knowledge can significantly enhance their ability to make data-driven decisions and predictions tailored to specific business objectives.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is NLP?", + "url": "https://aws.amazon.com/what-is/nlp/", + "type": "article" + }, + { + "title": "Natural Language Processing", + "url": "https://www.deeplearning.ai/resources/natural-language-processing/", + "type": "article" + } + ] + }, + "iTmtpXe7dR4XKslgpsk2q": { + "title": "Data Storage Solutions", + "description": "As a business enterprise expands, so does its data. For data analysts, the surge in information means they need efficient and scalable data storage solutions to manage vast volumes of structured and unstructured data, collectively referred to as Big Data. Big Data storage solutions are critical in preserving the integrity of data while also providing quick and easy access to the data when needed. These solutions use software and hardware components to securely store massive amounts of information across numerous servers, allowing data analysts to perform robust data extraction, data processing and complex data analyses. There are several options, from the traditional Relational Database Management Systems (RDBMS) to the more recent NoSQL databases, Hadoop ecosystems, and Cloud storage solutions, each offering unique capabilities and benefits to cater for different big data needs.", + "links": [] + } +} \ No newline at end of file diff --git a/public/roadmap-content/devops.json b/public/roadmap-content/devops.json new file mode 100644 index 000000000..1150d5e8b --- /dev/null +++ b/public/roadmap-content/devops.json @@ -0,0 +1,3102 @@ +{ + "v5FGKQc-_7NYEsWjmTEuq": { + "title": "Learn a Programming Language", + "description": "It doesn't matter what language you pick, but it is important to learn at least one. 
You will be able to use that language to write automation scripts.", + "links": [ + { + "title": "Guide to Picking a Language for DevOps", + "url": "https://cs.fyi/guide/programming-language-for-devops", + "type": "article" + } + ] + }, + "TwVfCYMS9jSaJ6UyYmC-K": { + "title": "Python", + "description": "Python is a multi-paradigm language. Being an interpreted language, code is executed as soon as it is written and the Python syntax allows for writing code in functional, procedural or object-oriented programmatic ways. Python is frequently recommended as the first language new coders should learn, because of its focus on readability, consistency, and ease of use. This comes with some downsides, as the language is not especially performant in most production tasks.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Visit Dedicated Python Roadmap", + "url": "/python", + "type": "article" + }, + { + "title": "Python Website", + "url": "https://www.python.org/", + "type": "article" + }, + { + "title": "Python Getting Started", + "url": "https://www.python.org/about/gettingstarted/", + "type": "article" + }, + { + "title": "Automate the Boring Stuff", + "url": "https://automatetheboringstuff.com/", + "type": "article" + }, + { + "title": "W3Schools - Python Tutorial ", + "url": "https://www.w3schools.com/python/", + "type": "article" + }, + { + "title": "Python Crash Course", + "url": "https://ehmatthes.github.io/pcc/", + "type": "article" + }, + { + "title": "Explore top posts about Python", + "url": "https://app.daily.dev/tags/python?ref=roadmapsh", + "type": "article" + } + ] + }, + "PuXAPYA0bsMgwcnlwJxQn": { + "title": "Ruby", + "description": "Ruby is a high-level, interpreted programming language that blends Perl, Smalltalk, Eiffel, Ada, and Lisp. Ruby focuses on simplicity and productivity along with a syntax that reads and writes naturally. 
Ruby supports procedural, object-oriented and functional programming and is dynamically typed.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Ruby Website", + "url": "https://www.ruby-lang.org/en/", + "type": "article" + }, + { + "title": "Learn Ruby in 20 minutes", + "url": "https://www.ruby-lang.org/en/documentation/quickstart/", + "type": "article" + }, + { + "title": "Explore top posts about Ruby", + "url": "https://app.daily.dev/tags/ruby?ref=roadmapsh", + "type": "article" + } + ] + }, + "npnMwSDEK2aLGgnuZZ4dO": { + "title": "Go", + "description": "Go is an open source programming language supported by Google. Go can be used to write cloud services, CLI tools, used for API development, and much more.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Visit Dedicated Go Roadmap", + "url": "/golang", + "type": "article" + }, + { + "title": "A Tour of Go – Go Basics", + "url": "https://go.dev/tour/welcome/1", + "type": "article" + }, + { + "title": "Go Reference Documentation", + "url": "https://go.dev/doc/", + "type": "article" + }, + { + "title": "Go by Example - annotated example programs", + "url": "https://gobyexample.com/", + "type": "article" + }, + { + "title": "W3Schools Go Tutorial ", + "url": "https://www.w3schools.com/go/", + "type": "article" + }, + { + "title": "Making a RESTful JSON API in Go", + "url": "https://thenewstack.io/make-a-restful-json-api-go/", + "type": "article" + }, + { + "title": "Explore top posts about Golang", + "url": "https://app.daily.dev/tags/golang?ref=roadmapsh", + "type": "article" + } + ] + }, + "eL62bKAoJCMsu7zPlgyhy": { + "title": "Rust", + "description": "Rust is a modern systems programming language focusing on safety, speed, and concurrency. 
It accomplishes these goals by being memory safe without using garbage collection.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "The Rust Programming Language - online book", + "url": "https://doc.rust-lang.org/book/", + "type": "article" + }, + { + "title": "Rust by Example - collection of runnable examples", + "url": "https://doc.rust-lang.org/stable/rust-by-example/index.html", + "type": "article" + }, + { + "title": "Comprehensive Rust by Google - Learn Rust in 4 Days", + "url": "https://google.github.io/comprehensive-rust/", + "type": "article" + }, + { + "title": "Microsoft Learn Course", + "url": "https://learn.microsoft.com/en-us/training/paths/rust-first-steps/", + "type": "article" + }, + { + "title": "Quick Rust Guide", + "url": "https://sivanaikk.github.io/rust/", + "type": "article" + }, + { + "title": "Rust Katas - Small, interactive Rust exercises", + "url": "https://rustlings.cool/", + "type": "article" + }, + { + "title": "Explore top posts about Rust", + "url": "https://app.daily.dev/tags/rust?ref=roadmapsh", + "type": "article" + }, + { + "title": "The Rust Programming Book - Video Version", + "url": "https://youtube.com/playlist?list=PLai5B987bZ9CoVR-QEIN9foz4QCJ0H2Y8", + "type": "video" + } + ] + }, + "QCdemtWa2mE78poNXeqzr": { + "title": "JavaScript / Node.js", + "description": "JavaScript allows you to add interactivity to your pages. Common examples that you may have seen on the websites are sliders, click interactions, popups and so on. 
Apart from being used on the frontend in browsers, there is Node.js which is an open-source, cross-platform, back-end JavaScript runtime environment that runs on the V8 engine and executes JavaScript code outside a web browser.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Visit Dedicated JavaScript Roadmap", + "url": "/javascript", + "type": "article" + }, + { + "title": "W3Schools – JavaScript Tutorial", + "url": "https://www.w3schools.com/js/", + "type": "article" + }, + { + "title": "The Modern JavaScript Tutorial", + "url": "https://javascript.info/", + "type": "article" + }, + { + "title": "W3Schools – Node.js Tutorial", + "url": "https://www.w3schools.com/nodejs/", + "type": "article" + }, + { + "title": "What is NPM?", + "url": "https://www.w3schools.com/nodejs/nodejs_npm.asp", + "type": "article" + }, + { + "title": "Official Documentation", + "url": "https://nodejs.org/en/learn/getting-started/introduction-to-nodejs", + "type": "article" + }, + { + "title": "Explore top posts about JavaScript", + "url": "https://app.daily.dev/tags/javascript?ref=roadmapsh", + "type": "article" + }, + { + "title": "JavaScript Crash Course for Beginners", + "url": "https://youtu.be/hdI2bqOjy3c", + "type": "video" + }, + { + "title": "Node.js Crash Course", + "url": "https://www.youtube.com/watch?v=fBNz5xF-Kx4", + "type": "video" + }, + { + "title": "Node.js Tutorial for Beginners", + "url": "https://www.youtube.com/watch?v=TlB_eWDSMt4", + "type": "video" + } + ] + }, + "qe84v529VbCyydl0BKFk2": { + "title": "Operating System", + "description": "**An Operating system serves as a bridge between a computer's user and its hardware. An operating system's function is to offer a setting in which a user can conveniently and effectively run programmes.** In simple terms we can say that and Operating System (OS) is an interface between a computer user and computer hardware. 
An OS permits software programmes to communicate with a computer's hardware. The **kernel** is the name of the piece of software that houses the fundamental elements of an **Operating System**.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "All you need to know about OS.", + "url": "https://www.javatpoint.com/os-tutorial", + "type": "article" + }, + { + "title": "Learn Operating Systems", + "url": "https://www.tutorialspoint.com/operating_system/os_overview.htm", + "type": "article" + }, + { + "title": "What are Operating Systems?", + "url": "https://www.youtube.com/watch?v=pVzRTmdd9j0", + "type": "video" + }, + { + "title": "Operating Systems!", + "url": "https://www.youtube.com/watch?v=vBURTt97EkA&list=PLBlnK6fEyqRiVhbXDGLXDk_OQAeuVcp2O", + "type": "video" + } + ] + }, + "cTqVab0VbVcn3W7i0wBrX": { + "title": "Ubuntu / Debian", + "description": "Debian is a free and open-source Linux distribution developed by the Debian Project, an all volunteer software community organization. Debian is the upstream distribution of Ubuntu.\n\nUbuntu is a free and open-source Linux distribution based on Debian. 
Ubuntu is available in three versions Desktop, Server and Core.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Debian Website", + "url": "https://www.debian.org/", + "type": "article" + }, + { + "title": "Ubuntu Website", + "url": "https://ubuntu.com/", + "type": "article" + }, + { + "title": "Learn the ways of Linux-fu, for free", + "url": "https://linuxjourney.com/", + "type": "article" + }, + { + "title": "The Linux Command Line by William Shotts", + "url": "https://linuxcommand.org/tlcl.php", + "type": "article" + }, + { + "title": "Linux Upskill Challenge", + "url": "https://linuxupskillchallenge.org/", + "type": "article" + }, + { + "title": "Linux Fundamentals", + "url": "https://academy.hackthebox.com/course/preview/linux-fundamentals", + "type": "article" + }, + { + "title": "Explore top posts about Ubuntu", + "url": "https://app.daily.dev/tags/ubuntu?ref=roadmapsh", + "type": "article" + }, + { + "title": "Linux Operating System - Crash Course for Beginners", + "url": "https://www.youtube.com/watch?v=ROjZy1WbCIA", + "type": "video" + }, + { + "title": "Introduction to Linux - Full Course for Beginners", + "url": "https://www.youtube.com/watch?v=sWbUDq4S6Y8&pp=ygUTVWJ1bnR1IGNyYXNoIGNvdXJzZQ%3D%3D", + "type": "video" + } + ] + }, + "zhNUK953p6tjREndk3yQZ": { + "title": "SUSE Linux", + "description": "openSUSE is a free to use Linux distribution aimed to promote the use of Linux everywhere. 
openSUSE is released in two versions Leap and Tumbleweed\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "openSUSE Linux Website", + "url": "https://www.opensuse.org/", + "type": "article" + }, + { + "title": "openSUSE Documentation", + "url": "https://en.opensuse.org/Main_Page", + "type": "article" + }, + { + "title": "Unofficial openSUSE Getting Started Guide", + "url": "https://opensuse-guide.org/", + "type": "article" + }, + { + "title": "Explore top posts about Linux", + "url": "https://app.daily.dev/tags/linux?ref=roadmapsh", + "type": "article" + } + ] + }, + "7mS6Y_BOAHNgM3OjyFtZ9": { + "title": "RHEL / Derivatives", + "description": "Red Hat Enterprise Linux (RHEL) is a popular distribution of the Linux operating system that is designed for enterprise-level use. It is developed and maintained by Red Hat, Inc., and it is available under a subscription-based model.\n\nThere are several distributions of Linux that are based on RHEL, or that have been derived from RHEL in some way. These distributions are known as RHEL derivatives. 
Some examples of RHEL derivatives include: AlmaLinux, CentOS, CloudLinux, Oracle Linux, and Scientific Linux.\n\nRHEL derivatives are often used in enterprise environments because they offer the stability and reliability of RHEL, but with the added benefit of being free or lower-cost alternatives.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Red Hat Enterprise Linux Website", + "url": "https://www.redhat.com/en/technologies/linux-platforms/enterprise-linux", + "type": "article" + }, + { + "title": "RHEL Documentation", + "url": "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/", + "type": "article" + }, + { + "title": "RHEL Getting Started Guides", + "url": "https://www.redhat.com/en/technologies/linux-platforms/enterprise-linux/get-started", + "type": "article" + }, + { + "title": "What is Red Hat Enterprise Linux (RHEL) - Red Hat", + "url": "https://www.redhat.com/en/topics/linux/what-is-rhel", + "type": "article" + }, + { + "title": "Learn Linux 101", + "url": "https://developer.ibm.com/series/learn-linux-101/", + "type": "article" + } + ] + }, + "PiPHFimToormOPl1EtEe8": { + "title": "FreeBSD", + "description": "FreeBSD is a free and open-source Unix-like operating system including many features such as preemptive multitasking, memory protection, virtual memory, and multi-user facilities.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "FreeBSD Website", + "url": "https://www.freebsd.org/", + "type": "article" + }, + { + "title": "Official FreeBSD Documentation", + "url": "https://docs.freebsd.org/en/", + "type": "article" + }, + { + "title": "FreeBSD Handbook", + "url": "https://docs.freebsd.org/en/books/handbook/", + "type": "article" + }, + { + "title": "FreeBSD Resources for Newbies ", + "url": "https://www.freebsd.org/projects/newbies/", + "type": "article" + } + ] + }, + "97cJYKqv7CPPUXkKNwM4x": { + "title": "OpenBSD", + "description": "OpenBSD is a free and open-source 
Unix-like operating system, focussed on portability, standardization, correctness, proactive security and integrated cryptography. The popular software application [OpenSSH](https://www.openssh.com/) is developed by the OpenBSD project.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "OpenBSD Website", + "url": "https://www.openbsd.org/", + "type": "article" + }, + { + "title": "Official OpenBSD Documentation", + "url": "https://man.openbsd.org/search", + "type": "article" + }, + { + "title": "OpenBSD Handbook", + "url": "https://www.openbsdhandbook.com/", + "type": "article" + }, + { + "title": "OpenBSD Installation Guide", + "url": "https://www.openbsd.org/faq/faq4.html", + "type": "article" + } + ] + }, + "haiYSwNt3rjiiwCDszPk1": { + "title": "NetBSD", + "description": "NetBSD is a free, fast, secure, and highly portable Unix-like Open Source operating system. It is available for a wide range of platforms, from large-scale servers and powerful desktop systems to handheld and embedded devices.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "NetBSD Website", + "url": "https://netbsd.org/", + "type": "article" + }, + { + "title": "Official NetBSD Documentation", + "url": "https://netbsd.org/docs/", + "type": "article" + }, + { + "title": "NetBSD Guide", + "url": "https://netbsd.org/docs/guide/en/index.html", + "type": "article" + } + ] + }, + "UOQimp7QkM3sxmFvk5d3i": { + "title": "Windows", + "description": "Windows is a graphical user interface (GUI) based operating system developed by Microsoft. It is a hybrid kernel-based proprietary operating system. 
According to a survey, as of April 2022, Windows is the most popular operating system in the world with a 75% market share.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Windows Official Site", + "url": "http://microsoft.com/windows", + "type": "article" + }, + { + "title": "Windows Documentation for Developers", + "url": "https://learn.microsoft.com/en-us/windows/", + "type": "article" + }, + { + "title": "Explore top posts about Windows", + "url": "https://app.daily.dev/tags/windows?ref=roadmapsh", + "type": "article" + } + ] + }, + "wjJPzrFJBNYOD3SJLzW2M": { + "title": "Terminal Knowledge", + "description": "A terminal is simply a text-based interface to the computer; it is used to interact with your computer system via CLI (command line interface)\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is CLI?", + "url": "https://en.wikipedia.org/wiki/Command-line_interface", + "type": "article" + } + ] + }, + "x-JWvG1iw86ULL9KrQmRu": { + "title": "Process Monitoring", + "description": "A process is an instance of a computer program that is being executed. Each process is identified by a unique number called a process ID (PID). A process is a running program. The operating system tracks processes through the use of process identifiers. A process identifier (PID) is a unique number that identifies a specific process. A PID is automatically assigned to each process when it is created on the system.\n\nThere are several linux commands that can be used to monitor processes. 
The most common ones are:\n\n* `ps` - report a snapshot of the current processes.\n* `top` - display Linux processes.\n* `htop` - interactive process viewer.\n* `atop` - advanced interactive monitor to view the load on a Linux system.\n* `lsof` - list open files.\n\nThe `ps` utility displays a header line, followed by lines containing information about all of your processes that have controlling terminals.\n\n* [@article@ps Documentation](https://man7.org/linux/man-pages/man1/ps.1.html)\n* [@article@ps Cheat Sheet](https://www.sysadmin.md/ps-cheatsheet.html)\n* [@video@Linux Crash Course - The ps Command](https://www.youtube.com/watch?v=wYwGNgsfN3I)\n\nThe `top` program periodically displays a sorted list of system processes. The default sorting key is pid, but other keys can be used instead. Various output options are available.\n\n* [@article@top Documentation](https://man7.org/linux/man-pages/man1/top.1.html)\n* [@article@top Cheat Sheet](https://gist.github.com/ericandrewlewis/4983670c508b2f6b181703df43438c37)\n\nhtop is a cross-platform ncurses-based process. It is similar to top, but allows you to scroll vertically and horizontally, and interact using a pointing device (mouse). 
You can observe all processes running on the system, along with their command line arguments, as well as view them in a tree format, select multiple processes and act on them all at once.\n\n* [@article@htop Documentation](https://www.man7.org/linux/man-pages/man1/htop.1.html)\n* [@article@htop Cheat Sheet](https://www.maketecheasier.com/power-user-guide-htop/)\n\nLsof lists on its standard output file information about files opened by processes.", + "links": [ + { + "title": "lsof Cheat Sheet", + "url": "https://neverendingsecurity.wordpress.com/2015/04/13/lsof-commands-cheatsheet/", + "type": "article" + }, + { + "title": "lsof Documentation", + "url": "https://man7.org/linux/man-pages/man8/lsof.8.html", + "type": "article" + }, + { + "title": "Explore top posts about Monitoring", + "url": "https://app.daily.dev/tags/monitoring?ref=roadmapsh", + "type": "article" + }, + { + "title": "Linux Crash Course - The lsof Command", + "url": "https://www.youtube.com/watch?v=n9nZ1ellaV0", + "type": "video" + } + ] + }, + "gIEQDgKOsoEnSv8mpEzGH": { + "title": "Performance Monitoring", + "description": "There are many tools available to monitor the performance of your application. Some of the most popular are:\n\n* `nmon` - A system monitor tool for Linux and AIX systems.\n* `iostat` - A tool that reports CPU statistics and input/output statistics for devices, partitions and network filesystems.\n* `sar` - A system monitor command used to report on various system loads, including CPU activity, memory/paging, device load, network.\n* `vmstat` - A tool that reports virtual memory statistics.\n\nNmon is a fully interactive performance monitoring command-line utility tool for Linux. 
It is a benchmark tool that displays performance about the cpu, memory, network, disks, file system, nfs, top processes, resources, and power micro-partition.\n\n* [@article@nmon Documentation](https://www.ibm.com/docs/en/aix/7.2?topic=n-nmon-command)\n\nThe iostat command in Linux is used for monitoring system input/output statistics for devices and partitions. It monitors system input/output by observing the time the devices are active in relation to their average transfer rates. The reports produced by iostat may be used to change the system configuration to better balance the input/output between the physical disks.\n\n* [@article@iostat Documentation](https://man7.org/linux/man-pages/man1/iostat.1.html)\n\nShort for **S**ystem **A**ctivity **R**eport, `sar` is a command line tool for Unix and Unix-like operating systems that shows a report of different information about the usage and activity of resources in the operating system.\n\n* [@article@SAR Man Page](https://man7.org/linux/man-pages/man1/sar.1.html)\n* [@article@SAR Man Page 2](https://linux.die.net/man/1/sar)\n* [@article@Sar tutorial for beginners](https://linuxhint.com/sar_linux_tutorial/)\n\nShort for **V**irtual **m**emory **stat**istic reporter, `vmstat` is a command line tool for Unix and Unix-like operating systems that reports various information about the operating system such as memory, paging, processes, I/O, CPU and disk usage.", + "links": [ + { + "title": "Linux commands: exploring virtual memory with vmstat", + "url": "https://www.redhat.com/sysadmin/linux-commands-vmstat", + "type": "article" + }, + { + "title": "VMstat Man Page", + "url": "https://man7.org/linux/man-pages/man8/vmstat.8.html", + "type": "article" + }, + { + "title": "vmstat tutorial", + "url": "https://phoenixnap.com/kb/vmstat-command", + "type": "article" + }, + { + "title": "Explore top posts about Monitoring", + "url": "https://app.daily.dev/tags/monitoring?ref=roadmapsh", + "type": "article" + } + ] + }, + 
"OaqKLZe-XnngcDhDzCtRt": { + "title": "Networking Tools", + "description": "Networking tools are used to troubleshoot network issues. They are also used to monitor network traffic and to test network connectivity. Some of the most common networking tools are:\n\n* `traceroute` - Traces the route taken by packets over an IP network.\n* `ping` - sends echo request packets to a host to test the Internet connection.\n* `mtr` - Combines the functionality of `traceroute` and `ping` into a single diagnostic tool.\n* `nmap` - Scans hosts for open ports.\n* `netstat` - Displays network connections, routing tables, interface statistics, masquerade connections, and multicast memberships.\n* `ufw` and `firewalld` - Firewall management tools.\n* `iptables` and `nftables` - Firewall management tools.\n* `tcpdump` - Dumps traffic on a network.\n* `dig` - DNS lookup utility.\n* `scp` - Secure copy.\n\n`traceroute` command is a command in Linux that prints the route a network packet takes from its source (e.g. your computer) to the destination host (e.g., [roadmap.sh](http://roadmap.sh)). It is quite valuable in investigating slow network connections as it can help us spot the slow leg of the network packet journey through the internet.\n\n* [How to Run Traceroute in Linux](https://linuxhint.com/run_traceroute_linux/)\n\n`ping` (**P**acket **In**ternet **G**roper) command is used to check the network connectivity between host and server/host. 
This command takes as input the IP address or the URL and sends a data packet to the specified address with the message “PING” and get a response from the server/host this time is recorded which is called latency.\n\n* [What is ping command?](https://linuxize.com/post/linux-ping-command/)\n\n`mtr` combines the functionality of the traceroute and ping programs in a single network diagnostic tool.\n\n* [Javatpoint: Linux mtr Command](https://www.javatpoint.com/linux-mtr)\n* [mtr Linux command](https://www.tutorialspoint.com/unix_commands/mtr.htm)\n* [How to traceroute use mtr command in Linux](https://www.devopsroles.com/how-to-traceroute-use-mtr-command-in-linux/)\n\nNMAP stands for Network Mapper and is an open-source tool used to explore and audit the network's security, such as checking firewalls and scanning ports.\n\n* [NMAP Official Manual Book](https://nmap.org/book/man.html)\n\nNetstat is a command line utility to display all the network connections on a system. It displays all the tcp, udp and unix socket connections. Apart from connected sockets it also displays listening sockets that are waiting for incoming connections.\n\n* [netstat command in Linux with Examples](https://www.tutorialspoint.com/unix_commands/netstat.htm)\n* [Netstat Tutorial](http://www.c-jump.com/CIS24/Slides/Networking/html_utils/netstat.html)\n* [Netstat Commands - Network Administration Tutorial](https://www.youtube.com/watch?v=bxFwpm4IobU)\n* [Linux Command Line Tutorial For Beginners - netstat command](https://www.youtube.com/watch?v=zGNcvBaN5wE)\n\nUFW, or _uncomplicated firewall_, is command-line based utility for managing firewall rules in Arch Linux, Debian and Ubuntu. It's aim is to make firewall configuration as simple as possible. 
It is a frontend for the `iptables` firewalling tool.\n\n* [ufw Documentation](https://manpages.ubuntu.com/manpages/trusty/man8/ufw.8.html)\n* [Basic Introduction to UFW](https://www.linux.com/training-tutorials/introduction-uncomplicated-firewall-ufw/)\n* [UFW Essentials](https://www.digitalocean.com/community/tutorials/ufw-essentials-common-firewall-rules-and-commands)\n\nIPtables is a command-line firewall utility that uses policy chains to allow or block traffic that will be enforced by the linux kernel’s netfilter framework. Iptables packet filtering mechanism is organized into three different kinds of structures: tables, chains and targets.\n\n* [Iptables tutorial](https://www.hostinger.in/tutorials/iptables-tutorial)\n* [Beginners to Advanced Guide Iptables](https://erravindrapawadia.medium.com/iptables-tutorial-beginners-to-advanced-guide-to-linux-firewall-839e10501759)\n\n`tcpdump` is a command line tool used for analysing network traffic passing through your system. It can be used to capture and filter packets and display them in a human-readable format. The captured information can be analysed at a later date as well.\n\n* [tcpdump Documentation](https://www.tcpdump.org/manpages/tcpdump.1.html)\n* [Basic Introduction to Tcpdump](https://opensource.com/article/18/10/introduction-tcpdump)\n* [50 ways to isolate traffic with Tcpdump](https://danielmiessler.com/study/tcpdump/)\n* [Interpreting Tcpdump output and data](https://www.youtube.com/watch?v=7bsQP9sKHrs)\n\n`dig` command stands for **D**omain **I**nformation **G**roper. It is used for retrieving information about DNS name servers. It is mostly used by network administrators for verifying and troubleshooting DNS problems and to perform DNS lookups. 
It replaces older tools such as `nslookup` and the `host`.\n\n* [More on dig](https://linuxize.com/post/how-to-use-dig-command-to-query-dns-in-linux/)\n* [What is DNS?](https://www.cloudflare.com/en-gb/learning/dns/what-is-dns/)\n\n`SCP` is an acronym for Secure Copy Protocol. It is a command line utility that allows the user to securely copy files and directories between two locations usually between unix or linux systems. The protocol ensures the transmission of files is encrypted to prevent anyone with suspicious intentions from getting sensitive information. `SCP` uses encryption over an `SSH` (Secure Shell) connection; this ensures that the data being transferred is protected from suspicious attacks.", + "links": [ + { + "title": "10 SCP command examples", + "url": "https://www.tecmint.com/scp-commands-examples/", + "type": "article" + }, + { + "title": "SCP command explained", + "url": "https://phoenixnap.com/kb/linux-scp-command", + "type": "article" + } + ] + }, + "cUifrP7v55psTb20IZndf": { + "title": "Text Manipulation", + "description": "Some of the most common commands you will use in the terminal are text manipulation commands. These commands allow you to search, replace, and manipulate text in files and streams. 
Here are some of the most common commands you will use:\n\n* `awk` - A programming language designed for text processing and typically used as a data extraction and reporting tool.\n* `sed` - A stream editor for filtering and transforming text.\n* `grep` - A command-line utility for searching plain-text data sets for lines that match a regular expression.\n* `sort` - A command-line utility for sorting lines of text files.\n* `cut` - A command-line utility for cutting sections from each line of files.\n* `uniq` - A command-line utility for reporting or omitting repeated lines.\n* `cat` - A command-line utility for concatenating files and printing on the standard output.\n* `echo` - A command-line utility for displaying a line of text.\n* `fmt` - A command-line utility for simple optimal text formatting.\n* `tr` - A command-line utility for translating or deleting characters.\n* `nl` - A command-line utility for numbering lines of files.\n* `wc` - A command-line utility for printing newline, word, and byte counts for files.\n\n`awk` is a general-purpose scripting language used for manipulating data or text and generating reports in the Linux world. It is mostly used for pattern scanning and processing. It searches one or more files to see if they contain lines that match the specified patterns and then performs the associated actions.\n\n* [@article@How AWK works?](https://linuxize.com/post/awk-command/)\n* [@video@Linux Crash Course - awk](https://www.youtube.com/watch?v=oPEnvuj9QrI)\n\n`sed`(**S**tream **Ed**itor) command in UNIX can perform lots of functions on file like searching, finding and replacing, insertion or deletion. 
By using SED you can edit files even without opening them in editors like [VI Editor](https://www.redhat.com/sysadmin/introduction-vi-editor).\n\n* [@article@Detailed Manual](https://www.gnu.org/software/sed/manual/sed.html)\n* [@video@Linux Crash Course - The sed Command](https://www.youtube.com/watch?v=nXLnx8ncZyE&t=218s)\n\nThe `grep` command (**g**lobal search for **r**egular **e**xpression and **p**rint out) searches file(s) for a particular pattern of characters, and displays all lines that contain that pattern. It can be used with other commands like `ps` making it more useful.\n\n* [@article@Detailed Manual](https://www.gnu.org/software/grep/manual/grep.html)\n* [@video@Linux Crash Course - The grep Command](https://www.youtube.com/watch?v=Tc_jntovCM0)\n\n`sort` command is used to sort the contents of a file in a particular order. By default, it sorts a file assuming the contents are in ASCII. But it also can also be used to sort numerically by using appropriate options.\n\n* [@article@Options](https://en.wikipedia.org/wiki/Sort_\\(Unix\\))\n\nThe cut utility cuts out selected portions of each line (as specified by list) from each file and writes them to the standard output.\n\n* [@article@cut Documentation](https://man7.org/linux/man-pages/man1/cut.1.html)\n* [@article@cut Cheat Sheet](https://bencane.com/2012/10/22/cheat-sheet-cutting-text-with-cut/)\n\nThe uniq utility reads the specified input\\_file comparing adjacent lines, and writes a copy of each unique input line to the output\\_file.\n\n* [@article@uniq Documentation](https://man7.org/linux/man-pages/man1/uniq.1.html)\n\n`cat` (concatenate) command is very frequently used in Linux. It reads data from the file and gives its content as output. 
It helps us to create, view, and concatenate files.\n\n* [@article@Cat Command with examples](https://www.tecmint.com/13-basic-cat-command-examples-in-linux/)\n* [@article@Options](https://en.wikipedia.org/wiki/Cat_\\(Unix\\))\n\n`echo` is a built-in command in Linux used to display lines of text/string that are passed as an argument. It is mostly used in shell scripts and batch files to output status text or `ENV` variables to the screen or a file.\n\n* [@article@Echo command with Examples](https://www.tecmint.com/echo-command-in-linux/)\n* [@video@Linux Crash Course - The echo Command](https://www.youtube.com/watch?v=S_ySzMHxMjw)\n\n`fmt` command is for formatting and optimizing contents in text files. It will be really useful when it comes to beautify large text files by setting uniform column width and spaces.\n\n* [@article@Fmt command with Examples](https://www.devopsroles.com/fmt-command-in-linux-with-example/)\n\nThe tr utility copies the standard input to the standard output with substitution or deletion of selected characters.\n\n* [@article@tr Documentation](https://linuxcommand.org/lc3_man_pages/tr1.html)\n* [@article@tr Cheat Sheet](https://linuxopsys.com/topics/tr-command-in-linux)\n\nThe nl utility reads lines from the named file or the standard input if the file argument is omitted, applies a configurable line numbering filter operation and writes the result to the standard output.\n\n* [@article@nl Documentation](https://man7.org/linux/man-pages/man1/nl.1.html)\n\nThe wc utility displays the number of lines, words, and bytes contained in each input file, or standard input (if no file is specified) to the standard output.", + "links": [ + { + "title": "wc Documentation", + "url": "https://linux.die.net/man/1/wc", + "type": "article" + }, + { + "title": "wc Cheat Sheet", + "url": "https://onecompiler.com/cheatsheets/wc", + "type": "article" + } + ] + }, + "syBIAL1mHbJLnTBoSxXI7": { + "title": "Bash", + "description": "", + "links": [ + { + "title": 
"Interactive Shell Scripting Tutorial", + "url": "https://www.learnshell.org/en/Welcome", + "type": "article" + }, + { + "title": "Explore top posts about Bash", + "url": "https://app.daily.dev/tags/bash?ref=roadmapsh", + "type": "article" + } + ] + }, + "z6IBekR8Xl-6f8WEb05Nw": { + "title": "Power Shell", + "description": "Windows PowerShell is a command-line shell and scripting language designed specifically for system administration. Its counterpart in Linux is called Bash Scripting. Built on the .NET Framework, Windows PowerShell enables IT professionals to control and automate the administration of the Windows operating system and applications that run in a Windows Server environment.", + "links": [ + { + "title": "PowerShell Documentation", + "url": "https://learn.microsoft.com/en-us/powershell/", + "type": "article" + }, + { + "title": "Explore top posts about PowerShell", + "url": "https://app.daily.dev/tags/powershell?ref=roadmapsh", + "type": "article" + } + ] + }, + "Jt8BmtLUH6fHT2pGKoJs3": { + "title": "Vim / Nano / Emacs", + "description": "Editors are tools that allow you to create or edit files on your file system.\n\nVim\n---\n\nVim is a highly configurable text editor built to make creating and changing any kind of text very efficient. 
It is included as \"vi\" with most UNIX systems and with Apple OS X.\n\nVim ships with `vimtutor` that is a tutor designed to describe enough of the Vim commands that you will be able to easily use Vim as an all-purpose editor.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Vim", + "url": "https://www.vim.org", + "type": "article" + }, + { + "title": "Vim help files", + "url": "https://vimhelp.org/", + "type": "article" + }, + { + "title": "Vim Tips Wiki", + "url": "https://vim.fandom.com/wiki/Vim_Tips_Wiki", + "type": "article" + }, + { + "title": "Vim Adventures", + "url": "https://vim-adventures.com/", + "type": "article" + }, + { + "title": "GNU Nano", + "url": "https://www.nano-editor.org/", + "type": "article" + }, + { + "title": "GNU Nano Manual", + "url": "https://www.nano-editor.org/dist/latest/nano.html", + "type": "article" + }, + { + "title": "PowerShell Documentation", + "url": "https://learn.microsoft.com/en-us/powershell/", + "type": "article" + }, + { + "title": "GNU Emacs", + "url": "https://www.gnu.org/software/emacs/", + "type": "article" + }, + { + "title": "GNU Emacs Documentation", + "url": "https://www.gnu.org/software/emacs/documentation.html", + "type": "article" + } + ] + }, + "LvhFmlxz5uIy7k_nzx2Bv": { + "title": "Version Control Systems", + "description": "Version control/source control systems allow developers to track and control changes to code over time. These services often include the ability to make atomic revisions to code, branch/fork off of specific points, and to compare versions of code. 
They are useful in determining the who, what, when, and why code changes were made.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Git", + "url": "https://git-scm.com/", + "type": "article" + }, + { + "title": "What is Version Control?", + "url": "https://www.atlassian.com/git/tutorials/what-is-version-control", + "type": "article" + } + ] + }, + "uyDm1SpOQdpHjq9zBAdck": { + "title": "Git", + "description": "[Git](https://git-scm.com/) is a free and open source distributed version control system designed to handle everything from small to very large projects with speed and efficiency.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Learn Git with Tutorials, News and Tips - Atlassian", + "url": "https://www.atlassian.com/git", + "type": "article" + }, + { + "title": "Git Cheat Sheet", + "url": "https://cs.fyi/guide/git-cheatsheet", + "type": "article" + }, + { + "title": "Explore top posts about Git", + "url": "https://app.daily.dev/tags/git?ref=roadmapsh", + "type": "article" + }, + { + "title": "Git & GitHub Crash Course For Beginners", + "url": "https://www.youtube.com/watch?v=SWYqp7iY_Tc", + "type": "video" + } + ] + }, + "h10BH3OybHcIN2iDTSGkn": { + "title": "VCS Hosting", + "description": "When working on a team, you often need a remote place to put your code so others can access it, create their own branches, and create or review pull requests. These services often include issue tracking, code review, and continuous integration features. 
A few popular choices are GitHub, GitLab, BitBucket, and AWS CodeCommit.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "GitHub", + "url": "https://github.com/features/", + "type": "opensource" + }, + { + "title": "GitLab", + "url": "https://about.gitlab.com/", + "type": "article" + }, + { + "title": "BitBucket", + "url": "https://bitbucket.org/product/guides/getting-started/overview", + "type": "article" + }, + { + "title": "How to choose the best source code repository", + "url": "https://bitbucket.org/product/code-repository", + "type": "article" + } + ] + }, + "ot9I_IHdnq2yAMffrSrbN": { + "title": "GitHub", + "description": "GitHub is a provider of Internet hosting for software development and version control using Git. It offers the distributed version control and source code management functionality of Git, plus its own features.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "GitHub Website", + "url": "https://github.com", + "type": "opensource" + }, + { + "title": "GitHub Documentation", + "url": "https://docs.github.com/en/get-started/quickstart", + "type": "article" + }, + { + "title": "How to Use Git in a Professional Dev Team", + "url": "https://ooloo.io/project/github-flow", + "type": "article" + }, + { + "title": "Explore top posts about GitHub", + "url": "https://app.daily.dev/tags/github?ref=roadmapsh", + "type": "article" + }, + { + "title": "What is GitHub?", + "url": "https://www.youtube.com/watch?v=w3jLJU7DT5E", + "type": "video" + }, + { + "title": "Git vs. 
GitHub: Whats the difference?", + "url": "https://www.youtube.com/watch?v=wpISo9TNjfU", + "type": "video" + }, + { + "title": "Git and GitHub for Beginners", + "url": "https://www.youtube.com/watch?v=RGOj5yH7evk", + "type": "video" + }, + { + "title": "Git and GitHub - CS50 Beyond 2019", + "url": "https://www.youtube.com/watch?v=eulnSXkhE7I", + "type": "video" + } + ] + }, + "oQIB0KE0BibjIYmxrpPZS": { + "title": "GitLab", + "description": "GitLab is a provider of internet hosting for software development and version control using Git. It offers the distributed version control and source code management functionality of Git, plus its own features.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "GitLab Website", + "url": "https://gitlab.com/", + "type": "opensource" + }, + { + "title": "GitLab Documentation", + "url": "https://docs.gitlab.com/", + "type": "article" + }, + { + "title": "Explore top posts about GitLab", + "url": "https://app.daily.dev/tags/gitlab?ref=roadmapsh", + "type": "article" + } + ] + }, + "Z7SsBWgluZWr9iWb2e9XO": { + "title": "Bitbucket", + "description": "Bitbucket is a Git based hosting and source code repository service that is Atlassian's alternative to other products like GitHub, GitLab etc\n\nBitbucket offers hosting options via Bitbucket Cloud (Atlassian's servers), Bitbucket Server (customer's on-premise) or Bitbucket Data Centre (number of servers in customers on-premise or cloud environment)\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Bitbucket Website", + "url": "https://bitbucket.org/product", + "type": "article" + }, + { + "title": "A brief overview of Bitbucket", + "url": "https://bitbucket.org/product/guides/getting-started/overview#a-brief-overview-of-bitbucket", + "type": "article" + }, + { + "title": "Getting started with Bitbucket", + "url": "https://bitbucket.org/product/guides/basics/bitbucket-interface", + "type": "article" + }, + { + "title": "Using Git 
with Bitbucket Cloud", + "url": "https://www.atlassian.com/git/tutorials/learn-git-with-bitbucket-cloud", + "type": "article" + }, + { + "title": "Explore top posts about Bitbucket", + "url": "https://app.daily.dev/tags/bitbucket?ref=roadmapsh", + "type": "article" + }, + { + "title": "Bitbucket tutorial | How to use Bitbucket Cloud", + "url": "https://www.youtube.com/watch?v=M44nEyd_5To", + "type": "video" + }, + { + "title": "Bitbucket Tutorial | Bitbucket for Beginners", + "url": "https://www.youtube.com/watch?v=i5T-DB8tb4A", + "type": "video" + } + ] + }, + "jCWrnQNgjHKyhzd9dwOHz": { + "title": "What is and how to setup X ?", + "description": "Learn how to setup:\n\n* Forward Proxy\n* Reverse Proxy\n* Load Balancer\n* Firewall\n* Caching Server\n* Web Server", + "links": [] + }, + "F93XnRj0BLswJkzyRggLS": { + "title": "Forward Proxy", + "description": "Forward Proxy, often called proxy server is a server that sits in front of a group of **client machines**. When those computers make requests to sites and services on the Internet, the proxy server intercepts those requests and then communicates with web servers on behalf of those clients, like a middleman.\n\n**Common Uses:**\n\n* To block access to certain content\n* To protect client identity online\n* To provide restricted internet to organizations\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is Forward Proxy?", + "url": "https://www.fortinet.com/resources/cyberglossary/proxy-server", + "type": "article" + }, + { + "title": "Forward vs Reverse Proxy", + "url": "https://oxylabs.io/blog/reverse-proxy-vs-forward-proxy", + "type": "article" + } + ] + }, + "f3tM2uo6LLSOmyeFfLc7h": { + "title": "Firewall", + "description": "Firewall is a **network security device** that monitors and filters incoming and outgoing network traffic based on an organization’s previously established security policies. 
It is a barrier that sits between a private internal network and the public Internet. A firewall’s main purpose is to allow non-threatening traffic in and to keep dangerous traffic out.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is Firewall?", + "url": "https://www.checkpoint.com/cyber-hub/network-security/what-is-firewall/", + "type": "article" + }, + { + "title": "Types of Firewall", + "url": "https://www.cisco.com/c/en_in/products/security/firewalls/what-is-a-firewall.html", + "type": "article" + }, + { + "title": "Why do we need Firewalls?", + "url": "https://www.tutorialspoint.com/what-is-a-firewall-and-why-do-you-need-one", + "type": "article" + }, + { + "title": "Explore top posts about Firewall", + "url": "https://app.daily.dev/tags/firewall?ref=roadmapsh", + "type": "article" + }, + { + "title": "Firewalls and Network Security - SimpliLearn", + "url": "https://www.youtube.com/watch?v=9GZlVOafYTg", + "type": "video" + } + ] + }, + "ukOrSeyK1ElOt9tTjCkfO": { + "title": "Nginx", + "description": "NGINX is a powerful web server and uses a non-threaded, event-driven architecture that enables it to outperform Apache if configured correctly. It can also do other important things, such as load balancing, HTTP caching, or be used as a reverse proxy.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Official Website", + "url": "https://nginx.org/", + "type": "article" + }, + { + "title": "Explore top posts about Nginx", + "url": "https://app.daily.dev/tags/nginx?ref=roadmapsh", + "type": "article" + }, + { + "title": "NGINX Explained in 100 Seconds", + "url": "https://www.youtube.com/watch?v=JKxlsvZXG7c", + "type": "video" + } + ] + }, + "dF3otkMMN09tgCzci8Jyv": { + "title": "Tomcat", + "description": "Tomcat is an open source implementation of the Jakarta Servlet, Jakarta Server Pages, Jakarta Expression Language, Jakarta WebSocket, Jakarta Annotations and Jakarta Authentication specifications. 
These specifications are part of the Jakarta EE platform.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Tomcat Website", + "url": "https://tomcat.apache.org/", + "type": "article" + }, + { + "title": "Official Documentation(Tomcat 10.0)", + "url": "https://tomcat.apache.org/tomcat-10.0-doc/index.html", + "type": "article" + }, + { + "title": "Apache Tomcat", + "url": "https://www.youtube.com/c/ApacheTomcatOfficial", + "type": "video" + } + ] + }, + "0_GMTcMeZv3A8dYkHRoW7": { + "title": "Apache", + "description": "Apache is a free, open-source HTTP server, available on many operating systems, but mainly used on Linux distributions. It is one of the most popular options for web developers, as it accounts for over 30% of all the websites, as estimated by W3Techs.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Apache Server Website", + "url": "https://httpd.apache.org/", + "type": "article" + }, + { + "title": "Explore top posts about Apache", + "url": "https://app.daily.dev/tags/apache?ref=roadmapsh", + "type": "article" + }, + { + "title": "What is Apache Web Server?", + "url": "https://www.youtube.com/watch?v=kaaenHXO4t4", + "type": "video" + } + ] + }, + "54UZNO2q8M5FiA_XbcU_D": { + "title": "Caddy", + "description": "Caddy is an open-source web server with automatic HTTPS written in Go. 
It is easy to configure and use, and it is a great choice for small to medium-sized projects.", + "links": [ + { + "title": "Caddy Website", + "url": "https://caddyserver.com/", + "type": "article" + } + ] + }, + "5iJOE1QxMvf8BQ_8ssiI8": { + "title": "IIS", + "description": "Internet Information Services (IIS) for Windows® Server is a flexible, secure and manageable Web server for hosting anything on the Web.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Official Website", + "url": "https://www.iis.net/", + "type": "article" + }, + { + "title": "Explore top posts about .NET", + "url": "https://app.daily.dev/tags/.net?ref=roadmapsh", + "type": "article" + }, + { + "title": "Learn Windows Web Server IIS", + "url": "https://www.youtube.com/watch?v=1VdxPWwtISA", + "type": "video" + } + ] + }, + "R4XSY4TSjU1M7cW66zUqJ": { + "title": "Caching Server", + "description": "A cache server is a **dedicated network server** or service acting as a server that saves Web pages or other Internet content locally. 
By placing previously requested information in temporary storage, or cache, a cache server both speeds up access to data and reduces demand on an enterprise's bandwidth.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is Caching?", + "url": "https://www.cloudflare.com/en-gb/learning/cdn/what-is-caching/", + "type": "article" + }, + { + "title": "What is Cache Server?", + "url": "https://networkencyclopedia.com/cache-server/", + "type": "article" + }, + { + "title": "Site Cache vs Browser Cache vs Server Cache", + "url": "https://wp-rocket.me/blog/different-types-of-caching/", + "type": "article" + } + ] + }, + "i8Sd9maB_BeFurULrHXNq": { + "title": "Load Balancer", + "description": "Load Balancer acts as the **traffic cop** sitting in front of your servers and routing client requests across all servers capable of fulfilling those requests in a manner that maximizes speed and capacity utilization and ensures that no one server is overworked. If one of the servers goes down, the load balancer redirects traffic to the remaining online servers.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is Load Balancing?", + "url": "https://www.nginx.com/resources/glossary/load-balancing/", + "type": "article" + }, + { + "title": "Load Balancing concepts and algorithms", + "url": "https://www.cloudflare.com/en-gb/learning/performance/what-is-load-balancing/", + "type": "article" + } + ] + }, + "eGF7iyigl57myx2ejpmNC": { + "title": "Reverse Proxy", + "description": "A Reverse Proxy server is a type of proxy server that typically sits behind the firewall in a private network and directs client requests to the appropriate backend server. It provides an additional level of security by hiding the server related details like `IP Address` to clients. 
It is also known as **server side proxy**.\n\n**Common Uses:**\n\n* Load balancing\n* Web acceleration\n* Security and anonymity\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is Reverse Proxy?", + "url": "https://www.cloudflare.com/en-gb/learning/cdn/glossary/reverse-proxy/", + "type": "article" + }, + { + "title": "NGINX documentation", + "url": "https://www.nginx.com/resources/glossary/reverse-proxy-server/", + "type": "article" + } + ] + }, + "CQhUflAcv1lhBnmDY0gaz": { + "title": "Containers", + "description": "Containers are a construct in which [cgroups](https://en.wikipedia.org/wiki/Cgroups), [namespaces](https://en.wikipedia.org/wiki/Linux_namespaces), and [chroot](https://en.wikipedia.org/wiki/Chroot) are used to fully encapsulate and isolate a process. This encapsulated process, called a container image, shares the kernel of the host with other containers, allowing containers to be significantly smaller and faster than virtual machines.\n\nThese images are designed for portability, allowing for full local testing of a static image, and easy deployment to a container management platform.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What are Containers?", + "url": "https://cloud.google.com/learn/what-are-containers", + "type": "article" + }, + { + "title": "What is a Container?", + "url": "https://www.docker.com/resources/what-container/", + "type": "article" + }, + { + "title": "Articles about Containers - The New Stack", + "url": "https://thenewstack.io/category/containers/", + "type": "article" + }, + { + "title": "Explore top posts about Containers", + "url": "https://app.daily.dev/tags/containers?ref=roadmapsh", + "type": "article" + }, + { + "title": "What are Containers?", + "url": "https://www.youtube.com/playlist?list=PLawsLZMfND4nz-WDBZIj8-nbzGFD4S9oz", + "type": "video" + } + ] + }, + "P0acFNZ413MSKElHqCxr3": { + "title": "Docker", + "description": "Docker is a platform for 
working with containerized applications. Among its features are a daemon and client for managing and interacting with containers, registries for storing images, and a desktop application to package all these features together.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Docker Documentation", + "url": "https://docs.docker.com/", + "type": "article" + }, + { + "title": "Explore top posts about Docker", + "url": "https://app.daily.dev/tags/docker?ref=roadmapsh", + "type": "article" + }, + { + "title": "Docker Tutorial", + "url": "https://www.youtube.com/watch?v=RqTEHSBrYFw", + "type": "video" + }, + { + "title": "Docker simplified in 55 seconds", + "url": "https://youtu.be/vP_4DlOH1G4", + "type": "video" + } + ] + }, + "qYRJYIZsmf-inMqKECRkI": { + "title": "LXC", + "description": "LXC is a well-known Linux container runtime that consists of tools, templates, and library and language bindings. It's pretty low level, very flexible and covers just about every containment feature supported by the upstream kernel.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "LXC Website", + "url": "https://linuxcontainers.org/", + "type": "article" + }, + { + "title": "LXC Documentation", + "url": "https://linuxcontainers.org/lxc/documentation/", + "type": "article" + }, + { + "title": "Getting started with LXC containers", + "url": "https://www.youtube.com/watch?v=CWmkSj_B-wo", + "type": "video" + } + ] + }, + "2Wd9SlWGg6QtxgiUVLyZL": { + "title": "Cloud Providers", + "description": "Cloud providers provide a layer of APIs to abstract infrastructure and provision it based on security and billing boundaries. The cloud runs on servers in data centers, but the abstractions cleverly give the appearance of interacting with a single “platform” or large application. 
The ability to quickly provision, configure, and secure resources with cloud providers has been key to both the tremendous success and complexity of modern DevOps.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Cloud service provider", + "url": "https://www.techtarget.com/searchitchannel/definition/cloud-service-provider-cloud-provider", + "type": "article" + }, + { + "title": "What are Cloud Providers?", + "url": "https://www.redhat.com/en/topics/cloud-computing/what-are-cloud-providers", + "type": "article" + }, + { + "title": "Explore top posts about Cloud", + "url": "https://app.daily.dev/tags/cloud?ref=roadmapsh", + "type": "article" + } + ] + }, + "1ieK6B_oqW8qOC6bdmiJe": { + "title": "AWS", + "description": "Amazon Web Services has been the market leading cloud computing platform since 2011, ahead of Azure and Google Cloud. AWS offers over 200 services with data centers located all over the globe.\n\nAWS service is an online platform that provides scalable and cost-effective cloud computing solutions. 
It is broadly adopted cloud platform that offers several on-demand operations like compute power, database storage, content delivery and so on.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "AWS Course for Beginners", + "url": "https://www.coursera.org/learn/aws-cloud-technical-essentials?specialization=aws-devops", + "type": "course" + }, + { + "title": "DevOps on AWS Course ", + "url": "https://www.coursera.org/specializations/aws-devops?#courses", + "type": "course" + }, + { + "title": "AWS Website", + "url": "https://aws.amazon.com/", + "type": "article" + }, + { + "title": "AWS Documentation", + "url": "https://docs.aws.amazon.com/", + "type": "article" + }, + { + "title": "AWS Cloud Essentials", + "url": "https://aws.amazon.com/getting-started/cloud-essentials/", + "type": "article" + }, + { + "title": "Overview of Amazon Web Services", + "url": "https://docs.aws.amazon.com/whitepapers/latest/aws-overview/introduction.html", + "type": "article" + }, + { + "title": "Sign up for AWS", + "url": "https://portal.aws.amazon.com/billing/signup", + "type": "article" + }, + { + "title": "How to learn AWS", + "url": "https://cs.fyi/guide/how-to-learn-aws/", + "type": "article" + }, + { + "title": "AWS Ramp Up Guide", + "url": "https://d1.awsstatic.com/training-and-certification/ramp-up_guides/Ramp-Up_Guide_CloudPractitioner.pdf", + "type": "article" + }, + { + "title": "Cloud Practitioner Essentials", + "url": "https://explore.skillbuilder.aws/learn/course/external/view/elearning/134/aws-cloud-practitioner-essentials", + "type": "article" + }, + { + "title": "AWS Guide by SimpliLearn", + "url": "https://www.simplilearn.com/tutorials/aws-tutorial/what-is-aws", + "type": "article" + }, + { + "title": "Explore top posts about AWS", + "url": "https://app.daily.dev/tags/aws?ref=roadmapsh", + "type": "article" + }, + { + "title": "AWS Tutorial for Beginners", + "url": "https://www.youtube.com/watch?v=k1RI5locZE4&t=129s", + "type": "video" + }, 
+ { + "title": "AWS Practitioner ", + "url": "https://youtu.be/SOTamWNgDKc", + "type": "video" + } + ] + }, + "ctor79Vd7EXDMdrLyUcu_": { + "title": "Azure", + "description": "Microsoft Azure is a cloud computing service operated by Microsoft. Azure currently provides more than 200 products and cloud services.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Azure Website", + "url": "https://azure.microsoft.com/en-us/", + "type": "article" + }, + { + "title": "Official Documentation", + "url": "https://docs.microsoft.com/en-us/azure/", + "type": "article" + }, + { + "title": "Azure Get Started Guide", + "url": "https://azure.microsoft.com/en-ca/get-started/#explore-azure", + "type": "article" + }, + { + "title": "Get to know Azure", + "url": "https://azure.microsoft.com/en-us/explore/", + "type": "article" + }, + { + "title": "Explore top posts about Azure", + "url": "https://app.daily.dev/tags/azure?ref=roadmapsh", + "type": "article" + } + ] + }, + "zYrOxFQkl3KSe67fh3smD": { + "title": "Google Cloud", + "description": "Google Cloud is Google's cloud computing service offering, providing over 150 products/services to choose from. It consists of a set of physical assets, such as computers and hard disk drives, and virtual resources, such as virtual machines(VMs), that are contained in Google's data centers. 
It runs on the same infrastructure that Google uses internally for its end-user products, such as Search, Gmail, Google Drive, and YouTube.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Coursera Complete Course for Google Cloud ", + "url": "https://www.coursera.org/professional-certificates/cloud-engineering-gcp#courses", + "type": "course" + }, + { + "title": "Google Cloud Website", + "url": "https://cloud.google.com/", + "type": "article" + }, + { + "title": "Official Documentation", + "url": "https://cloud.google.com/docs", + "type": "article" + }, + { + "title": "Google Cloud Get Started Guide", + "url": "https://cloud.google.com/docs/get-started/", + "type": "article" + }, + { + "title": "5 Tips to Become a Google Cloud Certified Professional Architect", + "url": "https://thenewstack.io/5-tips-to-become-a-google-cloud-certified-professional-architect/", + "type": "article" + }, + { + "title": "Explore top posts about Cloud", + "url": "https://app.daily.dev/tags/cloud?ref=roadmapsh", + "type": "article" + }, + { + "title": "Google Cloud by Edureka on YouTube", + "url": "https://www.youtube.com/watch?v=IUU6OR8yHCc", + "type": "video" + } + ] + }, + "-h-kNVDNzZYnQAR_4lfXc": { + "title": "Digital Ocean", + "description": "DigitalOcean is a cloud computing service offering products and services in Compute, Storage, Managed Databases, Containers & Images and Networking.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "DigitalOcean Website", + "url": "https://www.digitalocean.com/", + "type": "article" + }, + { + "title": "Official Documentation", + "url": "https://docs.digitalocean.com/products/", + "type": "article" + }, + { + "title": "DigitalOcean Get Started Guide", + "url": "https://docs.digitalocean.com/products/getting-started/", + "type": "article" + }, + { + "title": "Explore top posts about DigitalOcean", + "url": "https://app.daily.dev/tags/digitalocean?ref=roadmapsh", + "type": "article" + }
+ ] + }, + "YUJf-6ccHvYjL_RzufQ-G": { + "title": "Alibaba Cloud", + "description": "Alibaba Cloud is a cloud computing service, offering over 100 products and services with data centers in 24 regions and 74 availability zones around the world.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Alibaba Cloud Website", + "url": "https://www.alibabacloud.com/", + "type": "article" + }, + { + "title": "Official Documentation", + "url": "https://www.alibabacloud.com/help/en/", + "type": "article" + }, + { + "title": "Alibaba Cloud Getting Started Guide", + "url": "https://www.alibabacloud.com/getting-started", + "type": "article" + }, + { + "title": "Explore top posts about Cloud", + "url": "https://app.daily.dev/tags/cloud?ref=roadmapsh", + "type": "article" + } + ] + }, + "I327qPYGMcdayRR5WT0Ek": { + "title": "Hetzner", + "description": "Hetzner is a German hosting provider that offers a wide range of services, including dedicated servers, cloud servers, and colocation. They are known for their high-quality hardware, competitive pricing, and excellent customer support.\n\nVisit the following resources to learn more about Hetzner:", + "links": [ + { + "title": "Hetzner Website", + "url": "https://www.hetzner.com/", + "type": "article" + } + ] + }, + "FaPf567JGRAg1MBlFj9Tk": { + "title": "Heroku ", + "description": "Heroku is a cloud platform as a service subsidiary of Salesforce. 
Heroku officially supports Node.js, Ruby, Java, PHP, Python, Go, Scala and Clojure, along with any language that runs on Linux via a third-party build pack.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Heroku Website", + "url": "https://www.heroku.com/", + "type": "article" + }, + { + "title": "Official Documentation", + "url": "https://devcenter.heroku.com/", + "type": "article" + }, + { + "title": "Heroku Get Started Guide", + "url": "https://devcenter.heroku.com/start", + "type": "article" + }, + { + "title": "Explore top posts about Heroku", + "url": "https://app.daily.dev/tags/heroku?ref=roadmapsh", + "type": "article" + } + ] + }, + "eJZdjheptmiwKsVokt7Io": { + "title": "Contabo", + "description": "Contabo offers a wide range of hosting services, from VPS to dedicated servers. They are known for their low prices and high performance.\n\nVisit the following link to learn more about Contabo:", + "links": [ + { + "title": "Contabo - Official Website", + "url": "https://contabo.com/", + "type": "article" + } + ] + }, + "RDLmML_HS2c8J4D_U_KYe": { + "title": "FTP / SFTP", + "description": "File Transfer Protocol(FTP) is `TCP/IP` based application layer communication protocol that helps transferring files between local and remote file systems over the network. To transfer a file, 2 TCP connections(control connection and data connection) are used in parallel.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "FTP vs SFTP vs FTPS", + "url": "https://www.fortinet.com/resources/cyberglossary/file-transfer-protocol-ftp-meaning", + "type": "article" + } + ] + }, + "Vu955vdsYerCG8G6suqml": { + "title": "DNS", + "description": "DNS (**D**omain **N**ame **S**ystem) is the phonebook of the Internet. Humans access information online through domain names, like [nytimes.com](http://nytimes.com) or [espn.com](http://espn.com). Web browsers interact through Internet Protocol (IP) addresses. 
DNS translates domain names to IP addresses so browsers can load Internet resources.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is DNS?", + "url": "https://www.cloudflare.com/en-gb/learning/dns/what-is-dns/", + "type": "article" + }, + { + "title": "How DNS works (comic)", + "url": "https://howdns.works/", + "type": "article" + }, + { + "title": "Explore top posts about DNS", + "url": "https://app.daily.dev/tags/dns?ref=roadmapsh", + "type": "article" + }, + { + "title": "DNS and How does it Work?", + "url": "https://www.youtube.com/watch?v=Wj0od2ag5sk", + "type": "video" + }, + { + "title": "DNS Records", + "url": "https://www.youtube.com/watch?v=7lxgpKh_fRY", + "type": "video" + } + ] + }, + "ke-8MeuLx7AS2XjSsPhxe": { + "title": "HTTP", + "description": "HTTP is the `TCP/IP` based application layer communication protocol which standardizes how the client and server communicate with each other. It defines how the content is requested and transmitted across the internet.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Everything you need to know about HTTP", + "url": "https://cs.fyi/guide/http-in-depth", + "type": "article" + }, + { + "title": "What is HTTP?", + "url": "https://www.cloudflare.com/en-gb/learning/ddos/glossary/hypertext-transfer-protocol-http/", + "type": "article" + }, + { + "title": "An overview of HTTP", + "url": "https://developer.mozilla.org/en-US/docs/Web/HTTP/Overview", + "type": "article" + }, + { + "title": "HTTP/3 From A To Z: Core Concepts", + "url": "https://www.smashingmagazine.com/2021/08/http3-core-concepts-part1/", + "type": "article" + }, + { + "title": "HTTP Crash Course & Exploration", + "url": "https://www.youtube.com/watch?v=iYM2zFP3Zn0", + "type": "video" + } + ] + }, + "AJO3jtHvIICj8YKaSXl0U": { + "title": "HTTPS", + "description": "HTTPS (**H**ypertext **T**ransfer **P**rotocol **S**ecure) is the secure version of HTTP, which is the primary protocol used
to send data between a web browser and a website.\n\n`HTTPS = HTTP + SSL/TLS`\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is HTTPS?", + "url": "https://www.cloudflare.com/en-gb/learning/ssl/what-is-https/", + "type": "article" + }, + { + "title": "Why HTTPS Matters", + "url": "https://developers.google.com/web/fundamentals/security/encrypt-in-transit/why-https", + "type": "article" + }, + { + "title": "Enabling HTTPS on Your Servers", + "url": "https://developers.google.com/web/fundamentals/security/encrypt-in-transit/enable-https", + "type": "article" + }, + { + "title": "How HTTPS works (comic)", + "url": "https://howhttps.works/", + "type": "article" + } + ] + }, + "0o6ejhfpmO4S8A6djVWva": { + "title": "SSL / TLS", + "description": "Secure Sockets Layer (SSL) and Transport Layer Security (TLS) are cryptographic protocols used to provide security in internet communications. These protocols encrypt the data that is transmitted over the web, so anyone who tries to intercept packets will not be able to interpret the data. One difference that is important to know is that SSL is now deprecated due to security flaws, and most modern web browsers no longer support it. 
But TLS is still secure and widely supported, so preferably use TLS.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Cloudflare - What is SSL?", + "url": "https://www.cloudflare.com/learning/ssl/what-is-ssl/", + "type": "article" + }, + { + "title": "Cloudflare - What is TLS?", + "url": "https://www.cloudflare.com/en-gb/learning/ssl/transport-layer-security-tls/", + "type": "article" + }, + { + "title": "Wikipedia - SSL/TLS", + "url": "https://en.wikipedia.org/wiki/Transport_Layer_Security", + "type": "article" + }, + { + "title": "SSH vs SSL vs TLS", + "url": "https://www.youtube.com/watch?v=k3rFFLmQCuY", + "type": "video" + } + ] + }, + "wcIRMLVm3SdEJWF9RPfn7": { + "title": "SSH", + "description": "The SSH (**S**ecure **Sh**ell) is a network communication protocol that enables two computers to communicate over an insecure network. It is a secure alternative to the non-protected login protocols (such as telnet, rlogin) and insecure file transfer methods (such as FTP). It is mostly used for secure Remote Login and File Transfer.\n\n`SFTP = FTP + SSH`\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "SSH Intro", + "url": "https://www.baeldung.com/cs/ssh-intro", + "type": "article" + }, + { + "title": "What is SSH?", + "url": "https://www.ssh.com/academy/ssh/protocol", + "type": "article" + }, + { + "title": "SFTP using SSH", + "url": "https://www.goanywhere.com/blog/how-sftp-works", + "type": "article" + }, + { + "title": "Explore top posts about SSH", + "url": "https://app.daily.dev/tags/ssh?ref=roadmapsh", + "type": "article" + } + ] + }, + "E-lSLGzgOPrz-25ER2Hk7": { + "title": "White / Grey Listing", + "description": "White listing is a process of adding an email to an approved sender list, so emails from that sender are never moved to the spam folder. This tells an email server to move messages to the inbox directly.\n\n`Greylisting` is a method of protecting e-mail users against spam. 
A mail transfer agent (MTA) using greylisting will \"temporarily reject\" any email from a sender it does not recognize. If the mail is legitimate, the originating server will try again after a delay, and the email will be accepted if sufficient time has elapsed.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Basic Introduction to whitelisting", + "url": "https://www.cblohm.com/blog/education-marketing-trends/what-is-email-whitelisting/", + "type": "article" + }, + { + "title": "Detailed Introduction to greylisting", + "url": "https://en.wikipedia.org/wiki/Greylisting_(email)", + "type": "article" + } + ] + }, + "zJy9dOynWgLTDKI1iBluG": { + "title": "SMTP", + "description": "Email is emerging as one of the most valuable services on the internet today. Most internet systems use SMTP as a method to transfer mail from one user to another. SMTP is a push protocol and is used to send the mail whereas POP (post office protocol) or IMAP (internet message access protocol) are used to retrieve those emails at the receiver’s side.\n\nSMTP is an application layer protocol. The client who wants to send the mail opens a TCP connection to the SMTP server and then sends the mail across the connection. The SMTP server is an always-on listening mode. As soon as it listens for a TCP connection from any client, the SMTP process initiates a connection through port 25. 
After successfully establishing a TCP connection the client process sends the mail instantly.", + "links": [] + }, + "5vUKHuItQfkarp7LtACvX": { + "title": "DMARC", + "description": "DMARC stands for Domain-based Message Authentication, Reporting, and Conformance, is an authentication method on the email that is built to protect domain email from invalid email addresses or commonly known as email spoofing, email attacks, phishing, scams, and other threat activities.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "DMARC Official Website", + "url": "https://dmarc.org/", + "type": "article" + }, + { + "title": "Explore top posts about Security", + "url": "https://app.daily.dev/tags/security?ref=roadmapsh", + "type": "article" + } + ] + }, + "WMuXqa4b5wyRuYAQKQJRj": { + "title": "IMAPS", + "description": "IMAP (port 143) or IMAPS (port 993) allows you to access your email wherever you are, from any device. When you read an email message using IMAP, you aren't actually downloading or storing it on your computer; instead, you're reading it from the email service. As a result, you can check your email from different devices, anywhere in the world: your phone, a computer, a friend's computer.\n\nIMAP only downloads a message when you click on it, and attachments aren't automatically downloaded. This way you're able to check your messages a lot more quickly than POP.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Wikipedia: Internet Message Access Protocol", + "url": "https://en.wikipedia.org/wiki/Internet_Message_Access_Protocol", + "type": "article" + }, + { + "title": "What is IMAP and How To Use It | Email Tutorial", + "url": "https://www.youtube.com/watch?v=cfXabGOA2s8", + "type": "video" + } + ] + }, + "ewcJfnDFKXN8I5TLpXEaB": { + "title": "SPF", + "description": "Sender Policy Framework (SPF) is used to authenticate the sender of an email. 
With an SPF record in place, Internet Service Providers can verify that a mail server is authorized to send email for a specific domain. An SPF record is a DNS TXT record containing a list of the IP addresses that are allowed to send email on behalf of your domain.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is a DNS SPF record?", + "url": "https://www.cloudflare.com/learning/dns/dns-records/dns-spf-record/", + "type": "article" + }, + { + "title": "SPF Overview", + "url": "https://www.youtube.com/watch?v=WFPYrAr1boU", + "type": "video" + } + ] + }, + "fzO6xVTBxliu24f3W5zaU": { + "title": "POP3S", + "description": "POP3 (port 110) or POP3s (port 995) stands for The Post Office Protocol. It's an Internet standard protocol used by local email software clients to retrieve emails from a remote mail server over a TCP/IP connection.\n\nEmail servers hosted by Internet service providers also use POP3 to receive and hold emails intended for their subscribers. 
Periodically, these subscribers will use email client software to check their mailbox on the remote server and download any emails addressed to them.\n\nOnce the email client has downloaded the emails, they are usually deleted from the server, although some email clients allow users to specify that mails be copied or saved on the server for a period of time.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is POP3?", + "url": "https://www.techtarget.com/whatis/definition/POP3-Post-Office-Protocol-3", + "type": "article" + } + ] + }, + "RYCD78msIR2BPJoIP71aj": { + "title": "Domain Keys", + "description": "DomainKeys Identified Mail (DKIM) is an email authentication method designed to detect forged sender addresses in email (email spoofing), a technique often used in phishing and email spam.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "DomainKeys Identified Mail", + "url": "https://www.brainkart.com/article/DomainKeys-Identified-Mail_8493/", + "type": "article" + } + ] + }, + "QZ7bkY-MaEgxYoPDP3nma": { + "title": "OSI Model", + "description": "Open Systems Interconnection (OSI) model is a **conceptual** model consists of 7 layers, that was proposed to standardize the communication between devices over the network. 
It was the first standard model for network communications, adopted by all major computer and telecommunication companies in the early 1980s.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is OSI Model?", + "url": "https://www.cloudflare.com/en-gb/learning/ddos/glossary/open-systems-interconnection-model-osi/", + "type": "article" + }, + { + "title": "OSI Model", + "url": "https://www.youtube.com/watch?v=dV8mjZd1OtU", + "type": "video" + }, + { + "title": "OSI vs TCP/IP Model", + "url": "https://www.youtube.com/watch?v=F5rni9fr1yE", + "type": "video" + } + ] + }, + "w5d24Sf8GDkLDLGUPxzS9": { + "title": "Networking & Protocols", + "description": "As a DevOps engineer you will need to understand the basics of networking protocols, how they work, and how they are used in the real world. To get you started, you should learn about, [TCP/IP](https://en.wikipedia.org/wiki/Internet_protocol_suite), [HTTP](https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol), [HTTPS](https://en.wikipedia.org/wiki/HTTPS), [FTP](https://en.wikipedia.org/wiki/File_Transfer_Protocol), [SSH](https://en.wikipedia.org/wiki/Secure_Shell), [SMTP](https://en.wikipedia.org/wiki/Simple_Mail_Transfer_Protocol), [DNS](https://en.wikipedia.org/wiki/Domain_Name_System), [DHCP](https://en.wikipedia.org/wiki/Dynamic_Host_Configuration_Protocol), [NTP](https://en.wikipedia.org/wiki/Network_Time_Protocol).\n\nHere are some of the resources to learn about SSH:", + "links": [ + { + "title": "Cloudflare - What is SSL?", + "url": "https://www.cloudflare.com/learning/ssl/what-is-ssl/", + "type": "article" + }, + { + "title": "Cloudflare - What is TLS?", + "url": "https://www.cloudflare.com/en-gb/learning/ssl/transport-layer-security-tls/", + "type": "article" + }, + { + "title": "Everything you need to know about HTTP", + "url": "https://cs.fyi/guide/http-in-depth", + "type": "article" + }, + { + "title": "What is HTTP?", + "url": 
"https://www.cloudflare.com/en-gb/learning/ddos/glossary/hypertext-transfer-protocol-http/", + "type": "article" + }, + { + "title": "What is HTTPS?", + "url": "https://www.cloudflare.com/en-gb/learning/ssl/what-is-https/", + "type": "article" + }, + { + "title": "What is DNS?", + "url": "https://www.cloudflare.com/en-gb/learning/dns/what-is-dns/", + "type": "article" + }, + { + "title": "OpenSSH Full Guide", + "url": "https://www.youtube.com/watch?v=YS5Zh7KExvE", + "type": "video" + }, + { + "title": "SSH vs SSL vs TLS", + "url": "https://www.youtube.com/watch?v=k3rFFLmQCuY", + "type": "video" + }, + { + "title": "DNS and How does it Work?", + "url": "https://www.youtube.com/watch?v=Wj0od2ag5sk", + "type": "video" + }, + { + "title": "DNS Records", + "url": "https://www.youtube.com/watch?v=7lxgpKh_fRY", + "type": "video" + } + ] + }, + "9p_ufPj6QH9gHbWBQUmGw": { + "title": "Serverless", + "description": "", + "links": [ + { + "title": "What is Serverless?", + "url": "https://www.redhat.com/en/topics/cloud-native-apps/what-is-serverless", + "type": "article" + }, + { + "title": "Explore top posts about Serverless", + "url": "https://app.daily.dev/tags/serverless?ref=roadmapsh", + "type": "article" + } + ] + }, + "LZDRgDxEZ3klp2PrrJFBX": { + "title": "Vercel", + "description": "Vercel is a provider of front-end cloud that provides the infrastructure to build, scale, and secure a faster, more personalized web. In other words, it is a cloud platform designed to simplify the deployment process for web applications, particularly those built with modern frameworks like React, Next, etc. where various projects can be deployed by connecting the GitHub repository in Vercel we can deploy the selected GitHub branch to the Vercel domains. Simultaneously, it provides custom domains to deploy code on live servers. 
These servers contain the `vercel.app` as the suffix in the domain.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Official Website", + "url": "https://vercel.com/", + "type": "article" + }, + { + "title": "Official Docs", + "url": "https://vercel.com/docs", + "type": "article" + }, + { + "title": "Explore top posts about Vercel", + "url": "https://app.daily.dev/tags/vercel?ref=roadmapsh", + "type": "article" + }, + { + "title": "Vercel: Product Walkthrough ", + "url": "https://www.youtube.com/watch?v=sPmat30SE4k", + "type": "video" + } + ] + }, + "l8VAewSEXzoyqYFhoplJj": { + "title": "Cloudflare", + "description": "", + "links": [ + { + "title": "Cloudflare Documentation", + "url": "https://developers.cloudflare.com/", + "type": "article" + }, + { + "title": "Explore top posts about Cloudflare", + "url": "https://app.daily.dev/tags/cloudflare?ref=roadmapsh", + "type": "article" + } + ] + }, + "mlrlf2McMI7IBhyEdq0Nf": { + "title": "Azure Functions", + "description": "", + "links": [ + { + "title": "Azure Functions Overview", + "url": "https://learn.microsoft.com/en-us/azure/azure-functions/functions-overview", + "type": "article" + }, + { + "title": "Explore top posts about Azure", + "url": "https://app.daily.dev/tags/azure?ref=roadmapsh", + "type": "article" + } + ] + }, + "UfQrIJ-uMNJt9H_VM_Q5q": { + "title": "AWS Lambda", + "description": "", + "links": [ + { + "title": "AWS Lambda Introduction", + "url": "https://docs.aws.amazon.com/lambda/latest/operatorguide/intro.html", + "type": "article" + }, + { + "title": "Explore top posts about AWS", + "url": "https://app.daily.dev/tags/aws?ref=roadmapsh", + "type": "article" + } + ] + }, + "hCKODV2b_l2uPit0YeP1M": { + "title": "Netlify", + "description": "Netlify Functions are serverless functions that allow developers to run server-side code in a JAMstack environment without managing servers. They are built on AWS Lambda and automatically deploy alongside your Netlify site. 
These functions can handle tasks like API requests, form submissions, and database operations, enabling dynamic functionality in static sites. They support various languages including JavaScript, TypeScript, and Go. Netlify Functions integrate seamlessly with Netlify's deployment pipeline, offering easy development, testing, and production deployment.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Official Documentation", + "url": "https://docs.netlify.com/platform/primitives/#functions", + "type": "article" + }, + { + "title": "Introducing Netlify Functions 2.0", + "url": "https://www.netlify.com/blog/introducing-netlify-functions-2-0/", + "type": "article" + } + ] + }, + "1oYvpFG8LKT1JD6a_9J0m": { + "title": "Provisioning", + "description": "Tools in this category are used to provision infrastructure in cloud providers. This includes DNS, networking, security policies, servers, containers, and a whole host of vendor-specific constructs. In this category, the use of cloud provider-agnostic tooling is strongly encouraged. These skills can be applied across most cloud providers, and the more specific domain-specific languages tend to have less reach.", + "links": [] + }, + "XA__697KgofsH28coQ-ma": { + "title": "AWS CDK", + "description": "The AWS Cloud Development Kit (AWS CDK) is an open-source software development framework used to provision cloud infrastructure resources in a safe, repeatable manner through AWS CloudFormation. 
AWS CDK offers the flexibility to write infrastructure as code in popular languages like JavaScript, TypeScript, Python, Java, C#, and Go.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "AWS CDK Examples", + "url": "https://github.com/aws-samples/aws-cdk-examples", + "type": "opensource" + }, + { + "title": "AWS CDK Website", + "url": "https://aws.amazon.com/cdk/", + "type": "article" + }, + { + "title": "Official Documentation", + "url": "https://docs.aws.amazon.com/cdk/index.html", + "type": "article" + }, + { + "title": "What is the AWS CDK?", + "url": "https://docs.aws.amazon.com/cdk/v2/guide/home.html", + "type": "article" + }, + { + "title": "AWS SDK Getting Started Guide", + "url": "https://docs.aws.amazon.com/cdk/v2/guide/getting_started.html", + "type": "article" + }, + { + "title": "Explore top posts about AWS", + "url": "https://app.daily.dev/tags/aws?ref=roadmapsh", + "type": "article" + } + ] + }, + "TgBb4aL_9UkyU36CN4qvS": { + "title": "CloudFormation", + "description": "CloudFormation is the AWS service that helps to define collections of AWS resources. 
CloudFormation lets you model, provision, and manage AWS and third-party resources by treating infrastructure as code.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "AWS CloudFormation Website", + "url": "https://aws.amazon.com/cloudformation/", + "type": "article" + }, + { + "title": "Official Documentation", + "url": "https://docs.aws.amazon.com/cloudformation/index.html", + "type": "article" + }, + { + "title": "AWS CloudFormation Getting Started Guide", + "url": "https://aws.amazon.com/cloudformation/getting-started/", + "type": "article" + }, + { + "title": "CloudFormation Sample Templates", + "url": "https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-sample-templates.html", + "type": "article" + }, + { + "title": "Explore top posts about AWS CloudFormation", + "url": "https://app.daily.dev/tags/aws-cloudformation?ref=roadmapsh", + "type": "article" + } + ] + }, + "O0xZ3dy2zIDbOetVrgna6": { + "title": "Pulumi", + "description": "Pulumi is an open source Infrastructure as Code tool that can be written in TypeScript, JavaScript, Python, Go, .NET, Java, and YAML to model cloud infrastructure.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Pulumi Website", + "url": "https://www.pulumi.com/", + "type": "article" + }, + { + "title": "Official Documentation", + "url": "https://www.pulumi.com/docs/", + "type": "article" + }, + { + "title": "Pulumi Getting Started Guide", + "url": "https://www.pulumi.com/docs/get-started/", + "type": "article" + }, + { + "title": "Explore top posts about Pulumi", + "url": "https://app.daily.dev/tags/pulumi?ref=roadmapsh", + "type": "article" + } + ] + }, + "nUBGf1rp9GK_pbagWCP9g": { + "title": "Terraform", + "description": "Terraform is an extremely popular open source Infrastructure as Code (IaC) tool that can be used with many different cloud and service provider APIs. 
Terraform focuses on an immutable approach to infrastructure, with a terraform state file central to tracking the status of your real world infrastructure.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Terraform Website", + "url": "https://www.terraform.io/", + "type": "article" + }, + { + "title": "Terraform Documentation", + "url": "https://www.terraform.io/docs", + "type": "article" + }, + { + "title": "Terraform Tutorials", + "url": "https://learn.hashicorp.com/terraform", + "type": "article" + }, + { + "title": "Terraform CDK Website", + "url": "https://www.terraform.io/cdktf", + "type": "article" + }, + { + "title": "What is the CDKTF?", + "url": "https://www.terraform.io/cdktf/concepts/cdktf-architecture", + "type": "article" + }, + { + "title": "CDKTF Getting Started Guide", + "url": "https://learn.hashicorp.com/tutorials/terraform/cdktf-install?in=terraform/cdktf", + "type": "article" + }, + { + "title": "CDKTF Examples", + "url": "https://www.terraform.io/cdktf/examples", + "type": "article" + }, + { + "title": "How to Scale Your Terraform Infrastructure", + "url": "https://thenewstack.io/how-to-scale-your-terraform-infrastructure/", + "type": "article" + }, + { + "title": "Explore top posts about Terraform", + "url": "https://app.daily.dev/tags/terraform?ref=roadmapsh", + "type": "article" + }, + { + "title": "Intro to Terraform Video", + "url": "https://www.youtube.com/watch?v=h970ZBgKINg&ab_channel=HashiCorp", + "type": "video" + } + ] + }, + "V9sOxlNOyRp0Mghl7zudv": { + "title": "Configuration Management", + "description": "Configuration management is a systems engineering process for establishing consistency of a product’s attributes throughout its life. In the technology world, configuration management is an IT management process that tracks individual configuration items of an IT system. IT systems are composed of IT assets that vary in granularity. 
An IT asset may represent a piece of software, or a server, or a cluster of servers. The following focuses on configuration management as it directly applies to IT software assets and software asset CI/CD.\n\nSoftware configuration management is a systems engineering process that tracks and monitors changes to a software systems configuration metadata. In software development, configuration management is commonly used alongside version control and CI/CD infrastructure. This post focuses on its modern application and use in agile CI/CD software environments.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is configuration management?", + "url": "https://www.atlassian.com/microservices/microservices-architecture/configuration-management", + "type": "article" + } + ] + }, + "h9vVPOmdUSeEGVQQaSTH5": { + "title": "Ansible", + "description": "Ansible is an open-source configuration management, application deployment and provisioning tool that uses its own declarative language in YAML. 
Ansible is agentless, meaning you only need remote connections via SSH or Windows Remote Management via PowerShell in order to function.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Ansible Website", + "url": "https://www.ansible.com/", + "type": "article" + }, + { + "title": "Official Documentation", + "url": "https://docs.ansible.com/", + "type": "article" + }, + { + "title": "Ansible Getting Started Guide", + "url": "https://www.ansible.com/resources/get-started", + "type": "article" + }, + { + "title": "Explore top posts about Ansible", + "url": "https://app.daily.dev/tags/ansible?ref=roadmapsh", + "type": "article" + }, + { + "title": "Ansible Full Course for Beginners", + "url": "https://www.youtube.com/watch?v=9Ua2b06oAr4", + "type": "video" + } + ] + }, + "kv508kxzUj_CjZRb-TeRv": { + "title": "Chef", + "description": "Emerging in 2009, [Chef](https://en.wikipedia.org/wiki/Progress_Chef) (now known as Progress Chef) is one of the earliest configuration management tools to gain popularity. Chef \"Recipes\" are written in Ruby, in a primarily [declarative](https://en.wikipedia.org/wiki/Declarative_programming) style.\n\nChef requires that a client is installed on a server being managed. This client polls a Chef-Server regularly, to determine what its configuration should be. Chef-Solo is also available, a version of Chef that allows provisioning of a single node by running chef locally.\n\nA key tenet of Chef recipe design is the concept of [idempotence](https://en.wikipedia.org/wiki/Idempotence). All Chef recipes should be runnable multiple times and produce the same result - this is especially necessary in cases where the client/server model listed above is in use. 
This pattern of configuration management is highly influential for future declarative tools like Terraform and Cloud Formation.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Chef Website", + "url": "https://www.chef.io/products/chef-infra", + "type": "article" + }, + { + "title": "Chef Tutorial", + "url": "https://www.tutorialspoint.com/chef/index.htm", + "type": "article" + }, + { + "title": "Explore top posts about Chef", + "url": "https://app.daily.dev/tags/chef?ref=roadmapsh", + "type": "article" + } + ] + }, + "yP1y8U3eblpzbaLiCGliU": { + "title": "Puppet", + "description": "Puppet, an automated administrative engine for your Linux, Unix, and Windows systems, performs administrative tasks (such as adding users, installing packages, and updating server configurations) based on a centralized specification.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Puppet Website", + "url": "https://puppet.com/", + "type": "article" + }, + { + "title": "Official Documentation", + "url": "https://puppet.com/docs", + "type": "article" + }, + { + "title": "Introduction to Puppet", + "url": "https://puppet.com/docs/puppet/6/puppet_overview.html", + "type": "article" + }, + { + "title": "Explore top posts about Puppet", + "url": "https://app.daily.dev/tags/puppet?ref=roadmapsh", + "type": "article" + } + ] + }, + "aQJaouIaxIJChM-40M3HQ": { + "title": "CI / CD Tools", + "description": "CI/CD is a method to frequently deliver apps to customers by introducing automation into the stages of app development. The main concepts attributed to CI/CD are continuous integration, continuous delivery, and continuous deployment. CI/CD is a solution to the problems integrating new code can cause for development and operations teams.\n\nSpecifically, CI/CD introduces ongoing automation and continuous monitoring throughout the lifecycle of apps, from integration and testing phases to delivery and deployment. 
Taken together, these connected practices are often referred to as a \"CI/CD pipeline\" and are supported by development and operations teams working together in an agile way with either a DevOps or site reliability engineering (SRE) approach.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "CI vs CD", + "url": "https://www.atlassian.com/continuous-delivery/principles/continuous-integration-vs-delivery-vs-deployment", + "type": "article" + }, + { + "title": "What is CI/CD?", + "url": "https://www.redhat.com/en/topics/devops/what-is-ci-cd", + "type": "article" + }, + { + "title": "CI/CD Pipeline: A Gentle Introduction", + "url": "https://semaphoreci.com/blog/cicd-pipeline", + "type": "article" + }, + { + "title": "Explore top posts about CI/CD", + "url": "https://app.daily.dev/tags/cicd?ref=roadmapsh", + "type": "article" + }, + { + "title": "DevOps CI/CD Explained in 100 Seconds", + "url": "https://www.youtube.com/watch?v=scEDHsr3APg", + "type": "video" + } + ] + }, + "JnWVCS1HbAyfCJzGt-WOH": { + "title": "GitHub Actions", + "description": "Automate, customize, and execute your software development workflows right in your repository with GitHub Actions. 
You can discover, create, and share actions to perform any job you'd like, including CI/CD, and combine actions in a completely customized workflow.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "GitHub Actions Documentation", + "url": "https://docs.github.com/en/actions", + "type": "article" + }, + { + "title": "Learn GitHub Actions", + "url": "https://docs.github.com/en/actions/learn-github-actions", + "type": "article" + }, + { + "title": "Explore top posts about GitHub", + "url": "https://app.daily.dev/tags/github?ref=roadmapsh", + "type": "article" + }, + { + "title": "GitHub Actions - Supercharge your GitHub Flow", + "url": "https://youtu.be/cP0I9w2coGU", + "type": "video" + }, + { + "title": "Automate your Workflow with GitHub Actions", + "url": "https://www.youtube.com/watch?v=nyKZTKQS_EQ", + "type": "video" + } + ] + }, + "2KjSLLVTvl2G2KValw7S7": { + "title": "GitLab CI", + "description": "GitLab offers a CI/CD service that can be used as a SaaS offering or self-managed using your own resources. 
You can use GitLab CI with any GitLab hosted repository, or any BitBucket Cloud or GitHub repository in the GitLab Premium self-managed, GitLab Premium SaaS and higher tiers.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "GitLab Website", + "url": "https://gitlab.com/", + "type": "opensource" + }, + { + "title": "GitLab Documentation", + "url": "https://docs.gitlab.com/", + "type": "article" + }, + { + "title": "Get Started with GitLab CI", + "url": "https://docs.gitlab.com/ee/ci/quick_start/", + "type": "article" + }, + { + "title": "Learn GitLab Tutorials", + "url": "https://docs.gitlab.com/ee/tutorials/", + "type": "article" + }, + { + "title": "GitLab CI/CD Examples", + "url": "https://docs.gitlab.com/ee/ci/examples/", + "type": "article" + }, + { + "title": "Explore top posts about GitLab", + "url": "https://app.daily.dev/tags/gitlab?ref=roadmapsh", + "type": "article" + } + ] + }, + "dUapFp3f0Rum-rf_Vk_b-": { + "title": "Jenkins", + "description": "Jenkins is an open-source CI/CD automation server. Jenkins is primarily used for building projects, running tests, static code analysis and deployments.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Jenkins Website", + "url": "https://www.jenkins.io/", + "type": "article" + }, + { + "title": "Official Jenkins Handbook", + "url": "https://www.jenkins.io/doc/book/", + "type": "article" + }, + { + "title": "Jenkins Getting Started Guide", + "url": "https://www.jenkins.io/doc/pipeline/tour/getting-started/", + "type": "article" + }, + { + "title": "Explore top posts about Jenkins", + "url": "https://app.daily.dev/tags/jenkins?ref=roadmapsh", + "type": "article" + } + ] + }, + "-pGF3soruWWxwE4LxE5Vk": { + "title": "Travis CI", + "description": "Travis CI is a CI/CD service that is primarily used for building and testing projects that are hosted on BitBucket and GitHub. 
Open source projects can utilize Travis CI for free.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Travis CI Website", + "url": "https://www.travis-ci.com/", + "type": "article" + }, + { + "title": "Travis CI Documentation", + "url": "https://docs.travis-ci.com/", + "type": "article" + }, + { + "title": "Travis CI Tutorial", + "url": "https://docs.travis-ci.com/user/tutorial/", + "type": "article" + }, + { + "title": "Explore top posts about CI/CD", + "url": "https://app.daily.dev/tags/cicd?ref=roadmapsh", + "type": "article" + } + ] + }, + "1-JneOQeGhox-CKrdiquq": { + "title": "Circle CI", + "description": "CircleCI is a CI/CD service that can be integrated with GitHub, BitBucket and GitLab repositories. The service that can be used as a SaaS offering or self-managed using your own resources.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "CircleCI Website", + "url": "https://circleci.com/", + "type": "article" + }, + { + "title": "CircleCI Documentation", + "url": "https://circleci.com/docs", + "type": "article" + }, + { + "title": "Configuration Tutorial", + "url": "https://circleci.com/docs/config-intro", + "type": "article" + }, + { + "title": "Explore top posts about CI/CD", + "url": "https://app.daily.dev/tags/cicd?ref=roadmapsh", + "type": "article" + } + ] + }, + "TsXFx1wWikVBVoFUUDAMx": { + "title": "Drone", + "description": "Drone is a CI/CD service offering by [Harness](https://harness.io/). 
Each build runs on an isolated Docker container, and Drone integrates with many popular source code management repositories like GitHub, BitBucket and GitLab\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Drone Website", + "url": "https://www.drone.io/", + "type": "article" + }, + { + "title": "Official Documentation", + "url": "https://docs.drone.io/", + "type": "article" + }, + { + "title": "Drone Getting Started Guide", + "url": "https://docs.drone.io/server/overview/", + "type": "article" + } + ] + }, + "L000AbzF3oLcn4B1eUIYX": { + "title": "TeamCity", + "description": "TeamCity is a CI/CD service provided by JetBrains. TeamCity can be used as a SaaS offering or self-managed using your own resources.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "TeamCity Website", + "url": "https://www.jetbrains.com/teamcity/", + "type": "article" + }, + { + "title": "Official Documentation", + "url": "https://www.jetbrains.com/help/teamcity/teamcity-documentation.html", + "type": "article" + }, + { + "title": "TeamCity Tutorials", + "url": "https://www.jetbrains.com/teamcity/tutorials/", + "type": "article" + }, + { + "title": "TeamCity Learning Portal", + "url": "https://www.jetbrains.com/teamcity/learn/", + "type": "article" + }, + { + "title": "Explore top posts about CI/CD", + "url": "https://app.daily.dev/tags/cicd?ref=roadmapsh", + "type": "article" + } + ] + }, + "hcrPpjFxPi_iLiMdLKJrO": { + "title": "Secret Management", + "description": "Secret management is an important aspect of DevOps, as it involves securely storing and managing sensitive information, such as passwords, API keys, and other secrets, that are used by applications and infrastructure.\n\nThere are several ways to manage secrets in a cloud environment:\n\n* Secret stores: A secret store is a specialized database or service that is designed to securely store and manage secrets. 
Examples of secret stores include Hashicorp Vault, AWS Secrets Manager, and Google Cloud Secret Manager.\n* Encryption: Secrets can be encrypted using a variety of encryption algorithms and protocols, such as AES, RSA, and PGP. Encrypted secrets can be stored in a variety of locations, such as a file system, a database, or a cloud storage service.\n* Access controls: Access to secrets should be restricted to only those users or systems that need them, using techniques such as role-based access controls, multi-factor authentication, and least privilege principles.\n\nEffective secret management is essential for maintaining the security and integrity of a DevOps environment. It is important to regularly review and update secret management practices to ensure that secrets are being stored and managed securely.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "How to Manage Secrets in Web Applications?", + "url": "https://cs.fyi/guide/secret-management-best-practices", + "type": "article" + }, + { + "title": "Secrets management guide — approaches, open source tools, commercial products, challenges and questions", + "url": "https://medium.com/@burshteyn/secrets-management-guide-approaches-open-source-tools-commercial-products-challenges-db560fd0584d", + "type": "article" + }, + { + "title": "Secret Management Architectures: Finding the balance between security and complexity", + "url": "https://medium.com/slalom-technology/secret-management-architectures-finding-the-balance-between-security-and-complexity-9e56f2078e54", + "type": "article" + } + ] + }, + "ZWq23Q9ZNxLNti68oltxA": { + "title": "Sealed Secrets", + "description": "Sealed Secrets is a tool for securely storing and managing secrets in a Kubernetes environment. It is developed and maintained by Bitnami and is available as open-source software.\n\nIn a Sealed Secrets workflow, secrets are encrypted using a public key and stored as sealed secrets in a Git repository. 
The sealed secrets can then be deployed to a Kubernetes cluster, where they are decrypted using a private key and made available to the applications and infrastructure that need them.\n\nSealed Secrets is designed to be highly secure and easy to use, with a range of features for managing secrets, including:\n\n* Encryption: Sealed Secrets uses encryption algorithms and protocols, such as RSA, to securely store secrets.\n* Access controls: Sealed Secrets supports role-based access controls and multi-factor authentication to ensure that only authorized users or systems can access secrets.\n* Secret rotation: Sealed Secrets supports automatic secret rotation, allowing secrets to be regularly rotated to reduce the risk of unauthorized access.\n* Auditing: Sealed Secrets provides auditing capabilities, allowing administrators to track and monitor access to secrets.\n\nSealed Secrets is commonly used in Kubernetes environments to securely store and manage secrets, and it is often used in conjunction with other tools, such as Helm, to automate the deployment and management of cloud-native applications.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Sealed Secrets - Bitnami", + "url": "https://github.com/bitnami-labs/sealed-secrets", + "type": "opensource" + } + ] + }, + "yQ4d2uiROZYr950cjYnQE": { + "title": "Cloud Specific Tools", + "description": "There are several cloud-specific tools for securely storing and managing secrets, such as:\n\n* AWS Secrets Manager: AWS Secrets Manager is a service provided by Amazon Web Services (AWS) for securely storing and managing secrets. It provides features such as automatic secret rotation and integration with other AWS services.\n* Google Cloud Secret Manager: Google Cloud Secret Manager is a service provided by Google Cloud for securely storing and managing secrets. 
It provides features such as automatic secret rotation and integration with other Google Cloud services.\n* Azure Key Vault: Azure Key Vault is a service provided by Microsoft Azure for securely storing and managing secrets. It provides features such as automatic secret rotation and integration with other Azure services.\n\nThese cloud-specific tools are designed to be used in conjunction with cloud-based applications and infrastructure and are typically integrated with other cloud services, such as container orchestration platforms and continuous delivery pipelines.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "AWS Secrets Manager - Amazon Web Services", + "url": "https://aws.amazon.com/secrets-manager/", + "type": "article" + }, + { + "title": "Google Cloud Secret Manager - Google Cloud", + "url": "https://cloud.google.com/secret-manager", + "type": "article" + }, + { + "title": "Azure Key Vault - Microsoft Azure", + "url": "https://azure.microsoft.com/en-us/services/key-vault/", + "type": "article" + }, + { + "title": "Explore top posts about Cloud", + "url": "https://app.daily.dev/tags/cloud?ref=roadmapsh", + "type": "article" + } + ] + }, + "tZzvs80KzqT8aDvEyjack": { + "title": "Vault", + "description": "Vault is a tool for securely storing and managing secrets, such as passwords, API keys, and other sensitive information. 
It is developed and maintained by Hashicorp and is available as open-source software.\n\nVault is designed to be highly scalable and flexible, with a wide range of features for managing secrets, including:\n\n* Encryption: Vault uses encryption algorithms and protocols, such as AES and RSA, to securely store secrets.\n* Access controls: Vault supports role-based access controls and multi-factor authentication to ensure that only authorized users or systems can access secrets.\n* Secret rotation: Vault supports automatic secret rotation, allowing secrets to be regularly rotated to reduce the risk of unauthorized access.\n* Auditing: Vault provides auditing capabilities, allowing administrators to track and monitor access to secrets.\n\nVault is commonly used in DevOps environments to securely store and manage secrets, and it is often used in conjunction with other tools, such as Kubernetes and Helm, to automate the deployment and management of cloud-native applications.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Vault - Official Website", + "url": "https://www.vaultproject.io/", + "type": "article" + } + ] + }, + "GHQWHLxsO40kJ6z_YCinJ": { + "title": "SOPs", + "description": "SOPS (Secrets OPerationS) is an open-source tool for securely storing and managing secrets, such as passwords, API keys, and other sensitive information. 
It is developed and maintained by Mozilla and is available as a command-line tool and as a library for integration into other applications.\n\nSOPS is designed to be easy to use and highly secure, with a range of features for managing secrets, including:\n\n* Encryption: SOPS uses encryption algorithms and protocols, such as AES and RSA, to securely store secrets.\n* Access controls: SOPS supports role-based access controls and multi-factor authentication to ensure that only authorized users or systems can access secrets.\n* Secret rotation: SOPS supports automatic secret rotation, allowing secrets to be regularly rotated to reduce the risk of unauthorized access.\n* Auditing: SOPS provides auditing capabilities, allowing administrators to track and monitor access to secrets.\n\nSOPS is commonly used in DevOps environments to securely store and manage secrets, and it is often used in conjunction with other tools, such as Kubernetes and Helm, to automate the deployment and management of cloud-native applications.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Mozilla SOPS - Official Website", + "url": "https://github.com/mozilla/sops", + "type": "opensource" + } + ] + }, + "qqRLeTpuoW64H9LvY0U_w": { + "title": "Infrastructure Monitoring", + "description": "Monitoring refers to the practice of making the performance and status of infrastructure visible. This section contains common tools used for monitoring.\n\nThis is a very vendor-heavy space - use caution when studying materials exclusively from a given product or project, as there are many conflicting opinions and strategies in use. 
There is no single solution for the most substantially complex internet-facing applications, so understanding the pros and cons of these tools will be useful in helping you plan how to monitor a system for a given goal.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Best Practices to Optimize Infrastructure Monitoring within DevOps Teams", + "url": "https://thenewstack.io/best-practices-to-optimize-infrastructure-monitoring-within-devops-teams/", + "type": "article" + }, + { + "title": "Seven Steps to Effective Cloud Native Infrastructure Monitoring", + "url": "https://thenewstack.io/seven-steps-to-effective-cloud-native-infrastructure-monitoring/", + "type": "article" + }, + { + "title": "Explore top posts about Infrastructure", + "url": "https://app.daily.dev/tags/infrastructure?ref=roadmapsh", + "type": "article" + } + ] + }, + "W9sKEoDlR8LzocQkqSv82": { + "title": "Zabbix", + "description": "Zabbix is an enterprise-class open source monitoring solution for network monitoring and application monitoring of millions of metrics.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Zabbix Website", + "url": "https://www.zabbix.com/", + "type": "article" + }, + { + "title": "Official Documentation", + "url": "https://www.zabbix.com/manuals", + "type": "article" + }, + { + "title": "Zabbix Roadmap", + "url": "https://www.zabbix.com/roadmap", + "type": "article" + } + ] + }, + "NiVvRbCOCDpVvif48poCo": { + "title": "Prometheus", + "description": "Prometheus is a free software application used for event monitoring and alerting. 
It records real-time metrics in a time series database built using a HTTP pull model, with flexible queries and real-time alerting.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Prometheus Website", + "url": "https://prometheus.io/", + "type": "article" + }, + { + "title": "Official Documentation", + "url": "https://prometheus.io/docs/introduction/overview/", + "type": "article" + }, + { + "title": "Getting Started with Prometheus", + "url": "https://prometheus.io/docs/tutorials/getting_started/", + "type": "article" + }, + { + "title": "Explore top posts about Prometheus", + "url": "https://app.daily.dev/tags/prometheus?ref=roadmapsh", + "type": "article" + } + ] + }, + "bujq_C-ejtpmk-ICALByy": { + "title": "Datadog", + "description": "Datadog is a monitoring and analytics platform for large-scale applications. It encompasses infrastructure monitoring, application performance monitoring, log management, and user-experience monitoring. Datadog aggregates data across your entire stack with 400+ integrations for troubleshooting, alerting, and graphing.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Datadog Website", + "url": "https://www.datadoghq.com/", + "type": "article" + }, + { + "title": "Official Documentation", + "url": "https://docs.datadoghq.com/", + "type": "article" + }, + { + "title": "Explore top posts about DevOps", + "url": "https://app.daily.dev/tags/devops?ref=roadmapsh", + "type": "article" + } + ] + }, + "niA_96yR7uQ0sc6S_OStf": { + "title": "Grafana", + "description": "Grafana is the open-source platform for monitoring and observability. 
It allows you to query, visualize, alert on and understand your metrics no matter where they are stored.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Grafana Website", + "url": "https://grafana.com/", + "type": "article" + }, + { + "title": "Grafana Official Documentation", + "url": "https://grafana.com/docs/", + "type": "article" + }, + { + "title": "Grafana Community", + "url": "https://community.grafana.com/", + "type": "article" + }, + { + "title": "Grafana Webinars and Videos", + "url": "https://grafana.com/videos/", + "type": "article" + }, + { + "title": "Explore top posts about Grafana", + "url": "https://app.daily.dev/tags/grafana?ref=roadmapsh", + "type": "article" + } + ] + }, + "gaoZjOYmU0J5aM6vtLNvN": { + "title": "Logs Management", + "description": "Log management is the process of handling log events generated by all software applications and infrastructure on which they run. It involves log collection, aggregation, parsing, storage, analysis, search, archiving, and disposal, with the ultimate goal of using the data for troubleshooting and gaining business insights, while also ensuring the compliance and security of applications and infrastructure.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Introduction to Logs Management", + "url": "https://sematext.com/guides/log-management", + "type": "article" + }, + { + "title": "Log Management: What DevOps Teams Need to Know", + "url": "https://devops.com/log-management-what-devops-teams-need-to-know/", + "type": "article" + }, + { + "title": "Logging for Kubernetes: What to Log and How to Log It", + "url": "https://thenewstack.io/logging-for-kubernetes-what-to-log-and-how-to-log-it/", + "type": "article" + } + ] + }, + "K_qLhK2kKN_uCq7iVjqph": { + "title": "Elastic Stack", + "description": "Elastic Stack is a group of open source products comprised of Elasticsearch, Kibana, Beats, and Logstash and more that help store, search, analyze, and 
visualize data from various sources, in different formats, in real-time.\n\n* `Elastic Search` - Search and analytics engine\n* `Logstash/fluentd` - Data processing pipeline\n* `Kibana` - Dashboard to visualize data\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Elastic Stack Website", + "url": "https://www.elastic.co/elastic-stack/", + "type": "article" + }, + { + "title": "Official Docs", + "url": "https://www.elastic.co/guide/index.html", + "type": "article" + }, + { + "title": "Elastic Stack features", + "url": "https://www.elastic.co/elastic-stack/features", + "type": "article" + }, + { + "title": "Logstash vs Fluentd", + "url": "https://logz.io/blog/fluentd-logstash/", + "type": "article" + }, + { + "title": "Explore top posts about ELK", + "url": "https://app.daily.dev/tags/elk?ref=roadmapsh", + "type": "article" + } + ] + }, + "s_kss4FJ2KyZRdcKNHK2v": { + "title": "Graylog", + "description": "Graylog is a leading centralized log management solution for capturing, storing, and enabling real-time analysis of terabytes of machine data.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Graylog Website", + "url": "https://www.graylog.org/", + "type": "article" + }, + { + "title": "Official Documentation", + "url": "https://docs.graylog.org/", + "type": "article" + }, + { + "title": "Product Videos", + "url": "https://www.graylog.org/resources-videos", + "type": "article" + } + ] + }, + "dZID_Y_uRTF8JlfDCqeqs": { + "title": "Splunk", + "description": "The Splunk platform removes the barriers between data and action, empowering observability, IT and security teams to ensure their organizations are secure, resilient and innovative.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Splunk Website", + "url": "https://www.splunk.com/", + "type": "article" + }, + { + "title": "Official Documentation", + "url": "https://docs.splunk.com/Documentation", + "type": "article" + }, + { + 
"title": "Splunk Videos", + "url": "https://www.splunk.com/en_us/resources/videos.html", + "type": "article" + }, + { + "title": "Explore top posts about Logging", + "url": "https://app.daily.dev/tags/logging?ref=roadmapsh", + "type": "article" + } + ] + }, + "cjjMZdyLgakyVkImVQTza": { + "title": "Papertrail", + "description": "Papertrail is a leading centralized log management solution for capturing, storing, and enabling real-time analysis of terabytes of machine data.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Papertrail Website", + "url": "https://www.papertrail.com/", + "type": "article" + }, + { + "title": "Official Guides", + "url": "https://www.papertrail.com/solution/guides/", + "type": "article" + }, + { + "title": "Official Blog", + "url": "https://www.papertrail.com/blog/", + "type": "article" + } + ] + }, + "Yq8kVoRf20aL_o4VZU5--": { + "title": "Container Orchestration", + "description": "Containers are a construct in which [cgroups](https://en.wikipedia.org/wiki/Cgroups), [namespaces](https://en.wikipedia.org/wiki/Linux_namespaces), and [chroot](https://en.wikipedia.org/wiki/Chroot) are used to fully encapsulate and isolate a process. 
This encapsulated process, called a container image, shares the kernel of the host with other containers, allowing containers to be significantly smaller and faster than virtual machines.\n\nThese images are designed for portability, allowing for full local testing of a static image, and easy deployment to a container management platform.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What are Containers?", + "url": "https://cloud.google.com/learn/what-are-containers", + "type": "article" + }, + { + "title": "What is a Container?", + "url": "https://www.docker.com/resources/what-container/", + "type": "article" + }, + { + "title": "Articles about Containers - The New Stack", + "url": "https://thenewstack.io/category/containers/", + "type": "article" + }, + { + "title": "Explore top posts about Containers", + "url": "https://app.daily.dev/tags/containers?ref=roadmapsh", + "type": "article" + }, + { + "title": "What are Containers?", + "url": "https://www.youtube.com/playlist?list=PLawsLZMfND4nz-WDBZIj8-nbzGFD4S9oz", + "type": "video" + } + ] + }, + "XbrWlTyH4z8crSHkki2lp": { + "title": "GKE / EKS / AKS", + "description": "GKE - Google Kubernetes Engine\n------------------------------\n\nGKE is a managed Kubernetes service that lets you deploy, manage, and scale containerized applications on Google Cloud.\n\nEKS - Amazon Elastic Kubernetes Service\n---------------------------------------\n\nAmazon Elastic Kubernetes Service (Amazon EKS) is a fully managed Kubernetes service from AWS.\n\nAKS - Azure Kubernetes Service\n------------------------------\n\nAzure Kubernetes Service (AKS) manages your hosted Kubernetes environment, making it quick and easy to deploy and manage containerized applications without container orchestration expertise.", + "links": [] + }, + "FE2h-uQy6qli3rKERci1j": { + "title": "AWS ECS / Fargate", + "description": "ECS is a container orchestration service that allows you to run Docker containers on a cluster of 
EC2 instances. ECS is a good choice if you want to run Docker containers on EC2 instances and have full control over the underlying infrastructure.\n\nFargate is a serverless container orchestration service that allows you to run Docker containers without having to manage servers, clusters, or any other infrastructure. Fargate is a good choice if you want to run Docker containers without having to manage servers or clusters.", + "links": [] + }, + "VD24HC9qJOC42lbpJ-swC": { + "title": "Docker Swarm", + "description": "A Docker Swarm is a group of either physical or virtual machines that are running the Docker application and that have been configured to join together in a cluster. Once a group of machines have been clustered together, you can still run the Docker commands that you're used to, but they will now be carried out by the machines in your cluster. The activities of the cluster are controlled by a swarm manager, and machines that have joined the cluster are referred to as nodes.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Official Website", + "url": "https://www.docker.com/", + "type": "article" + }, + { + "title": "Docker Swarm Documentation", + "url": "https://docs.docker.com/engine/swarm/", + "type": "article" + }, + { + "title": "Tutorial: Manage Docker Swarm with Portainer", + "url": "https://thenewstack.io/tutorial-manage-docker-swarm-with-portainer/", + "type": "article" + }, + { + "title": "Tutorial: Create a Docker Swarm with Persistent Storage Using GlusterFS", + "url": "https://thenewstack.io/tutorial-create-a-docker-swarm-with-persistent-storage-using-glusterfs/", + "type": "article" + }, + { + "title": "Explore top posts about Docker", + "url": "https://app.daily.dev/tags/docker?ref=roadmapsh", + "type": "article" + }, + { + "title": "Docker Swarm Tutorial for Beginners", + "url": "https://www.youtube.com/watch?v=Tm0Q5zr3FL4", + "type": "video" + } + ] + }, + "zuBAjrqQPjj-0DHGjCaqT": { + "title": "Artifact 
Management", + "description": "", + "links": [] + }, + "C_sFyIsIIpriZlovvcbSE": { + "title": "Artifactory", + "description": "", + "links": [ + { + "title": "Artifactory Website", + "url": "https://jfrog.com/artifactory/", + "type": "article" + }, + { + "title": "Explore top posts about Artifactory", + "url": "https://app.daily.dev/tags/artifactory?ref=roadmapsh", + "type": "article" + } + ] + }, + "ootuLJfRXarVvm3J1Ir11": { + "title": "Nexus", + "description": "The Nexus Repository Manager is a widely used repository manager software developed by Sonatype. It's designed to manage binary components such as Java libraries, Docker images, npm packages, NuGet packages, and more. Nexus Repository Manager allows organizations to store, manage, and distribute software components securely and efficiently.", + "links": [ + { + "title": "Repository Management Basics", + "url": "https://learn.sonatype.com/courses/nxrm-admin-100/", + "type": "article" + }, + { + "title": "Nexus Installation and Configuration", + "url": "https://learn.sonatype.com/courses/nxrm-config-100/", + "type": "article" + }, + { + "title": "Nexus Repository Security Essentials", + "url": "https://learn.sonatype.com/courses/nxrm-sec-100/", + "type": "article" + }, + { + "title": "Nexus Best Practices", + "url": "https://help.sonatype.com/repomanager3/nexus-repository-best-practices", + "type": "article" + } + ] + }, + "vsmE6EpCc2DFGk1YTbkHS": { + "title": "Cloud Smith", + "description": "", + "links": [] + }, + "-INN1qTMLimrZgaSPCcHj": { + "title": "GitOps", + "description": "GitOps is a methodology for managing cloud-native applications and infrastructure using Git as the source of truth. 
It is based on the idea that all changes to the system, whether they are related to code, configuration, or infrastructure, should be made using Git and that Git should be used to automate the deployment and management of those changes.\n\nIn a GitOps workflow, changes to the system are made by committing code or configuration changes to a Git repository. These changes are then automatically deployed to the production environment using a continuous delivery pipeline. The pipeline is triggered by changes to the Git repository and is responsible for building, testing, and deploying the changes to the production environment.\n\nGitOps is designed to be a more efficient and agile way of managing cloud-native environments, as it allows developers to make changes to the system using familiar tools and processes and it provides a clear and auditable history of all changes to the system. It is often used in conjunction with tools such as Kubernetes and Helm to automate the deployment and management of cloud-native applications.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Guide to GitOps", + "url": "https://www.weave.works/technologies/gitops/", + "type": "article" + }, + { + "title": "Explore top posts about GitOps", + "url": "https://app.daily.dev/tags/gitops?ref=roadmapsh", + "type": "article" + } + ] + }, + "i-DLwNXdCUUug6lfjkPSy": { + "title": "ArgoCD", + "description": "Argo CD is a continuous delivery tool for Kubernetes that is based on the GitOps methodology. It is used to automate the deployment and management of cloud-native applications by continuously synchronizing the desired application state with the actual application state in the production environment.\n\nIn an Argo CD workflow, changes to the application are made by committing code or configuration changes to a Git repository. Argo CD monitors the repository and automatically deploys the changes to the production environment using a continuous delivery pipeline. 
The pipeline is triggered by changes to the Git repository and is responsible for building, testing, and deploying the changes to the production environment.\n\nArgo CD is designed to be a simple and efficient way to manage cloud-native applications, as it allows developers to make changes to the system using familiar tools and processes and it provides a clear and auditable history of all changes to the system. It is often used in conjunction with tools such as Helm to automate the deployment and management of cloud-native applications.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Argo CD - Argo Project", + "url": "https://argo-cd.readthedocs.io/en/stable/", + "type": "article" + }, + { + "title": "Explore top posts about ArgoCD", + "url": "https://app.daily.dev/tags/argocd?ref=roadmapsh", + "type": "article" + }, + { + "title": "ArgoCD Tutorial for Beginners", + "url": "https://www.youtube.com/watch?v=MeU5_k9ssrs", + "type": "video" + } + ] + }, + "6gVV_JUgKgwJb4C8tHZn7": { + "title": "FluxCD", + "description": "Flux CD is a continuous delivery tool for Kubernetes that is based on the GitOps methodology. It is used to automate the deployment and management of cloud-native applications by continuously synchronizing the desired application state with the actual application state in the production environment.\n\nIn a Flux CD workflow, changes to the application are made by committing code or configuration changes to a Git repository. Flux CD monitors the repository and automatically deploys the changes to the production environment using a continuous delivery pipeline. 
The pipeline is triggered by changes to the Git repository and is responsible for building, testing, and deploying the changes to the production environment.\n\nFlux CD is designed to be a simple and efficient way to manage cloud-native applications, as it allows developers to make changes to the system using familiar tools and processes and it provides a clear and auditable history of all changes to the system. It is often used in conjunction with tools such as Helm to automate the deployment and management of cloud-native applications.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Flux CD Docs", + "url": "https://docs.fluxcd.io/", + "type": "article" + }, + { + "title": "Explore top posts about Flux", + "url": "https://app.daily.dev/tags/flux?ref=roadmapsh", + "type": "article" + } + ] + }, + "EeWsihH9ehbFKebYoB5i9": { + "title": "Service Mesh", + "description": "A service mesh, like the open source project Istio, is a way to control how different parts of an application share data with one another. Unlike other systems for managing this communication, a service mesh is a dedicated infrastructure layer built right into an app. 
This visible infrastructure layer can document how well (or not) different parts of an app interact, so it becomes easier to optimize communication and avoid downtime as an app grows.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Whats a service mesh?", + "url": "https://www.redhat.com/en/topics/microservices/what-is-a-service-mesh", + "type": "article" + }, + { + "title": "The latest news about service mesh (TNS)", + "url": "https://thenewstack.io/category/service-mesh/", + "type": "article" + }, + { + "title": "Explore top posts about Service Mesh", + "url": "https://app.daily.dev/tags/service-mesh?ref=roadmapsh", + "type": "article" + } + ] + }, + "XsSnqW6k2IzvmrMmJeU6a": { + "title": "Istio", + "description": "Istio is an open source service mesh platform that provides a way to control how microservices share data with one another. It includes APIs that let Istio integrate into any logging platform, telemetry, or policy system. Istio is designed to run in a variety of environments: on-premise, cloud-hosted, in Kubernetes containers, in services running on virtual machines, and more.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is Istio?", + "url": "https://www.redhat.com/en/topics/microservices/what-is-istio", + "type": "article" + }, + { + "title": "Explore top posts about Istio", + "url": "https://app.daily.dev/tags/istio?ref=roadmapsh", + "type": "article" + } + ] + }, + "OXOTm3nz6o44p50qd0brN": { + "title": "Consul", + "description": "Consul is a service mesh solution providing a full featured control plane with service discovery, configuration, and segmentation functionality. Each of these features can be used individually as needed, or they can be used together to build a full service mesh. Consul requires a data plane and supports both a proxy and native integration model. 
Consul ships with a simple built-in proxy so that everything works out of the box, but also supports 3rd party proxy integrations such as Envoy.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is Consul?", + "url": "https://www.consul.io/docs/intro", + "type": "article" + }, + { + "title": "Explore top posts about HashiCorp", + "url": "https://app.daily.dev/tags/hashicorp?ref=roadmapsh", + "type": "article" + } + ] + }, + "hhoSe4q1u850PgK62Ubau": { + "title": "Linkerd", + "description": "Linkerd is an open source service mesh designed to be deployed into a variety of container schedulers and frameworks such as Kubernetes. It became the original “service mesh” when its creator Buoyant first coined the term in 2016. Like Twitter’s Finagle, on which it was based, Linkerd was first written in Scala and designed to be deployed on a per-host basis. Linkerd is one of the first products to be associated with the term service mesh and supports platforms such as Docker and Kubernetes.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Linkerd Website", + "url": "https://linkerd.io/", + "type": "article" + }, + { + "title": "Linkerd Documentation", + "url": "https://linkerd.io/2.11/overview/", + "type": "article" + }, + { + "title": "What is Linkerd?", + "url": "https://www.techtarget.com/searchitoperations/definition/Linkerd", + "type": "article" + }, + { + "title": "Explore top posts about Infrastructure", + "url": "https://app.daily.dev/tags/infrastructure?ref=roadmapsh", + "type": "article" + } + ] + }, + "epLLYArR16HlhAS4c33b4": { + "title": "Envoy", + "description": "Originally created at Lyft, Envoy is a high-performance data plane designed for service mesh architectures. Lyft open sourced it and donated it to the CNCF, where it is now one of the CNCF’s graduated open source projects. Envoy is a self contained process that is designed to run alongside every application server. 
All of the Envoys form a transparent communication mesh in which each application sends and receives messages to and from localhost and is unaware of the network topology.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Envoy Website", + "url": "https://www.envoyproxy.io/", + "type": "article" + }, + { + "title": "Envoy Documentation", + "url": "https://www.envoyproxy.io/docs/envoy/latest/start/start", + "type": "article" + }, + { + "title": "What is Envoy?", + "url": "https://www.envoyproxy.io/docs/envoy/latest/intro/what_is_envoy", + "type": "article" + }, + { + "title": "Explore top posts about Envoy", + "url": "https://app.daily.dev/tags/envoy?ref=roadmapsh", + "type": "article" + } + ] + }, + "Qc0MGR5bMG9eeM5Zb9PMk": { + "title": "Cloud Design Patterns", + "description": "", + "links": [] + }, + "JCe3fcOf-sokTJURyX1oI": { + "title": "Availability", + "description": "Availability is the percentage of time that a system is functional and working as intended, generally referred to as uptime. Availability can be affected by hardware or software errors, infrastructure problems, malicious attacks, and system load. Many cloud providers typically offer their users a service level agreement (SLA) that specifies the exact percentages of promised uptime/downtime. Availability is related to reliability in this sense. For example, a company might promise 99.99% uptime for their services.\n\nTo achieve high levels of uptime, it is important to eliminate single points of failure so that a single device failure does not disrupt the entire service. High availability in the cloud is often achieved by creating clusters. Clusters are groups of devices (such as servers) that all have access to the same shared storage and function as one single server to provide uninterrupted availability. This way, if one server goes down, the others are able to pick up the load until it comes back online. 
Clusters can range from two servers to even multiple buildings of servers.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "How High Availability Works in the Cloud", + "url": "https://codster.io/en/blog/high-availability-in-the-cloud/", + "type": "article" + }, + { + "title": "Techniques for Achieving High Availability", + "url": "https://www.sqlservercentral.com/articles/cloud-computing-basics-achieving-high-availability-2", + "type": "article" + } + ] + }, + "5FN7iva4DW_lv-r1tijd8": { + "title": "Data Management", + "description": "Data management is the key element of cloud applications, and influences most of the quality attributes. Data is typically hosted in different locations and across multiple servers for reasons such as performance, scalability or availability, and this can present a range of challenges. For example, data consistency must be maintained, and data will typically need to be synchronized across different locations.\n\nAdditionally data should be protected at rest, in transit, and via authorized access mechanisms to maintain security assurances of confidentiality, integrity, and availability. Refer to the Azure Security Benchmark Data Protection Control for more information.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Data management patterns", + "url": "https://docs.microsoft.com/en-us/azure/architecture/patterns/category/data-management", + "type": "article" + }, + { + "title": "Explore top posts about Data Management", + "url": "https://app.daily.dev/tags/data-management?ref=roadmapsh", + "type": "article" + } + ] + }, + "1_NRXjckZ0F8EtEmgixqz": { + "title": "Design and Implementation", + "description": "Good design encompasses factors such as consistency and coherence in component design and deployment, maintainability to simplify administration and development, and reusability to allow components and subsystems to be used in other applications and in other scenarios. 
Decisions made during the design and implementation phase have a huge impact on the quality and the total cost of ownership of cloud hosted applications and services.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Design and implementation patterns", + "url": "https://docs.microsoft.com/en-us/azure/architecture/patterns/category/design-implementation", + "type": "article" + } + ] + }, + "8kby89epyullS9W7uKDrs": { + "title": "Management and Monitoring", + "description": "DevOps management and monitoring entails overseeing the entire development process from planning, development, integration and testing, deployment, and operations. It involves a complete and real-time view of the status of applications, services, and infrastructure in the production environment. Features such as real-time streaming, historical replay, and visualizations are critical components of application and service monitoring.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Management and Monitoring Get Started Guide", + "url": "https://www.atlassian.com/devops/devops-tools/devops-monitoring", + "type": "article" + }, + { + "title": "Explore top posts about Monitoring", + "url": "https://app.daily.dev/tags/monitoring?ref=roadmapsh", + "type": "article" + } + ] + }, + "w2eCgBC-ydMHSxh7LMti8": { + "title": "Loki", + "description": "Loki is a horizontally scalable, highly available, multi-tenant log aggregation system inspired by Prometheus. It is designed to be very cost-effective and easy to operate. 
It does not index the contents of the logs, but rather a set of labels for each log stream.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Loki Website", + "url": "https://grafana.com/oss/loki/", + "type": "article" + }, + { + "title": "Official Documentation", + "url": "https://grafana.com/docs/loki/latest/?pg=oss-loki&plcmt=quick-links", + "type": "article" + }, + { + "title": "Explore top posts about Grafana", + "url": "https://app.daily.dev/tags/grafana?ref=roadmapsh", + "type": "article" + } + ] + }, + "hIBeTUiAI3zwUY6NgAO-A": { + "title": "Kubernetes", + "description": "Kubernetes is an [open source](https://github.com/kubernetes/kubernetes) container management platform, and the dominant product in this space. Using Kubernetes, teams can deploy images across multiple underlying hosts, defining their desired availability, deployment logic, and scaling logic in YAML. Kubernetes evolved from Borg, an internal Google platform used to provision and allocate compute resources (similar to the Autopilot and Aquaman systems of Microsoft Azure).\n\nThe popularity of Kubernetes has made it an increasingly important skill for the DevOps Engineer and has triggered the creation of Platform teams across the industry. 
These Platform engineering teams often exist with the sole purpose of making Kubernetes approachable and usable for their product development colleagues.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Kubernetes Website", + "url": "https://kubernetes.io/", + "type": "article" + }, + { + "title": "Kubernetes Documentation", + "url": "https://kubernetes.io/docs/home/", + "type": "article" + }, + { + "title": "Primer: How Kubernetes Came to Be, What It Is, and Why You Should Care", + "url": "https://thenewstack.io/primer-how-kubernetes-came-to-be-what-it-is-and-why-you-should-care/", + "type": "article" + }, + { + "title": "Kubernetes: An Overview", + "url": "https://thenewstack.io/kubernetes-an-overview/", + "type": "article" + }, + { + "title": "Explore top posts about Kubernetes", + "url": "https://app.daily.dev/tags/kubernetes?ref=roadmapsh", + "type": "article" + }, + { + "title": "Kubernetes Crash Course for Absolute Beginners", + "url": "https://www.youtube.com/watch?v=s_o8dwzRlu4", + "type": "video" + } + ] + }, + "JXsctlXUUS1ie8nNEgIk9": { + "title": "GCP Functions", + "description": "Cloud Functions are the serverless \"Function-as-a-Service\" offer of Google Cloud Platform.", + "links": [ + { + "title": "GCP Cloud Functions Overview", + "url": "https://cloud.google.com/functions/docs/concepts/overview", + "type": "article" + }, + { + "title": "GCP Cloud Functions Tutorial", + "url": "https://antonputra.com/google/google-cloud-functions-tutorial/", + "type": "article" + }, + { + "title": "Explore top posts about Google Cloud Platform", + "url": "https://app.daily.dev/tags/gcp?ref=roadmapsh", + "type": "article" + } + ] + }, + "wNguM6-YEznduz3MgBCYo": { + "title": "Application Monitoring", + "description": "Application monitoring refers to the practice of making the status and performance of a given _application_ visible. 
This may include details such as stacktraces, error logs, and the line of code implicated in a given failure. When combined with Infrastructure monitoring, this can provide a complete picture of what is happening in your system, and why.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Applying Basic vs. Advanced Monitoring Techniques", + "url": "https://thenewstack.io/applying-basic-vs-advanced-monitoring-techniques/", + "type": "article" + }, + { + "title": "Why Legacy Apps Need Your Monitoring Love, Too", + "url": "https://thenewstack.io/why-legacy-apps-need-your-monitoring-love-too/", + "type": "article" + }, + { + "title": "Explore top posts about Monitoring", + "url": "https://app.daily.dev/tags/monitoring?ref=roadmapsh", + "type": "article" + } + ] + }, + "8rd7T5ahK2I_zh5co-IF-": { + "title": "Jaeger", + "description": "Jaeger is an open source, end-to-end distributed tracing system that enables us to monitor and troubleshoot transactions in complex distributed systems.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Jaeger Website", + "url": "https://www.jaegertracing.io/", + "type": "article" + }, + { + "title": "Official Documentation", + "url": "https://www.jaegertracing.io/docs/1.37/", + "type": "article" + } + ] + }, + "pk76Us6z8LoX3f0mhnCyR": { + "title": "New Relic", + "description": "New Relic is where dev, ops, security and business teams solve software–performance problems with data.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "New Relic Website", + "url": "https://newrelic.com/", + "type": "article" + }, + { + "title": "Official Documentation", + "url": "https://docs.newrelic.com/", + "type": "article" + }, + { + "title": "New Relic Developer Hub", + "url": "https://developer.newrelic.com/", + "type": "article" + }, + { + "title": "Explore top posts about DevOps", + "url": "https://app.daily.dev/tags/devops?ref=roadmapsh", + "type": "article" + } + ] + 
}, + "datadog@BHny2Emf96suhAlltiEro.md": { + "title": "Datadog", + "description": "", + "links": [] + }, + "eOyu4wmKOrcMlhD8pUGGh": { + "title": "Prometheus", + "description": "Prometheus is a free software application used for event monitoring and alerting. It records real-time metrics in a time series database built using a HTTP pull model, with flexible queries and real-time alerting.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Prometheus Website", + "url": "https://prometheus.io/", + "type": "article" + }, + { + "title": "Official Documentation", + "url": "https://prometheus.io/docs/introduction/overview/", + "type": "article" + }, + { + "title": "Getting Started with Prometheus", + "url": "https://prometheus.io/docs/tutorials/getting_started/", + "type": "article" + }, + { + "title": "Explore top posts about Prometheus", + "url": "https://app.daily.dev/tags/prometheus?ref=roadmapsh", + "type": "article" + } + ] + }, + "K81bmtgnB1gfhYdi3TB5a": { + "title": "OpenTelemetry", + "description": "OpenTelemetry is a collection of tools, APIs, and SDKs. Use it to instrument, generate, collect, and export telemetry data (metrics, logs, and traces) to help you analyze your software’s performance and behavior.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "OpenTelemetry Website", + "url": "https://opentelemetry.io/", + "type": "article" + }, + { + "title": "Official Documentation", + "url": "https://opentelemetry.io/docs/", + "type": "article" + } + ] + } +} \ No newline at end of file diff --git a/public/roadmap-content/devrel.json b/public/roadmap-content/devrel.json new file mode 100644 index 000000000..e8e0713ac --- /dev/null +++ b/public/roadmap-content/devrel.json @@ -0,0 +1,863 @@ +{ + "SiYUdtYMDImRPmV2_XPkH": { + "title": "What is DevRel?", + "description": "DevRel, or Developer Relations, is a field that connects a company's technical products or services with the developer community. 
It encompasses community engagement, technical advocacy, feedback collection, content creation, event participation, and product evangelism.\n\nDevRel professionals build relationships with developers, educate them about company tools and APIs, gather insights for product improvement, produce technical content, represent the company at industry events, and promote technology adoption.\n\nThey typically possess both technical expertise and strong communication skills, serving as a vital link between external developers and internal engineering and product teams. DevRel aims to foster a positive ecosystem around a company's technology, driving adoption and user satisfaction.", + "links": [] + }, + "KP28dl1I9hxM130gIPxSZ": { + "title": "History and Evolution", + "description": "The first Developer Relations teams came about in the lates 80’s during fierce rivalry between Microsoft and Apple. Apple created a team of “Evangelists” and when Microsoft saw the large impact that this team were having on the sales and adoption of Apple products, they created a their own team of “Technical Evangelists” which later became the Microsoft Developer Relations Group.\n\nDevRel has since evolved into multiple roles within itself, including Developer Marketing, Developer Advocate and Developer Engineer.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Developer Relations History & Mission Structure", + "url": "https://lmcdunna.medium.com/developer-relations-history-mission-structure-5fcad869deac", + "type": "article" + }, + { + "title": "History of Developer Evangelism", + "url": "https://www.youtube.com/watch?v=ieiQmyrmakI", + "type": "video" + } + ] + }, + "No-UnhdGmtLW9uFGLy-ca": { + "title": "Developer Experience", + "description": "Developer Experience (DX) refers to the overall experience developers have while using a software product, tool, or platform. 
A positive DX is characterized by intuitive and well-documented APIs, robust and clear documentation, seamless integration processes, and responsive support. Good DX ensures that developers can efficiently build, deploy, and manage applications, reducing frustration and increasing productivity.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is developer experience?", + "url": "https://swimm.io/learn/developer-experience/what-is-developer-experience-devx-pillars-and-best-practices", + "type": "article" + }, + { + "title": "Developer Experience: What is it and why should you care? GitHub", + "url": "https://github.blog/2023-06-08-developer-experience-what-is-it-and-why-should-you-care/", + "type": "article" + } + ] + }, + "BooGiYTMoS0N5eobwjPHY": { + "title": "Developer Journey", + "description": "The Developer Journey encompasses the entire process that a developer goes through when interacting with a product or platform, from initial discovery to long-term engagement. This journey includes various stages such as awareness, onboarding, learning, building, deploying, and scaling. Each stage requires thoughtful design and support to ensure a smooth and positive experience. 
By understanding and optimizing each touchpoint in the Developer Journey, companies can reduce friction, enhance satisfaction, and foster a more loyal and productive developer community.\n\nLearn more from the following resources:", + "links": [ + { + "title": "The Developer Journey", + "url": "https://www.devrel.agency/developerjourney", + "type": "article" + }, + { + "title": "Developer Relations: The Developer Journey Map", + "url": "https://medium.com/codex/developer-relations-the-developer-journey-map-36bd4619f5f3", + "type": "article" + }, + { + "title": "Understand the developer journey", + "url": "https://www.commonroom.io/resources/ultimate-guide-to-developer-experience/", + "type": "article" + } + ] + }, + "UhMk8g5MMhvi3kWLLI6B1": { + "title": "Developer Marketing", + "description": "Developer Marketing focuses on strategies and tactics to engage and attract developers to a product, platform, or service. It involves creating targeted content, such as technical blogs, tutorials, and case studies, that addresses developers' needs and pain points. Effective developer marketing also includes hosting events like hackathons, webinars, and conferences to build community and demonstrate value. By leveraging channels that developers trust, such as GitHub, Stack Overflow, and social media, companies can increase visibility and adoption of their offerings.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is developer marketing?", + "url": "https://appsembler.com/glossary/developer-marketing/", + "type": "article" + }, + { + "title": "What is developer marketing? 
IronHorse.io", + "url": "https://ironhorse.io/blog/what-is-developer-marketing/", + "type": "article" + }, + { + "title": "What is developer marketing (B2D)", + "url": "https://www.productmarketingalliance.com/what-is-developer-marketing/", + "type": "article" + } + ] + }, + "N9HXTCQq2wfC-QurSofE_": { + "title": "Importance of DevRel", + "description": "Developer Relations (DevRel) is crucial for fostering a vibrant and engaged developer community around a product or platform. It involves creating and maintaining strong relationships with developers through activities like community building, technical support, advocacy, and education. Effective DevRel ensures that developers have the resources and support they need to succeed, which in turn drives product adoption, innovation, and loyalty. By bridging the gap between a company's development teams and external developers, DevRel can also provide valuable feedback for product improvements and help in shaping the future direction of the product or platform.\n\nLearn more from the following resources:", + "links": [ + { + "title": "DevRel - Why is it important?", + "url": "https://developers.onelogin.com/blog/devrel", + "type": "article" + }, + { + "title": "The role of developer advocacy in driving innovation", + "url": "https://leaddev.com/process/role-developer-advocacy-driving-innovation", + "type": "article" + } + ] + }, + "7MCmY1bABGPfmzjErADvg": { + "title": "Advocacy", + "description": "Developer Advocacy is the practice of representing and supporting the needs and interests of developers both within a company and in the broader developer community. Developer Advocates act as liaisons between the company’s development team and external developers, ensuring that the developers have the tools, resources, and knowledge to effectively use the company's products or platforms. 
They create educational content, provide technical support, gather feedback, and participate in community events.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is developer advocacy?", + "url": "https://appsembler.com/glossary/developer-advocacy/", + "type": "article" + }, + { + "title": "Developer Relations: New Career Path for Developers", + "url": "https://www.youtube.com/watch?v=iA2SQ4OL4GU", + "type": "video" + } + ] + }, + "EjqJkyojhO7f1uFHIoyGY": { + "title": "Education", + "description": "Developer Education focuses on providing developers with the knowledge and skills they need to effectively use a product, platform, or technology. This involves creating and delivering a variety of educational materials such as documentation, tutorials, courses, webinars, and interactive workshops. Effective developer education programs cater to different learning styles and levels of expertise, from beginners to advanced users.\n\nLearn more from the following resources:", + "links": [ + { + "title": "What is developer eduction?", + "url": "https://appsembler.com/glossary/developer-education/", + "type": "article" + }, + { + "title": "What is developer education - Dev.to", + "url": "https://dev.to/jacobandrewsky/what-is-developer-education-5a01", + "type": "article" + }, + { + "title": "Successful Developer Education: What you need to know", + "url": "https://www.developermarketing.io/successful-developer-education-what-you-need-to-know/", + "type": "article" + } + ] + }, + "1NFcpGzCJylRPbFYdD9bB": { + "title": "Community Support", + "description": "Community Support in the context of Developer Relations (DevRel) involves actively engaging with and assisting the developer community to foster a collaborative and supportive environment. This includes moderating forums, responding to queries, organizing community events, and facilitating peer-to-peer support. 
Effective community support ensures that developers feel heard and valued, and have access to timely and accurate assistance, which enhances their overall experience and productivity.", + "links": [ + { + "title": "Community and DevRel", + "url": "https://developerrelations.com/podcast/community-and-devrel", + "type": "article" + }, + { + "title": "New to DevRel? Talk to your community!", + "url": "https://dev.to/amandamartindev/new-to-devrel-talk-to-your-community-hmf", + "type": "article" + }, + { + "title": "DevRel & Community", + "url": "https://www.youtube.com/watch?v=CfXkY2m_0dU", + "type": "video" + } + ] + }, + "_3ZnPUl4kzzuM0bKQ8IAq": { + "title": "Content Creation", + "description": "Content creation in the context of Developer Relations (DevRel) involves producing a wide range of materials designed to educate, engage, and empower developers. This includes writing technical blogs, creating video tutorials, developing comprehensive documentation, and crafting code samples and demos. The goal is to provide valuable resources that help developers understand and effectively use a product or platform. 
High-quality content addresses common challenges, showcases best practices, and highlights new features or updates.\n\nLearn more from the following resources:", + "links": [ + { + "title": "The secret of content creation for DevRel", + "url": "https://developerrelations.com/dev-rel/the-secrets-of-content-creation-for-devrels", + "type": "article" + }, + { + "title": "Content strategy for DevRel teams", + "url": "https://www.stephaniemorillo.co/post/content-strategy-for-devrel-teams-a-primer", + "type": "article" + }, + { + "title": "DevRel Content Creation with Stephanie Wong from Google Cloud", + "url": "https://www.youtube.com/watch?v=DQE1hBXVXYA", + "type": "video" + } + ] + }, + "IxRvVvX9Fax08YUbVob4s": { + "title": "Feedback Loop", + "description": "Feedback loops are a crucial process that involves collecting, analyzing, and acting on feedback from developers to improve products and services. This loop begins with gathering insights through various channels such as forums, surveys, social media, direct interactions at events, and support tickets. 
The feedback is then communicated to internal teams, including product management, engineering, and marketing, to inform product enhancements, feature requests, and bug fixes.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Harnessing the Power of Community Feedback for Product Development", + "url": "https://draft.dev/learn/devrel-harnessing-the-power-of-community-feedback-for-product-development", + "type": "article" + }, + { + "title": "The Developer Advocacy Feedback Loop", + "url": "https://glaforge.dev/talks/2020/08/06/the-developer-advocacy-feedback-loop/", + "type": "article" + } + ] + }, + "LRZ8yxTfEGCXsYp4N1_uD": { + "title": "Public Speaking", + "description": "", + "links": [] + }, + "0ntOE6PSdMl_EXB9gdgIv": { + "title": "Writing Skills", + "description": "Good writing skills are crucial for software developers because they enhance communication, documentation, and collaboration. Clear and concise writing ensures that complex technical concepts are easily understood by team members, stakeholders, and end-users. Effective documentation, including comments, user manuals, and technical guides, helps maintain code quality and facilitates future maintenance and updates. Writing also plays a key role in crafting clear commit messages, bug reports, and project proposals, which are essential for smooth project management and collaboration. 
In an increasingly global and remote work environment, the ability to articulate ideas and solutions in writing becomes even more vital, ensuring that everyone is on the same page and can contribute effectively to the project's success.\n\nLearn more from the following resources:", + "links": [ + { + "title": "Putting Ideas into Words", + "url": "https://www.paulgraham.com/words.html", + "type": "article" + }, + { + "title": "Patterns in confusing explanations", + "url": "https://jvns.ca/blog/confusing-explanations/", + "type": "article" + }, + { + "title": "Signposting: How to reduce cognitive load for your reader", + "url": "https://newsletter.weskao.com/p/sign-posting-how-to-reduce-cognitive", + "type": "article" + } + ] + }, + "c0w241EL0Kh4ek76IgsEs": { + "title": "Blog Posts", + "description": "", + "links": [] + }, + "X0xUzEP0S6SyspvqyoDDk": { + "title": "Technical Documentation", + "description": "", + "links": [] + }, + "urbtoZtuJryK-6TJ3lmRN": { + "title": "Social Media", + "description": "", + "links": [] + }, + "PFjF2PnYpSbvd24jb_D2G": { + "title": "Presentation Techniques", + "description": "", + "links": [] + }, + "vH_ECVYSQ3Fg-DASO1EgK": { + "title": "Rules of Three", + "description": "", + "links": [] + }, + "_FZNqUKgPkzbgOPoiSLSU": { + "title": "PechaKucha", + "description": "", + "links": [] + }, + "Y0GUZynSXjITDOA-TP6LH": { + "title": "Storytelling", + "description": "", + "links": [] + }, + "uwvvWmcZnFqLoHRDFXFyW": { + "title": "Mind Mapping", + "description": "", + "links": [] + }, + "SNhrYZsmyDHN_JWh0eZy2": { + "title": "Visualization", + "description": "", + "links": [] + }, + "iKYmUvWFT_C0wnO0iB6gM": { + "title": "Engaging Audience", + "description": "", + "links": [] + }, + "VTGsmk3p4RVXiNhDmx2l8": { + "title": "The Hook", + "description": "", + "links": [] + }, + "LixiZj3-QcmQgGAqaaDr6": { + "title": "Contrast Principle", + "description": "", + "links": [] + }, + "tbIAEStaoVWnEWbdk7EGc": { + "title": "Handouts", + "description": "", + 
"links": [] + }, + "2iRt9E42i_ej4woKCW_s2": { + "title": "Repetition & Reinforcement", + "description": "", + "links": [] + }, + "Xf-Lxi1cKReo4yDLylBbL": { + "title": "Handling Q&A", + "description": "", + "links": [] + }, + "UdUDngq425NYSvIuOd7St": { + "title": "Active Listening", + "description": "", + "links": [] + }, + "jyScVS-sYMcZcH3hOwbMK": { + "title": "Anticipate Questions", + "description": "", + "links": [] + }, + "rhs6QwxZ7PZthLfi38FJn": { + "title": "Be Concise", + "description": "", + "links": [] + }, + "VSOdD9KKF_Qz8nbRdHNo3": { + "title": "Managing Difficult Questions", + "description": "", + "links": [] + }, + "yJygbi8cnp3oz2EFl2MR0": { + "title": "Community Engagement", + "description": "", + "links": [] + }, + "C2w8R4tNy2lOhhWU9l32s": { + "title": "Event Participation", + "description": "", + "links": [] + }, + "gvMbo22eRxqOzszc_w4Gz": { + "title": "Online Communities", + "description": "", + "links": [] + }, + "SJf9e7SQnzYVHoRWl0i6P": { + "title": "Networking", + "description": "", + "links": [] + }, + "aSYXa25_0O2qQl1O-N3xl": { + "title": "Basic Programming Skills", + "description": "", + "links": [] + }, + "KdFYmj36M2jrGfsYkukpo": { + "title": "IDEs", + "description": "", + "links": [] + }, + "j5nNSYI8s-cH8EA6G1EWY": { + "title": "VS Code", + "description": "", + "links": [] + }, + "NCnKS435DCl-8vilr1_XE": { + "title": "JetBrains IDEs", + "description": "", + "links": [] + }, + "sUEZHmKxtjO9gXKJoOdbF": { + "title": "APIs & SDKs", + "description": "", + "links": [] + }, + "pqp9FLRJRDDEnni72KHmv": { + "title": "Writing Documentation", + "description": "", + "links": [] + }, + "h6R3Vyq0U8t8WL3G5xC2l": { + "title": "Building SDKs", + "description": "", + "links": [] + }, + "7Q6_tdRaeb8BgreG8Mw-a": { + "title": "Understanding APIs", + "description": "", + "links": [] + }, + "x6KFf4akYuUYkNhrUwj1r": { + "title": "Version Control", + "description": "", + "links": [] + }, + "a-i1mgF3VAxbbpA1gMWyK": { + "title": "Git", + "description": "", + "links": 
[] + }, + "8O1AgUKXe35kdiYD02dyt": { + "title": "GitHub", + "description": "", + "links": [] + }, + "J2WunUJBzYw_D5cQH_pnH": { + "title": "Managing Discussions", + "description": "", + "links": [] + }, + "vCoEJyS4qMWTTiL17PmWr": { + "title": "Issues & Pull Requests", + "description": "", + "links": [] + }, + "x3qTqhR1uA5CXqULIJqL8": { + "title": "Labelling and Cleanup", + "description": "", + "links": [] + }, + "v_lDqjtxjRK67GsbtxXgT": { + "title": "Milestones & Releases", + "description": "", + "links": [] + }, + "7ZFTmlQHYQKNjKprwV8zk": { + "title": "Public Backlog", + "description": "", + "links": [] + }, + "GvmXdWiwITgWzx_f5_ick": { + "title": "Building a Community", + "description": "", + "links": [] + }, + "UKi3waPx2pozvZf2aQ52s": { + "title": "Identifying Audience", + "description": "", + "links": [] + }, + "WItTQ1QboHoRkqeHJzCM9": { + "title": "Platform Selection", + "description": "", + "links": [] + }, + "hPJM4QXxeqTxPlvJ2_D3_": { + "title": "Initial Outreach", + "description": "", + "links": [] + }, + "4ZvzY_xGO5BZOmfqj0TTq": { + "title": "Community Guidelines", + "description": "", + "links": [] + }, + "er9ukuBvY-F4F8S1qbbjU": { + "title": "Code of Conduct", + "description": "", + "links": [] + }, + "8I59U-nnkhQv8ldRuqQlb": { + "title": "Rules and Policies", + "description": "", + "links": [] + }, + "-6cf3RT4-cbwvLYIkCosF": { + "title": "Community Management", + "description": "", + "links": [] + }, + "d_dKF87OnRWoWj3Bf1uFf": { + "title": "Moderation", + "description": "", + "links": [] + }, + "8ls5kQvDgvwLbIrwYg1OL": { + "title": "Conflict Resolution", + "description": "", + "links": [] + }, + "6yLt4Ia52Jke9i5kJQvAC": { + "title": "Encouraging Participation", + "description": "", + "links": [] + }, + "Nta8pUncwNQxJlqF6h1AT": { + "title": "Recognition Programs", + "description": "", + "links": [] + }, + "usorG1GkkvGAZ0h_AGHVk": { + "title": "Event Management", + "description": "", + "links": [] + }, + "RQk3uOikjQYRyTu7vuAG7": { + "title": "Planning", + 
"description": "", + "links": [] + }, + "C9--J8gGZENQKn-QcZK4M": { + "title": "Promotion", + "description": "", + "links": [] + }, + "1m1keusP-PTjEwy0dCJJL": { + "title": "Execution", + "description": "", + "links": [] + }, + "kmcOYDvu1vq7AQPllZvv0": { + "title": "Post Event Followup", + "description": "", + "links": [] + }, + "_Qe_0RksYpX7Spzgc6Fw3": { + "title": "Surveys", + "description": "", + "links": [] + }, + "oWXfov-mOF47d7Vffyp3t": { + "title": "Feedback Collection", + "description": "", + "links": [] + }, + "B1IdobUaGeBLI2CgsFg8H": { + "title": "Blogging", + "description": "", + "links": [] + }, + "uzMfR6Yd9Jvjn8i5RpC1Q": { + "title": "Topic Selection", + "description": "", + "links": [] + }, + "l6-mzgDTXg2EPkyZyZ6TC": { + "title": "Writing Process", + "description": "", + "links": [] + }, + "zVBy8o66FdZueg-2v3gaw": { + "title": "SEO Basics", + "description": "", + "links": [] + }, + "2QiQW9tygpeaES8Wp1Kw6": { + "title": "Guest Blogging", + "description": "", + "links": [] + }, + "nlzI2fG3SwC5Q42qXcXPX": { + "title": "Cross-Promotion", + "description": "", + "links": [] + }, + "w1ZooDCDOkbL1EAa5Hx3d": { + "title": "Collaborations", + "description": "", + "links": [] + }, + "bRzzc137OlmivEGdhv5Ew": { + "title": "Video Production", + "description": "", + "links": [] + }, + "6zK9EJDKBC89UArY7sfgs": { + "title": "Editing", + "description": "", + "links": [] + }, + "_QHUpFW4kZ5SBaP7stXY2": { + "title": "Recording", + "description": "", + "links": [] + }, + "rLDRkUht9K1m4noMAIgKU": { + "title": "Scripting", + "description": "", + "links": [] + }, + "OUWVqJImrmsZpAtRrUYNH": { + "title": "Animations & Graphics", + "description": "", + "links": [] + }, + "pEMNcm_wJNmOkWm57L1pA": { + "title": "Video Production", + "description": "", + "links": [] + }, + "iPaFjacZ7hSWriSEqUmHc": { + "title": "Live Streaming", + "description": "", + "links": [] + }, + "Mdp4bBlhVbGohJkVlsDar": { + "title": "X", + "description": "", + "links": [] + }, + "OY5rn3XTbmz4LzSLRcNmw": { + 
"title": "YouTube", + "description": "", + "links": [] + }, + "QlWam-kHv8G_-yx3ClP9s": { + "title": "Twitch", + "description": "", + "links": [] + }, + "meZDgDJMy4aH5VqS-NJL4": { + "title": "Streamyard", + "description": "", + "links": [] + }, + "D7_iNPEKxFv0gw-fsNNrZ": { + "title": "Animations & Graphics", + "description": "", + "links": [] + }, + "8aiLVG4clveX1Luiehvxr": { + "title": "Technical Setup", + "description": "", + "links": [] + }, + "tRywPj_2VyjSLjxYJtYZd": { + "title": "Video", + "description": "", + "links": [] + }, + "7y4vHk_jgNTW6Q1WoqYDc": { + "title": "Audio", + "description": "", + "links": [] + }, + "71BBFjaON1NJi4rOHKW6K": { + "title": "Social Media", + "description": "", + "links": [] + }, + "6BqkO4XOspJg0-9GNLtUp": { + "title": "X", + "description": "", + "links": [] + }, + "6UR59TigEZ0NaixbaUIqn": { + "title": "LinkedIn", + "description": "", + "links": [] + }, + "ZMManUnO-9EQqi-xmLt5r": { + "title": "Facebook", + "description": "", + "links": [] + }, + "UAkGV9_I6qiKZMr1aqQCm": { + "title": "Instagram", + "description": "", + "links": [] + }, + "TGXPxTFv9EhsfS5uWR5gS": { + "title": "Content Strategy", + "description": "", + "links": [] + }, + "lG1FH7Q-YX5pG-7mMtbSR": { + "title": "Analytics and Optimization", + "description": "", + "links": [] + }, + "l2P44pL9eF8xarBwC_CVO": { + "title": "Consistent Posting", + "description": "", + "links": [] + }, + "WIH216mHg2OiSebzQYI-f": { + "title": "Engaging Content", + "description": "", + "links": [] + }, + "ZWkpgvXIzjN3_fOyhVEv0": { + "title": "Creating Brand Voice", + "description": "", + "links": [] + }, + "NWxAxiDgvlGpvqdkzqnOH": { + "title": "Tracking Engagement", + "description": "", + "links": [] + }, + "46iMfYgC7fCZLCy-qzl1B": { + "title": "Data-Driven Strategy Shift", + "description": "", + "links": [] + }, + "g3M6nfLr0DMcn-NCFF7nZ": { + "title": "Documentation", + "description": "", + "links": [] + }, + "RLf08xKMjlt6S9-MFiTo-": { + "title": "User Guides", + "description": "", + "links": [] 
+ }, + "7IJO_jDpZUdlr_n5rBJ6O": { + "title": "API References", + "description": "", + "links": [] + }, + "6ubk20TBIL3_VrrRMe8tO": { + "title": "Tutorials", + "description": "", + "links": [] + }, + "xy9Kqtwrh5IhYHaqEhvl_": { + "title": "Sample Projects", + "description": "", + "links": [] + }, + "pGJrCyYhLLGUnv6LxpYUe": { + "title": "Code Samples", + "description": "", + "links": [] + }, + "mWcMSKnUQamUykBxND-Ju": { + "title": "Example Apps", + "description": "", + "links": [] + }, + "omnUSgUHZg2DmnOUJ0Xo1": { + "title": "Use Case Based", + "description": "", + "links": [] + }, + "LwNa3u9Lf88ju5w7CvSN5": { + "title": "Support", + "description": "", + "links": [] + }, + "oGTIvAY3zYgoiC63FQRSd": { + "title": "Forums", + "description": "", + "links": [] + }, + "j6tr3mAaKqTuEFTRSCsrK": { + "title": "Issue Tracking", + "description": "", + "links": [] + }, + "4GCQ3stXxW1HrlAVC0qDl": { + "title": "FAQs", + "description": "", + "links": [] + }, + "weyCcboaekqf5NuVAOxfU": { + "title": "Office Hours", + "description": "", + "links": [] + }, + "1fc0iWwOkheUJ7d0np86L": { + "title": "Webinars", + "description": "", + "links": [] + }, + "DCj1teu8Hp82EKnakFRPn": { + "title": "Key Metrics", + "description": "", + "links": [] + }, + "afR1VviBs2w0k8UmP38vn": { + "title": "Community Growth", + "description": "", + "links": [] + }, + "RXj0yB7KsIOM5whwtyBBU": { + "title": "Engagement Rates", + "description": "", + "links": [] + }, + "yhDBZfUAjumFHpUZtmLg3": { + "title": "Content Performance", + "description": "", + "links": [] + }, + "AwMwMU9hg_gCKPP4tykHb": { + "title": "Developer Satisfaction", + "description": "", + "links": [] + }, + "psk3bo-nSskboAoVTjlpz": { + "title": "Tools", + "description": "", + "links": [] + }, + "8xrhjG9qmbsoBC3F8zS-b": { + "title": "Google Analytics", + "description": "", + "links": [] + }, + "x8RIrK2VB-LBFbt6hAcQb": { + "title": "Social Media Analytics", + "description": "", + "links": [] + }, + "AL3-UzREwTpsADTU0YtRW": { + "title": "Platform Specific 
Analytics", + "description": "", + "links": [] + }, + "N1uh7dVKfSPT7w0MlKzWO": { + "title": "Reporting", + "description": "", + "links": [] + }, + "U2QKCu6TvDuxSNRfdM74n": { + "title": "Regular Reports", + "description": "", + "links": [] + }, + "0dRnUlgze87eq2FVU_mWp": { + "title": "Data Visualization", + "description": "", + "links": [] + }, + "mh1BZDVkc-VwA8aQAmDhO": { + "title": "Insights & Recommendations", + "description": "", + "links": [] + }, + "wcfrXA9zWZ4Taey7mR2yG": { + "title": "Thought Leadership", + "description": "", + "links": [] + }, + "I7RBMfoD30OstVLeTrMs9": { + "title": "Publishing", + "description": "", + "links": [] + }, + "QrDqUGWC2t9r6p4aR8I5g": { + "title": "Media Appearances", + "description": "", + "links": [] + }, + "ue0NaNnNpF7UhvJ8j0Yuo": { + "title": "Conference Speaking", + "description": "", + "links": [] + }, + "HN2gNsYYRLVOOdy_r8FKJ": { + "title": "Building a Personal Brand", + "description": "", + "links": [] + }, + "4ygpqUK70hI5r1AmmfMZq": { + "title": "Networking Strategies", + "description": "", + "links": [] + }, + "HIIqlnZ4Vad_1o4F0lkv-": { + "title": "Meetups", + "description": "", + "links": [] + }, + "lMbj0WdoEia_tdpO5rXph": { + "title": "Online Communities", + "description": "", + "links": [] + }, + "NhQUDVA9q1hnV44EKapbU": { + "title": "Open-Source", + "description": "", + "links": [] + }, + "bwwk6ESNyEJa3fCAIKPwh": { + "title": "Continuous Learning", + "description": "", + "links": [] + } +} \ No newline at end of file diff --git a/public/roadmap-content/frontend.json b/public/roadmap-content/frontend.json new file mode 100644 index 000000000..c2e5e42c6 --- /dev/null +++ b/public/roadmap-content/frontend.json @@ -0,0 +1,3138 @@ +{ + "VlNNwIEDWqQXtqkHWJYzC": { + "title": "Internet", + "description": "The Internet is a global network of computers connected to each other which communicate through a standardized set of protocols.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Learn How the 
Web Works", + "url": "https://internetfundamentals.com", + "type": "website" + }, + { + "title": "How does the Internet Work?", + "url": "https://cs.fyi/guide/how-does-internet-work", + "type": "article" + }, + { + "title": "The Internet Explained", + "url": "https://www.vox.com/2014/6/16/18076282/the-internet", + "type": "article" + }, + { + "title": "How Does the Internet Work?", + "url": "http://web.stanford.edu/class/msande91si/www-spr04/readings/week1/InternetWhitepaper.htm", + "type": "article" + }, + { + "title": "Introduction to Internet", + "url": "/guides/what-is-internet", + "type": "article" + }, + { + "title": "How does the Internet work?", + "url": "https://www.youtube.com/watch?v=x3c1ih2NJEg", + "type": "video" + }, + { + "title": "How the Internet Works in 5 Minutes", + "url": "https://www.youtube.com/watch?v=7_LPdttKXPc", + "type": "video" + } + ] + }, + "yCnn-NfSxIybUQ2iTuUGq": { + "title": "How does the internet work?", + "description": "The Internet is a global network of computers connected to each other which communicate through a standardized set of protocols.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "How does the Internet Work?", + "url": "https://cs.fyi/guide/how-does-internet-work", + "type": "article" + }, + { + "title": "How Does the Internet Work? 
MDN Docs", + "url": "https://developer.mozilla.org/en-US/docs/Learn/Common_questions/How_does_the_Internet_work", + "type": "article" + }, + { + "title": "Introduction to Internet", + "url": "/guides/what-is-internet", + "type": "article" + }, + { + "title": "How does the Internet work?", + "url": "https://www.youtube.com/watch?v=TNQsmPf24go", + "type": "video" + }, + { + "title": "How the Internet Works in 5 Minutes", + "url": "https://www.youtube.com/watch?v=7_LPdttKXPc", + "type": "video" + } + ] + }, + "R12sArWVpbIs_PHxBqVaR": { + "title": "What is HTTP?", + "description": "HTTP is the `TCP/IP` based application layer communication protocol which standardizes how the client and server communicate with each other. HTTP follows a classical \"Client-Server model\" with a client opening a connection request, then waiting until it receives a response. HTTP is a stateless protocol, that means that the server does not keep any data (state) between two requests.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Everything you need to know about HTTP", + "url": "https://cs.fyi/guide/http-in-depth", + "type": "article" + }, + { + "title": "What is HTTP?", + "url": "https://www.cloudflare.com/en-gb/learning/ddos/glossary/hypertext-transfer-protocol-http/", + "type": "article" + }, + { + "title": "How HTTPS Works ...in a comic!", + "url": "https://howhttps.works", + "type": "article" + }, + { + "title": "An overview of HTTP", + "url": "https://developer.mozilla.org/en-US/docs/Web/HTTP/Overview", + "type": "article" + }, + { + "title": "HTTP/3 From A To Z: Core Concepts", + "url": "https://www.smashingmagazine.com/2021/08/http3-core-concepts-part1/", + "type": "article" + }, + { + "title": "HTTP/3 Is Now a Standard: Why Use It and How to Get Started", + "url": "https://thenewstack.io/http-3-is-now-a-standard-why-use-it-and-how-to-get-started/", + "type": "article" + }, + { + "title": "HTTP Crash Course & Exploration", + "url": 
"https://www.youtube.com/watch?v=iYM2zFP3Zn0", + "type": "video" + } + ] + }, + "ZhSuu2VArnzPDp6dPQQSC": { + "title": "What is Domain Name?", + "description": "A domain name is a unique, easy-to-remember address used to access websites, such as ‘[google.com](http://google.com)’, and ‘[facebook.com](http://facebook.com)’. Users can connect to websites using domain names thanks to the Domain Name System (DNS).\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is a Domain Name?", + "url": "https://developer.mozilla.org/en-US/docs/Learn/Common_questions/What_is_a_domain_name", + "type": "article" + }, + { + "title": "What is a Domain Name? | Domain name vs. URL", + "url": "https://www.cloudflare.com/en-gb/learning/dns/glossary/what-is-a-domain-name/", + "type": "article" + }, + { + "title": "A Beginners Guide to How Domain Names Work", + "url": "https://www.youtube.com/watch?v=Y4cRx19nhJk", + "type": "video" + } + ] + }, + "aqMaEY8gkKMikiqleV5EP": { + "title": "What is hosting?", + "description": "Web hosting is an online service that allows you to publish your website files onto the internet. So, anyone who has access to the internet has access to your website.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What Is Web Hosting? Explained", + "url": "https://www.youtube.com/watch?v=htbY9-yggB0", + "type": "video" + }, + { + "title": "Different Types of Web Hosting Explained", + "url": "https://www.youtube.com/watch?v=AXVZYzw8geg", + "type": "video" + }, + { + "title": "Where to Host a Fullstack Project on a Budget", + "url": "https://www.youtube.com/watch?v=Kx_1NYYJS7Q", + "type": "video" + } + ] + }, + "hkxw9jPGYphmjhTjw8766": { + "title": "DNS and how it works?", + "description": "The Domain Name System (DNS) is the phonebook of the Internet. Humans access information online through domain names, like [nytimes.com](http://nytimes.com) or [espn.com](http://espn.com). 
Web browsers interact through Internet Protocol (IP) addresses. DNS translates domain names to IP addresses so browsers can load Internet resources.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is DNS?", + "url": "https://www.cloudflare.com/en-gb/learning/dns/what-is-dns/", + "type": "article" + }, + { + "title": "Mess with DNS - DNS Playground", + "url": "https://messwithdns.net/", + "type": "article" + }, + { + "title": "How DNS works (comic)", + "url": "https://howdns.works/", + "type": "article" + }, + { + "title": "Explore top posts about DNS", + "url": "https://app.daily.dev/tags/dns?ref=roadmapsh", + "type": "article" + }, + { + "title": "DNS and How does it Work?", + "url": "https://www.youtube.com/watch?v=Wj0od2ag5sk", + "type": "video" + }, + { + "title": "DNS Records", + "url": "https://www.youtube.com/watch?v=7lxgpKh_fRY", + "type": "video" + }, + { + "title": "When to add glue records to DNS settings", + "url": "https://www.youtube.com/watch?v=e48AyJOA9W8", + "type": "video" + }, + { + "title": "DNS Records for Newbies - How To Manage Website Records", + "url": "https://www.youtube.com/watch?v=YV5tkQYcvfg", + "type": "video" + } + ] + }, + "P82WFaTPgQEPNp5IIuZ1Y": { + "title": "Browsers and how they work?", + "description": "A web browser is a software application that enables a user to access and display web pages or other online content through its graphical user interface.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "How Browsers Work", + "url": "https://www.html5rocks.com/en/tutorials/internals/howbrowserswork/", + "type": "article" + }, + { + "title": "Role of Rendering Engine in Browsers", + "url": "https://www.browserstack.com/guide/browser-rendering-engine", + "type": "article" + }, + { + "title": "Populating the Page: How Browsers Work", + "url": "https://developer.mozilla.org/en-US/docs/Web/Performance/How_browsers_work", + "type": "article" + }, + { + "title": "Explore 
top posts about Browsers", + "url": "https://app.daily.dev/tags/browsers?ref=roadmapsh", + "type": "article" + }, + { + "title": "How Do Web Browsers Work?", + "url": "https://www.youtube.com/watch?v=WjDrMKZWCt0", + "type": "video" + } + ] + }, + "yWG2VUkaF5IJVVut6AiSy": { + "title": "HTML", + "description": "HTML stands for HyperText Markup Language. It is used on the frontend and gives the structure to the webpage which you can style using CSS and make interactive using JavaScript.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Interactive HTML Course", + "url": "https://github.com/denysdovhan/learnyouhtml", + "type": "opensource" + }, + { + "title": "W3Schools: Learn HTML", + "url": "https://www.w3schools.com/html/html_intro.asp", + "type": "article" + }, + { + "title": "htmlreference.io: All HTML elements at a glance", + "url": "https://htmlreference.io/", + "type": "article" + }, + { + "title": "HTML For Beginners The Easy Way", + "url": "https://html.com", + "type": "article" + }, + { + "title": "Web Development Basics", + "url": "https://internetingishard.netlify.app/html-and-css/index.html", + "type": "article" + }, + { + "title": "You don't need JavaScript for that", + "url": "https://www.htmhell.dev/adventcalendar/2023/2/", + "type": "article" + }, + { + "title": "Explore top posts about HTML", + "url": "https://app.daily.dev/tags/html?ref=roadmapsh", + "type": "article" + }, + { + "title": "HTML Full Course for Beginners", + "url": "https://youtu.be/mJgBOIoGihA", + "type": "video" + }, + { + "title": "HTML Full Course - Build a Website Tutorial", + "url": "https://www.youtube.com/watch?v=pQN-pnXPaVg", + "type": "video" + }, + { + "title": "HTML Tutorial for Beginners: HTML Crash Course", + "url": "https://www.youtube.com/watch?v=qz0aGYrrlhU", + "type": "video" + } + ] + }, + "PCirR2QiFYO89Fm-Ev3o1": { + "title": "Learn the basics", + "description": "HTML stands for HyperText Markup Language. 
It is used on the frontend and gives the structure to the webpage which you can style using CSS and make interactive using JavaScript.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "W3Schools: Learn HTML", + "url": "https://www.w3schools.com/html/html_intro.asp", + "type": "article" + }, + { + "title": "MDN Docs: Getting Started with HTML ", + "url": "https://developer.mozilla.org/en-US/docs/Learn/HTML/Introduction_to_HTML/Getting_started", + "type": "article" + }, + { + "title": "web.dev: Learn HTML", + "url": "https://web.dev/learn/html", + "type": "article" + }, + { + "title": "HTML Cheatsheet", + "url": "https://htmlcheatsheet.com", + "type": "article" + }, + { + "title": "HTML Full Course - Build a Website Tutorial", + "url": "https://www.youtube.com/watch?v=pQN-pnXPaVg", + "type": "video" + }, + { + "title": "HTML Tutorial for Beginners: HTML Crash Course", + "url": "https://www.youtube.com/watch?v=qz0aGYrrlhU", + "type": "video" + } + ] + }, + "z8-556o-PaHXjlytrawaF": { + "title": "Writing Semantic HTML", + "description": "Semantic element clearly describes its meaning to both the browser and the developer. In HTML, semantic element are the type of elements that can be used to define different parts of a web page such as `
<form>`, `<table>`, `<article>`, `<header>`, `<footer>
`, etc.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Guide to Writing Semantic HTML", + "url": "https://cs.fyi/guide/writing-semantic-html", + "type": "article" + }, + { + "title": "W3Schools: Semantic HTML", + "url": "https://www.w3schools.com/html/html5_semantic_elements.asp", + "type": "article" + }, + { + "title": "How To Write Semantic HTML", + "url": "https://hackernoon.com/how-to-write-semantic-html-dkq3ulo", + "type": "article" + }, + { + "title": "Semantic HTML: What It Is and How It Improves Your Site", + "url": "https://blog.hubspot.com/website/semantic-html", + "type": "article" + }, + { + "title": "Semantic Markup", + "url": "https://html.com/semantic-markup", + "type": "article" + }, + { + "title": "Semantic HTML - web.dev", + "url": "https://web.dev/learn/html/semantic-html/", + "type": "article" + }, + { + "title": "Explore top posts about HTML", + "url": "https://app.daily.dev/tags/html?ref=roadmapsh", + "type": "article" + } + ] + }, + "V5zucKEHnIPPjwHqsMPHF": { + "title": "Forms and Validations", + "description": "Before submitting data to the server, it is important to ensure all required form controls are filled out, in the correct format. 
This is called client-side form validation, and helps ensure data submitted matches the requirements set forth in the various form controls.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "MDN Web Docs: Client-side form validation", + "url": "https://developer.mozilla.org/en-US/docs/Learn/Forms/Form_validation", + "type": "article" + }, + { + "title": "Learn Forms by web.dev", + "url": "https://web.dev/learn/forms/", + "type": "article" + }, + { + "title": "W3Schools: JavaScript Form Validation", + "url": "https://www.w3schools.com/js/js_validation.asp", + "type": "article" + } + ] + }, + "iJIqi7ngpGHWAqtgdjgxB": { + "title": "Accessibility", + "description": "Web accessibility means that websites, tools, and technologies are designed and developed in such a way that people with disabilities can use them easily.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Developing for Web Accessibility by W3C WAI", + "url": "https://www.w3.org/WAI/tips/developing/", + "type": "article" + }, + { + "title": "Accessibility Tutorial", + "url": "https://www.w3schools.com/accessibility/index.php", + "type": "article" + }, + { + "title": "A Complete Guide To Accessible Front-End Components", + "url": "https://www.smashingmagazine.com/2021/03/complete-guide-accessible-front-end-components/", + "type": "article" + }, + { + "title": "MDN Accessibility", + "url": "https://developer.mozilla.org/en-US/docs/Web/Accessibility", + "type": "article" + }, + { + "title": "Accessibility for Developers by Google", + "url": "https://web.dev/accessibility", + "type": "article" + }, + { + "title": "Web Accessibility by Udacity", + "url": "https://www.udacity.com/course/web-accessibility--ud891", + "type": "article" + }, + { + "title": "Accessibility as an Essential Part of the Inclusive Developer Experience", + "url": "https://thenewstack.io/accessibility-as-an-essential-part-of-the-inclusive-developer-experience/", + "type": "article" + 
}, + { + "title": "Explore top posts about Accessibility", + "url": "https://app.daily.dev/tags/accessibility?ref=roadmapsh", + "type": "article" + }, + { + "title": "Complete Playlist on Accessibility", + "url": "https://youtube.com/playlist?list=PLNYkxOF6rcICWx0C9LVWWVqvHlYJyqw7g", + "type": "video" + } + ] + }, + "mH_qff8R7R6eLQ1tPHLgG": { + "title": "SEO Basics", + "description": "SEO or Search Engine Optimization is the technique used to optimize your website for better rankings on search engines such as Google, Bing etc.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "SEO Guide", + "url": "https://github.com/seo/guide", + "type": "opensource" + }, + { + "title": "Google Search Central — SEO Docs", + "url": "https://developers.google.com/search/docs", + "type": "article" + }, + { + "title": "8 Must-Know SEO Best Practices For Developers", + "url": "https://neilpatel.com/blog/seo-developers/", + "type": "article" + }, + { + "title": "SEO for Developers", + "url": "https://medium.com/welldone-software/seo-for-developers-a-quick-overview-5b5b7ce34679", + "type": "article" + }, + { + "title": "Learning SEO", + "url": "https://learningseo.io/", + "type": "article" + }, + { + "title": "Explore top posts about SEO", + "url": "https://app.daily.dev/tags/seo?ref=roadmapsh", + "type": "article" + }, + { + "title": "Complete SEO Course for Beginners", + "url": "https://www.youtube.com/watch?v=xsVTqzratPs", + "type": "video" + }, + { + "title": "SEO Expert Course", + "url": "https://www.youtube.com/watch?v=SnxeXZpZkI0", + "type": "video" + } + ] + }, + "ZhJhf1M2OphYbEmduFq-9": { + "title": "CSS", + "description": "CSS or Cascading Style Sheets is the language used to style the frontend of any website. 
CSS is a cornerstone technology of the World Wide Web, alongside HTML and JavaScript.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "The Odin Project", + "url": "https://www.theodinproject.com//", + "type": "article" + }, + { + "title": "What The Flexbox!", + "url": "https://flexbox.io/", + "type": "article" + }, + { + "title": "W3Schools — Learn CSS", + "url": "https://www.w3schools.com/css/", + "type": "article" + }, + { + "title": "cssreference.io: All CSS properties at a glance", + "url": "https://cssreference.io/", + "type": "article" + }, + { + "title": "Web.dev by Google — Learn CSS", + "url": "https://web.dev/learn/css/", + "type": "article" + }, + { + "title": "Learn to Code HTML & CSS", + "url": "https://learn.shayhowe.com/html-css/building-your-first-web-page/", + "type": "article" + }, + { + "title": "Joshw Comeaus CSS Hack Blog Posts", + "url": "https://www.joshwcomeau.com/", + "type": "article" + }, + { + "title": "100 Days CSS Challenge", + "url": "https://100dayscss.com", + "type": "article" + }, + { + "title": "Explore top posts about CSS", + "url": "https://app.daily.dev/tags/css?ref=roadmapsh", + "type": "article" + }, + { + "title": "CSS Complete Course", + "url": "https://youtu.be/n4R2E7O-Ngo", + "type": "video" + }, + { + "title": "CSS Crash Course For Absolute Beginners", + "url": "https://www.youtube.com/watch?v=yfoY53QXEnI", + "type": "video" + }, + { + "title": "HTML and CSS Tutorial", + "url": "https://www.youtube.com/watch?v=D-h8L5hgW-w", + "type": "video" + }, + { + "title": "CSS Masterclass - Tutorial & Course for Beginners", + "url": "https://www.youtube.com/watch?v=FqmB-Zj2-PA", + "type": "video" + } + ] + }, + "YFjzPKWDwzrgk2HUX952L": { + "title": "Learn the basics", + "description": "CSS or Cascading Style Sheets is the language used to style the frontend of any website. 
CSS is a cornerstone technology of the World Wide Web, alongside HTML and JavaScript.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "W3Schools — Learn CSS", + "url": "https://www.w3schools.com/css/", + "type": "article" + }, + { + "title": "web.dev — Learn CSS", + "url": "https://web.dev/learn/css/", + "type": "article" + }, + { + "title": "Learn to Code HTML & CSS", + "url": "https://learn.shayhowe.com/html-css/building-your-first-web-page/", + "type": "article" + }, + { + "title": "CSS Crash Course For Absolute Beginners", + "url": "https://www.youtube.com/watch?v=yfoY53QXEnI", + "type": "video" + }, + { + "title": "HTML and CSS Tutorial", + "url": "https://www.youtube.com/watch?v=D-h8L5hgW-w", + "type": "video" + }, + { + "title": "CSS Masterclass - Tutorial & Course for Beginners", + "url": "https://www.youtube.com/watch?v=FqmB-Zj2-PA", + "type": "video" + } + ] + }, + "dXeYVMXv-3MRQ1ovOUuJW": { + "title": "Making Layouts", + "description": "Float, grid, flexbox, positioning, display and box model are some of the key topics that are used for making layouts. 
Use the resources below to learn about these topics:\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Learn CSS Grid for free", + "url": "https://scrimba.com/learn/cssgrid", + "type": "course" + }, + { + "title": "Learn and Practice Flexbox", + "url": "https://flexboxfroggy.com/", + "type": "article" + }, + { + "title": "Game for learning CSS Grid", + "url": "https://cssgridgarden.com/", + "type": "article" + }, + { + "title": "All about Floats", + "url": "https://css-tricks.com/all-about-floats/", + "type": "article" + }, + { + "title": "Positioning Types: How Do They Differ?", + "url": "https://css-tricks.com/absolute-relative-fixed-positioining-how-do-they-differ/", + "type": "article" + }, + { + "title": "The Box Model", + "url": "https://developer.mozilla.org/en-US/docs/Learn/CSS/Building_blocks/The_box_model", + "type": "article" + }, + { + "title": "A Complete Guide to Flexbox", + "url": "https://css-tricks.com/snippets/css/a-guide-to-flexbox", + "type": "article" + }, + { + "title": "A Complete Guide to Grid", + "url": "https://css-tricks.com/snippets/css/complete-guide-grid", + "type": "article" + }, + { + "title": "Learn CSS Grid - Course", + "url": "https://cssgrid.io/", + "type": "article" + }, + { + "title": "Get on the Grid at Last with the CSS Grid Layout Module", + "url": "https://thenewstack.io/get-grid-last-css-grid-template-markup/", + "type": "article" + } + ] + }, + "TKtWmArHn7elXRJdG6lDQ": { + "title": "Responsive Design", + "description": "Responsive Web Designing is the technique to make your webpages look good on all screen sizes. There are certain techniques used to achieve that e.g. 
CSS media queries, percentage widths, min or max widths heights etc.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Responsive Web Design", + "url": "https://www.w3schools.com/css/css_rwd_intro.asp", + "type": "article" + }, + { + "title": "Learn Responsive Design", + "url": "https://web.dev/learn/design/", + "type": "article" + }, + { + "title": "The Beginner’s Guide to Responsive Web Design", + "url": "https://kinsta.com/blog/responsive-web-design/", + "type": "article" + }, + { + "title": "The guide to responsive web design in 2022", + "url": "https://webflow.com/blog/responsive-web-design", + "type": "article" + }, + { + "title": "5 simple tips to making responsive layouts the easy way", + "url": "https://www.youtube.com/watch?v=VQraviuwbzU", + "type": "video" + }, + { + "title": "Introduction To Responsive Web Design", + "url": "https://www.youtube.com/watch?v=srvUrASNj0s", + "type": "video" + } + ] + }, + "ODcfFEorkfJNupoQygM53": { + "title": "JavaScript", + "description": "JavaScript allows you to add interactivity to your pages. 
Common examples that you may have seen on the websites are sliders, click interactions, popups and so on.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "You Dont Know JS Yet (book series) ", + "url": "https://github.com/getify/You-Dont-Know-JS", + "type": "opensource" + }, + { + "title": "Learn the basics of JavaScript", + "url": "https://github.com/workshopper/javascripting", + "type": "opensource" + }, + { + "title": "Visit Dedicated JavaScript Roadmap", + "url": "/javascript", + "type": "article" + }, + { + "title": "W3Schools – JavaScript Tutorial", + "url": "https://www.w3schools.com/js/", + "type": "article" + }, + { + "title": "The Modern JavaScript Tutorial", + "url": "https://javascript.info/", + "type": "article" + }, + { + "title": "Learn JavaScript: Covered many topics", + "url": "https://www.javascripttutorial.net/", + "type": "article" + }, + { + "title": "Eloquent JavaScript textbook", + "url": "https://eloquentjavascript.net/", + "type": "article" + }, + { + "title": "Build 30 Javascript projects in 30 days", + "url": "https://javascript30.com/", + "type": "article" + }, + { + "title": "Explore top posts about JavaScript", + "url": "https://app.daily.dev/tags/javascript?ref=roadmapsh", + "type": "article" + }, + { + "title": "JavaScript Crash Course for Beginners", + "url": "https://youtu.be/hdI2bqOjy3c?t=2", + "type": "video" + }, + { + "title": "Build a Netflix Landing Page Clone with HTML, CSS & JS", + "url": "https://youtu.be/P7t13SGytRk?t=22", + "type": "video" + } + ] + }, + "A4brX0efjZ0FFPTB4r6U0": { + "title": "Fetch API / Ajax (XHR)", + "description": "Ajax is the technique that lets us send and receive the data asynchronously from the servers e.g. 
updating the user profile or asynchronously fetching the list of searched products without reloading the page.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Fetch API MDN Docs", + "url": "https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API", + "type": "article" + }, + { + "title": "A Simple Guide to JavaScript Fetch API", + "url": "https://www.javascripttutorial.net/javascript-fetch-api/", + "type": "article" + }, + { + "title": "Introduction to Fetch", + "url": "https://web.dev/introduction-to-fetch/", + "type": "article" + }, + { + "title": "JavaScript Fetch API", + "url": "https://www.youtube.com/watch?v=-ZI0ea5O2oA", + "type": "video" + } + ] + }, + "0MAogsAID9R04R5TTO2Qa": { + "title": "Learn DOM Manipulation", + "description": "The Document Object Model (DOM) is a programming interface built for HTML and XML documents. It represents the page that allows programs and scripts to dynamically update the document structure, content, and style. With DOM, we can easily access and manipulate tags, IDs, classes, attributes, etc.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "DOM Tree", + "url": "https://javascript.info/dom-nodes", + "type": "article" + }, + { + "title": "Eloquent JavaScript, 3rd Edition: The Document Object Model", + "url": "https://eloquentjavascript.net/14_dom.html", + "type": "article" + }, + { + "title": "JavaScript HTML DOM", + "url": "https://www.w3schools.com/js/js_htmldom.asp", + "type": "article" + }, + { + "title": "JavaScript DOM", + "url": "https://www.javascripttutorial.net/javascript-dom/", + "type": "article" + }, + { + "title": "Learn the HTML DOM with Exercises - CodeGuage", + "url": "https://www.codeguage.com/courses/js/html-dom-introduction", + "type": "article" + }, + { + "title": "Explore top posts about DOM", + "url": "https://app.daily.dev/tags/dom?ref=roadmapsh", + "type": "article" + }, + { + "title": "What is DOM, Shadow DOM and Virtual DOM?", + "url": 
"https://www.youtube.com/watch?v=7Tok22qxPzQ", + "type": "video" + }, + { + "title": "JavaScript DOM Crash Course", + "url": "https://www.youtube.com/watch?v=0ik6X4DJKCc", + "type": "video" + } + ] + }, + "wQSjQqwKHfn5RGPk34BWI": { + "title": "Learn the Basics", + "description": "JavaScript allows you to add interactivity to your pages. Common examples that you may have seen on the websites are sliders, click interactions, popups and so on.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "W3Schools – JavaScript Tutorial", + "url": "https://www.w3schools.com/js/", + "type": "article" + }, + { + "title": "The Modern JavaScript Tutorial", + "url": "https://javascript.info/", + "type": "article" + }, + { + "title": "JavaScript Crash Course for Beginners", + "url": "https://youtu.be/hdI2bqOjy3c?t=2", + "type": "video" + }, + { + "title": "Build a Netflix Landing Page Clone with HTML, CSS & JS", + "url": "https://youtu.be/P7t13SGytRk?t=22", + "type": "video" + } + ] + }, + "MXnFhZlNB1zTsBFDyni9H": { + "title": "VCS Hosting", + "description": "There are different repository hosting services with the most famous one being GitHub, GitLab and BitBucket. I would recommend creating an account on GitHub because that is where most of the OpenSource work is done and most of the developers are.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "GitHub: Where the world builds software", + "url": "https://github.com", + "type": "opensource" + }, + { + "title": "GitLab: Iterate faster, innovate together", + "url": "https://gitlab.com", + "type": "opensource" + }, + { + "title": "BitBucket: The Git solution for professional teams", + "url": "https://bitbucket.com", + "type": "article" + } + ] + }, + "NIY7c4TQEEHx0hATu-k5C": { + "title": "Version Control Systems", + "description": "Version control systems allow you to track changes to your codebase/files over time. 
They allow you to go back to some previous version of the codebase without any issues. Also, they help in collaborating with people working on the same code – if you’ve ever collaborated with other people on a project, you might already know the frustration of copying and merging the changes from someone else into your codebase; version control systems allow you to get rid of this issue.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Git Documentation", + "url": "https://git-scm.com/docs", + "type": "article" + }, + { + "title": "Learn Git by Atlassian", + "url": "https://www.atlassian.com/git", + "type": "article" + }, + { + "title": "Version Control System Introduction", + "url": "https://www.youtube.com/watch?v=zbKdDsNNOhg", + "type": "video" + }, + { + "title": "Git & GitHub Crash Course For Beginners", + "url": "https://www.youtube.com/watch?v=SWYqp7iY_Tc", + "type": "video" + }, + { + "title": "Learn Git in 20 Minutes", + "url": "https://youtu.be/Y9XZQO1n_7c?t=21", + "type": "video" + } + ] + }, + "R_I4SGYqLk5zze5I1zS_E": { + "title": "Git", + "description": "[Git](https://git-scm.com/) is a free and open source distributed version control system designed to handle everything from small to very large projects with speed and efficiency.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Learn Git with Tutorials, News and Tips - Atlassian", + "url": "https://www.atlassian.com/git", + "type": "article" + }, + { + "title": "Git Cheat Sheet", + "url": "https://cs.fyi/guide/git-cheatsheet", + "type": "article" + }, + { + "title": "Tutorial: Git for Absolutely Everyone", + "url": "https://thenewstack.io/tutorial-git-for-absolutely-everyone/", + "type": "article" + }, + { + "title": "Explore top posts about Git", + "url": "https://app.daily.dev/tags/git?ref=roadmapsh", + "type": "article" + }, + { + "title": "Git & GitHub Crash Course For Beginners", + "url": "https://www.youtube.com/watch?v=SWYqp7iY_Tc", + 
"type": "video" + } + ] + }, + "IqvS1V-98cxko3e9sBQgP": { + "title": "Package Managers", + "description": "Package managers allow you to manage the dependencies (external code written by you or someone else) that your project needs to work correctly.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Modern JavaScript for Dinosaurs", + "url": "https://peterxjang.com/blog/modern-javascript-explained-for-dinosaurs.html", + "type": "article" + }, + { + "title": "An Absolute Beginners Guide to Using npm", + "url": "https://nodesource.com/blog/an-absolute-beginners-guide-to-using-npm/", + "type": "article" + }, + { + "title": "Yarn - Getting Started", + "url": "https://yarnpkg.com/en/docs/getting-started", + "type": "article" + }, + { + "title": "NPM tutorial for Beginners", + "url": "https://www.youtube.com/watch?v=2V1UUhBJ62Y", + "type": "video" + }, + { + "title": "NPM Crash Course", + "url": "https://www.youtube.com/watch?v=jHDhaSSKmB0", + "type": "video" + }, + { + "title": "Yarn Crash Course", + "url": "https://www.youtube.com/watch?v=g9_6KmiBISk", + "type": "video" + } + ] + }, + "qmTVMJDsEhNIkiwE_UTYu": { + "title": "GitHub", + "description": "[GitHub](https://github.com) is a provider of internet hosting for software development and version control using Git. It offers the distributed version control and source code management functionality of Git, plus its own features.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "GitHub: Quickstart", + "url": "https://docs.github.com/en/get-started/quickstart/hello-world", + "type": "article" + }, + { + "title": "Learn GitHub by doing", + "url": "https://skills.github.com/", + "type": "article" + }, + { + "title": "Explore top posts about GitHub", + "url": "https://app.daily.dev/tags/github?ref=roadmapsh", + "type": "article" + }, + { + "title": "What is GitHub?", + "url": "https://www.youtube.com/watch?v=w3jLJU7DT5E", + "type": "video" + }, + { + "title": "Git vs. 
GitHub: Whats the difference?", + "url": "https://www.youtube.com/watch?v=wpISo9TNjfU", + "type": "video" + }, + { + "title": "Git and GitHub for Beginners", + "url": "https://www.youtube.com/watch?v=RGOj5yH7evk", + "type": "video" + }, + { + "title": "Git and GitHub - CS50 Beyond 2019", + "url": "https://www.youtube.com/watch?v=eulnSXkhE7I", + "type": "video" + } + ] + }, + "zIoSJMX3cuzCgDYHjgbEh": { + "title": "GitLab", + "description": "[GitLab](https://gitlab.com) is a provider of internet hosting for software development and version control using Git. It offers the distributed version control and source code management functionality of Git, plus its own features.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "GitLab Website", + "url": "https://gitlab.com/", + "type": "opensource" + }, + { + "title": "GitLab Documentation", + "url": "https://docs.gitlab.com/", + "type": "article" + }, + { + "title": "Development: Connect git to GitLab for Small Projects", + "url": "https://thenewstack.io/development-connect-git-to-gitlab-for-small-projects/", + "type": "article" + }, + { + "title": "Explore top posts about GitLab", + "url": "https://app.daily.dev/tags/gitlab?ref=roadmapsh", + "type": "article" + } + ] + }, + "DILBiQp7WWgSZ5hhtDW6A": { + "title": "Bitbucket", + "description": "[BitBucket](https://bitbucket.com) is a provider of internet hosting for software development and version control using Git. 
It offers the distributed version control and source code management functionality of Git, plus its own features.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "How to use BitBucket?", + "url": "https://bitbucket.org/product/guides", + "type": "article" + }, + { + "title": "BitBucket Website", + "url": "https://bitbucket.com/", + "type": "article" + }, + { + "title": "Explore top posts about Bitbucket", + "url": "https://app.daily.dev/tags/bitbucket?ref=roadmapsh", + "type": "article" + } + ] + }, + "yrq3nOwFREzl-9EKnpU-e": { + "title": "yarn", + "description": "Yarn is a software packaging system developed in 2016 by Facebook for Node.js JavaScript runtime environment that provides speed, consistency, stability, and security as an alternative to npm (package manager).\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Modern JavaScript for Dinosaurs", + "url": "https://peterxjang.com/blog/modern-javascript-explained-for-dinosaurs.html", + "type": "article" + }, + { + "title": "Yarn - Getting Started", + "url": "https://yarnpkg.com/en/docs/getting-started", + "type": "article" + }, + { + "title": "Explore top posts about Yarn", + "url": "https://app.daily.dev/tags/yarn?ref=roadmapsh", + "type": "article" + }, + { + "title": "Yarn Crash Course", + "url": "https://www.youtube.com/watch?v=g9_6KmiBISk", + "type": "video" + } + ] + }, + "SLxA5qJFp_28TRzr1BjxZ": { + "title": "pnpm", + "description": "PNPM is an alternative package manager for Node. js which stands for “Performant NPM”. 
The main purpose of PNPM is to hold all the packages at a global (centralized) store and use them if needed by other projects too by creating hard links to it.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Official Website", + "url": "https://pnpm.io", + "type": "article" + }, + { + "title": "Meet PNPM: The Faster, More Performant NPM", + "url": "https://blog.bitsrc.io/pnpm-javascript-package-manager-4b5abd59dc9", + "type": "article" + } + ] + }, + "ib_FHinhrw8VuSet-xMF7": { + "title": "npm", + "description": "npm is a package manager for the JavaScript programming language maintained by npm, Inc. npm is the default package manager for the JavaScript runtime environment Node.js.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "How to NPM", + "url": "https://github.com/workshopper/how-to-npm", + "type": "opensource" + }, + { + "title": "Modern JavaScript for Dinosaurs", + "url": "https://peterxjang.com/blog/modern-javascript-explained-for-dinosaurs.html", + "type": "article" + }, + { + "title": "An Absolute Beginners Guide to Using npm", + "url": "https://nodesource.com/blog/an-absolute-beginners-guide-to-using-npm/", + "type": "article" + }, + { + "title": "Explore top posts about NPM", + "url": "https://app.daily.dev/tags/npm?ref=roadmapsh", + "type": "article" + }, + { + "title": "NPM tutorial for Beginners", + "url": "https://www.youtube.com/watch?v=2V1UUhBJ62Y", + "type": "video" + }, + { + "title": "NPM Crash Course", + "url": "https://www.youtube.com/watch?v=jHDhaSSKmB0", + "type": "video" + } + ] + }, + "eXezX7CVNyC1RuyU_I4yP": { + "title": "Pick a Framework", + "description": "Web frameworks are designed to write web applications. Frameworks are collections of libraries that aid in the development of a software product or website. Frameworks for web application development are collections of various tools. Frameworks vary in their capabilities and functions, depending on the tasks set. 
They define the structure, establish the rules, and provide the development tools required.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is the difference between a framework and a library?", + "url": "https://www.youtube.com/watch?v=D_MO9vIRBcA", + "type": "video" + }, + { + "title": "Which JS Framework is best?", + "url": "https://www.youtube.com/watch?v=cuHDQhDhvPE", + "type": "video" + } + ] + }, + "-bHFIiXnoUQSov64WI9yo": { + "title": "Angular", + "description": "Angular is a component based front-end development framework built on TypeScript which includes a collection of well-integrated libraries that include features like routing, forms management, client-server communication, and more.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Visit Dedicated Angular Roadmap", + "url": "/angular", + "type": "article" + }, + { + "title": "Official - Getting started with Angular", + "url": "https://angular.io/start", + "type": "article" + }, + { + "title": "Explore top posts about Angular", + "url": "https://app.daily.dev/tags/angular?ref=roadmapsh", + "type": "article" + }, + { + "title": "Angular for Beginners Course", + "url": "https://www.youtube.com/watch?v=3qBXWUpoPHo", + "type": "video" + } + ] + }, + "ERAdwL1G9M1bnx-fOm5ZA": { + "title": "Vue.js", + "description": "Vue.js is an open-source JavaScript framework for building user interfaces and single-page applications. 
It is mainly focused on front end development.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Visit Dedicated Vue Roadmap", + "url": "/vue", + "type": "article" + }, + { + "title": "Vue.js Website", + "url": "https://vuejs.org/", + "type": "article" + }, + { + "title": "Official Getting Started", + "url": "https://vuejs.org/v2/guide/", + "type": "article" + }, + { + "title": "Meet Vue.js, the Flexible JavaScript Framework", + "url": "https://thenewstack.io/meet-vue-js-flexible-javascript-framework/", + "type": "article" + }, + { + "title": "Explore top posts about Vue.js", + "url": "https://app.daily.dev/tags/vuejs?ref=roadmapsh", + "type": "article" + }, + { + "title": "Vue.js Course for Beginners", + "url": "https://www.youtube.com/watch?v=FXpIoQ_rT_c", + "type": "video" + }, + { + "title": "Vue.js Crash Course", + "url": "https://www.youtube.com/watch?v=qZXt1Aom3Cs", + "type": "video" + } + ] + }, + "tG5v3O4lNIFc2uCnacPak": { + "title": "React", + "description": "React is the most popular front-end JavaScript library for building user interfaces. 
React can also render on the server using Node and power mobile apps using React Native.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Visit Dedicated React Roadmap", + "url": "/react", + "type": "article" + }, + { + "title": "React Website", + "url": "https://react.dev", + "type": "article" + }, + { + "title": "Official Getting Started", + "url": "https://react.dev/learn/tutorial-tic-tac-toe", + "type": "article" + }, + { + "title": "Explore top posts about React", + "url": "https://app.daily.dev/tags/react?ref=roadmapsh", + "type": "article" + }, + { + "title": "React JS Course for Beginners", + "url": "https://www.youtube.com/watch?v=nTeuhbP7wdE", + "type": "video" + }, + { + "title": "React Course - Beginners Tutorial for React JavaScript Library [2022]", + "url": "https://www.youtube.com/watch?v=bMknfKXIFA8", + "type": "video" + }, + { + "title": "Understanding Reacts UI Rendering Process", + "url": "https://www.youtube.com/watch?v=i793Qm6kv3U", + "type": "video" + } + ] + }, + "ZR-qZ2Lcbu3FtqaMd3wM4": { + "title": "Svelte", + "description": "Svelte is a javascript framework that unlike Vue and React does not use vertical DOM diffing but instead knows exactly what and where to update when the state changes. 
It's mainly focused on frontend and building user interfaces.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Svelte Website", + "url": "https://svelte.dev/", + "type": "article" + }, + { + "title": "Svelte Documentation", + "url": "https://svelte.dev/docs", + "type": "article" + }, + { + "title": "All About Svelte, the Much-Loved, State-Driven Web Framework", + "url": "https://thenewstack.io/all-about-svelte-the-much-loved-state-driven-web-framework/", + "type": "article" + }, + { + "title": "Svelte and the Future of Frontend Development", + "url": "https://thenewstack.io/svelte-and-the-future-of-front-end-development/", + "type": "article" + }, + { + "title": "Explore top posts about Svelte", + "url": "https://app.daily.dev/tags/svelte?ref=roadmapsh", + "type": "article" + }, + { + "title": "Svelte Course Playlist for beginners", + "url": "https://www.youtube.com/playlist?list=PL4cUxeGkcC9hlbrVO_2QFVqVPhlZmz7tO", + "type": "video" + } + ] + }, + "DxOSKnqAjZOPP-dq_U7oP": { + "title": "Solid JS", + "description": "Solid is a reactive JavaScript toolkit for building user interfaces without a virtual DOM. To ensure that only the relevant code is executed when a state update occurs, it compiles templates down to real DOM nodes once and wraps modifications into fine-grained reactions.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Official Website - SolidJS", + "url": "https://www.solidjs.com/", + "type": "article" + }, + { + "title": "SolidJS Tutorial", + "url": "https://www.solidjs.com/tutorial/introduction_basics", + "type": "article" + }, + { + "title": "Explore top posts about JavaScript", + "url": "https://app.daily.dev/tags/javascript?ref=roadmapsh", + "type": "article" + } + ] + }, + "N5DCb6bDfgUnSdHPLYY4g": { + "title": "Qwik", + "description": "Qwik is a new kind of web framework that can deliver instant loading web applications at any size or complexity. 
Your sites and apps can boot with about 1kb of JS (regardless of application complexity), and achieve consistent performance at scale.\n\nRead more about Qwik [on the official website](https://qwik.builder.io/).", + "links": [] + }, + "XDTD8el6OwuQ55wC-X4iV": { + "title": "Writing CSS", + "description": "The way we write CSS in our modern front-end applications is completely different from how we used to write CSS before. There are methods such as Styled Components, CSS Modules, Styled JSX, Emotion, etc", + "links": [] + }, + "eghnfG4p7i-EDWfp3CQXC": { + "title": "Tailwind", + "description": "CSS Framework that provides atomic CSS classes to help you style components e.g. `flex`, `pt-4`, `text-center` and `rotate-90` that can be composed to build any design, directly in your markup.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Tailwind Website", + "url": "https://tailwindcss.com", + "type": "article" + }, + { + "title": "Explore top posts about CSS", + "url": "https://app.daily.dev/tags/css?ref=roadmapsh", + "type": "article" + }, + { + "title": "Tailwind CSS Full Course for Beginners", + "url": "https://www.youtube.com/watch?v=lCxcTsOHrjo", + "type": "video" + }, + { + "title": "Tailwind CSS Crash Course", + "url": "https://www.youtube.com/watch?v=UBOj6rqRUME", + "type": "video" + }, + { + "title": "Should You Use Tailwind CSS?", + "url": "https://www.youtube.com/watch?v=hdGsFpZ0J2E", + "type": "video" + }, + { + "title": "Official Screencasts", + "url": "https://www.youtube.com/c/TailwindLabs/videos", + "type": "video" + } + ] + }, + "nPg_YWpMJtlhU2t2UD_6B": { + "title": "CSS Architecture", + "description": "CSS is notoriously difficult to manage in large, complex, rapidly-iterated systems. 
There are different ways of writing CSS that allows in writing more maintainable CSS.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "A Look at Some CSS Methodologies", + "url": "https://www.webfx.com/blog/web-design/css-methodologies/", + "type": "article" + }, + { + "title": "BEM Official Website", + "url": "https://en.bem.info", + "type": "article" + }, + { + "title": "OOCSS Official Website", + "url": "http://oocss.org/", + "type": "article" + }, + { + "title": "SMACSS Official Website", + "url": "http://smacss.com/", + "type": "article" + }, + { + "title": "Explore top posts about Architecture", + "url": "https://app.daily.dev/tags/architecture?ref=roadmapsh", + "type": "article" + } + ] + }, + "UTW1pP59dUehuf0zeHXqL": { + "title": "CSS Preprocessors", + "description": "CSS Preprocessors are scripting languages that extend the default capabilities of CSS. They enable us to use logic in our CSS code, such as variables, nesting, inheritance, mixins, functions, and mathematical operations.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Explore top posts about CSS", + "url": "https://app.daily.dev/tags/css?ref=roadmapsh", + "type": "article" + } + ] + }, + "dRDmS072xeNLX7p_X565w": { + "title": "BEM", + "description": "The Block, Element, Modifier methodology (commonly referred to as BEM) is a popular naming convention for classes in HTML and CSS. 
Developed by the team at Yandex, its goal is to help developers better understand the relationship between the HTML and CSS in a given project.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "BEM Official Website", + "url": "https://en.bem.info", + "type": "article" + }, + { + "title": "BEM Documentation", + "url": "https://en.bem.info/methodology/quick-start", + "type": "article" + }, + { + "title": "BEM 101", + "url": "https://css-tricks.com/bem-101", + "type": "article" + }, + { + "title": "BEM Tutorials", + "url": "https://en.bem.info/tutorials/", + "type": "article" + } + ] + }, + "kukEE5rMSPa4NeNjx21kt": { + "title": "Sass", + "description": "Sass is a preprocessor scripting language that is interpreted or compiled into Cascading Style Sheets. It lets you write maintainable CSS and provides features like variable, nesting, mixins, extension, functions, loops, conditionals and so on.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Sass Website", + "url": "https://sass-lang.com/", + "type": "article" + }, + { + "title": "Official Documentation", + "url": "https://sass-lang.com/documentation", + "type": "article" + }, + { + "title": "Sass Tutorial for Beginners", + "url": "https://www.youtube.com/watch?v=_a5j7KoflTs", + "type": "video" + }, + { + "title": "Sass, BEM, & Responsive Design", + "url": "https://www.youtube.com/watch?v=jfMHA8SqUL4", + "type": "video" + } + ] + }, + "9WlPENh9g1xOv-zA64Tfg": { + "title": "PostCSS", + "description": "PostCSS is a tool for transforming styles with JS plugins. 
These plugins can lint your CSS, support variables and mixins, transpile future CSS syntax, inline images, and more.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Official Website", + "url": "https://postcss.org/", + "type": "article" + }, + { + "title": "Explore top posts about CSS", + "url": "https://app.daily.dev/tags/css?ref=roadmapsh", + "type": "article" + } + ] + }, + "i9z0stM4uKu27Cz6NIgNX": { + "title": "Build Tools", + "description": "Task runners automatically execute commands and carry out processes behind the scenes. This helps automate your workflow by performing mundane, repetitive tasks that you would otherwise waste an egregious amount of time repeating yourself.\n\nCommon usages of task runners include numerous development tasks such as: spinning up development servers, compiling code (ex. SCSS to CSS), running linters, serving files up from a local port on your computer, and many more!\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "webpack is a static module bundler for modern JavaScript applications", + "url": "https://webpack.js.org/", + "type": "article" + }, + { + "title": "Vite Next Generation Frontend Tooling", + "url": "https://vitejs.dev", + "type": "article" + }, + { + "title": "Parcel is a zero configuration build tool for the web", + "url": "https://parceljs.org/", + "type": "article" + }, + { + "title": "Explore top posts about Tools", + "url": "https://app.daily.dev/tags/tools?ref=roadmapsh", + "type": "article" + } + ] + }, + "9VcGfDBBD8YcKatj4VcH1": { + "title": "Linters and Formatters", + "description": "A linter is a tool used to analyze code and discover bugs, syntax errors, stylistic inconsistencies, and suspicious constructs. 
Popular linters for JavaScript include ESLint, JSLint, and JSHint.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What Is a Linter?", + "url": "https://www.testim.io/blog/what-is-a-linter-heres-a-definition-and-quick-start-guide/", + "type": "article" + } + ] + }, + "hkSc_1x09m7-7BO7WzlDT": { + "title": "Module Bundlers", + "description": "A module bundler is a tool that takes pieces of JavaScript and their dependencies and bundles them into a single file, usually for use in the browser. You may have used tools such as Browserify, Webpack, Rollup or one of many others.\n\nIt usually starts with an entry file, and from there it bundles up all of the code needed for that entry file.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Module Bundlers Explained", + "url": "https://www.youtube.com/watch?v=5IG4UmULyoA", + "type": "video" + } + ] + }, + "NS-hwaWa5ebSmNNRoxFDp": { + "title": "Parcel", + "description": "Parcel is a web application bundler, differentiated by its developer experience. 
It offers blazing-fast performance utilizing multicore processing and requires zero configuration.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Official Website and Docs", + "url": "https://parceljs.org/plugin-system/bundler/", + "type": "article" + }, + { + "title": "Explore top posts about Web Development", + "url": "https://app.daily.dev/tags/webdev?ref=roadmapsh", + "type": "article" + }, + { + "title": "Using Parcel Bundler with React", + "url": "https://www.youtube.com/watch?v=hCxvp3_o0gM", + "type": "video" + } + ] + }, + "sCjErk7rfWAUvhl8Kfm3n": { + "title": "Rollup", + "description": "Rollup is a module bundler for JavaScript which compiles small pieces of code into something larger and more complex, such as a library or application.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Official Website and Docs", + "url": "https://rollupjs.org/", + "type": "article" + }, + { + "title": "Explore top posts about Web Development", + "url": "https://app.daily.dev/tags/webdev?ref=roadmapsh", + "type": "article" + }, + { + "title": "How to Set Up JavaScript Bundling Using Rollup", + "url": "https://www.youtube.com/watch?v=ICYLOZuFMz8", + "type": "video" + } + ] + }, + "twufEtHgxcRUWAUQ9bXus": { + "title": "Webpack", + "description": "Webpack is a module bundler. 
Its main purpose is to bundle JavaScript files for usage in a browser, yet it is also capable of transforming, bundling, or packaging just about any resource or asset.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Webpack Official Website", + "url": "https://webpack.js.org/", + "type": "article" + }, + { + "title": "Webpack Documentation", + "url": "https://webpack.js.org/concepts/", + "type": "article" + }, + { + "title": "A Complete Guide to Webpack 5", + "url": "https://www.valentinog.com/blog/webpack", + "type": "article" + }, + { + "title": "Explore top posts about Webpack", + "url": "https://app.daily.dev/tags/webpack?ref=roadmapsh", + "type": "article" + } + ] + }, + "4W7UXfdKIUsm1bUrjdTVT": { + "title": "esbuild", + "description": "Our current build tools for the web are 10-100x slower than they could be. The main goal of the esbuild bundler project is to bring about a new era of build tool performance, and create an easy-to-use modern bundler along the way.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Esbuild Official Website", + "url": "https://esbuild.github.io/", + "type": "article" + }, + { + "title": "Esbuild Documentation", + "url": "https://esbuild.github.io/api/", + "type": "article" + }, + { + "title": "Explore top posts about Web Development", + "url": "https://app.daily.dev/tags/webdev?ref=roadmapsh", + "type": "article" + }, + { + "title": "Why are People Obsessed with esbuild?", + "url": "https://www.youtube.com/watch?v=9XS_RA6zyyU", + "type": "video" + }, + { + "title": "What Is ESBuild?", + "url": "https://www.youtube.com/watch?v=ZY8Vu8cbWF0", + "type": "video" + } + ] + }, + "0Awx3zEI5_gYEIrD7IVX6": { + "title": "Vite", + "description": "Vite is a build tool that aims to provide a faster and leaner development experience for modern web projects.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Vite Website", + "url": "https://vitejs.dev", + 
"type": "article" + }, + { + "title": "Vite Documentation", + "url": "https://vitejs.dev/guide", + "type": "article" + }, + { + "title": "Explore top posts about Vite", + "url": "https://app.daily.dev/tags/vite?ref=roadmapsh", + "type": "article" + }, + { + "title": "Vite Crash Course", + "url": "https://youtu.be/LQQ3CR2JTX8", + "type": "video" + }, + { + "title": "Vite Tutorial Video", + "url": "https://www.youtube.com/watch?v=VAeRhmpcWEQ", + "type": "video" + } + ] + }, + "zbkpu_gvQ4mgCiZKzS1xv": { + "title": "Prettier", + "description": "Prettier is an opinionated code formatter with support for JavaScript, HTML, CSS, YAML, Markdown, GraphQL Schemas. By far the biggest reason for adopting Prettier is to stop all the on-going debates over styles.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Prettier Website", + "url": "https://prettier.io", + "type": "article" + }, + { + "title": "Why Prettier", + "url": "https://prettier.io/docs/en/why-prettier.html", + "type": "article" + } + ] + }, + "NFjsI712_qP0IOmjuqXar": { + "title": "ESLint", + "description": "With ESLint you can impose the coding standard using a certain set of standalone rules.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "ESLint Official Website", + "url": "https://eslint.org/", + "type": "article" + }, + { + "title": "Introduction to ESLint", + "url": "https://dev.to/shivambmgupta/eslint-what-why-when-how-5f1d", + "type": "article" + }, + { + "title": "Explore top posts about JavaScript", + "url": "https://app.daily.dev/tags/javascript?ref=roadmapsh", + "type": "article" + }, + { + "title": "ESLint Quickstart - find errors automatically", + "url": "https://www.youtube.com/watch?v=qhuFviJn-es", + "type": "video" + } + ] + }, + "igg4_hb3XE3vuvY8ufV-4": { + "title": "Testing", + "description": "Before delivering your application to users, you need to be sure that your app meets the requirements it was designed for, and that it doesn't do 
any weird, unintended things (called 'bugs'). To accomplish this, we 'test' our applications in different ways.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "A comprehensive dive into software testing.", + "url": "https://www.softwaretestingmaterial.com/software-testing/", + "type": "article" + }, + { + "title": "The different types of software tests", + "url": "https://www.atlassian.com/continuous-delivery/software-testing/types-of-software-testing", + "type": "article" + }, + { + "title": "Testing React apps with Jest", + "url": "https://jestjs.io/docs/tutorial-react", + "type": "article" + }, + { + "title": "Explore top posts about Testing", + "url": "https://app.daily.dev/tags/testing?ref=roadmapsh", + "type": "article" + } + ] + }, + "hVQ89f6G0LXEgHIOKHDYq": { + "title": "Vitest", + "description": "Vitest is a fast Vite-native unit test framework with out-of-box ESM, TypeScript and JSX support. Works on React, Vue, Svelte and more projects created with Vite.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Vitest Website", + "url": "https://vitest.dev/", + "type": "article" + } + ] + }, + "g5itUjgRXd9vs9ujHezFl": { + "title": "Jest", + "description": "Jest is a delightful JavaScript Testing Framework with a focus on simplicity. It works with projects using: Babel, TypeScript, Node, React, Angular, Vue and more!\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Jest Website", + "url": "https://jestjs.io/", + "type": "article" + }, + { + "title": "Explore top posts about Jest", + "url": "https://app.daily.dev/tags/jest?ref=roadmapsh", + "type": "article" + }, + { + "title": "JavaScript Testing With Jest", + "url": "https://youtu.be/IPiUDhwnZxA?si=2_lE2bDo2fRuo2CU", + "type": "video" + } + ] + }, + "jramLk8FGuaEH4YpHIyZT": { + "title": "Playwright", + "description": "Playwright is an open-source test automation library initially developed by Microsoft contributors. 
It supports programming languages such as Java, Python, C#, and NodeJS. Playwright comes with Apache 2.0 License and is most popular with NodeJS with JavaScript/TypeScript.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Playwright Website", + "url": "https://playwright.dev/", + "type": "article" + }, + { + "title": "Playwright Tutorial: Learn Basics and Setup", + "url": "https://www.browserstack.com/guide/playwright-tutorial", + "type": "article" + }, + { + "title": "Playwright, a Time-Saving End-to-End Testing Framework", + "url": "https://thenewstack.io/playwright-a-time-saving-end-to-end-testing-framework/", + "type": "article" + }, + { + "title": "Get started with end-to-end testing: Playwright", + "url": "https://www.youtube.com/playlist?list=PLQ6Buerc008ed-F9OksF7ek37wR3y916p", + "type": "video" + } + ] + }, + "DaynCz5RR26gjT6N6gTDL": { + "title": "Cypress", + "description": "Cypress framework is a JavaScript-based end-to-end testing framework built on top of Mocha – a feature-rich JavaScript test framework running on and in the browser, making asynchronous testing simple and convenient. 
It also uses a BDD/TDD assertion library and a browser to pair with any JavaScript testing framework.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Official Website", + "url": "https://www.cypress.io/", + "type": "article" + }, + { + "title": "Official Documentation", + "url": "https://docs.cypress.io/guides/overview/why-cypress#Other", + "type": "article" + }, + { + "title": "Explore top posts about Cypress", + "url": "https://app.daily.dev/tags/cypress?ref=roadmapsh", + "type": "article" + }, + { + "title": "Cypress End-to-End Testing", + "url": "https://www.youtube.com/watch?v=7N63cMKosIE", + "type": "video" + } + ] + }, + "U5mD5FmVx7VWeKxDpQxB5": { + "title": "Authentication Strategies", + "description": "Authentication strategies are methods or techniques used to verify the identity of a user or system in order to grant access to a protected resource. There are several different authentication strategies that can be used, including:\n\n* Basic Authentication\n* Session Based Authentication\n* Token Based Authentication\n* JWT Authentication\n* OAuth\n* SSO\n\nYou don't necessarily need to learn all of these — or how to implement them and all their ins and outs — from the get-go. But it's important to know what they are and how they work. 
This will help you make better decisions when choosing an authentication strategy for your application.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Basic Authentication", + "url": "https://roadmap.sh/guides/basic-authentication", + "type": "article" + }, + { + "title": "Session Based Authentication", + "url": "https://roadmap.sh/guides/session-authentication", + "type": "article" + }, + { + "title": "Token Based Authentication", + "url": "https://roadmap.sh/guides/token-authentication", + "type": "article" + }, + { + "title": "JWT Authentication", + "url": "https://roadmap.sh/guides/jwt-authentication", + "type": "article" + }, + { + "title": "OAuth", + "url": "https://roadmap.sh/guides/oauth", + "type": "article" + }, + { + "title": "SSO - Single Sign On", + "url": "https://roadmap.sh/guides/sso", + "type": "article" + } + ] + }, + "RDWbG3Iui6IPgp0shvXtg": { + "title": "Web Security Basics", + "description": "Web security refers to the protective measures taken by the developers to protect the web applications from threats that could affect the business.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "OWASP Web Application Security Testing Checklist", + "url": "https://github.com/0xRadi/OWASP-Web-Checklist", + "type": "opensource" + }, + { + "title": "Why HTTPS Matters", + "url": "https://developers.google.com/web/fundamentals/security/encrypt-in-transit/why-https", + "type": "article" + }, + { + "title": "Wikipedia - OWASP", + "url": "https://en.wikipedia.org/wiki/OWASP", + "type": "article" + }, + { + "title": "OWASP Top 10 Security Risks", + "url": "https://sucuri.net/guides/owasp-top-10-security-vulnerabilities-2021/", + "type": "article" + }, + { + "title": "OWASP Cheatsheets", + "url": "https://cheatsheetseries.owasp.org/cheatsheets/AJAX_Security_Cheat_Sheet.html", + "type": "article" + }, + { + "title": "Content Security Policy (CSP)", + "url": 
"https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP", + "type": "article" + }, + { + "title": "Explore top posts about Security", + "url": "https://app.daily.dev/tags/security?ref=roadmapsh", + "type": "article" + }, + { + "title": "OWASP ZAP Step-by-Step Tutorial", + "url": "https://www.youtube.com/playlist?list=PLH8n_ayg-60J9i3nsLybper-DR3zJw6Z5", + "type": "video" + } + ] + }, + "AfH2zCbqzw0Nisg1yyISS": { + "title": "CORS", + "description": "Cross-Origin Resource Sharing (CORS) is an HTTP-header based mechanism that allows a server to indicate any origins (domain, scheme, or port) other than its own from which a browser should permit loading resources.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Cross-Origin Resource Sharing (CORS)", + "url": "https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS", + "type": "article" + }, + { + "title": "Understanding CORS", + "url": "https://rbika.com/blog/understanding-cors", + "type": "article" + }, + { + "title": "CORS in 100 Seconds", + "url": "https://www.youtube.com/watch?v=4KHiSt0oLJ0", + "type": "video" + }, + { + "title": "CORS in 6 minutes", + "url": "https://www.youtube.com/watch?v=PNtFSVU-YTI", + "type": "video" + } + ] + }, + "uum7vOhOUR38vLuGZy8Oa": { + "title": "HTTPS", + "description": "HTTPS is a secure way to send data between a web server and a browser.\n\nHypertext transfer protocol secure (HTTPS) is the secure version of HTTP, which is the primary protocol used to send data between a web browser and a website. HTTPS is encrypted in order to increase security of data transfer. 
This is particularly important when users transmit sensitive data, such as by logging into a bank account, email service, or health insurance provider\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "What is HTTPS?", + "url": "https://www.cloudflare.com/en-gb/learning/ssl/what-is-https/", + "type": "article" + }, + { + "title": "Why HTTPS Matters", + "url": "https://developers.google.com/web/fundamentals/security/encrypt-in-transit/why-https", + "type": "article" + }, + { + "title": "Enabling HTTPS on Your Servers", + "url": "https://web.dev/enable-https/", + "type": "article" + }, + { + "title": "How HTTPS works (comic)", + "url": "https://howhttps.works/", + "type": "article" + }, + { + "title": "SSL, TLS, HTTP, HTTPS Explained", + "url": "https://www.youtube.com/watch?v=hExRDVZHhig", + "type": "video" + }, + { + "title": "HTTPS — Stories from the field", + "url": "https://www.youtube.com/watch?v=GoXgl9r0Kjk", + "type": "video" + } + ] + }, + "rmcm0CZbtNVC9LZ14-H6h": { + "title": "Content Security Policy", + "description": "Content Security Policy is a computer security standard introduced to prevent cross-site scripting, clickjacking and other code injection attacks resulting from execution of malicious content in the trusted web page context.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "MDN Content Security Policy (CSP)", + "url": "https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP", + "type": "article" + }, + { + "title": "Google Devs Content Security Policy (CSP)", + "url": "https://developers.google.com/web/fundamentals/security/csp", + "type": "article" + }, + { + "title": "Web.dev - Content Security Policy (CSP)", + "url": "https://web.dev/csp/", + "type": "article" + }, + { + "title": "Explore top posts about Security", + "url": "https://app.daily.dev/tags/security?ref=roadmapsh", + "type": "article" + } + ] + }, + "JanR7I_lNnUCXhCMGLdn-": { + "title": "OWASP Security Risks", + 
"description": "OWASP or Open Web Application Security Project is an online community that produces freely-available articles, methodologies, documentation, tools, and technologies in the field of web application security.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "OWASP Web Application Security Testing Checklist", + "url": "https://github.com/0xRadi/OWASP-Web-Checklist", + "type": "opensource" + }, + { + "title": "Wikipedia - OWASP", + "url": "https://en.wikipedia.org/wiki/OWASP", + "type": "article" + }, + { + "title": "OWASP Top 10 Security Risks", + "url": "https://sucuri.net/guides/owasp-top-10-security-vulnerabilities-2021/", + "type": "article" + }, + { + "title": "OWASP Cheatsheets", + "url": "https://cheatsheetseries.owasp.org/cheatsheets/AJAX_Security_Cheat_Sheet.html", + "type": "article" + }, + { + "title": "OWASP Top 10: A Guide to the Worst Software Vulnerabilities", + "url": "https://thenewstack.io/owasp-top-10-a-guide-to-the-worst-software-vulnerabilities/", + "type": "article" + }, + { + "title": "Explore top posts about Security", + "url": "https://app.daily.dev/tags/security?ref=roadmapsh", + "type": "article" + } + ] + }, + "ruoFa3M4bUE3Dg6GXSiUI": { + "title": "Web Components", + "description": "Web Components is a suite of different technologies allowing you to create reusable custom elements — with their functionality encapsulated away from the rest of your code — and utilize them in your web apps.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Web Components | MDN", + "url": "https://developer.mozilla.org/en-US/docs/Web/Web_Components", + "type": "article" + }, + { + "title": "WebComponents.org", + "url": "https://webcomponents.github.io/", + "type": "article" + }, + { + "title": "Explore top posts about Web Components", + "url": "https://app.daily.dev/tags/web-components?ref=roadmapsh", + "type": "article" + }, + { + "title": "Web Components Crash Course", + "url": 
"https://www.youtube.com/watch?v=PCWaFLy3VUo", + "type": "video" + } + ] + }, + "hwPOGT0-duy3KfI8QaEwF": { + "title": "Type Checkers", + "description": "Type checker helps developers write code with fewer bugs by adding types to their code, trying to catch type errors within your code, and then removing them during compile time. Flow and TypeScript are two popular static type checkers for JavaScript.", + "links": [] + }, + "VxiQPgcYDFAT6WgSRWpIA": { + "title": "Custom Elements", + "description": "One of the key features of the Web Components standard is the ability to create custom elements that encapsulate your functionality on an HTML page, rather than having to make do with a long, nested batch of elements that together provide a custom page feature.\n\nVisit the following resources to learn more:", + "links": [ + { + "title": "Using custom elements | MDN web docs", + "url": "https://developer.mozilla.org/en-US/docs/Web/Web_Components/Using_custom_elements", + "type": "article" + }, + { + "title": "Explore top posts about Web Development", + "url": "https://app.daily.dev/tags/webdev?ref=roadmapsh", + "type": "article" + } + ] + }, + "Hk8AVonOd693_y1sykPqd": { + "title": "HTML Templates", + "description": "The `