diff --git a/.github/workflows/samples_getstarted_flowasfunction.yml b/.github/workflows/samples_getstarted_flowasfunction.yml
index 20dbdaa35d7..a7ca3a996f8 100644
--- a/.github/workflows/samples_getstarted_flowasfunction.yml
+++ b/.github/workflows/samples_getstarted_flowasfunction.yml
@@ -9,7 +9,7 @@ on:
- cron: "17 21 * * *" # Every day starting at 5:17 BJT
pull_request:
branches: [ main ]
- paths: [ examples/tutorials/get-started/**, examples/flows/standard/web-classification/**, examples/flows/chat/chat-basic/**, .github/workflows/samples_getstarted_flowasfunction.yml, examples/requirements.txt, examples/connections/azure_openai.yml ]
+ paths: [ examples/tutorials/get-started/**, examples/*requirements.txt, .github/workflows/samples_getstarted_flowasfunction.yml ]
workflow_dispatch:
env:
diff --git a/.github/workflows/samples_getstarted_quickstart.yml b/.github/workflows/samples_getstarted_quickstart.yml
index db55632b867..bf5ae85e59d 100644
--- a/.github/workflows/samples_getstarted_quickstart.yml
+++ b/.github/workflows/samples_getstarted_quickstart.yml
@@ -9,7 +9,7 @@ on:
- cron: "55 21 * * *" # Every day starting at 5:55 BJT
pull_request:
branches: [ main ]
- paths: [ examples/tutorials/get-started/**, examples/requirements.txt, examples/flows/standard/web-classification/**, examples/flows/evaluation/eval-classification-accuracy/**, .github/workflows/samples_getstarted_quickstart.yml, examples/connections/azure_openai.yml ]
+ paths: [ examples/tutorials/get-started/**, examples/*requirements.txt, .github/workflows/samples_getstarted_quickstart.yml ]
workflow_dispatch:
env:
diff --git a/.github/workflows/samples_getstarted_quickstartazure.yml b/.github/workflows/samples_getstarted_quickstartazure.yml
index 64f9b4ba279..944ac22540b 100644
--- a/.github/workflows/samples_getstarted_quickstartazure.yml
+++ b/.github/workflows/samples_getstarted_quickstartazure.yml
@@ -9,7 +9,7 @@ on:
- cron: "24 20 * * *" # Every day starting at 4:24 BJT
pull_request:
branches: [ main ]
- paths: [ examples/tutorials/get-started/**, examples/requirements.txt, examples/flows/standard/web-classification/**, examples/flows/evaluation/eval-classification-accuracy/**, .github/workflows/samples_getstarted_quickstartazure.yml, examples/connections/azure_openai.yml ]
+ paths: [ examples/tutorials/get-started/**, examples/*requirements.txt, .github/workflows/samples_getstarted_quickstartazure.yml ]
workflow_dispatch:
env:
diff --git a/.github/workflows/samples_runflowwithpipeline_pipeline.yml b/.github/workflows/samples_runflowwithpipeline_pipeline.yml
index 2b6d495d243..6708abdbf75 100644
--- a/.github/workflows/samples_runflowwithpipeline_pipeline.yml
+++ b/.github/workflows/samples_runflowwithpipeline_pipeline.yml
@@ -9,7 +9,7 @@ on:
- cron: "33 20 * * *" # Every day starting at 4:33 BJT
pull_request:
branches: [ main ]
- paths: [ examples/tutorials/run-flow-with-pipeline/**, examples/flows/standard/web-classification/**, .github/workflows/samples_runflowwithpipeline_pipeline.yml, examples/requirements.txt, examples/connections/azure_openai.yml ]
+ paths: [ examples/tutorials/run-flow-with-pipeline/**, examples/*requirements.txt, .github/workflows/samples_runflowwithpipeline_pipeline.yml ]
workflow_dispatch:
env:
diff --git a/.github/workflows/samples_runmanagement_cloudrunmanagement.yml b/.github/workflows/samples_runmanagement_cloudrunmanagement.yml
index d22810e48e1..f5de2480378 100644
--- a/.github/workflows/samples_runmanagement_cloudrunmanagement.yml
+++ b/.github/workflows/samples_runmanagement_cloudrunmanagement.yml
@@ -9,7 +9,7 @@ on:
- cron: "24 20 * * *" # Every day starting at 4:24 BJT
pull_request:
branches: [ main ]
- paths: [ examples/tutorials/run-management/**, examples/requirements.txt, examples/flows/standard/web-classification/**, .github/workflows/samples_runmanagement_cloudrunmanagement.yml, examples/connections/azure_openai.yml ]
+ paths: [ examples/tutorials/run-management/**, examples/*requirements.txt, .github/workflows/samples_runmanagement_cloudrunmanagement.yml ]
workflow_dispatch:
env:
diff --git a/.github/workflows/samples_runmanagement_runmanagement.yml b/.github/workflows/samples_runmanagement_runmanagement.yml
index cf4a2be6a56..dca2d327158 100644
--- a/.github/workflows/samples_runmanagement_runmanagement.yml
+++ b/.github/workflows/samples_runmanagement_runmanagement.yml
@@ -9,7 +9,7 @@ on:
- cron: "51 20 * * *" # Every day starting at 4:51 BJT
pull_request:
branches: [ main ]
- paths: [ examples/tutorials/run-management/**, examples/requirements.txt, examples/flows/standard/web-classification/**, .github/workflows/samples_runmanagement_runmanagement.yml, examples/connections/azure_openai.yml ]
+ paths: [ examples/tutorials/run-management/**, examples/*requirements.txt, .github/workflows/samples_runmanagement_runmanagement.yml ]
workflow_dispatch:
env:
diff --git a/.github/workflows/samples_tracing_autogengroupchat_traceautogengroupchat.yml b/.github/workflows/samples_tracing_autogengroupchat_traceautogengroupchat.yml
index b683fa7e2de..ea3b15a9734 100644
--- a/.github/workflows/samples_tracing_autogengroupchat_traceautogengroupchat.yml
+++ b/.github/workflows/samples_tracing_autogengroupchat_traceautogengroupchat.yml
@@ -9,7 +9,7 @@ on:
- cron: "11 20 * * *" # Every day starting at 4:11 BJT
pull_request:
branches: [ main ]
- paths: [ examples/tutorials/tracing/autogen-groupchat/**, .github/workflows/samples_tracing_autogengroupchat_traceautogengroupchat.yml, examples/requirements.txt, examples/connections/azure_openai.yml ]
+ paths: [ examples/tutorials/tracing/autogen-groupchat/**, examples/*requirements.txt, .github/workflows/samples_tracing_autogengroupchat_traceautogengroupchat.yml ]
workflow_dispatch:
env:
diff --git a/.github/workflows/samples_tracing_customotlpcollector_otlptracecollector.yml b/.github/workflows/samples_tracing_customotlpcollector_otlptracecollector.yml
index 899523561b9..baf92638905 100644
--- a/.github/workflows/samples_tracing_customotlpcollector_otlptracecollector.yml
+++ b/.github/workflows/samples_tracing_customotlpcollector_otlptracecollector.yml
@@ -9,7 +9,7 @@ on:
- cron: "22 21 * * *" # Every day starting at 5:22 BJT
pull_request:
branches: [ main ]
- paths: [ examples/tutorials/tracing/custom-otlp-collector/**, .github/workflows/samples_tracing_customotlpcollector_otlptracecollector.yml, examples/requirements.txt, examples/connections/azure_openai.yml ]
+ paths: [ examples/tutorials/tracing/custom-otlp-collector/**, examples/*requirements.txt, .github/workflows/samples_tracing_customotlpcollector_otlptracecollector.yml ]
workflow_dispatch:
env:
diff --git a/.github/workflows/samples_tracing_langchain_tracelangchain.yml b/.github/workflows/samples_tracing_langchain_tracelangchain.yml
index d87ffb67d1b..46a406cc6a7 100644
--- a/.github/workflows/samples_tracing_langchain_tracelangchain.yml
+++ b/.github/workflows/samples_tracing_langchain_tracelangchain.yml
@@ -9,7 +9,7 @@ on:
- cron: "21 19 * * *" # Every day starting at 3:21 BJT
pull_request:
branches: [ main ]
- paths: [ examples/tutorials/tracing/langchain/**, .github/workflows/samples_tracing_langchain_tracelangchain.yml, examples/requirements.txt, examples/connections/azure_openai.yml ]
+ paths: [ examples/tutorials/tracing/langchain/**, examples/*requirements.txt, .github/workflows/samples_tracing_langchain_tracelangchain.yml ]
workflow_dispatch:
env:
diff --git a/.github/workflows/samples_tracing_llm_tracellm.yml b/.github/workflows/samples_tracing_llm_tracellm.yml
index f1f3a211e3a..54cd46fa04f 100644
--- a/.github/workflows/samples_tracing_llm_tracellm.yml
+++ b/.github/workflows/samples_tracing_llm_tracellm.yml
@@ -9,7 +9,7 @@ on:
- cron: "36 21 * * *" # Every day starting at 5:36 BJT
pull_request:
branches: [ main ]
- paths: [ examples/tutorials/tracing/llm/**, .github/workflows/samples_tracing_llm_tracellm.yml, examples/requirements.txt, examples/connections/azure_openai.yml ]
+ paths: [ examples/tutorials/tracing/llm/**, examples/*requirements.txt, .github/workflows/samples_tracing_llm_tracellm.yml ]
workflow_dispatch:
env:
diff --git a/docs/tutorials/index.md b/docs/tutorials/index.md
index 9601c58c66b..6b404c9d2d4 100644
--- a/docs/tutorials/index.md
+++ b/docs/tutorials/index.md
@@ -2,31 +2,38 @@
This section contains a collection of flow samples and step-by-step tutorials.
-|Area|
Sample
|Description|
+|Category|Sample
|Description|
|--|--|--|
-|SDK|[Getting started with prompt flow](https://github.com/microsoft/promptflow/blob/main/examples/tutorials/get-started/quickstart.ipynb)| A step by step guidance to invoke your first flow run.
-|CLI|[Chat with PDF](https://github.com/microsoft/promptflow/blob/main/examples/tutorials/e2e-development/chat-with-pdf.md)| An end-to-end tutorial on how to build a high quality chat application with prompt flow, including flow development and evaluation with metrics.
-|SDK|[Chat with PDF - test, evaluation and experimentation](https://github.com/microsoft/promptflow/blob/main/examples/flows/chat/chat-with-pdf/chat-with-pdf.ipynb)| We will walk you through how to use prompt flow Python SDK to test, evaluate and experiment with the "Chat with PDF" flow.
-|SDK|[Connection management](https://github.com/microsoft/promptflow/blob/main/examples/connections/connection.ipynb)| Manage various types of connections using sdk
-|CLI|[Working with connection](https://github.com/microsoft/promptflow/blob/main/examples/connections/README.md)| Manage various types of connections using cli
-|SDK|[Run prompt flow in Azure AI](https://github.com/microsoft/promptflow/blob/main/examples/tutorials/get-started/quickstart-azure.ipynb)| A quick start tutorial to run a flow in Azure AI and evaluate it.
-|SDK|[Flow run management in Azure AI](https://github.com/microsoft/promptflow/blob/main/examples/tutorials/run-management/cloud-run-management.ipynb)| Flow run management in azure AI
-|AZURE|[Develop promptflow copilot](https://github.com/microsoft/promptflow/blob/main/examples/tutorials/develop-promptflow-copilot/develop-promptflow-copilot.md)| A step by step guidance to develop a promptflow copilot.
-
-## Samples
-
-|Area|Sample
|Description|
-|--|--|--|
-|Standard Flow|[basic](https://github.com/microsoft/promptflow/tree/main/examples/flows/standard/basic)| a basic flow with prompt and python tool.
-|Standard Flow|[basic-with-connection](https://github.com/microsoft/promptflow/tree/main/examples/flows/standard/basic-with-connection)| a basic flow using custom connection with prompt and python tool
-|Standard Flow|[basic-with-builtin-llm](https://github.com/microsoft/promptflow/tree/main/examples/flows/standard/basic-with-builtin-llm)| a basic flow using builtin llm tool
-|Standard Flow|[customer-intent-extraction](https://github.com/microsoft/promptflow/tree/main/examples/flows/standard/customer-intent-extraction)| a flow created from existing langchain python code
-|Standard Flow|[web-classification](https://github.com/microsoft/promptflow/tree/main/examples/flows/standard/web-classification)| a flow demonstrating multi-class classification with LLM. Given an url, it will classify the url into one web category with just a few shots, simple summarization and classification prompts.
-|Standard Flow|[autonomous-agent](https://github.com/microsoft/promptflow/tree/main/examples/flows/standard/autonomous-agent)| a flow showcasing how to construct a AutoGPT flow to autonomously figures out how to apply the given functions to solve the goal, which is film trivia that provides accurate and up-to-date information about movies, directors, actors, and more.
-|Chat Flow|[chat-with-wikipedia](https://github.com/microsoft/promptflow/tree/main/examples/flows/chat/chat-with-wikipedia)| a flow demonstrating Q&A with GPT3.5 using information from Wikipedia to make the answer more grounded.
-|Chat Flow|[chat-with-pdf](https://github.com/microsoft/promptflow/tree/main/examples/flows/chat/chat-with-pdf)| a flow that allow you to ask questions about the content of a PDF file and get answers.
-|Evaluation Flow|[eval-classification-accuracy](https://github.com/microsoft/promptflow/tree/main/examples/flows/evaluation/eval-classification-accuracy)| a flow illustrating how to evaluate the performance of a classification system.
-
-Learn more: [Try out more promptflow examples.](https://github.com/microsoft/promptflow/tree/main/examples)
+|Tracing|[Tracing](https://github.com/microsoft/promptflow/blob/main/examples/tutorials/tracing/README.md)| Prompt flow provides the tracing feature to capture and visualize the internal execution details for all flows|
+|Tracing|[Tracing with llm application](https://github.com/microsoft/promptflow/blob/main/examples/tutorials/tracing/llm/trace-llm.ipynb)|Tracing LLM application|
+|Tracing|[Tracing with autogen](https://github.com/microsoft/promptflow/blob/main/examples/tutorials/tracing/autogen-groupchat/trace-autogen-groupchat.ipynb)|Tracing LLM calls in autogen group chat application|
+|Tracing|[Tracing with langchain apps](https://github.com/microsoft/promptflow/blob/main/examples/tutorials/tracing/langchain/trace-langchain.ipynb)|Tracing LLM calls in langchain application|
+|Tracing|[Tracing with custom opentelemetry collector](https://github.com/microsoft/promptflow/blob/main/examples/tutorials/tracing/custom-otlp-collector/otlp-trace-collector.ipynb)|A tutorial on how to leverage custom OTLP collector.|
+|Prompty|[Getting started with prompty](https://github.com/microsoft/promptflow/blob/main/examples/prompty/basic/prompty-quickstart.ipynb)|A quickstart tutorial to run a prompty and evaluate it.|
+|Prompty|[Chat with prompty](https://github.com/microsoft/promptflow/blob/main/examples/prompty/chat-basic/chat-with-prompty.ipynb)|A quickstart tutorial to run a chat prompty and evaluate it.|
+|Prompty|[Prompty output format](https://github.com/microsoft/promptflow/blob/main/examples/prompty/format-output/prompty-output-format.ipynb)||
+|Flow|[Getting started with flex flow in azure](https://github.com/microsoft/promptflow/blob/main/examples/flex-flows/basic/flex-flow-quickstart-azure.ipynb)|A quickstart tutorial to run a flex flow and evaluate it in azure.|
+|Flow|[Getting started with flex flow](https://github.com/microsoft/promptflow/blob/main/examples/flex-flows/basic/flex-flow-quickstart.ipynb)|A quickstart tutorial to run a flex flow and evaluate it.|
+|Flow|[Chat with class based flex flow in azure](https://github.com/microsoft/promptflow/blob/main/examples/flex-flows/chat-basic/chat-with-class-based-flow-azure.ipynb)|A quickstart tutorial to run a class based flex flow and evaluate it in azure.|
+|Flow|[Chat with class based flex flow](https://github.com/microsoft/promptflow/blob/main/examples/flex-flows/chat-basic/chat-with-class-based-flow.ipynb)|A quickstart tutorial to run a class based flex flow and evaluate it.|
+|Flow|[Stream chat with flex flow](https://github.com/microsoft/promptflow/blob/main/examples/flex-flows/chat-stream/chat-stream-with-flex-flow.ipynb)|A quickstart tutorial to run a class based flex flow in stream mode and evaluate it.|
+|Flow|[Run dag flow in azure](https://github.com/microsoft/promptflow/blob/main/examples/tutorials/get-started/quickstart-azure.ipynb)|A quickstart tutorial to run a flow in Azure AI and evaluate it.|
+|Flow|[Chat with pdf in azure](https://github.com/microsoft/promptflow/blob/main/examples/flows/chat/chat-with-pdf/chat-with-pdf-azure.ipynb)|A tutorial of chat-with-pdf flow that executes in Azure AI|
+|Flow|[Getting started with dag flow](https://github.com/microsoft/promptflow/blob/main/examples/tutorials/get-started/quickstart.ipynb)|A quickstart tutorial to run a flow and evaluate it.|
+|Flow|[Execute flow as a function](https://github.com/microsoft/promptflow/blob/main/examples/tutorials/get-started/flow-as-function.ipynb)|This guide will walk you through the main scenarios of executing flow as a function.|
+|Flow|[Run flows in azure ml pipeline](https://github.com/microsoft/promptflow/blob/main/examples/tutorials/run-flow-with-pipeline/pipeline.ipynb)|Create pipeline using components to run a distributed job with tensorflow|
+|Flow|[Flow run management in azure](https://github.com/microsoft/promptflow/blob/main/examples/tutorials/run-management/cloud-run-management.ipynb)|Flow run management in Azure AI|
+|Flow|[Flow run management](https://github.com/microsoft/promptflow/blob/main/examples/tutorials/run-management/run-management.ipynb)|Flow run management|
+|Flow|[Evaluate with langchain's evaluator](https://github.com/microsoft/promptflow/blob/main/examples/flex-flows/eval-criteria-with-langchain/langchain-eval.ipynb)|A tutorial to converting LangChain criteria evaluator application to flex flow.|
+|Flow|[Chat with pdf - test, evaluation and experimentation](https://github.com/microsoft/promptflow/blob/main/examples/flows/chat/chat-with-pdf/chat-with-pdf.ipynb)|A tutorial of chat-with-pdf flow that allows user ask questions about the content of a PDF file and get answers|
+|Deployment|[Deploy flow using kubernetes](https://github.com/microsoft/promptflow/blob/main/examples/tutorials/flow-deploy/kubernetes/README.md)| This example demos how to deploy flow as a Kubernetes app|
+|Deployment|[Deploy a flow using docker](https://github.com/microsoft/promptflow/blob/main/examples/tutorials/flow-deploy/docker/README.md)| This example demos how to deploy flow as a docker app|
+|Deployment|[Create service with flow](https://github.com/microsoft/promptflow/blob/main/examples/tutorials/flow-deploy/create-service-with-flow/README.md)| This example shows how to create a simple service with flow|
+|Deployment|[Distribute flow as executable app](https://github.com/microsoft/promptflow/blob/main/examples/tutorials/flow-deploy/distribute-flow-as-executable-app/README.md)| This example demos how to package flow as an executable app|
+|Deployment|[Deploy flow using azure app service](https://github.com/microsoft/promptflow/blob/main/examples/tutorials/flow-deploy/azure-app-service/README.md)| This example demos how to deploy a flow using Azure App Service|
+|Rag|[Tutorial: chat with pdf](https://github.com/microsoft/promptflow/blob/main/examples/tutorials/e2e-development/chat-with-pdf.md)| Retrieval Augmented Generation (or RAG) has become a prevalent pattern to build intelligent application with Large Language Models (or LLMs) since it can infuse external knowledge into the model, which is not trained with those up-to-date or proprietary information|
+|Rag|[Tutorial: how prompt flow helps on quality improvement](https://github.com/microsoft/promptflow/blob/main/examples/tutorials/flow-fine-tuning-evaluation/promptflow-quality-improvement.md)| This tutorial is designed to enhance your understanding of improving flow quality through prompt tuning and evaluation|
+|Rag|[Develop promptflow copilot](https://github.com/microsoft/promptflow/blob/main/examples/tutorials/develop-promptflow-copilot/develop-promptflow-copilot.md)| In this tutorial, we will provide a detailed walkthrough on creating a RAG-based copilot using the Azure Machine Learning promptflow toolkit|
+Learn more: [Try out more promptflow examples.](https://github.com/microsoft/promptflow/tree/main/examples)
\ No newline at end of file
diff --git a/examples/tutorials/develop-promptflow-copilot/develop-promptflow-copilot.md b/examples/tutorials/develop-promptflow-copilot/develop-promptflow-copilot.md
index 7c2015fc319..dfdd9402c94 100644
--- a/examples/tutorials/develop-promptflow-copilot/develop-promptflow-copilot.md
+++ b/examples/tutorials/develop-promptflow-copilot/develop-promptflow-copilot.md
@@ -1,3 +1,8 @@
+---
+category: rag
+weight: 30
+---
+
# Develop promptflow copilot
In this tutorial, we will provide a detailed walkthrough on creating a RAG-based copilot using the Azure Machine Learning promptflow toolkit. Our tutorial will cover a range of essential topics, including:
diff --git a/examples/tutorials/e2e-development/chat-with-pdf.md b/examples/tutorials/e2e-development/chat-with-pdf.md
index a39f121896b..d67fe8270f2 100644
--- a/examples/tutorials/e2e-development/chat-with-pdf.md
+++ b/examples/tutorials/e2e-development/chat-with-pdf.md
@@ -1,5 +1,7 @@
---
resources: examples/connections/azure_openai.yml, examples/flows/chat/chat-with-pdf
+cloud: local
+category: rag
---
# Tutorial: Chat with PDF
diff --git a/examples/tutorials/flow-deploy/azure-app-service/README.md b/examples/tutorials/flow-deploy/azure-app-service/README.md
index 4f00cf70031..a73972bbe71 100644
--- a/examples/tutorials/flow-deploy/azure-app-service/README.md
+++ b/examples/tutorials/flow-deploy/azure-app-service/README.md
@@ -1,5 +1,7 @@
---
resources: examples/connections/azure_openai.yml, examples/flows/standard/web-classification
+category: deployment
+weight: 60
---
# Deploy flow using Azure App Service
diff --git a/examples/tutorials/flow-deploy/create-service-with-flow/README.md b/examples/tutorials/flow-deploy/create-service-with-flow/README.md
index ba29a949888..4968a9bccf4 100644
--- a/examples/tutorials/flow-deploy/create-service-with-flow/README.md
+++ b/examples/tutorials/flow-deploy/create-service-with-flow/README.md
@@ -1,5 +1,7 @@
---
resources: examples/tutorials/flow-deploy/create-service-with-flow
+category: deployment
+weight: 50
---
# Create service with flow
diff --git a/examples/tutorials/flow-deploy/distribute-flow-as-executable-app/README.md b/examples/tutorials/flow-deploy/distribute-flow-as-executable-app/README.md
index e3b53ac30f8..0cd199a1df0 100644
--- a/examples/tutorials/flow-deploy/distribute-flow-as-executable-app/README.md
+++ b/examples/tutorials/flow-deploy/distribute-flow-as-executable-app/README.md
@@ -1,5 +1,7 @@
---
resources: examples/connections/azure_openai.yml, examples/flows/standard/web-classification
+category: deployment
+weight: 50
---
# Distribute flow as executable app
diff --git a/examples/tutorials/flow-deploy/docker/README.md b/examples/tutorials/flow-deploy/docker/README.md
index c86692f4977..d60a824fa0b 100644
--- a/examples/tutorials/flow-deploy/docker/README.md
+++ b/examples/tutorials/flow-deploy/docker/README.md
@@ -1,5 +1,7 @@
---
resources: examples/connections/azure_openai.yml, examples/flows/standard/web-classification
+category: deployment
+weight: 40
---
# Deploy a flow using Docker
diff --git a/examples/tutorials/flow-deploy/kubernetes/README.md b/examples/tutorials/flow-deploy/kubernetes/README.md
index db95e2b14bc..002ba33e340 100644
--- a/examples/tutorials/flow-deploy/kubernetes/README.md
+++ b/examples/tutorials/flow-deploy/kubernetes/README.md
@@ -1,5 +1,7 @@
---
resources: examples/connections/azure_openai.yml, examples/flows/standard/web-classification
+category: deployment
+weight: 30
---
# Deploy flow using Kubernetes
diff --git a/examples/tutorials/flow-fine-tuning-evaluation/promptflow-quality-improvement.md b/examples/tutorials/flow-fine-tuning-evaluation/promptflow-quality-improvement.md
index fd6a893c1bc..9709d6dd300 100644
--- a/examples/tutorials/flow-fine-tuning-evaluation/promptflow-quality-improvement.md
+++ b/examples/tutorials/flow-fine-tuning-evaluation/promptflow-quality-improvement.md
@@ -1,5 +1,6 @@
---
resources: examples/connections/azure_openai.yml, examples/flows/chat/chat-basic, examples/flows/chat/chat-math-variant, examples/flows/evaluation/eval-chat-math
+category: rag
---
# Tutorial: How prompt flow helps on quality improvement
diff --git a/examples/tutorials/tracing/README.md b/examples/tutorials/tracing/README.md
index 17c10a8e769..83f9c869285 100644
--- a/examples/tutorials/tracing/README.md
+++ b/examples/tutorials/tracing/README.md
@@ -1,5 +1,7 @@
---
resources: examples/tutorials/tracing/
+cloud: local
+category: tracing
---
## Tracing
diff --git a/scripts/readme/ghactions_driver/readme_step.py b/scripts/readme/ghactions_driver/readme_step.py
index 3e958a83b46..87850024451 100644
--- a/scripts/readme/ghactions_driver/readme_step.py
+++ b/scripts/readme/ghactions_driver/readme_step.py
@@ -359,7 +359,7 @@ def write_workflow(
from .resource_resolver import resolve_tutorial_resource
path_filter = resolve_tutorial_resource(
- workflow_name, readme_path.resolve()
+ workflow_name, readme_path.resolve(), output_telemetry
)
else:
if (
diff --git a/scripts/readme/ghactions_driver/readme_templates/tutorial_index.md.jinja2 b/scripts/readme/ghactions_driver/readme_templates/tutorial_index.md.jinja2
new file mode 100644
index 00000000000..f6be89e517e
--- /dev/null
+++ b/scripts/readme/ghactions_driver/readme_templates/tutorial_index.md.jinja2
@@ -0,0 +1,10 @@
+# Tutorials
+
+This section contains a collection of flow samples and step-by-step tutorials.
+
+|Category|Sample
|Description|
+|--|--|--|
+{% for item in items %}|{{ item.category }}|[{{ item.title }}]({{ item.url }})|{{ item.description }}|
+{% endfor %}
+
+Learn more: [Try out more promptflow examples.](https://github.com/microsoft/promptflow/tree/main/examples)
diff --git a/scripts/readme/ghactions_driver/resource_resolver.py b/scripts/readme/ghactions_driver/resource_resolver.py
index b2876e370c9..4a943b3b535 100644
--- a/scripts/readme/ghactions_driver/resource_resolver.py
+++ b/scripts/readme/ghactions_driver/resource_resolver.py
@@ -1,5 +1,4 @@
from pathlib import Path
-from typing import List
import markdown
import nbformat
@@ -11,36 +10,74 @@
"Please follow examples contributing guide to declare tutorial resources: "
"https://github.com/microsoft/promptflow/blob/main/examples/CONTRIBUTING.md"
)
+TITLE_KEY_NAME = "title"
+CLOUD_KEY_NAME = "cloud"
+CATEGORY_KEY_NAME = "category"
+WEIGHT_KEY_NAME = "weight"
-def _parse_resources_string_from_notebook(path: Path) -> str:
+def _parse_resources_string_from_notebook(path: Path, output_telemetry):
with open(path, "r", encoding="utf-8") as f:
nb = nbformat.read(f, as_version=4)
+ obj = {}
+ if nb.metadata.get('build_doc', False):
+ if nb.metadata['build_doc'].get(CLOUD_KEY_NAME, None):
+            output_telemetry.cloud = nb.metadata['build_doc'][CLOUD_KEY_NAME]
+ if nb.metadata['build_doc'].get(CATEGORY_KEY_NAME, None):
+            output_telemetry.category = nb.metadata['build_doc'][CATEGORY_KEY_NAME]
+ if nb.metadata['build_doc'].get(WEIGHT_KEY_NAME, None):
+ output_telemetry.weight = int(nb.metadata['build_doc']['weight'])
+ cell = nb['cells'][0]
+ for cell in nb['cells']:
+ if (cell['cell_type'] == 'markdown'):
+ break
+ if (cell['cell_type'] == 'markdown'):
+ lines = cell.source.split('\n')
+ for line in lines:
+ if '#' in line:
+ output_telemetry.title = line.replace('#', '').strip()
+ break
if RESOURCES_KEY_NAME not in nb.metadata:
raise Exception(RESOURCES_KEY_ERROR_MESSAGE + f" . Error in {path}")
- return nb.metadata[RESOURCES_KEY_NAME]
+ obj[RESOURCES_KEY_NAME] = nb.metadata[RESOURCES_KEY_NAME]
+ return obj
-def _parse_resources_string_from_markdown(path: Path) -> str:
+def _parse_resources_string_from_markdown(path: Path, output_telemetry):
markdown_content = path.read_text(encoding="utf-8")
md = markdown.Markdown(extensions=["meta"])
md.convert(markdown_content)
+ obj = {}
+ for line in md.lines:
+ if '#' in line:
+ output_telemetry.title = line.replace('#', '').strip()
+ break
+ if CLOUD_KEY_NAME in md.Meta:
+ output_telemetry.cloud = md.Meta[CLOUD_KEY_NAME][0]
+ if CATEGORY_KEY_NAME in md.Meta:
+ output_telemetry.category = md.Meta[CATEGORY_KEY_NAME][0]
+ if WEIGHT_KEY_NAME in md.Meta:
+ output_telemetry.weight = int(md.Meta[WEIGHT_KEY_NAME][0])
if RESOURCES_KEY_NAME not in md.Meta:
raise Exception(RESOURCES_KEY_ERROR_MESSAGE + f" . Error in {path}")
- return md.Meta[RESOURCES_KEY_NAME][0]
+ obj[RESOURCES_KEY_NAME] = md.Meta[RESOURCES_KEY_NAME][0]
+ return obj
-def _parse_resources(path: Path) -> List[str]:
+def _parse_resources(path: Path, output_telemetry):
+ metadata = {}
if path.suffix == ".ipynb":
- resources_string = _parse_resources_string_from_notebook(path)
+ metadata = _parse_resources_string_from_notebook(path, output_telemetry)
+ resources_string = metadata["resources"]
elif path.suffix == ".md":
- resources_string = _parse_resources_string_from_markdown(path)
+ metadata = _parse_resources_string_from_markdown(path, output_telemetry)
+ resources_string = metadata["resources"]
else:
raise Exception(f"Unknown file type: {path.suffix!r}")
return [resource.strip() for resource in resources_string.split(",")]
-def resolve_tutorial_resource(workflow_name: str, resource_path: Path) -> str:
+def resolve_tutorial_resource(workflow_name: str, resource_path: Path, output_telemetry):
"""Resolve tutorial resources, so that workflow can be triggered more precisely.
A tutorial workflow should listen to changes of:
@@ -55,7 +92,7 @@ def resolve_tutorial_resource(workflow_name: str, resource_path: Path) -> str:
working_dir = resource_path.parent.relative_to(git_base_dir).as_posix()
path_filter_list = [f"{working_dir}/**"]
# resources declared in text file
- resources = _parse_resources(resource_path)
+ resources = _parse_resources(resource_path, output_telemetry)
for resource in resources:
# skip empty line
if len(resource) == 0:
diff --git a/scripts/readme/readme.py b/scripts/readme/readme.py
index aacc2a87e95..2107238deb2 100644
--- a/scripts/readme/readme.py
+++ b/scripts/readme/readme.py
@@ -28,6 +28,20 @@ def get_notebook_readme_description(notebook) -> str:
return ""
+def get_notebook_buildDoc_description(notebook) -> dict:
+    """
+    Get each ipynb build_doc metadata at .metadata.build_doc (returns {} when unset)
+    """
+ try:
+ # read in notebook
+ with open(notebook, "r", encoding="utf-8") as f:
+ data = json.load(f)
+ return data["metadata"]["build_doc"]
+ except Exception:
+ print(f"{notebook} metadata build_doc not set")
+ return {}
+
+
def get_readme_description_first_sentence(readme) -> str:
"""
Get each readme first sentence of first paragraph
@@ -42,7 +56,14 @@ def get_readme_description_first_sentence(readme) -> str:
if line.startswith("#"):
line = ""
# skip metadata section
- if line.startswith("---") or line.startswith("resources"):
+ if (
+ line.startswith("---")
+ or line.startswith("resources:")
+ or line.startswith("title:")
+ or line.startswith("cloud:")
+ or line.startswith("category:")
+ or line.startswith("weight:")
+ ):
line = ""
if line.strip() == "" and sentence != "":
break
@@ -112,102 +133,53 @@ def write_readme(workflow_telemetries, readme_telemetries):
# For workflows, open ipynb as raw json and
# setup description at .metadata.description
description = get_notebook_readme_description(workflow_telemetry.notebook)
+ build_doc = get_notebook_buildDoc_description(workflow_telemetry.notebook)
notebook_path = gh_working_dir.replace("examples/", "") + f"/{notebook_name}"
+ default_workflow_item = {
+ "name": notebook_name,
+ "path": notebook_path,
+ "pipeline_name": pipeline_name,
+ "yaml_name": yaml_name,
+ "description": description,
+ "build_doc": build_doc,
+ "title": workflow_telemetry.title.capitalize()
+ if hasattr(workflow_telemetry, "title")
+ else "Empty title",
+ "cloud": workflow_telemetry.cloud.capitalize()
+ if hasattr(workflow_telemetry, "cloud")
+ else "NOT DEFINED",
+ "category": workflow_telemetry.category.capitalize()
+ if hasattr(workflow_telemetry, "category")
+ else "General",
+ "weight": workflow_telemetry.weight
+ if hasattr(workflow_telemetry, "weight")
+ else 0,
+ }
if gh_working_dir.startswith("examples/flows/standard"):
- flows["notebooks"].append(
- {
- "name": notebook_name,
- "path": notebook_path,
- "pipeline_name": pipeline_name,
- "yaml_name": yaml_name,
- "description": description,
- }
- )
+ flows["notebooks"].append(default_workflow_item)
elif gh_working_dir.startswith("examples/connections"):
- connections["notebooks"].append(
- {
- "name": notebook_name,
- "path": notebook_path,
- "pipeline_name": pipeline_name,
- "yaml_name": yaml_name,
- "description": description,
- }
- )
+ connections["notebooks"].append(default_workflow_item)
elif gh_working_dir.startswith("examples/flows/evaluation"):
- evaluations["notebooks"].append(
- {
- "name": notebook_name,
- "path": notebook_path,
- "pipeline_name": pipeline_name,
- "yaml_name": yaml_name,
- "description": description,
- }
- )
+ evaluations["notebooks"].append(default_workflow_item)
elif gh_working_dir.startswith("examples/tutorials"):
if "quickstart" in notebook_name:
- quickstarts["notebooks"].append(
- {
- "name": notebook_name,
- "path": notebook_path,
- "pipeline_name": pipeline_name,
- "yaml_name": yaml_name,
- "description": description,
- }
- )
+ quickstarts["notebooks"].append(default_workflow_item)
else:
- tutorials["notebooks"].append(
- {
- "name": notebook_name,
- "path": notebook_path,
- "pipeline_name": pipeline_name,
- "yaml_name": yaml_name,
- "description": description,
- }
- )
+ tutorials["notebooks"].append(default_workflow_item)
elif gh_working_dir.startswith("examples/flows/chat"):
- chats["notebooks"].append(
- {
- "name": notebook_name,
- "path": notebook_path,
- "pipeline_name": pipeline_name,
- "yaml_name": yaml_name,
- "description": description,
- }
- )
+ chats["notebooks"].append(default_workflow_item)
elif gh_working_dir.startswith("examples/flex-flows"):
- flex_flows["notebooks"].append(
- {
- "name": notebook_name,
- "path": notebook_path,
- "pipeline_name": pipeline_name,
- "yaml_name": yaml_name,
- "description": description,
- }
- )
+ flex_flows["notebooks"].append(default_workflow_item)
elif gh_working_dir.startswith("examples/prompty"):
- prompty["notebooks"].append(
- {
- "name": notebook_name,
- "path": notebook_path,
- "pipeline_name": pipeline_name,
- "yaml_name": yaml_name,
- "description": description,
- }
- )
+ prompty["notebooks"].append(default_workflow_item)
elif gh_working_dir.startswith("examples/tools/use-cases"):
- toolusecases["notebooks"].append(
- {
- "name": notebook_name,
- "path": notebook_path,
- "pipeline_name": pipeline_name,
- "yaml_name": yaml_name,
- "description": description,
- }
- )
+ toolusecases["notebooks"].append(default_workflow_item)
else:
print(f"Unknown workflow type: {gh_working_dir}")
- # Adjust tutorial names:
+ # Adjust tutorial names:
+
+ no_workflow_readmes = []
for readme_telemetry in readme_telemetries:
if readme_telemetry.readme_name.endswith("README.md"):
@@ -217,6 +189,29 @@ def write_readme(workflow_telemetries, readme_telemetries):
".md", ""
)
notebook_path = readme_telemetry.readme_name.replace("examples/", "")
+ if not hasattr(readme_telemetry, "workflow_name"):
+ no_workflow_readme_item = {
+ "name": notebook_name,
+ "path": notebook_path,
+ "description": get_readme_description_first_sentence(
+ readme_telemetry.readme_name
+ ),
+ "title": readme_telemetry.title.capitalize()
+ if hasattr(readme_telemetry, "title")
+ else "Empty title",
+ "cloud": readme_telemetry.cloud.capitalize()
+ if hasattr(readme_telemetry, "cloud")
+ else "NOT DEFINED",
+ "category": readme_telemetry.category.capitalize()
+ if hasattr(readme_telemetry, "category")
+ else "General",
+ "weight": readme_telemetry.weight
+ if hasattr(readme_telemetry, "weight")
+ else 0,
+ }
+ no_workflow_readmes.append(no_workflow_readme_item)
+ continue
+
pipeline_name = readme_telemetry.workflow_name
yaml_name = f"{readme_telemetry.workflow_name}.yml"
description = get_readme_description_first_sentence(
@@ -224,97 +219,44 @@ def write_readme(workflow_telemetries, readme_telemetries):
)
readme_folder = readme_telemetry.readme_folder
+ default_readme_item = {
+ "name": notebook_name,
+ "path": notebook_path,
+ "pipeline_name": pipeline_name,
+ "yaml_name": yaml_name,
+ "description": description,
+ "title": readme_telemetry.title.capitalize()
+ if hasattr(readme_telemetry, "title")
+ else "Empty title",
+ "cloud": readme_telemetry.cloud.capitalize()
+ if hasattr(readme_telemetry, "cloud")
+ else "NOT DEFINED",
+ "category": readme_telemetry.category.capitalize()
+ if hasattr(readme_telemetry, "category")
+ else "General",
+ "weight": readme_telemetry.weight
+ if hasattr(readme_telemetry, "weight")
+ else 0,
+ }
if readme_folder.startswith("examples/flows/standard"):
- flows["readmes"].append(
- {
- "name": notebook_name,
- "path": notebook_path,
- "pipeline_name": pipeline_name,
- "yaml_name": yaml_name,
- "description": description,
- }
- )
+ flows["readmes"].append(default_readme_item)
elif readme_folder.startswith("examples/connections"):
- connections["readmes"].append(
- {
- "name": notebook_name,
- "path": notebook_path,
- "pipeline_name": pipeline_name,
- "yaml_name": yaml_name,
- "description": description,
- }
- )
+ connections["readmes"].append(default_readme_item)
elif readme_folder.startswith("examples/flows/evaluation"):
- evaluations["readmes"].append(
- {
- "name": notebook_name,
- "path": notebook_path,
- "pipeline_name": pipeline_name,
- "yaml_name": yaml_name,
- "description": description,
- }
- )
+ evaluations["readmes"].append(default_readme_item)
elif readme_folder.startswith("examples/tutorials"):
if "quickstart" in notebook_name:
- quickstarts["readmes"].append(
- {
- "name": notebook_name,
- "path": notebook_path,
- "pipeline_name": pipeline_name,
- "yaml_name": yaml_name,
- "description": description,
- }
- )
+ quickstarts["readmes"].append(default_readme_item)
else:
- tutorials["readmes"].append(
- {
- "name": notebook_name,
- "path": notebook_path,
- "pipeline_name": pipeline_name,
- "yaml_name": yaml_name,
- "description": description,
- }
- )
+ tutorials["readmes"].append(default_readme_item)
elif readme_folder.startswith("examples/flows/chat"):
- chats["readmes"].append(
- {
- "name": notebook_name,
- "path": notebook_path,
- "pipeline_name": pipeline_name,
- "yaml_name": yaml_name,
- "description": description,
- }
- )
+ chats["readmes"].append(default_readme_item)
elif readme_folder.startswith("examples/flex-flows"):
- flex_flows["readmes"].append(
- {
- "name": notebook_name,
- "path": notebook_path,
- "pipeline_name": pipeline_name,
- "yaml_name": yaml_name,
- "description": description,
- }
- )
+ flex_flows["readmes"].append(default_readme_item)
elif readme_folder.startswith("examples/prompty"):
- prompty["readmes"].append(
- {
- "name": notebook_name,
- "path": notebook_path,
- "pipeline_name": pipeline_name,
- "yaml_name": yaml_name,
- "description": description,
- }
- )
+ prompty["readmes"].append(default_readme_item)
elif readme_folder.startswith("examples/tools/use-cases"):
- toolusecases["readmes"].append(
- {
- "name": notebook_name,
- "path": notebook_path,
- "pipeline_name": pipeline_name,
- "yaml_name": yaml_name,
- "description": description,
- }
- )
+ toolusecases["readmes"].append(default_readme_item)
else:
print(f"Unknown workflow type: {readme_folder}")
@@ -323,6 +265,8 @@ def write_readme(workflow_telemetries, readme_telemetries):
key=itemgetter("name"),
reverse=True,
)
+
+    # The `replacement` dict below feeds the Jinja README template; inspect it when debugging generated output
replacement = {
"branch": BRANCH,
"tutorials": tutorials,
@@ -346,7 +290,81 @@ def write_readme(workflow_telemetries, readme_telemetries):
template = env.get_template("README.md.jinja2")
with open(readme_file, "w") as f:
f.write(template.render(replacement))
- print("finished writing README.md")
+ print(f"finished writing {str(readme_file)}")
+
+ # Build a table out of replacement
+ # |Area|Cloud|Category|Sample|Description|
+ new_items = []
+ for row in replacement.keys():
+ if row == "branch":
+ continue
+ for item in replacement[row]["notebooks"]:
+ item[
+ "url"
+ ] = f"https://github.com/microsoft/promptflow/blob/main/examples/{item['path']}"
+ item["area"] = "SDK"
+ new_items.append(item)
+ for item in replacement[row]["readmes"]:
+ if item.get("category", "General") == "General":
+ print(
+ f"Tutorial Index: Skipping {item['path']} for not having a category"
+ )
+ continue
+ item[
+ "url"
+ ] = f"https://github.com/microsoft/promptflow/blob/main/examples/{item['path']}"
+ item["area"] = "CLI"
+ new_items.append(item)
+ for item in no_workflow_readmes:
+ if not item["path"].startswith("tutorials"):
+ print(f"Tutorial Index: Skipping {item['path']} for not being in tutorials")
+ continue
+ if item.get("category", "General") == "General":
+ print(f"Tutorial Index: Skipping {item['path']} for not having a category")
+ continue
+ item[
+ "url"
+ ] = f"https://github.com/microsoft/promptflow/blob/main/examples/{item['path']}"
+ item["area"] = "CLI"
+ new_items.append(item)
+
+ # sort new_items by category
+ tracing_category = sorted(
+ [item for item in new_items if item["category"] == "Tracing"],
+ key=lambda x: x["weight"],
+ )
+ prompty_category = sorted(
+ [item for item in new_items if item["category"] == "Prompty"],
+ key=lambda x: x["weight"],
+ )
+ flow_category = sorted(
+ [item for item in new_items if item["category"] == "Flow"],
+ key=lambda x: x["weight"],
+ )
+ deployment_category = sorted(
+ [item for item in new_items if item["category"] == "Deployment"],
+ key=lambda x: x["weight"],
+ )
+ rag_category = sorted(
+ [item for item in new_items if item["category"] == "Rag"],
+ key=lambda x: x["weight"],
+ )
+
+ real_new_items = [
+ *tracing_category,
+ *prompty_category,
+ *flow_category,
+ *deployment_category,
+ *rag_category,
+ ]
+ tutorial_items = {"items": real_new_items}
+ tutorial_index_file = (
+ Path(ReadmeStepsManage.git_base_dir()) / "docs/tutorials/index.md"
+ )
+ template_tutorial = env.get_template("tutorial_index.md.jinja2")
+ with open(tutorial_index_file, "w") as f:
+ f.write(template_tutorial.render(tutorial_items))
+ print(f"Tutorial Index: finished writing {str(tutorial_index_file)}")
def main(check):
@@ -363,9 +381,7 @@ def main(check):
"examples/flex-flows/**/README.md",
"examples/prompty/**/README.md",
"examples/connections/**/README.md",
- "examples/tutorials/e2e-development/*.md",
- "examples/tutorials/flow-fine-tuning-evaluation/*.md",
- "examples/tutorials/**/README.md",
+ "examples/tutorials/**/*.md",
"examples/tools/use-cases/**/README.md",
]
# exclude the readme since this is 3p integration folder, pipeline generation is not included
@@ -394,6 +410,8 @@ def main(check):
continue
output_object[workflow.workflow_name].append(item)
for readme in readme_telemetry:
+        if not hasattr(readme, "workflow_name"):
+ continue
output_object[readme.workflow_name] = []
readme_items = re.split(r"\[|,| |\]", readme.path_filter)
readme_items = list(filter(None, readme_items))
diff --git a/scripts/readme/readme_generator.py b/scripts/readme/readme_generator.py
index 62f4980f415..a9b4e188526 100644
--- a/scripts/readme/readme_generator.py
+++ b/scripts/readme/readme_generator.py
@@ -11,12 +11,15 @@
def local_filter(callback, array: [Path]):
results = []
+ backups = []
for index, item in enumerate(array):
result = callback(item, index, array)
# if returned true, append item to results
if result:
results.append(item)
- return results
+ else:
+ backups.append(item)
+ return results, backups
def no_readme_generation_filter(item: Path, index, array) -> bool:
@@ -50,7 +53,7 @@ def set_difference(p, q):
globs)
readme_items = sorted([i for i in globs_exclude])
- readme_items = local_filter(no_readme_generation_filter, readme_items)
+ readme_items, no_generation_files = local_filter(no_readme_generation_filter, readme_items)
for readme in readme_items:
readme_telemetry = Telemetry()
workflow_name = readme.relative_to(ReadmeStepsManage.git_base_dir())
@@ -58,6 +61,18 @@ def set_difference(p, q):
write_readme_workflow(workflow_name.resolve(), readme_telemetry)
ReadmeSteps.cleanup()
output_files.append(readme_telemetry)
+ for readme in no_generation_files:
+ readme_telemetry = Telemetry()
+ from ghactions_driver.resource_resolver import resolve_tutorial_resource
+ try:
+ resolve_tutorial_resource(
+ "TEMP", readme.resolve(), readme_telemetry
+ )
+ except Exception:
+ pass
+ readme_telemetry.readme_name = str(readme.relative_to(ReadmeStepsManage.git_base_dir()))
+ readme_telemetry.readme_folder = str(readme.relative_to(ReadmeStepsManage.git_base_dir()).parent)
+ output_files.append(readme_telemetry)
if __name__ == "__main__":
diff --git a/scripts/readme/workflow_generator.py b/scripts/readme/workflow_generator.py
index 34462acfe4c..319ea890119 100644
--- a/scripts/readme/workflow_generator.py
+++ b/scripts/readme/workflow_generator.py
@@ -58,10 +58,16 @@ def write_notebook_workflow(notebook, name, output_telemetry=Telemetry()):
schedule_minute = name_hash % 60
schedule_hour = (name_hash // 60) % 4 + 19 # 19-22 UTC
- if "examples/tutorials" in gh_working_dir:
- notebook_path = Path(ReadmeStepsManage.git_base_dir()) / str(notebook)
- path_filter = resolve_tutorial_resource(workflow_name, notebook_path.resolve())
- elif "samples_configuration" in workflow_name:
+ notebook_path = Path(ReadmeStepsManage.git_base_dir()) / str(notebook)
+ try:
+ # resolve tutorial resources
+ path_filter = resolve_tutorial_resource(workflow_name, notebook_path.resolve(), output_telemetry)
+ except Exception:
+ if "examples/tutorials" in gh_working_dir:
+ raise
+ else:
+ pass
+ if "samples_configuration" in workflow_name:
# exception, samples configuration is very simple and not related to other prompt flow examples
path_filter = (
"[ examples/configuration.ipynb, .github/workflows/samples_configuration.yml ]"