diff --git a/versioned_docs/version-2.0/how_to_guides/prompts/manage_prompts_programatically.mdx b/versioned_docs/version-2.0/how_to_guides/prompts/manage_prompts_programatically.mdx index 82c52fda..7d361170 100644 --- a/versioned_docs/version-2.0/how_to_guides/prompts/manage_prompts_programatically.mdx +++ b/versioned_docs/version-2.0/how_to_guides/prompts/manage_prompts_programatically.mdx @@ -74,10 +74,10 @@ url = prompts.push("joke-generator", prompt) # url is a link to the prompt in the UI print(url) `), - TypeScriptBlock(`import * as prompts from "langchain/hub"; + TypeScriptBlock(`import * as hub from "langchain/hub"; import { ChatPromptTemplate } from "@langchain/core/prompts";\n const prompt = ChatPromptTemplate.fromTemplate("tell me a joke about {topic}"); -url = prompts.push("joke-generator", chain); +const url = await hub.push("joke-generator", prompt); // url is a link to the prompt in the UI console.log(url); `), @@ -110,13 +110,13 @@ url = prompts.push("joke-generator-with-model", chain) # url is a link to the prompt in the UI print(url) `), - TypeScriptBlock(`import * as prompts from "langchain/hub"; + TypeScriptBlock(`import * as hub from "langchain/hub"; import { ChatPromptTemplate } from "@langchain/core/prompts"; -import { ChatOpenAI } from "langchain-openai";\n +import { ChatOpenAI } from "@langchain/openai";\n const model = new ChatOpenAI({ model: "gpt-4o-mini" });\n const prompt = ChatPromptTemplate.fromTemplate("tell me a joke about {topic}"); const chain = prompt.pipe(model);\n -prompts.push("joke-generator-with-model", chain);`), +await hub.push("joke-generator-with-model", chain);`), ]} groupId="client-language" /> @@ -144,12 +144,12 @@ prompt = prompts.pull("joke-generator") model = ChatOpenAI(model="gpt-4o-mini")\n chain = prompt | model chain.invoke({"topic": "cats"})`), - TypeScriptBlock(`import * as prompts from "langchain/hub"; -import { ChatOpenAI } from "langchain-openai";\n -const prompt = prompts.pull("joke-generator"); + 
TypeScriptBlock(`import * as hub from "langchain/hub"; +import { ChatOpenAI } from "@langchain/openai";\n +const prompt = await hub.pull("joke-generator"); const model = new ChatOpenAI({ model: "gpt-4o-mini" });\n const chain = prompt.pipe(model); -chain.invoke({"topic": "cats"});`), +await chain.invoke({"topic": "cats"});`), ]} groupId="client-language" /> @@ -168,10 +168,10 @@ chain.invoke({"topic": "cats"})`), LangChainPyBlock(`from langchain import hub as prompts chain = prompts.pull("joke-generator-with-model", include_model=True) chain.invoke({"topic": "cats"})`), - TypeScriptBlock(`import * as prompts from "langchain/hub"; + TypeScriptBlock(`import * as hub from "langchain/hub"; import { Runnable } from "@langchain/core/runnables";\n -const chain = prompts.pull("joke-generator-with-model", { includeModel: true }); -chain.invoke({"topic": "cats"});`), +const chain = await hub.pull("joke-generator-with-model", { includeModel: true }); +await chain.invoke({"topic": "cats"});`), ]} groupId="client-language" /> @@ -182,7 +182,7 @@ When pulling a prompt, you can also specify a specific commit hash to pull a spe tabs={[ PythonBlock(`prompt = client.pull_prompt("joke-generator:12344e88")`), LangChainPyBlock(`prompt = prompts.pull("joke-generator:12344e88")`), - TypeScriptBlock(`prompt = prompts.pull("joke-generator:12344e88")`), + TypeScriptBlock(`const prompt = await hub.pull("joke-generator:12344e88")`), ]} groupId="client-language" /> @@ -193,7 +193,7 @@ To pull a public prompt from the LangChain Hub, you need to specify the handle o tabs={[ PythonBlock(`prompt = client.pull_prompt("efriis/my-first-prompt")`), LangChainPyBlock(`prompt = prompts.pull("efriis/my-first-prompt")`), - TypeScriptBlock(`prompt = prompts.pull("efriis/my-first-prompt")`), + TypeScriptBlock(`const prompt = await hub.pull("efriis/my-first-prompt")`), ]} groupId="client-language" /> @@ -203,6 +203,21 @@ To pull a public prompt from the LangChain Hub, you need to specify the handle o If you 
want to store your prompts in LangSmith but use them directly with a model provider's API, you can use our conversion methods. These convert your prompt into the payload required for the OpenAI or Anthropic API. +These conversion methods rely on logic from within LangChain integration packages, and you will need to install the appropriate package as a dependency +in addition to your official SDK of choice. Here are some examples: + +### OpenAI + += 0.3.2` + ), + ]} + groupId="client-language" +/> + + +### Anthropic + += 0.3.3` + ), + ]} + groupId="client-language" +/> + +