from langchain_openai import OpenAI
llm = OpenAI(model_name="gpt-3.5-turbo-instruct", max_tokens=1024, temperature=.7)
print(llm.invoke("Tell me a scary story."))
It was a dark and stormy night, and the wind was howling outside.....
from langchain_openai import ChatOpenAI
from langchain import PromptTemplate, LLMChain
from langchain.prompts.chat import (
    ChatPromptTemplate,
    SystemMessagePromptTemplate,
    AIMessagePromptTemplate,
    HumanMessagePromptTemplate,
)
from langchain.schema import (
    AIMessage,
    HumanMessage,
    SystemMessage,
)
chat = ChatOpenAI(temperature=0)
# The human message is passed in as string content; chat returns an AIMessage whose content is the model's answer
chat.invoke([HumanMessage(content="Translate this sentence from English to Chinese. I love programming.")])
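The call returns an AIMessage whose content holds the translation; the exact wording varies from run to run, but the result looks roughly like:
AIMessage(content='我喜欢编程。(Wǒ xǐhuān biānchéng.)')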
batch_messages = [
    [
        SystemMessage(content="You are a helpful assistant that translates English to French."),
        HumanMessage(content="Translate this sentence from English to French. I love programming.")
    ],
    [
        SystemMessage(content="You are a helpful assistant that translates English to Chinese."),
        HumanMessage(content="Translate this sentence from English to Chinese. I love artificial intelligence.")
    ],
]
result = chat.generate(batch_messages)
print(result.generations[0][0].text)
print(result.generations[1][0].text)
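Besides the generations, the LLMResult returned by generate carries provider metadata; for OpenAI models this typically includes token usage (the exact field layout can vary by version):

print(result.llm_output)  # e.g. {'token_usage': {...}, 'model_name': '...'}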
Parse with prompt: this method takes a string (assumed to be a language model response) and a prompt (assumed to be the prompt that produced that response), and parses the string into some structure. The prompt is consulted mainly when the output parser needs to retry or fix the output, so it can act on the prompt's content; a sketch using RetryOutputParser follows the Pydantic example below.
from typing import List
from langchain_openai import OpenAI
from langchain.output_parsers import PydanticOutputParser
from langchain.prompts import PromptTemplate
from langchain.pydantic_v1 import BaseModel, Field, validator
model = OpenAI(model_name='gpt-3.5-turbo-instruct', temperature=0.0)
# Define the data structure you expect
class Joke(BaseModel):
    setup: str = Field(description="question to set up a joke")
    punchline: str = Field(description="answer to resolve the joke")
    # Pydantic makes it easy to add custom validation logic
    @validator('setup')
    def question_ends_with_question_mark(cls, field):
        if field[-1] != "?":
            raise ValueError("question should end with a question mark")
        return field
# Initialize the PydanticOutputParser
parser = PydanticOutputParser(pydantic_object=Joke)
# Set up the prompt template
prompt = PromptTemplate(
    template="Answer the user query.\n{format_instructions}\n{query}\n",
    input_variables=["query"],
    partial_variables={"format_instructions": parser.get_format_instructions()}
)
# Pipe the prompt template into the language model
prompt_and_model = prompt | model
# Submit the user query and parse the response
output = prompt_and_model.invoke({"query": "Tell me a joke."})
parser.invoke(output)
Joke(setup='Why did the tomato turn red?', punchline='Because it saw the salad dressing!')
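The prompt, model, and parser can also be piped into one runnable, so parsing happens inside a single invoke; a minimal LCEL sketch using the objects defined above:

chain = prompt | model | parser
chain.invoke({"query": "Tell me a joke."})  # returns a Joke instance directly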
Looking at template="Answer the user query.\n{format_instructions}\n{query}\n": format_instructions is the prompt text that tells the language model what format to return, and query is the user's question. The template tells the model how to structure its output so that the downstream parser can process it correctly.
The output should be formatted as a JSON instance that conforms to the JSON schema below.
As an example, for the schema {"properties": {"foo": {"title": "Foo", "description": "a list of strings", "type": "array", "items": {"type": "string"}}}, "required": ["foo"]}
the object {"foo": ["bar", "baz"]} is a well-formatted instance of the schema. The object {"properties": {"foo": ["bar", "baz"]}} is not well-formatted.
Here is the output schema:
```
{"properties": {"setup": {"title": "Setup", "description": "question to set up a joke", "type": "string"}, "punchline": {"title": "Punchline", "description": "answer to resolve the joke", "type": "string"}}, "required": ["setup", "punchline"]}
```
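Here is the RetryOutputParser sketch promised above: it wraps an existing parser and, when parsing fails, parse_with_prompt lets an LLM repair the output using the original prompt. The bad_response below is a hypothetical malformed completion (it is missing the required punchline field):

from langchain.output_parsers import RetryOutputParser
# Wrap the Pydantic parser; the extra LLM is used to repair bad outputs
retry_parser = RetryOutputParser.from_llm(parser=parser, llm=OpenAI(temperature=0))
# The PromptValue that (hypothetically) produced the bad response
prompt_value = prompt.format_prompt(query="Tell me a joke.")
bad_response = '{"setup": "Why did the chicken cross the road?"}'
retry_parser.parse_with_prompt(bad_response, prompt_value)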
from langchain import PromptTemplate
from langchain_openai import OpenAI
def generate_city_names(city_features):
    # The template body; slots to fill are marked with braces, just like str.format placeholders
    prompt_template = "I would like to travel to other cities. The main features of the cities are: {}. Give me three cities by name only."
    # Wrap the prompt in a list of strings
    prompt = [prompt_template.format(city_features)]
    llm = OpenAI(max_tokens=100, temperature=0.8)
    response = llm.generate(prompt)
    city_names = [gen[0].text.strip() for gen in response.generations]
    return city_names
city_features = "Sun, beach, romance."
city_names = generate_city_names(city_features)
print(city_names)
from langchain import PromptTemplate
# input_variables may be an empty list, meaning the template contains no variables
no_input_prompt = PromptTemplate(input_variables=[], template="Tell me a joke.")
no_input_prompt.format()
# An example with one input variable; the templated prompt takes adjective as a parameter
one_input_prompt = PromptTemplate(input_variables=["adjective"],
                                  template="Tell me a {adjective} joke.")
one_input_prompt.format(adjective="funny")
# An example with multiple input variables; the templated prompt takes adjective and content as parameters
multiple_input_prompt = PromptTemplate(
    input_variables=["adjective", "content"],
    template="Tell me a {adjective} joke about {content}."
)
multiple_input_prompt.format(adjective="funny", content="chickens")
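If you would rather not list input_variables by hand, PromptTemplate.from_template infers them from the braces in the template string; same behavior, less typing:

inferred_prompt = PromptTemplate.from_template("Tell me a {adjective} joke about {content}.")
inferred_prompt.format(adjective="funny", content="chickens")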
prompt = PromptTemplate(
    template="You are a helpful assistant that translates {input_language} to {output_language}.",
    input_variables=["input_language", "output_language"],
)
system_message_prompt = SystemMessagePromptTemplate(prompt=prompt)
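A message prompt template like this is usually combined with others into a ChatPromptTemplate and then formatted; a minimal sketch:

chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt])
# format_prompt fills in the variables; to_messages() yields the final message list
chat_prompt.format_prompt(input_language="English", output_language="Chinese").to_messages()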
import langchain
from langchain_openai import OpenAI
from langchain.cache import InMemoryCache
langchain.llm_cache = InMemoryCache()
# To make the caching really obvious, let's use a slower model.
llm = OpenAI(model_name="gpt-3.5-turbo-instruct", n=2, best_of=2)
# The first time, it is not yet in cache, so it should take longer
print(llm.invoke("Tell me a joke"))
# The second time it is, so it goes faster
print(llm.invoke("Tell me a joke"))
Why was the math book sad?
Because it had too many problems.
Why was the math book sad?
Because it had too many problems.
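To see the cache working as a speedup rather than just identical text, time the two calls; a quick sketch with the standard library, using a prompt that is not yet cached:

import time
start = time.time()
llm.invoke("Tell me another joke")  # cache miss: goes to the API
print(f"miss: {time.time() - start:.3f}s")
start = time.time()
llm.invoke("Tell me another joke")  # cache hit: served from InMemoryCache
print(f"hit:  {time.time() - start:.3f}s")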
import langchain
from langchain_openai import ChatOpenAI
from langchain.cache import SQLiteCache
# Point the LLM cache at an on-disk SQLite database
langchain.llm_cache = SQLiteCache(database_path=".langchain.db")
# Load the chat model
llm = ChatOpenAI()
# Ask the model the first time
result = llm.invoke('tell me a joke')
print(result)
# Ask the same question a second time
result2 = llm.invoke('tell me a joke')
print(result2)
content='Why did the scarecrow win an award? Because he was outstanding in his field!'
content='Why did the scarecrow win an award? Because he was outstanding in his field!'
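A version note: recent LangChain releases deprecate assigning langchain.llm_cache directly in favor of a setter; the equivalent setup (assuming a reasonably current version) is:

from langchain.globals import set_llm_cache
from langchain.cache import SQLiteCache
# Same on-disk cache, configured through the supported setter
set_llm_cache(SQLiteCache(database_path=".langchain.db"))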
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI
llm = OpenAI(temperature=0.9)
# A prompt template that formats the variable into the text
prompt = PromptTemplate(
    input_variables=["product"],
    template="What is a good name for a company that makes {product}?",
)
from langchain.chains import LLMChain
# Instantiate the chain
chain = LLMChain(llm=llm, prompt=prompt)
# Run the chain on the given input with invoke
print(chain.invoke("colorful socks"))
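Note that invoke on an LLMChain returns a dict holding the input variable plus the generated text under the chain's output key (by default "text"), so to print just the name:

result = chain.invoke("colorful socks")
print(result["text"])  # only the generated company name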
from langchain_openai import ChatOpenAI
from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
)
human_message_prompt = HumanMessagePromptTemplate(
    prompt=PromptTemplate(
        template="What is a good name for a company that makes {product}?",
        input_variables=["product"],
    )
)
chat_prompt_template = ChatPromptTemplate.from_messages([human_message_prompt])
chat = ChatOpenAI(temperature=0.9)
chain = LLMChain(llm=chat, prompt=chat_prompt_template)
print(chain.invoke("colorful socks"))
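The transcript that follows comes from a ConversationChain whose setup is not shown above; a minimal sketch of the standard pattern that produces it, assuming the default buffer memory:

from langchain.chains import ConversationChain
llm = OpenAI(temperature=0)
conversation = ConversationChain(llm=llm, verbose=True)
conversation.predict(input="Hi there!")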
" Hello! It's nice to meet you. I am an AI created by OpenAI. I am constantly learning and improving my abilities through machine learning algorithms. How can I assist you today?"
conversation.predict(input="I'm doing well! Just having a conversation with an AI.")
" That's great to hear! I am always happy to engage in conversations and learn more about human interactions. Is there anything specific you would like to talk about?"
conversation.predict(input="Tell me about yourself.")
' Of course! As I mentioned, I am an AI created by OpenAI. I am designed to process and analyze large amounts of data, and use that information to perform various tasks and provide helpful responses. I am constantly learning and adapting to new information, which allows me to improve my abilities over time. Is there anything else you would like to know about me?'
from langchain_openai import OpenAI
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.chains import SimpleSequentialChain
# Define the first chain
llm = OpenAI(temperature=.7)
template = """You are a playwright. Given the title of play, it is your job to write a synopsis for that title.
Title: {title}
Playwright: This is a synopsis for the above play:"""
prompt_template = PromptTemplate(input_variables=["title"], template=template)
synopsis_chain = LLMChain(llm=llm, prompt=prompt_template)
# Define the second chain
llm = OpenAI(temperature=.7)
template = """You are a play critic from the New York Times. Given the synopsis of play, it is your job to write a review for that play.
Play Synopsis:
{synopsis}
Review from a New York Times play critic of the above play:"""
prompt_template = PromptTemplate(input_variables=["synopsis"], template=template)
review_chain = LLMChain(llm=llm, prompt=prompt_template)
# Combine the two LLMChains with a SimpleSequentialChain
overall_chain = SimpleSequentialChain(chains=[synopsis_chain, review_chain], verbose=True)
# Run the sequential chain
review = overall_chain.invoke("Tragedy at sunset on the beach")
Tragedy at Sunset on the Beach follows the story of two young lovers, Mia and Jack....
from langchain.chains.router import MultiPromptChain
from langchain_openai import OpenAI
from langchain.chains import ConversationChain
from langchain.chains.llm import LLMChain
from langchain.prompts import PromptTemplate
physics_template = """You are a very smart physics professor. \
You are great at answering questions about physics in a concise and easy to understand manner. \
When you don't know the answer to a question you admit that you don't know.
Here is a question:
{input}"""
math_template = """You are a very good mathematician. You are great at answering math questions. \
You are so good because you are able to break down hard problems into their component parts, \
answer the component parts, and then put them together to answer the broader question.
Here is a question:
{input}"""
prompt_infos = [
    {
        "name": "physics",
        "description": "Good for answering questions about physics",
        "prompt_template": physics_template,
    },
    {
        "name": "math",
        "description": "Good for answering math questions",
        "prompt_template": math_template,
    },
]
llm = OpenAI()
destination_chains = {}
# Turn each entry in prompt_infos into an LLMChain, keyed by name
for p_info in prompt_infos:
    name = p_info["name"]
    prompt_template = p_info["prompt_template"]
    prompt = PromptTemplate(template=prompt_template, input_variables=["input"])
    chain = LLMChain(llm=llm, prompt=prompt)
    destination_chains[name] = chain
# An LLMRouterChain uses an LLM to decide how to route the input.
from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser
from langchain.chains.router.multi_prompt_prompt import MULTI_PROMPT_ROUTER_TEMPLATE
destinations = [f"{p['name']}: {p['description']}" for p in prompt_infos]
destinations_str = "\n".join(destinations)
# physics: Good for answering questions about physics
# math: Good for answering math questions
router_template = MULTI_PROMPT_ROUTER_TEMPLATE.format(
    destinations=destinations_str
)
router_prompt = PromptTemplate(
    template=router_template,
    input_variables=["input"],
    output_parser=RouterOutputParser(),
)
default_chain = ConversationChain(llm=llm, output_key="text")
router_chain = LLMRouterChain.from_llm(llm, router_prompt)
chain = MultiPromptChain(router_chain=router_chain, destination_chains=destination_chains, default_chain=default_chain, verbose=True)
print(chain.invoke("What is black body radiation?"))
physics: {'input': 'What is black body radiation?'}
Black body radiation is the electromagnetic radiation emitted by ...
print(chain.invoke("What is the first prime number greater than 40 such that one plus the prime number is divisible by 3"))
math: {'input': 'What is the first prime number greater than 40 such that one plus the prime number is divisible by 3'}
The first prime number greater than 40 that satisfies this ...
print(chain.invoke("What is the name of the type of cloud that rins"))
None: {'input': 'What is the name of the type of cloud that rins'}
There are various types of clouds in computing, such as public cloud...