初次体验

  1. 获取 API Key

    在智谱注册后，在 https://www.bigmodel.cn/usercenter/proj-mgmt/apikeys 获取 API Key。

  2. 运行代码体验

    from zhipuai import ZhipuAI

    # NOTE: fill in your own API key here (obtained from the BigModel console).
    client = ZhipuAI(api_key="")

    # Multi-turn conversation history; the final user turn asks for a refined slogan.
    chat_history = [
        {"role": "user", "content": "作为一名营销专家,请为我的产品创作一个吸引人的口号"},
        {"role": "assistant", "content": "当然,要创作一个吸引人的口号,请告诉我一些关于您产品的信息"},
        {"role": "user", "content": "智谱AI开放平台"},
        {"role": "assistant", "content": "点燃未来,智谱AI绘制无限,让创新触手可及!"},
        {"role": "user", "content": "创作一个更精准且吸引人的口号"},
    ]

    response = client.chat.completions.create(
        model="glm-4-plus",  # name of the model to call
        messages=chat_history,
    )
    print(response.choices[0].message)

与 LangChain 框架结合使用的体验

具体看这里 https://www.bigmodel.cn/dev/api/thirdparty-frame/langchain-sdk

# !pip uninstall langchain_community
# !pip uninstall langchain langchain_openai langchain_core httpx_sse langchainhub langchain_community
!pip install langchain==0.1.12.
##注意要这个版本,因为这个langchain更新比较频繁
!pip install langchain_openai langchain_core httpx_sse langchainhub langchain_community
!pip install langchain langchainhub httpx_sse

# ! pip install langchain_openai
import os

from langchain_openai import ChatOpenAI
from langchain.prompts import (
ChatPromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory

llm = ChatOpenAI(
temperature=0.95,
model="glm-4",
openai_api_key="", ##!you api
openai_api_base="https://open.bigmodel.cn/api/paas/v4/"
)
prompt = ChatPromptTemplate(
messages=[
SystemMessagePromptTemplate.from_template(
"You are a nice chatbot having a conversation with a human."
),
MessagesPlaceholder(variable_name="chat_history"),
HumanMessagePromptTemplate.from_template("{question}")
]
)

memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
conversation = LLMChain(
llm=llm,
prompt=prompt,
verbose=True,
memory=memory
)
conversation.invoke({"question": "tell me a joke"})

Tavily 的 API Key 从这里获取：https://app.tavily.com/home

# !pip install langchain_core
from langchain import hub
from langchain.agents import AgentExecutor, create_react_agent
from langchain_community.tools.tavily_search import TavilySearchResults

# Replace with your own Tavily API key (from https://app.tavily.com/home).
os.environ["TAVILY_API_KEY"] = "api"

# A single web-search tool backed by Tavily, capped at one result per query.
tools = [TavilySearchResults(max_results=1)]

# Pull the standard ReAct prompt template from the LangChain hub.
prompt = hub.pull("hwchase17/react")

# Build a ReAct agent around the `llm` defined in the previous cell and run one query.
agent = create_react_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
agent_executor.invoke({"input": "what is LangChain?"})