Baichuan Large Model AI Dialogue Practice — Building a Chat Bot in Python

The Baichuan large model provides an open API experience center, and the experience is quite good. Some readers are interested in building their own conversation bots, so today I will briefly show how to call the Baichuan API from Python to build a small product of your own.
Insert image description here

This article assumes you already have a working Python development environment; if you need to set one up, refer to any of the many online guides for installing Python.

"""Minimal example: send one chat request to the Baichuan API and print the raw response."""
import requests

# API endpoint
url = "https://api.baichuan-ai.com/v1/chat/completions"

# Request headers
headers = {
    "Content-Type": "application/json",
    "Authorization": "Bearer sk-333333333",  # replace with your actual API key
}

# Request body
data = {
    "model": "Baichuan2-Turbo",
    "messages": [
        {
            "role": "user",
            "content": "你好,"
        }
        # Append further conversation turns here, e.g.:
        # {"role": "assistant", "content": "你好!有什么我可以帮助你的吗?"}
    ],
    "stream": True,  # stream the reply back as SSE chunks
    "temperature": 0.3,
    "top_p": 0.85,
    "top_k": 5,
    "with_search_enhance": False,
    # other optional parameters ...
}

# Send the POST request; a timeout prevents the script from hanging
# indefinitely if the server never responds.
response = requests.post(url, headers=headers, json=data, timeout=60)

# Print the raw (streamed) response text.
print(response.text)

Executing the above code, the results are as follows:

hogworts@hogworts baichuan % python3 chat.py 
data: {
    
    "id":"chatcmpl-M233b00CLE3goFd","object":"chat.completion.chunk","created":1703138623,"model":"Baichuan2-Turbo","choices":[{
    
    "index":0,"delta":{
    
    "role":"assistant","content":"你好"}}]}

data: {
    
    "id":"chatcmpl-M233b00CLE3goFd","object":"chat.completion.chunk","created":1703138623,"model":"Baichuan2-Turbo","choices":[{
    
    "index":0,"delta":{
    
    "role":"assistant","content":"!有什么我可以帮助"}}]}

data: {
    
    "id":"chatcmpl-M233b00CLE3goFd","object":"chat.completion.chunk","created":1703138623,"model":"Baichuan2-Turbo","choices":[{
    
    "index":0,"delta":{
    
    "role":"assistant","content":"你的吗?"},"finish_reason":"stop"}],"usage":{
    
    "prompt_tokens":4,"completion_tokens":9,"total_tokens":13}}

data: [DONE]


1. The raw output is hard to read — can the results be pretty-printed as formatted JSON?
2. The conversation above lasts only a single turn, which is a poor user experience — can it be turned into a continuous, multi-turn dialogue?

The following is reconstructed into a continuous dialogue mode.

"""Interactive console chat with the Baichuan API that keeps the full conversation history.

Carrying the accumulated `messages` list in every request is what makes the
dialogue genuinely "continuous": the model sees all prior turns as context.
"""
import requests
import json

# API endpoint
url = "https://api.baichuan-ai.com/v1/chat/completions"

# Replace with your actual API key.
api_key = "sk-33333333"

# Request headers
headers = {
    "Content-Type": "application/json",
    "Authorization": f"Bearer {api_key}",
}

# Conversation history shared across turns.
messages = []

# Continuous dialogue loop
while True:
    # Read the next user message.
    user_input = input("用户: ")

    # An empty line ends the conversation.
    if not user_input:
        break

    # Record the user turn in the history.
    messages.append({"role": "user", "content": user_input})

    # Request body
    data = {
        "model": "Baichuan2-Turbo",
        "messages": messages,
        "stream": False,
        "temperature": 0.3,
        "top_p": 0.85,
        "top_k": 5,
        "with_search_enhance": False,
        # other optional parameters ...
    }

    # Send the POST request (timeout guards against a hung connection).
    response = requests.post(url, headers=headers, json=data, timeout=60)

    # Handle the response
    if response.status_code == 200:
        result = response.json()
        # Pretty-print with the stdlib instead of shelling out to `jq`,
        # which may not be installed; ensure_ascii=False keeps Chinese
        # text readable instead of \uXXXX escapes.
        print(f"助手: {json.dumps(result, ensure_ascii=False, indent=2)}")

        # Append the assistant's reply so the next request includes it.
        choices = result.get("choices", [])
        if choices:
            messages.append(choices[0]["message"])
    else:
        print(f"请求失败,状态码: {response.status_code}")

# User entered an empty line: leave the loop.
print("对话结束。")

Execute the program and return the following results:

hogworts@hogworts baichuan % python3 chat-json.py 
用户: hi guys
助手: {
    
    
  "id": "chatcmpl-M8c0000CLE7to5U",
  "object": "chat.completion",
  "created": 1703138875,
  "model": "Baichuan2-Turbo",
  "choices": [
    {
    
    
      "index": 0,
      "message": {
    
    
        "role": "assistant",
        "content": "Hi!"
      },
      "finish_reason": "stop"
    }
  ],
  "usage": {
    
    
    "prompt_tokens": 4,
    "completion_tokens": 3,
    "total_tokens": 7
  }
}

用户: where are you going
助手: {
    
    
  "id": "chatcmpl-M400600CLE88o2H",
  "object": "chat.completion",
  "created": 1703138888,
  "model": "Baichuan2-Turbo",
  "choices": [
    {
    
    
      "index": 0,
      "message": {
    
    
        "role": "assistant",
        "content": "我暂时不能离开这个平台,但我可以帮助您回答各种问题、提供信息或执行一些任务。如果您有任何问题,请随时提问。"
      },
      "finish_reason": "stop"
    }
  ],
  "usage": {
    
    
    "prompt_tokens": 6,
    "completion_tokens": 29,
    "total_tokens": 35
  }
}

Problem: a single API key usually comes with a concurrency limit, so with many users the requests are easily rate-limited by the API.

Refactor the code to poll across multiple keys so it can serve more user requests.
Insert image description here
Note: once generated, an API key is shown only in masked (desensitized) form from then on — a common rule on many platforms. Keep your key safe: if you lose it, you cannot retrieve it again, even though you generated it yourself.

import aiohttp
import asyncio
import random
import subprocess
import json


# Pool of API keys to spread requests over (replace with your real keys).
api_keys = ["sk-222", "sk-333", "sk-44", "sk-555", "sk-666"]

# Chat-completions endpoint
url = "https://api.baichuan-ai.com/v1/chat/completions"

# Base request body; `messages` is populated per request.
data = {
    "model": "Baichuan2-Turbo",
    "messages": [],
    "stream": False,
    "temperature": 0.3,
    "top_p": 0.85,
    "top_k": 5,
    "with_search_enhance": False,
    # other optional parameters ...
}

# Asynchronously send one chat request.
async def send_request(api_key, user_input):
    """Send a single chat request authorized with `api_key`.

    Returns the response's `choices` list on success, or None on a
    non-200 status (after printing a diagnostic).
    """
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}",
    }

    # Build the payload locally instead of mutating the shared module-level
    # `data` dict: with the old in-place append, a failed or concurrent call
    # left stale messages behind for the next request.
    payload = dict(data, messages=[{"role": "user", "content": user_input}])

    async with aiohttp.ClientSession() as session:
        async with session.post(url, headers=headers, json=payload) as response:
            if response.status == 200:
                result = await response.json()
                return result.get("choices", [])
            print(f"API Key: {api_key}, 请求失败,状态码: {response.status}")
            return None


# Async main loop
async def main():
    """Read user input in a loop and fan requests out across the key pool."""
    while True:
        # Read the next user message.
        user_input = input("用户: ")

        # An empty line ends the session.
        if not user_input:
            break

        # Pick a key at random so load is spread across the whole pool.
        selected_api_key = random.choice(api_keys)

        # Send the request asynchronously.
        choices = await send_request(selected_api_key, user_input)

        # Report the result (an empty/None choices list counts as failure).
        if choices:
            print(f"API Key: {selected_api_key}, Choices: {choices}")
        else:
            print(f"API Key: {selected_api_key}, 请求失败")

        # Reset shared message state before the next turn.
        data["messages"] = []

# Run the event loop
if __name__ == "__main__":
    asyncio.run(main())

The code above uses several common Python libraries — requests, json, subprocess, aiohttp, etc. `json` and `subprocess` ship with Python's standard library, while third-party packages such as `requests` and `aiohttp` must be installed with pip before they can be used.

Guess you like

Origin blog.csdn.net/hero272285642/article/details/135129367