"""This sample demonstrates a basic call to the chat completion API.
It is leveraging your endpoint and key. The call is synchronous."""
import os
from mistralai.client import MistralClient
from mistralai.models.chat_completion import ChatMessage
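
# GITHUB_TOKEN is read from the environment: inside the Codespace for this
# repository it is set automatically; otherwise export a GitHub personal
# access token before running the script.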
token = os.environ["GITHUB_TOKEN"]
endpoint = "https://models.inference.ai.azure.com"
# Pick one of the Mistral models from the GitHub Models service
model_name = "Mistral-small"
client = MistralClient(api_key=token, endpoint=endpoint)
response = client.chat(
model=model_name,
messages=[
ChatMessage(role="system", content="You are a helpful assistant."),
ChatMessage(role="user", content="What is the capital of France?"),
],
# Optional parameters
temperature=1.,
max_tokens=1000,
top_p=1.
)
print(response.choices[0].message.content)
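
# What follows is a minimal streaming sketch of the same request, assuming the
# legacy mistralai 0.x client used above exposes MistralClient.chat_stream;
# tokens are printed as they arrive instead of waiting for the full response.
stream = client.chat_stream(
    model=model_name,
    messages=[
        ChatMessage(role="system", content="You are a helpful assistant."),
        ChatMessage(role="user", content="What is the capital of France?"),
    ],
)
for chunk in stream:
    # Each chunk carries an incremental delta; content can be None on the
    # final chunk, so guard before printing.
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="")
print()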