diff --git a/g4f/Provider/Airforce.py b/g4f/Provider/Airforce.py
index 442ee9d41b4..7e825dac436 100644
--- a/g4f/Provider/Airforce.py
+++ b/g4f/Provider/Airforce.py
@@ -32,8 +32,7 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
api_endpoint_imagine2 = "https://api.airforce/imagine2"
working = True
- needs_auth = True
- supports_stream = True
+ supports_stream = False
supports_system_message = True
supports_message_history = True
@@ -41,9 +40,7 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
default_image_model = "flux"
hidden_models = {"Flux-1.1-Pro"}
-
additional_models_imagine = ["flux-1.1-pro", "midjourney", "dall-e-3"]
-
model_aliases = {
# Alias mappings for models
"gpt-4": "gpt-4o",
diff --git a/g4f/Provider/AmigoChat.py b/g4f/Provider/AmigoChat.py
index bb732f24048..b5be5724de2 100644
--- a/g4f/Provider/AmigoChat.py
+++ b/g4f/Provider/AmigoChat.py
@@ -13,7 +13,7 @@
'chat': {
'gpt-4o-2024-11-20': {'persona_id': "gpt"},
'gpt-4o': {'persona_id': "summarizer"},
- 'gpt-4o-mini': {'persona_id': "gemini-1-5-flash"},
+ 'gpt-4o-mini': {'persona_id': "amigo"},
'o1-preview-': {'persona_id': "openai-o-one"}, # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan
'o1-preview-2024-09-12-': {'persona_id': "orion"}, # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan
@@ -24,7 +24,7 @@
'codellama/CodeLlama-34b-Instruct-hf': {'persona_id': "codellama-CodeLlama-34b-Instruct-hf"},
'gemini-1.5-pro': {'persona_id': "gemini-1-5-pro"}, # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan
- 'gemini-1.5-flash': {'persona_id': "amigo"},
+ 'gemini-1.5-flash': {'persona_id': "gemini-1.5-flash"},
'claude-3-5-sonnet-20240620': {'persona_id': "claude"},
'claude-3-5-sonnet-20241022': {'persona_id': "clude-claude-3-5-sonnet-20241022"},
@@ -200,7 +200,6 @@ async def create_async_generator(
"temperature": temperature,
"top_p": top_p
}
- print(data)
async with session.post(cls.chat_api_endpoint, json=data, timeout=timeout) as response:
await raise_for_status(response)
async for line in response.iter_lines():
diff --git a/g4f/Provider/Copilot.py b/g4f/Provider/Copilot.py
index cc0cb6abc39..6b0db2aeda1 100644
--- a/g4f/Provider/Copilot.py
+++ b/g4f/Provider/Copilot.py
@@ -2,6 +2,7 @@
import json
import asyncio
+import base64
from http.cookiejar import CookieJar
from urllib.parse import quote
@@ -93,6 +94,20 @@ def create_completion(
) as session:
if cls._access_token is not None:
cls._cookies = session.cookies.jar
+ if cls._access_token is None:
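+                # Without an access token, fetch a Microsoft Clarity challenge token;
+                # it is sent back over the websocket as a "challengeResponse" event.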
+                clarity_token = None
+                try:
+ url = "https://copilot.microsoft.com/cl/eus-sc/collect"
+ headers = {
+ "Accept": "application/x-clarity-gzip",
+ "referrer": "https://copilot.microsoft.com/onboarding"
+ }
+ response = session.post(url, headers=headers, data=get_clarity())
+ clarity_token = json.loads(response.text.split(" ", maxsplit=1)[-1])[0]["value"]
+ debug.log(f"Copilot: Clarity Token: ...{clarity_token[-12:]}")
+ except Exception as e:
+ debug.log(f"Copilot: {e}")
+ else:
+ clarity_token = None
response = session.get("https://copilot.microsoft.com/c/api/user")
raise_for_status(response)
user = response.json().get('firstName')
@@ -125,6 +140,12 @@ def create_completion(
uploaded_images.append({"type":"image", "url": response.json().get("url")})
wss = session.ws_connect(cls.websocket_url)
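+            # Answer the server challenge with the Clarity token before sending the prompt.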
+ if clarity_token is not None:
+ wss.send(json.dumps({
+ "event": "challengeResponse",
+ "token": clarity_token,
+ "method":"clarity"
+ }).encode(), CurlWsFlag.TEXT)
wss.send(json.dumps({
"event": "send",
"conversationId": conversation_id,
@@ -209,4 +230,9 @@ def readHAR(url: str):
if api_key is None:
raise NoValidHarFileError("No access token found in .har files")
- return api_key, cookies
\ No newline at end of file
+ return api_key, cookies
+
+def get_clarity() -> bytes:
+ #{"e":["0.7.58",5,7284,4779,"n59ae4ieqq","aln5en","1upufhz",1,0,0],"a":[[7323,12,65,217,324],[7344,12,65,214,329],[7385,12,65,211,334],[7407,12,65,210,337],[7428,12,65,209,338],[7461,12,65,209,339],[7497,12,65,209,339],[7531,12,65,208,340],[7545,12,65,208,342],[11654,13,65,208,342],[11728,14,65,208,342],[11728,9,65,208,342,17535,19455,0,0,0,"Annehmen",null,"52w7wqv1r.8ovjfyrpu",1],[7284,4,1,393,968,393,968,0,0,231,310,939,0],[12063,0,2,147,3,4,4,18,5,1,10,79,25,15],[12063,36,6,[11938,0]]]}
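+    # Pre-recorded, gzip-compressed Clarity telemetry blob (its decoded JSON is shown
+    # above), posted as-is so the collect endpoint returns a challenge token.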
+ body = base64.b64decode("H4sIAAAAAAAAA23RwU7DMAwG4HfJ2aqS2E5ibjxH1cMOnQYqYZvUTQPx7vyJRGGAemj01XWcP+9udg+j80MetDhSyrEISc5GrqrtZnmaTydHbrdUnSsWYT2u+8Obo0Ce/IQvaDBmjkwhUlKKIRNHmQgosqEArWPRDQMx90rxeUMPzB1j+UJvwNIxhTvsPcXyX1T+rizE4juK3mEEhpAUg/JvzW1/+U/tB1LATmhqotoiweMea50PLy2vui4LOY3XfD1dwnkor5fn/e18XBFgm6fHjSzZmCyV7d3aRByAEYextaTHEH3i5pgKGVP/s+DScE5PuLKIpW6FnCi1gY3Rbpqmj0/DI/+L7QEAAA==")
+ return body
\ No newline at end of file
diff --git a/g4f/Provider/DDG.py b/g4f/Provider/DDG.py
index 070a7db2cf6..71c39bcdf4b 100644
--- a/g4f/Provider/DDG.py
+++ b/g4f/Provider/DDG.py
@@ -77,9 +77,7 @@ async def create_async_generator(
if conversation is None:
conversation = Conversation(model)
is_new_conversation = True
-
- debug.last_model = model
-
+
if conversation.vqd is None:
conversation.vqd = await cls.get_vqd(proxy, connector)
if not conversation.vqd:
@@ -91,7 +89,7 @@ async def create_async_generator(
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36',
'x-vqd-4': conversation.vqd,
}
-
+
async with ClientSession(headers=headers, connector=get_connector(connector, proxy)) as session:
if is_new_conversation:
conversation.message_history = [{"role": "user", "content": format_prompt(messages)}]
@@ -119,7 +117,7 @@ async def create_async_generator(
async with session.post(cls.api_endpoint, json=data) as response:
conversation.vqd = response.headers.get("x-vqd-4")
await raise_for_status(response)
-
+
async for line in response.content:
if line:
decoded_line = line.decode('utf-8')
diff --git a/g4f/Provider/Mhystical.py b/g4f/Provider/Mhystical.py
index 2aa98ebc499..14412c07c5b 100644
--- a/g4f/Provider/Mhystical.py
+++ b/g4f/Provider/Mhystical.py
@@ -4,6 +4,7 @@
import logging
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
+from ..requests.raise_for_status import raise_for_status
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
@@ -67,17 +68,10 @@ async def create_async_generator(
"messages": [{"role": "user", "content": format_prompt(messages)}]
}
async with session.post(cls.api_endpoint, json=data, headers=headers, proxy=proxy) as response:
- if response.status == 400:
- yield "Error: API key is missing"
- elif response.status == 429:
- yield "Error: Rate limit exceeded"
- elif response.status == 500:
- yield "Error: Internal server error"
- else:
- response.raise_for_status()
- response_text = await response.text()
- filtered_response = cls.filter_response(response_text)
- yield filtered_response
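+                # Let the shared raise_for_status helper surface HTTP errors uniformly.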
+ await raise_for_status(response)
+ response_text = await response.text()
+ filtered_response = cls.filter_response(response_text)
+ yield filtered_response
@staticmethod
def filter_response(response_text: str) -> str:
diff --git a/g4f/Provider/needs_auth/GithubCopilot.py b/g4f/Provider/needs_auth/GithubCopilot.py
index 754c8d4e5b2..4346ab0338b 100644
--- a/g4f/Provider/needs_auth/GithubCopilot.py
+++ b/g4f/Provider/needs_auth/GithubCopilot.py
@@ -62,7 +62,6 @@ async def create_async_generator(
if conversation is not None:
conversation_id = conversation.conversation_id
if conversation_id is None:
- print(headers)
async with session.post("https://api.individual.githubcopilot.com/github/chat/threads", headers=headers) as response:
await raise_for_status(response)
conversation_id = (await response.json()).get("thread_id")
diff --git a/g4f/Provider/needs_auth/HuggingFace.py b/g4f/Provider/needs_auth/HuggingFace.py
index 40db1ee7005..3fa5a624074 100644
--- a/g4f/Provider/needs_auth/HuggingFace.py
+++ b/g4f/Provider/needs_auth/HuggingFace.py
@@ -3,25 +3,37 @@
import json
import base64
import random
+import requests
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ...errors import ModelNotFoundError
+from ...errors import ModelNotFoundError, ModelNotSupportedError
from ...requests import StreamSession, raise_for_status
from ...image import ImageResponse
from .HuggingChat import HuggingChat
class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://huggingface.co/chat"
+ url = "https://huggingface.co"
working = True
supports_message_history = True
default_model = HuggingChat.default_model
default_image_model = HuggingChat.default_image_model
- models = HuggingChat.models
- image_models = [default_image_model]
model_aliases = HuggingChat.model_aliases
+ @classmethod
+ def get_models(cls) -> list[str]:
+ if not cls.models:
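+            # Text-generation models currently "warm" (loaded) on the HF inference API.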
+ url = "https://huggingface.co/api/models?inference=warm&pipeline_tag=text-generation"
+ cls.models = [model["id"] for model in requests.get(url).json()]
+ cls.models.append("meta-llama/Llama-3.2-11B-Vision-Instruct")
+ cls.models.append("nvidia/Llama-3.1-Nemotron-70B-Instruct-HF")
+ if not cls.image_models:
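+            # Text-to-image models, filtered by trendingScore to the reasonably popular ones.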
+ url = "https://huggingface.co/api/models?pipeline_tag=text-to-image"
+ cls.image_models = [model["id"] for model in requests.get(url).json() if model["trendingScore"] >= 20]
+ cls.models.extend(cls.image_models)
+ return cls.models
+
@classmethod
async def create_async_generator(
cls,
@@ -36,7 +48,10 @@ async def create_async_generator(
prompt: str = None,
**kwargs
) -> AsyncResult:
- model = cls.get_model(model)
+ try:
+ model = cls.get_model(model)
+ except ModelNotSupportedError:
+ pass
headers = {
'accept': '*/*',
'accept-language': 'en',
@@ -55,6 +70,7 @@ async def create_async_generator(
}
if api_key is not None:
headers["Authorization"] = f"Bearer {api_key}"
+ payload = None
if model in cls.image_models:
stream = False
prompt = messages[-1]["content"] if prompt is None else prompt
@@ -66,12 +82,28 @@ async def create_async_generator(
"temperature": temperature,
**kwargs
}
- payload = {"inputs": format_prompt(messages), "parameters": params, "stream": stream}
async with StreamSession(
headers=headers,
proxy=proxy,
timeout=600
) as session:
+ if payload is None:
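+                # For text models (no image payload built above), pick a prompt
+                # template based on the tokenizer's eos_token.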
+ async with session.get(f"https://huggingface.co/api/models/{model}") as response:
+ model_data = await response.json()
+ if "config" in model_data and "tokenizer_config" in model_data["config"] and "eos_token" in model_data["config"]["tokenizer_config"]:
+ eos_token = model_data["config"]["tokenizer_config"]["eos_token"]
+                        if eos_token == "</s>":
+ inputs = format_prompt_mistral(messages)
+ elif eos_token == "<|im_end|>":
+ inputs = format_prompt_qwen(messages)
+ elif eos_token == "<|eot_id|>":
+ inputs = format_prompt_llama(messages)
+ else:
+ inputs = format_prompt(messages)
+ else:
+ inputs = format_prompt(messages)
+ payload = {"inputs": inputs, "parameters": params, "stream": stream}
+
async with session.post(f"{api_base.rstrip('/')}/models/{model}", json=payload) as response:
if response.status == 404:
raise ModelNotFoundError(f"Model is not supported: {model}")
@@ -105,3 +137,18 @@ def format_prompt(messages: Messages) -> str:
if message["role"] == "assistant"
])
return f"{history}[INST] {question} [/INST]"
+
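+# ChatML template (eos_token "<|im_end|>"), used by Qwen models among others.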
+def format_prompt_qwen(messages: Messages) -> str:
+ return "".join([
+ f"<|im_start|>{message['role']}\n{message['content']}\n<|im_end|>\n" for message in messages
+ ]) + "<|im_start|>assistant\n"
+
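+# Llama 3 chat template (eos_token "<|eot_id|>").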
+def format_prompt_llama(messages: Messages) -> str:
+ return "<|begin_of_text|>" + "".join([
+ f"<|start_header_id|>{message['role']}<|end_header_id|>\n\n{message['content']}\n<|eot_id|>\n" for message in messages
+    ]) + "<|start_header_id|>assistant<|end_header_id|>\n\n"
+
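+# Generic role-tag template, used here for models whose eos_token is "</s>".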
+def format_prompt_mistral(messages: Messages) -> str:
+ return "".join([
+        f"<|{message['role']}|>\n{message['content']}</s>\n" for message in messages
+ ]) + "<|assistant|>\n"
\ No newline at end of file
diff --git a/g4f/Provider/openai/new.py b/g4f/Provider/openai/new.py
index 9f99f2af62b..43994f0437d 100644
--- a/g4f/Provider/openai/new.py
+++ b/g4f/Provider/openai/new.py
@@ -476,7 +476,6 @@ def __str__(self):
def get_turnstile_token(dx: str, p: str) -> str:
decoded_bytes = base64.b64decode(dx)
- # print(decoded_bytes.decode())
return process_turnstile_token(decoded_bytes.decode(), p)
diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py
index b42c599175a..6f6b0ffa03a 100644
--- a/g4f/api/__init__.py
+++ b/g4f/api/__init__.py
@@ -470,15 +470,17 @@ def safe_get_models(provider: ProviderType) -> list[str]:
})
def upload_cookies(files: List[UploadFile]):
response_data = []
- for file in files:
- try:
- if file and file.filename.endswith(".json") or file.filename.endswith(".har"):
- filename = os.path.basename(file.filename)
- with open(os.path.join(get_cookies_dir(), filename), 'wb') as f:
- shutil.copyfileobj(file.file, f)
- response_data.append({"filename": filename})
- finally:
- file.file.close()
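+        # Skip persisting uploads when cookie files are ignored via AppConfig;
+        # otherwise save them and reload all cookie files afterwards.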
+ if not AppConfig.ignore_cookie_files:
+ for file in files:
+ try:
+                if file and (file.filename.endswith(".json") or file.filename.endswith(".har")):
+ filename = os.path.basename(file.filename)
+ with open(os.path.join(get_cookies_dir(), filename), 'wb') as f:
+ shutil.copyfileobj(file.file, f)
+ response_data.append({"filename": filename})
+ finally:
+ file.file.close()
+ read_cookie_files()
return response_data
@self.app.get("/v1/synthesize/{provider}", responses={
diff --git a/g4f/gui/client/static/js/chat.v1.js b/g4f/gui/client/static/js/chat.v1.js
index 044d2ab7568..54853226139 100644
--- a/g4f/gui/client/static/js/chat.v1.js
+++ b/g4f/gui/client/static/js/chat.v1.js
@@ -610,6 +610,7 @@ const ask_gpt = async (message_id, message_index = -1, regenerate = false, provi
const auto_continue = document.getElementById("auto_continue")?.checked;
const download_images = document.getElementById("download_images")?.checked;
let api_key = get_api_key_by_provider(provider);
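+    // Providers unchecked in the settings panel; the backend will skip them.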
+ const ignored = Array.from(settings.querySelectorAll("input.provider:not(:checked)")).map((el)=>el.value);
await api("conversation", {
id: message_id,
conversation_id: window.conversation_id,
@@ -620,6 +621,7 @@ const ask_gpt = async (message_id, message_index = -1, regenerate = false, provi
auto_continue: auto_continue,
download_images: download_images,
api_key: api_key,
+ ignored: ignored,
}, files, message_id);
if (!error_storage[message_id]) {
html = markdown_render(message_storage[message_id]);
@@ -1217,6 +1219,7 @@ function count_tokens(model, text) {
if (window.GPTTokenizer_cl100k_base) {
return GPTTokenizer_cl100k_base.encode(text).length;
}
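+    // Fall back to 0 when the tokenizer bundle has not loaded yet.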
+ return 0;
}
function count_words(text) {
@@ -1256,6 +1259,10 @@ systemPrompt.addEventListener("input", function() {
});
window.addEventListener('load', async function() {
+ await safe_load_conversation(window.conversation_id, false);
+});
+
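+// Initialize the UI once the DOM is parsed; the conversation itself is restored
+// in the "load" handler above, after all resources are available.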
+window.addEventListener('DOMContentLoaded', async function() {
await on_load();
if (window.conversation_id == "{{chat_id}}") {
window.conversation_id = uuid();
@@ -1309,7 +1316,6 @@ async function on_api() {
let prompt_lock = false;
messageInput.addEventListener("keydown", async (evt) => {
if (prompt_lock) return;
-
// If not mobile and not shift enter
if (!window.matchMedia("(pointer:coarse)").matches && evt.keyCode === 13 && !evt.shiftKey) {
evt.preventDefault();
@@ -1361,7 +1367,7 @@ async function on_api() {
option.innerHTML = `