From 270bf54832af88da660527c1c6669e531c3cc31c Mon Sep 17 00:00:00 2001 From: naman-gupta99 Date: Wed, 4 Jun 2025 00:54:43 -0400 Subject: [PATCH] Naman | Creates Llama Training Script --- agentdriver/execution/gen_finetune_data.py | 2 +- agentdriver/execution/llama/__init__.py | 0 .../execution/llama/collect_planner_input.py | 16 ++ agentdriver/execution/llama/fine_tune.py | 141 ++++++++++++++++ .../execution/llama/gen_finetune_data.py | 41 +++++ agentdriver/execution/llama/inference.py | 39 +++++ .../execution/llama/llama_finetune_sample.py | 153 ++++++++++++++++++ agentdriver/llm_core/chat.py | 1 - agentdriver/planning/generate_messages.py | 45 ++++++ agentdriver/planning/motion_planning.py | 42 +---- ...lanning_prmopts.py => planning_prompts.py} | 0 requirements.txt | 66 ++++---- 12 files changed, 471 insertions(+), 75 deletions(-) create mode 100644 agentdriver/execution/llama/__init__.py create mode 100644 agentdriver/execution/llama/collect_planner_input.py create mode 100644 agentdriver/execution/llama/fine_tune.py create mode 100644 agentdriver/execution/llama/gen_finetune_data.py create mode 100644 agentdriver/execution/llama/inference.py create mode 100644 agentdriver/execution/llama/llama_finetune_sample.py create mode 100644 agentdriver/planning/generate_messages.py rename agentdriver/planning/{planning_prmopts.py => planning_prompts.py} (100%) diff --git a/agentdriver/execution/gen_finetune_data.py b/agentdriver/execution/gen_finetune_data.py index fca35cf..6c61181 100644 --- a/agentdriver/execution/gen_finetune_data.py +++ b/agentdriver/execution/gen_finetune_data.py @@ -5,7 +5,7 @@ import random from pathlib import Path -from agentdriver.planning.planning_prmopts import planning_system_message as system_message +from agentdriver.planning.planning_prompts import planning_system_message as system_message from agentdriver.planning.motion_planning import generate_messages def generate_traj_finetune_data(data_path, data_file, sample_ratio=1.0, use_gt_cot=False): diff --git a/agentdriver/execution/llama/__init__.py b/agentdriver/execution/llama/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/agentdriver/execution/llama/collect_planner_input.py b/agentdriver/execution/llama/collect_planner_input.py new file mode 100644 index 0000000..66c5f66 --- /dev/null +++ b/agentdriver/execution/llama/collect_planner_input.py @@ -0,0 +1,16 @@ +## Run tool use, memory retrieval, and reasoning to generate training data for planning and testing input for planner + +from pathlib import Path + +from agentdriver.main.language_agent import LanguageAgent +from agentdriver.llm_core.api_keys import OPENAI_ORG, OPENAI_API_KEY + +import openai +openai.organization = OPENAI_ORG +openai.api_key = OPENAI_API_KEY + +if __name__ == "__main__": + data_path = Path('data/') + split = 'train' + language_agent = LanguageAgent(data_path, split, model_name="gpt-3.5-turbo-0613", finetune_cot=False, verbose=False) + language_agent.collect_planner_input(invalid_tokens=None) \ No newline at end of file diff --git a/agentdriver/execution/llama/fine_tune.py b/agentdriver/execution/llama/fine_tune.py new file mode 100644 index 0000000..28984a1 --- /dev/null +++ b/agentdriver/execution/llama/fine_tune.py @@ -0,0 +1,141 @@ +## finetuning motion planner +import os +import torch +from datasets import load_dataset +from transformers import ( + AutoModelForCausalLM, + AutoTokenizer, + BitsAndBytesConfig, + TrainingArguments, + pipeline, + logging, +) +from peft import LoraConfig, get_peft_model, 
prepare_model_for_kbit_training +from trl import SFTTrainer +from accelerate import FullyShardedDataParallelPlugin, Accelerator +from torch.distributed.fsdp.fully_sharded_data_parallel import ( + FullOptimStateDictConfig, + FullStateDictConfig, +) + +from agentdriver.execution.llama.gen_finetune_data import generate_traj_finetune_data + +def setup_fsdp(): + fsdp_plugin = FullyShardedDataParallelPlugin( + state_dict_config=FullStateDictConfig(offload_to_cpu=True, rank0_only=False), + optim_state_dict_config=FullOptimStateDictConfig( + offload_to_cpu=True, rank0_only=False + ), + ) + return Accelerator(fsdp_plugin=fsdp_plugin) + +def train_llama(data_path, sample_ratio=0.1): + # Generate training data + print("Generating fine-tuning data ...") + generate_traj_finetune_data(data_path=data_path, data_file="data_samples_train.json", + sample_ratio=sample_ratio, use_gt_cot=False) + + # Setup model and tokenizer + base_model_id = "NousResearch/Meta-Llama-3-8B" + max_length = 2048 # Adjust based on your needs + + print("Loading tokenizer and model...") + tokenizer = AutoTokenizer.from_pretrained( + base_model_id, + padding_side="left", + add_eos_token=True, + add_bos_token=True, + ) + tokenizer.pad_token = tokenizer.eos_token + + # Load datasets + train_file = f"finetune_planner_{int(sample_ratio * 100)}.csv" + train_dataset = load_dataset("csv", data_files=os.path.join(data_path, train_file), split="train") + + # Setup quantization + bnb_config = BitsAndBytesConfig( + load_in_4bit=True, + bnb_4bit_use_double_quant=True, + bnb_4bit_quant_type="nf4", + bnb_4bit_compute_dtype=torch.bfloat16, + ) + + # Load model + model = AutoModelForCausalLM.from_pretrained( + base_model_id, + quantization_config=bnb_config, + device_map="auto" + ) + + # Prepare model for training + model.gradient_checkpointing_enable() + model = prepare_model_for_kbit_training(model) + + # Setup LoRA + config = LoraConfig( + r=8, + lora_alpha=16, + target_modules=[ + "q_proj", + "k_proj", + "v_proj", + "o_proj", + "gate_proj", + "up_proj", + "down_proj", + "lm_head", + ], + bias="none", + lora_dropout=0.05, + task_type="CAUSAL_LM", + ) + + model = get_peft_model(model, config) + + # Setup accelerator + accelerator = setup_fsdp() + model = accelerator.prepare_model(model) + + if torch.cuda.device_count() > 1: + model.is_parallelizable = True + model.model_parallel = True + + # Setup training arguments + training_args = TrainingArguments( + output_dir=f"./llama_planner_{int(sample_ratio * 100)}", + num_train_epochs=3, + per_device_train_batch_size=4, + gradient_accumulation_steps=4, + gradient_checkpointing=True, + optim="paged_adamw_8bit", + learning_rate=2.5e-5, + logging_steps=10, + bf16=True, + save_strategy="steps", + save_steps=100, + evaluation_strategy="no", + do_eval=False, + report_to="none", + ) + + # Initialize trainer + trainer = SFTTrainer( + model=model, + train_dataset=train_dataset, + args=training_args, + tokenizer=tokenizer, + max_seq_length=max_length, + ) + + # Train + print("Starting training...") + model.config.use_cache = False + trainer.train() + + # Save the model + output_dir = f"./llama_planner_{int(sample_ratio * 100)}_final" + trainer.save_model(output_dir) + print(f"Model saved to {output_dir}") + +if __name__ == "__main__": + train_llama(data_path="data/finetune", sample_ratio=0.1) diff --git a/agentdriver/execution/llama/gen_finetune_data.py b/agentdriver/execution/llama/gen_finetune_data.py new file mode 100644 index 0000000..5c4009b --- /dev/null +++ 
b/agentdriver/execution/llama/gen_finetune_data.py @@ -0,0 +1,41 @@ +import json +import random +from pathlib import Path + +from agentdriver.planning.planning_prompts import planning_system_message as system_message +from agentdriver.planning.generate_messages import generate_messages + +def generate_traj_finetune_data(data_path, data_file, sample_ratio=1.0, use_gt_cot=False): + data_samples = json.load(open(Path(data_path) / Path(data_file), 'r')) + + sample_size = int(len(data_samples) * sample_ratio) + data_samples = random.sample(data_samples, sample_size) + + train_data = [] + for data_sample in data_samples: + token, user_message, assistant_message = generate_messages(data_sample, use_gt_cot=use_gt_cot) + assert assistant_message is not None + + # Format for Llama fine-tuning + full_prompt = f"""### System: {system_message} + ### Human: {user_message} + # ### Assistant: {assistant_message}""" + + train_data.append({ + "text": full_prompt + }) + + print("#### Data Summarization ####") + print(f"Number of total samples: {len(train_data)}") + + # Save as CSV for Llama training + saved_file_name = f"finetune_planner_{int(sample_ratio * 100)}.csv" + with open(Path(data_path) / Path(saved_file_name), "w") as f: + f.write("text\n") # CSV header + for item in train_data: + # Escape quotes and newlines for CSV + text = item["text"].replace('"', '""').replace('\n', '\\n') + f.write(f'"{text}"\n') + +if __name__ == "__main__": + generate_traj_finetune_data(data_path="data/finetune", data_file="data_samples_train.json", use_gt_cot=False) \ No newline at end of file diff --git a/agentdriver/execution/llama/inference.py b/agentdriver/execution/llama/inference.py new file mode 100644 index 0000000..128319c --- /dev/null +++ b/agentdriver/execution/llama/inference.py @@ -0,0 +1,39 @@ +## Run tool use, memory retrieval, and reasoning to generate training data for planning and testing input for planner + +from pathlib import Path +import time +import json + +from agentdriver.main.language_agent import LanguageAgent +from agentdriver.llm_core.api_keys import OPENAI_ORG, OPENAI_API_KEY, FINETUNE_PLANNER_NAME + +import openai +openai.organization = OPENAI_ORG +openai.api_key = OPENAI_API_KEY + +if __name__ == "__main__": + data_path = Path('data/') + split = 'val' + language_agent = LanguageAgent( + data_path, + split, + model_name="gpt-3.5-turbo-0613", + planner_model_name=FINETUNE_PLANNER_NAME, + finetune_cot=False, + verbose=False + ) + + current_time = time.strftime("%D:%H:%M") + current_time = current_time.replace("/", "_") + current_time = current_time.replace(":", "_") + save_path = Path("experiments") / Path(current_time) + save_path.mkdir(exist_ok=True, parents=True) + with open("data/finetune/data_samples_val.json", "r") as f: + data_samples = json.load(f) + + planning_traj_dict = language_agent.inference_all( + data_samples=data_samples, + data_path=Path(data_path) / Path(split), + save_path=save_path, + ) + \ No newline at end of file diff --git a/agentdriver/execution/llama/llama_finetune_sample.py b/agentdriver/execution/llama/llama_finetune_sample.py new file mode 100644 index 0000000..65c50ce --- /dev/null +++ b/agentdriver/execution/llama/llama_finetune_sample.py @@ -0,0 +1,153 @@ +import os +import torch +from datasets import load_dataset +from transformers import ( + AutoModelForCausalLM, + AutoTokenizer, + BitsAndBytesConfig, + TrainingArguments, + pipeline, + logging, +) +from peft import LoraConfig, get_peft_model +from trl import SFTTrainer + +from accelerate import 
FullyShardedDataParallelPlugin, Accelerator +from torch.distributed.fsdp.fully_sharded_data_parallel import ( + FullOptimStateDictConfig, + FullStateDictConfig, +) +from peft import prepare_model_for_kbit_training +import ast + +fsdp_plugin = FullyShardedDataParallelPlugin( + state_dict_config=FullStateDictConfig(offload_to_cpu=True, rank0_only=False), + optim_state_dict_config=FullOptimStateDictConfig( + offload_to_cpu=True, rank0_only=False + ), +) + +accelerator = Accelerator(fsdp_plugin=fsdp_plugin) + +base_model_id = "NousResearch/Meta-Llama-3-8B" + +max_length = 1570 + +tokenizer = AutoTokenizer.from_pretrained( + base_model_id, + padding_side="left", + add_eos_token=True, + add_bos_token=True, +) +tokenizer.pad_token = tokenizer.eos_token + + +def tokenize(prompt): + result = tokenizer( + prompt, + truncation=True, + max_length=max_length, + padding="max_length", + ) + result["labels"] = result["input_ids"].copy() + return result + + +train_dataset = load_dataset( + "csv", data_files="input/train_shuffled2.csv", split="train" +) +eval_dataset = load_dataset("csv", data_files="input/test_shuffled2.csv", split="train") + + +def generate_and_tokenize_prompt(data_point): + full_prompt = f""" +You'll be provided with some questions ... You should ... +### Question: {data_point['Question']} +### Output: {data_point['Output']} +""" + + return tokenize(full_prompt) + + +tokenized_train_dataset = train_dataset.map(generate_and_tokenize_prompt) +tokenized_val_dataset = eval_dataset.map(generate_and_tokenize_prompt) + +bnb_config = BitsAndBytesConfig( + load_in_4bit=True, + bnb_4bit_use_double_quant=True, + bnb_4bit_quant_type="nf4", + bnb_4bit_compute_dtype=torch.bfloat16, +) + +model = AutoModelForCausalLM.from_pretrained( + base_model_id, quantization_config=bnb_config, device_map="auto" +) + +model.gradient_checkpointing_enable() +model = prepare_model_for_kbit_training(model) + +config = LoraConfig( + r=8, + lora_alpha=16, + target_modules=[ + "q_proj", + "k_proj", + "v_proj", + "o_proj", + "gate_proj", + "up_proj", + "down_proj", + "lm_head", + ], + bias="none", + lora_dropout=0.05, + task_type="CAUSAL_LM", +) + +model = get_peft_model(model, config) +model = accelerator.prepare_model(model) + + +if torch.cuda.device_count() > 1: + model.is_parallelizable = True + model.model_parallel = True + +import transformers +from datetime import datetime + +project = "baseline" +base_model_name = "llama3-8b" +run_name = base_model_name + "-" + project +output_dir = "./" + run_name + +tokenizer.pad_token = tokenizer.eos_token + +trainer = transformers.Trainer( + model=model, + train_dataset=tokenized_train_dataset, + eval_dataset=tokenized_val_dataset, + args=transformers.TrainingArguments( + output_dir=output_dir, + warmup_steps=5, + num_train_epochs=3, + per_device_train_batch_size=8, + gradient_checkpointing=True, + gradient_accumulation_steps=4, + max_steps=-1, + learning_rate=2.5e-5, + logging_steps=400, + bf16=True, + optim="paged_adamw_8bit", + logging_dir="./logs", + save_strategy="steps", + save_steps=400, + evaluation_strategy="steps", + eval_steps=400, + do_eval=True, + report_to="none", + ), + data_collator=transformers.DataCollatorForLanguageModeling(tokenizer, mlm=False), +) + +model.config.use_cache = False +trainer.train() diff --git a/agentdriver/llm_core/chat.py b/agentdriver/llm_core/chat.py index 6b5f517..fd2f857 100644 --- a/agentdriver/llm_core/chat.py +++ b/agentdriver/llm_core/chat.py @@ -1,7 +1,6 @@ # Basic Chat Completion Functions # Written by Jiageng Mao -import json 
from typing import List, Dict from agentdriver.llm_core.chat_utils import completion_with_backoff from agentdriver.llm_core.timeout import timeout diff --git a/agentdriver/planning/generate_messages.py b/agentdriver/planning/generate_messages.py new file mode 100644 index 0000000..d1aba88 --- /dev/null +++ b/agentdriver/planning/generate_messages.py @@ -0,0 +1,45 @@ +def generate_messages( + data_sample, + use_peception=True, + use_short_experience=True, + verbose=True, + use_gt_cot=False, +): + token = data_sample["token"] + ego = data_sample["ego"] + perception = data_sample["perception"] + commonsense = data_sample["commonsense"] + experiences = data_sample["experiences"] + reasoning = data_sample["reasoning"] + long_experiences = ( + data_sample["long_experiences"] if "long_experiences" in data_sample else None + ) + chain_of_thoughts = ( + data_sample["chain_of_thoughts"] if "chain_of_thoughts" in data_sample else "" + ) + planning_target = ( + data_sample["planning_target"] if "planning_target" in data_sample else None + ) + + user_message = ego + if use_peception: + user_message += perception + if use_short_experience: + if experiences: + user_message += experiences + else: + if long_experiences: + user_message += long_experiences + user_message += commonsense + if use_gt_cot: + user_message += chain_of_thoughts + else: + user_message += reasoning + + assistant_message = planning_target + + if verbose: + print(user_message) + print(assistant_message) + + return token, user_message, assistant_message diff --git a/agentdriver/planning/motion_planning.py b/agentdriver/planning/motion_planning.py index 72e6894..040c429 100644 --- a/agentdriver/planning/motion_planning.py +++ b/agentdriver/planning/motion_planning.py @@ -1,6 +1,4 @@ -import openai import pickle -import json import ast import numpy as np import time @@ -8,48 +6,12 @@ from tqdm import tqdm import os -from agentdriver.planning.planning_prmopts import planning_system_message as system_message +from agentdriver.planning.planning_prompts import planning_system_message as system_message from agentdriver.llm_core.chat import run_one_round_conversation from agentdriver.reasoning.collision_check import collision_check from agentdriver.reasoning.collision_optimization import collision_optimization -from agentdriver.llm_core.api_keys import OPENAI_ORG, OPENAI_API_KEY +from agentdriver.planning.generate_messages import generate_messages -openai.organization = OPENAI_ORG -openai.api_key = OPENAI_API_KEY - -def generate_messages(data_sample, use_peception=True, use_short_experience=True, verbose=True, use_gt_cot=False): - token = data_sample["token"] - ego = data_sample["ego"] - perception = data_sample["perception"] - commonsense = data_sample["commonsense"] - experiences = data_sample["experiences"] - reasoning = data_sample["reasoning"] - long_experiences = data_sample["long_experiences"] if "long_experiences" in data_sample else None - chain_of_thoughts = data_sample["chain_of_thoughts"] if "chain_of_thoughts" in data_sample else "" - planning_target = data_sample["planning_target"] if "planning_target" in data_sample else None - - user_message = ego - if use_peception: - user_message += perception - if use_short_experience: - if experiences: - user_message += experiences - else: - if long_experiences: - user_message += long_experiences - user_message += commonsense - if use_gt_cot: - user_message += chain_of_thoughts - else: - user_message += reasoning - - assistant_message = planning_target - - if verbose: - print(user_message) - 
print(assistant_message) - - return token, user_message, assistant_message def planning_single_inference( planner_model_id, diff --git a/agentdriver/planning/planning_prmopts.py b/agentdriver/planning/planning_prompts.py similarity index 100% rename from agentdriver/planning/planning_prmopts.py rename to agentdriver/planning/planning_prompts.py diff --git a/requirements.txt b/requirements.txt index 6e7821c..5c379eb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,24 +1,24 @@ absl-py==2.0.0 aiohttp==3.8.6 aiosignal==1.3.1 -asttokens @ file:///home/conda/feedstock_root/build_artifacts/asttokens_1694046349000/work +asttokens==2.2.1 async-timeout==4.0.3 attrs==23.1.0 -backcall @ file:///home/conda/feedstock_root/build_artifacts/backcall_1592338393461/work -backports.functools-lru-cache @ file:///home/conda/feedstock_root/build_artifacts/backports.functools_lru_cache_1687772187254/work +backcall==0.2.0 +backports.functools-lru-cache==1.6.6 cachetools==5.3.2 casadi==3.6.5 certifi==2023.7.22 charset-normalizer==3.3.0 -comm @ file:///home/conda/feedstock_root/build_artifacts/comm_1691044910542/work +comm==0.2.0 contourpy==1.1.1 cycler==0.12.1 -debugpy @ file:///croot/debugpy_1690905042057/work +debugpy==1.8.0 decorator==4.4.2 descartes==1.1.0 -entrypoints @ file:///home/conda/feedstock_root/build_artifacts/entrypoints_1643888246732/work -exceptiongroup @ file:///home/conda/feedstock_root/build_artifacts/exceptiongroup_1692026125334/work -executing @ file:///home/conda/feedstock_root/build_artifacts/executing_1667317341051/work +entrypoints==0.4 +exceptiongroup==1.1.3 +executing==2.0.1 filelock==3.13.0 fire==0.5.0 fonttools==4.43.1 @@ -31,25 +31,25 @@ grpcio==1.59.0 idna==3.4 imageio==2.31.6 imageio-ffmpeg==0.4.9 -ipykernel @ file:///home/conda/feedstock_root/build_artifacts/ipykernel_1693880262622/work -ipython @ file:///home/conda/feedstock_root/build_artifacts/ipython_1696264049390/work -jedi @ file:///home/conda/feedstock_root/build_artifacts/jedi_1696326070614/work +ipykernel==6.28.0 +ipython==8.18.1 +jedi==0.19.1 Jinja2==3.1.2 joblib==1.3.2 -jupyter-client @ file:///home/conda/feedstock_root/build_artifacts/jupyter_client_1654730843242/work -jupyter_core @ file:///home/conda/feedstock_root/build_artifacts/jupyter_core_1696972408382/work +jupyter-client==8.6.0 +jupyter_core==5.5.0 kiwisolver==1.4.5 lazy_loader==0.3 lightning-utilities==0.9.0 Markdown==3.5 MarkupSafe==2.1.3 matplotlib==3.5.3 -matplotlib-inline @ file:///home/conda/feedstock_root/build_artifacts/matplotlib-inline_1660814786464/work +matplotlib-inline==0.1.6 moviepy==1.0.3 mpmath==1.3.0 multidict==6.0.4 ndjson==0.3.1 -nest-asyncio @ file:///home/conda/feedstock_root/build_artifacts/nest-asyncio_1697083700168/work +nest-asyncio==1.5.8 networkx==3.2.1 numpy==1.26.1 nuscenes-devkit==1.1.11 @@ -68,28 +68,28 @@ nvidia-nvtx-cu12==12.1.105 oauthlib==3.2.2 openai==0.28.1 opencv-python==4.8.1.78 -packaging @ file:///home/conda/feedstock_root/build_artifacts/packaging_1696202382185/work -parso @ file:///home/conda/feedstock_root/build_artifacts/parso_1638334955874/work -pexpect @ file:///home/conda/feedstock_root/build_artifacts/pexpect_1667297516076/work -pickleshare @ file:///home/conda/feedstock_root/build_artifacts/pickleshare_1602536217715/work +packaging==23.2 +parso==0.8.3 +pexpect==4.9.0 +pickleshare==0.7.5 Pillow==10.0.1 -platformdirs @ file:///home/conda/feedstock_root/build_artifacts/platformdirs_1696272223550/work +platformdirs==4.1.0 proglog==0.1.10 -prompt-toolkit @ 
file:///home/conda/feedstock_root/build_artifacts/prompt-toolkit_1688565951714/work +prompt-toolkit==3.0.43 protobuf==4.23.4 -psutil @ file:///opt/conda/conda-bld/psutil_1656431268089/work -ptyprocess @ file:///home/conda/feedstock_root/build_artifacts/ptyprocess_1609419310487/work/dist/ptyprocess-0.7.0-py2.py3-none-any.whl -pure-eval @ file:///home/conda/feedstock_root/build_artifacts/pure_eval_1642875951954/work +psutil==5.9.7 +ptyprocess==0.7.0 +pure-eval==0.2.3 pyasn1==0.5.0 pyasn1-modules==0.3.0 pycocotools==2.0.7 -Pygments @ file:///home/conda/feedstock_root/build_artifacts/pygments_1691408637400/work +Pygments==2.17.2 pyparsing==3.1.1 pyquaternion==0.9.9 -python-dateutil @ file:///home/conda/feedstock_root/build_artifacts/python-dateutil_1626286286081/work +python-dateutil==2.8.2 pytorch-lightning==1.2.10 PyYAML==6.0.1 -pyzmq @ file:///croot/pyzmq_1686601365461/work +pyzmq==25.1.2 regex==2023.10.3 requests==2.31.0 requests-oauthlib==1.3.1 @@ -98,8 +98,8 @@ scikit-image==0.22.0 scikit-learn==1.3.2 scipy==1.11.3 Shapely==1.8.5.post1 -six @ file:///home/conda/feedstock_root/build_artifacts/six_1620240208055/work -stack-data @ file:///home/conda/feedstock_root/build_artifacts/stack_data_1669632077133/work +six==1.16.0 +stack-data==0.6.3 sympy==1.12 tenacity==8.2.3 tensorboard==2.15.0 @@ -112,12 +112,12 @@ torch==2.1.0 torchaudio==2.1.0 torchmetrics==0.2.0 torchvision==0.16.0 -tornado @ file:///home/conda/feedstock_root/build_artifacts/tornado_1648827254365/work +tornado==6.4 tqdm==4.66.1 -traitlets @ file:///home/conda/feedstock_root/build_artifacts/traitlets_1696377679271/work +traitlets==5.14.0 triton==2.1.0 -typing_extensions @ file:///home/conda/feedstock_root/build_artifacts/typing_extensions_1695040754690/work +typing_extensions==4.9.0 urllib3==2.0.6 -wcwidth @ file:///home/conda/feedstock_root/build_artifacts/wcwidth_1696255154857/work +wcwidth==0.2.13 Werkzeug==3.0.1 yarl==1.9.2
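
Note on the generated training file (not part of the patch above): gen_finetune_data.py writes each example as a quoted CSV row with newlines escaped to the two-character sequence \n, so the text column that load_dataset("csv") hands to SFTTrainer in fine_tune.py contains literal backslash-n sequences rather than real line breaks. If real newlines are wanted at training time, a small preprocessing step such as the following hedged sketch could be applied to the dataset before it is passed to the trainer (the file name follows from data_path="data/finetune" and sample_ratio=0.1 in fine_tune.py):

# Sketch: undo the CSV newline escaping performed by gen_finetune_data.py.
# Whether this is desired depends on how the prompts are meant to be tokenized.
from datasets import load_dataset

train_dataset = load_dataset(
    "csv", data_files="data/finetune/finetune_planner_10.csv", split="train"
)
train_dataset = train_dataset.map(
    lambda example: {"text": example["text"].replace("\\n", "\n")}
)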
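
The patch fine-tunes a LoRA adapter on top of NousResearch/Meta-Llama-3-8B and saves it via trainer.save_model, while inference.py still routes planning through the OpenAI-backed LanguageAgent. Below is a minimal sketch (not part of the patch) of how the saved adapter could be loaded for local generation; the adapter directory follows from train_llama(sample_ratio=0.1) in fine_tune.py, the prompt layout mirrors the "### System / ### Human / ### Assistant" template in gen_finetune_data.py, and run_planner is a hypothetical helper.

# Sketch: load the LoRA adapter saved by fine_tune.py and generate a plan locally.
import torch
from transformers import AutoTokenizer, BitsAndBytesConfig
from peft import AutoPeftModelForCausalLM

ADAPTER_DIR = "./llama_planner_10_final"  # default output of trainer.save_model in fine_tune.py
BASE_MODEL_ID = "NousResearch/Meta-Llama-3-8B"

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

# AutoPeftModelForCausalLM resolves the base model from the adapter config.
model = AutoPeftModelForCausalLM.from_pretrained(
    ADAPTER_DIR, quantization_config=bnb_config, device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL_ID, padding_side="left")
tokenizer.pad_token = tokenizer.eos_token

def run_planner(system_message: str, user_message: str, max_new_tokens: int = 256) -> str:
    # Mirror the prompt layout written by gen_finetune_data.py.
    prompt = f"### System: {system_message}\n### Human: {user_message}\n### Assistant:"
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        output = model.generate(**inputs, max_new_tokens=max_new_tokens, do_sample=False)
    # Return only the newly generated tokens (the planned trajectory text).
    return tokenizer.decode(output[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)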