Improve OpenAI API documents (#1827)
Co-authored-by: Chayenne <[email protected]>
zhaochenyang20 and Chayenne authored Oct 30, 2024
1 parent 5e00dde commit 539df95
Showing 8 changed files with 856 additions and 206 deletions.
1 change: 1 addition & 0 deletions docs/README.md
@@ -20,6 +20,7 @@ make clean

### Serve (preview)
Run an HTTP server and visit http://localhost:8000 in your browser.

```
python3 -m http.server -d _build/html
```
29 changes: 29 additions & 0 deletions docs/_static/css/custom_log.css
@@ -0,0 +1,29 @@
.output_area {
color: #615656;
}

table.autosummary td {
width: 50%;
}

img.align-center {
display: block;
margin-left: auto;
margin-right: auto;
}

.output_area.stderr {
color: #d3d3d3 !important;
}

.output_area.stdout {
color: #d3d3d3 !important;
}

div.output_area.stderr {
color: #d3d3d3 !important;
}

div.output_area.stdout {
color: #d3d3d3 !important;
}
16 changes: 15 additions & 1 deletion docs/conf.py
@@ -70,7 +70,10 @@
}

html_static_path = ["_static"]
html_css_files = ["css/readthedocs.css"]
html_css_files = ["css/custom_log.css"]

def setup(app):
app.add_css_file('css/custom_log.css')

myst_enable_extensions = [
"dollarmath",
@@ -127,3 +130,14 @@
}

html_theme = "sphinx_book_theme"


nbsphinx_prolog = """
.. raw:: html

    <style>
    .output_area.stderr, .output_area.stdout {
        color: #d3d3d3 !important; /* light gray */
    }
    </style>
"""
160 changes: 141 additions & 19 deletions docs/embedding_model.ipynb
@@ -21,38 +21,91 @@
"The following code is equivalent to running this in the shell:\n",
"```bash\n",
"python -m sglang.launch_server --model-path Alibaba-NLP/gte-Qwen2-7B-instruct \\\n",
" --port 30010 --host 0.0.0.0 --is-embedding --log-level error\n",
" --port 30010 --host 0.0.0.0 --is-embedding\n",
"```\n",
"\n",
"Remember to add `--is-embedding` to the command."
]
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Embedding server is ready. Proceeding with the next steps.\n"
"/home/chenyang/miniconda3/envs/AlphaMeemory/lib/python3.11/site-packages/transformers/utils/hub.py:128: FutureWarning: Using `TRANSFORMERS_CACHE` is deprecated and will be removed in v5 of Transformers. Use `HF_HOME` instead.\n",
" warnings.warn(\n",
"[2024-10-29 21:07:15] server_args=ServerArgs(model_path='Alibaba-NLP/gte-Qwen2-7B-instruct', tokenizer_path='Alibaba-NLP/gte-Qwen2-7B-instruct', tokenizer_mode='auto', skip_tokenizer_init=False, load_format='auto', trust_remote_code=False, dtype='auto', kv_cache_dtype='auto', quantization=None, context_length=None, device='cuda', served_model_name='Alibaba-NLP/gte-Qwen2-7B-instruct', chat_template=None, is_embedding=True, host='0.0.0.0', port=30010, mem_fraction_static=0.88, max_running_requests=None, max_total_tokens=None, chunked_prefill_size=8192, max_prefill_tokens=16384, schedule_policy='lpm', schedule_conservativeness=1.0, tp_size=1, stream_interval=1, random_seed=568040040, constrained_json_whitespace_pattern=None, log_level='info', log_level_http=None, log_requests=False, show_time_cost=False, api_key=None, file_storage_pth='SGLang_storage', enable_cache_report=False, watchdog_timeout=600, dp_size=1, load_balance_method='round_robin', dist_init_addr=None, nnodes=1, node_rank=0, json_model_override_args='{}', enable_double_sparsity=False, ds_channel_config_path=None, ds_heavy_channel_num=32, ds_heavy_token_num=256, ds_heavy_channel_type='qk', ds_sparse_decode_threshold=4096, lora_paths=None, max_loras_per_batch=8, attention_backend='flashinfer', sampling_backend='flashinfer', grammar_backend='outlines', disable_flashinfer=False, disable_flashinfer_sampling=False, disable_radix_cache=False, disable_regex_jump_forward=False, disable_cuda_graph=False, disable_cuda_graph_padding=False, disable_disk_cache=False, disable_custom_all_reduce=False, disable_mla=False, disable_penalizer=False, disable_nan_detection=False, enable_overlap_schedule=False, enable_mixed_chunk=False, enable_torch_compile=False, torch_compile_max_bs=32, cuda_graph_max_bs=160, torchao_config='', enable_p2p_check=False, triton_attention_reduce_in_fp32=False, num_continuous_decode_steps=1)\n",
"/home/chenyang/miniconda3/envs/AlphaMeemory/lib/python3.11/site-packages/transformers/utils/hub.py:128: FutureWarning: Using `TRANSFORMERS_CACHE` is deprecated and will be removed in v5 of Transformers. Use `HF_HOME` instead.\n",
" warnings.warn(\n",
"/home/chenyang/miniconda3/envs/AlphaMeemory/lib/python3.11/site-packages/transformers/utils/hub.py:128: FutureWarning: Using `TRANSFORMERS_CACHE` is deprecated and will be removed in v5 of Transformers. Use `HF_HOME` instead.\n",
" warnings.warn(\n",
"[2024-10-29 21:07:20 TP0] Init torch distributed begin.\n",
"[2024-10-29 21:07:20 TP0] Load weight begin. avail mem=47.27 GB\n",
"[2024-10-29 21:07:21 TP0] lm_eval is not installed, GPTQ may not be usable\n",
"INFO 10-29 21:07:22 weight_utils.py:243] Using model weights format ['*.safetensors']\n",
"Loading safetensors checkpoint shards: 0% Completed | 0/7 [00:00<?, ?it/s]\n",
"Loading safetensors checkpoint shards: 14% Completed | 1/7 [00:00<00:03, 1.65it/s]\n",
"Loading safetensors checkpoint shards: 29% Completed | 2/7 [00:01<00:04, 1.02it/s]\n",
"Loading safetensors checkpoint shards: 43% Completed | 3/7 [00:03<00:04, 1.24s/it]\n",
"Loading safetensors checkpoint shards: 57% Completed | 4/7 [00:05<00:04, 1.47s/it]\n",
"Loading safetensors checkpoint shards: 71% Completed | 5/7 [00:07<00:03, 1.62s/it]\n",
"Loading safetensors checkpoint shards: 86% Completed | 6/7 [00:08<00:01, 1.64s/it]\n",
"Loading safetensors checkpoint shards: 100% Completed | 7/7 [00:10<00:00, 1.63s/it]\n",
"Loading safetensors checkpoint shards: 100% Completed | 7/7 [00:10<00:00, 1.49s/it]\n",
"\n",
"[2024-10-29 21:07:32 TP0] Load weight end. type=Qwen2ForCausalLM, dtype=torch.float16, avail mem=32.91 GB\n",
"[2024-10-29 21:07:33 TP0] Memory pool end. avail mem=4.56 GB\n",
"[2024-10-29 21:07:33 TP0] max_total_num_tokens=509971, max_prefill_tokens=16384, max_running_requests=2049, context_len=131072\n",
"[2024-10-29 21:07:33] INFO: Started server process [2650986]\n",
"[2024-10-29 21:07:33] INFO: Waiting for application startup.\n",
"[2024-10-29 21:07:33] INFO: Application startup complete.\n",
"[2024-10-29 21:07:33] INFO: Uvicorn running on http://0.0.0.0:30010 (Press CTRL+C to quit)\n",
"[2024-10-29 21:07:34] INFO: 127.0.0.1:47812 - \"GET /v1/models HTTP/1.1\" 200 OK\n"
]
},
{
"data": {
"text/html": [
"<strong style='color: #00008B;'><br> This cell combines server and notebook output. <br> <br> Typically, the server runs in a separate terminal, <br> but we combine the output of server and notebook to demonstrate the usage better.<br> <br> In our documentation, server output is in gray, notebook output is highlighted.<br> </strong>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[2024-10-29 21:07:34] INFO: 127.0.0.1:41780 - \"GET /get_model_info HTTP/1.1\" 200 OK\n",
"[2024-10-29 21:07:34 TP0] Prefill batch. #new-seq: 1, #new-token: 6, #cached-token: 0, cache hit rate: 0.00%, token usage: 0.00, #running-req: 0, #queue-req: 0\n",
"[2024-10-29 21:07:35] INFO: 127.0.0.1:41792 - \"POST /encode HTTP/1.1\" 200 OK\n",
"[2024-10-29 21:07:35] The server is fired up and ready to roll!\n"
]
}
],
"source": [
"from sglang.utils import execute_shell_command, wait_for_server, terminate_process\n",
"from sglang.utils import (\n",
" execute_shell_command,\n",
" wait_for_server,\n",
" terminate_process,\n",
" print_highlight,\n",
")\n",
"\n",
"embedding_process = execute_shell_command(\n",
" \"\"\"\n",
"python -m sglang.launch_server --model-path Alibaba-NLP/gte-Qwen2-7B-instruct \\\n",
" --port 30010 --host 0.0.0.0 --is-embedding --log-level error\n",
" --port 30010 --host 0.0.0.0 --is-embedding\n",
"\"\"\"\n",
")\n",
"\n",
"wait_for_server(\"http://localhost:30010\")\n",
"\n",
"print(\"Embedding server is ready. Proceeding with the next steps.\")"
"wait_for_server(\"http://localhost:30010\")"
]
},
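The server log above ends with a successful `GET /v1/models` probe. For readers following along outside the notebook, a minimal sanity check against the same endpoint might look like this (a sketch, assuming the server was launched on port 30010 as in the command above):

```python
import requests

# Probe the OpenAI-compatible models endpoint, mirroring the
# '"GET /v1/models HTTP/1.1" 200 OK' line in the server log above.
resp = requests.get("http://localhost:30010/v1/models")
resp.raise_for_status()
print(resp.json())  # lists the served model, e.g. Alibaba-NLP/gte-Qwen2-7B-instruct
```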
{
@@ -64,15 +117,34 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Text embedding (first 10): [0.0083160400390625, 0.0006804466247558594, -0.00809478759765625, -0.0006995201110839844, 0.0143890380859375, -0.0090179443359375, 0.01238250732421875, 0.00209808349609375, 0.0062103271484375, -0.003047943115234375]\n"
"[2024-10-28 02:10:30 TP0] Prefill batch. #new-seq: 1, #new-token: 4, #cached-token: 0, cache hit rate: 0.00%, token usage: 0.00, #running-req: 0, #queue-req: 0\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[2024-10-28 02:10:31] INFO: 127.0.0.1:48094 - \"POST /v1/embeddings HTTP/1.1\" 200 OK\n"
]
},
{
"data": {
"text/html": [
"<strong style='color: #00008B;'>Text embedding (first 10): [0.0083160400390625, 0.0006804466247558594, -0.00809478759765625, -0.0006995201110839844, 0.0143890380859375, -0.0090179443359375, 0.01238250732421875, 0.00209808349609375, 0.0062103271484375, -0.003047943115234375]</strong>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
@@ -89,7 +161,7 @@
" \"embedding\"\n",
"]\n",
"\n",
"print(f\"Text embedding (first 10): {text_embedding[:10]}\")"
"print_highlight(f\"Text embedding (first 10): {text_embedding[:10]}\")"
]
},
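This cell hits the server's `/v1/embeddings` route (see the `POST /v1/embeddings ... 200 OK` log line). Since the top of the cell is collapsed in this diff, here is a minimal sketch of the request it plausibly makes; the input text is illustrative, not the notebook's actual input:

```python
import requests

# Hypothetical reconstruction of the collapsed request: the endpoint and
# model come from the launch command above, the input string is a placeholder.
response = requests.post(
    "http://localhost:30010/v1/embeddings",
    json={
        "model": "Alibaba-NLP/gte-Qwen2-7B-instruct",
        "input": "Once upon a time",
    },
)
text_embedding = response.json()["data"][0]["embedding"]
```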
{
@@ -101,15 +173,32 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Text embedding (first 10): [0.00829315185546875, 0.0007004737854003906, -0.00809478759765625, -0.0006799697875976562, 0.01438140869140625, -0.00897979736328125, 0.0123748779296875, 0.0020923614501953125, 0.006195068359375, -0.0030498504638671875]\n"
"[2024-10-28 02:10:31] INFO: 127.0.0.1:48110 - \"GET /get_model_info HTTP/1.1\" 200 OK\n",
"[2024-10-28 02:10:31 TP0] Prefill batch. #new-seq: 1, #new-token: 6, #cached-token: 0, cache hit rate: 0.00%, token usage: 0.00, #running-req: 0, #queue-req: 0\n",
"[2024-10-28 02:10:31] INFO: 127.0.0.1:48114 - \"POST /encode HTTP/1.1\" 200 OK\n",
"[2024-10-28 02:10:31] The server is fired up and ready to roll!\n",
"[2024-10-28 02:10:31 TP0] Prefill batch. #new-seq: 1, #new-token: 1, #cached-token: 3, cache hit rate: 21.43%, token usage: 0.00, #running-req: 0, #queue-req: 0\n",
"[2024-10-28 02:10:31] INFO: 127.0.0.1:48118 - \"POST /v1/embeddings HTTP/1.1\" 200 OK\n"
]
},
{
"data": {
"text/html": [
"<strong style='color: #00008B;'>Text embedding (first 10): [0.00829315185546875, 0.0007004737854003906, -0.00809478759765625, -0.0006799697875976562, 0.01438140869140625, -0.00897979736328125, 0.0123748779296875, 0.0020923614501953125, 0.006195068359375, -0.0030498504638671875]</strong>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
Expand All @@ -124,7 +213,7 @@
")\n",
"\n",
"embedding = response.data[0].embedding[:10]\n",
"print(f\"Text embedding (first 10): {embedding}\")"
"print_highlight(f\"Text embedding (first 10): {embedding}\")"
]
},
{
Expand All @@ -138,15 +227,36 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/home/chenyang/miniconda3/envs/AlphaMeemory/lib/python3.11/site-packages/transformers/utils/hub.py:127: FutureWarning: Using `TRANSFORMERS_CACHE` is deprecated and will be removed in v5 of Transformers. Use `HF_HOME` instead.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Input IDs embedding (first 10): [0.00829315185546875, 0.0007004737854003906, -0.00809478759765625, -0.0006799697875976562, 0.01438140869140625, -0.00897979736328125, 0.0123748779296875, 0.0020923614501953125, 0.006195068359375, -0.0030498504638671875]\n"
"[2024-10-28 02:10:32 TP0] Prefill batch. #new-seq: 1, #new-token: 1, #cached-token: 3, cache hit rate: 33.33%, token usage: 0.00, #running-req: 0, #queue-req: 0\n",
"[2024-10-28 02:10:32] INFO: 127.0.0.1:48124 - \"POST /v1/embeddings HTTP/1.1\" 200 OK\n"
]
},
{
"data": {
"text/html": [
"<strong style='color: #00008B;'>Input IDs embedding (first 10): [0.00829315185546875, 0.0007004737854003906, -0.00809478759765625, -0.0006799697875976562, 0.01438140869140625, -0.00897979736328125, 0.0123748779296875, 0.0020923614501953125, 0.006195068359375, -0.0030498504638671875]</strong>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
@@ -168,14 +278,26 @@
" 0\n",
"][\"embedding\"]\n",
"\n",
"print(f\"Input IDs embedding (first 10): {input_ids_embedding[:10]}\")"
"print_highlight(f\"Input IDs embedding (first 10): {input_ids_embedding[:10]}\")"
]
},
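This cell sends pre-tokenized `input_ids` instead of raw text; the tokenization step itself is collapsed in the diff. A plausible sketch of producing those IDs with Hugging Face `transformers` (the `FutureWarning` from `transformers` in the stderr output suggests a tokenizer is loaded here; the input string is a placeholder):

```python
from transformers import AutoTokenizer

# Hypothetical reconstruction of the collapsed tokenization step.
tokenizer = AutoTokenizer.from_pretrained("Alibaba-NLP/gte-Qwen2-7B-instruct")
input_ids = tokenizer("Once upon a time").input_ids  # placeholder text
```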
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 5,
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[2024-10-28 02:10:32] INFO: Shutting down\n",
"[2024-10-28 02:10:32] INFO: Waiting for application shutdown.\n",
"[2024-10-28 02:10:32] INFO: Application shutdown complete.\n",
"[2024-10-28 02:10:32] INFO: Finished server process [1188896]\n",
"W1028 02:10:32.490000 140389363193408 torch/_inductor/compile_worker/subproc_pool.py:126] SubprocPool unclean exit\n"
]
}
],
"source": [
"terminate_process(embedding_process)"
]
