This guide sets up Python 3.11, Jupyter Notebook, and fully local Hugging Face model loading on macOS using `uv` and `pyenv`.
Install Homebrew, then `pyenv` and `uv`, and wire `pyenv` into your shell startup:

```bash
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
brew install pyenv uv

# Make pyenv available in zsh login shells
echo 'export PYENV_ROOT="$HOME/.pyenv"' >> ~/.zprofile
echo 'export PATH="$PYENV_ROOT/bin:$PATH"' >> ~/.zprofile
echo 'eval "$(pyenv init --path)"' >> ~/.zprofile
source ~/.zprofile
```
Install Python 3.11.9, make it the default, and verify:

```bash
pyenv install 3.11.9
pyenv global 3.11.9

which python
# should be ~/.pyenv/shims/python
python --version
# should be Python 3.11.9
```
Create the project folder and a virtual environment:

```bash
mkdir -p ~/llm/jupyter
cd ~/llm/jupyter
uv venv
```

`uv venv` creates `.venv/` from an interpreter it discovers on your PATH, which should now be pyenv's 3.11.9; you can pin it explicitly with `uv venv --python 3.11.9`.
Save the following file as `requirements.txt` in the project folder:
```text
torch==2.3.0
numpy==1.26.4
transformers==4.52.4
huggingface-hub==0.33.0
datasets==3.6.0
trl==0.14.0
jinja2==3.1.2
markupsafe==2.0.1
tabulate==0.9.0
pandas==2.3.0
jupyter       # provides the `jupyter notebook` command used below
accelerate    # required by transformers when device_map="auto" is set
```
Install the dependencies into the venv and check what landed:

```bash
uv pip install -r requirements.txt
uv pip list
```

Look for `torch`, `transformers`, `pandas`, etc. in the output.
Activate the environment and launch Jupyter:

```bash
source .venv/bin/activate
jupyter notebook
```
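In a fresh notebook, a quick sanity cell confirms the kernel is the project's `.venv` interpreter and that the pinned packages import cleanly (a minimal check, not a required step):

```python
import sys

import pandas
import torch
import transformers

# Should point at the project's venv, e.g. ~/llm/jupyter/.venv/bin/python
print(sys.executable)

# Versions should match the pins in requirements.txt
print(torch.__version__, transformers.__version__, pandas.__version__)
```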
Log in to Hugging Face (only needed for gated or private models) and download the model for offline use:

```bash
huggingface-cli login
huggingface-cli download Qwen/Qwen3-0.6B-Base --local-dir ./models/Qwen/Qwen3-0.6B-Base
```

With `huggingface-hub` 0.33.0, `--local-dir` writes real files into the target folder; the old `--local-dir-use-symlinks` flag is deprecated and no longer needed.
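If you prefer to script the download, `huggingface_hub.snapshot_download` does the same fetch from Python. A minimal sketch of the equivalent call (not an additional required step):

```python
from huggingface_hub import snapshot_download

# Mirrors the CLI command above: downloads the full repo into ./models/...
snapshot_download(
    repo_id="Qwen/Qwen3-0.6B-Base",
    local_dir="./models/Qwen/Qwen3-0.6B-Base",
)
```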
In a notebook cell, load the model and tokenizer strictly from the local directory; `local_files_only=True` guarantees transformers never reaches out to the Hub:

```python
from pathlib import Path

from transformers import AutoModelForCausalLM, AutoTokenizer


def load_model_and_tokenizer(model_path, use_gpu=True):
    # Resolve to an absolute path so transformers treats it as a local
    # directory rather than a Hub repo id.
    resolved_path = str(Path(model_path).resolve())
    model = AutoModelForCausalLM.from_pretrained(
        resolved_path,
        local_files_only=True,
        # device_map="auto" requires the accelerate package (in requirements.txt)
        device_map="auto" if use_gpu else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        resolved_path,
        local_files_only=True,
    )
    return model, tokenizer
```
Then run a quick generation on CPU:

```python
model, tokenizer = load_model_and_tokenizer("./models/Qwen/Qwen3-0.6B-Base", use_gpu=False)

input_ids = tokenizer("Hello, world!", return_tensors="pt").input_ids
# Cap output length explicitly; the default generation max_length is only 20 tokens
output = model.generate(input_ids, max_new_tokens=20)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```
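The example above stays on CPU. On Apple Silicon you can optionally move the model onto PyTorch's MPS backend instead; a minimal sketch, assuming the 0.6B model fits comfortably in unified memory:

```python
import torch

# Use Apple's Metal (MPS) backend when available, otherwise fall back to CPU
device = "mps" if torch.backends.mps.is_available() else "cpu"
model = model.to(device)

input_ids = tokenizer("Hello, world!", return_tensors="pt").input_ids.to(device)
output = model.generate(input_ids, max_new_tokens=20)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```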
Why this setup works well:

- Runs fast on an M3 Mac
- No remote Hugging Face calls (`local_files_only=True`)
- Jupyter is isolated in `.venv`
- Fully reproducible from `requirements.txt`