Split RLLib and Ray Tune examples into two separate example notebooks #186

Merged (5 commits) on Jul 9, 2024
Changes from all commits
.github/workflows/test-examples.yml (1 change: 1 addition & 0 deletions)
@@ -39,6 +39,7 @@ jobs:
- integrations/model-training/yolov5/notebooks/Comet_and_YOLOv5.ipynb
- integrations/model-training/yolov8/notebooks/YOLOv8_and_Comet.ipynb
- integrations/reinforcement-learning/gymnasium/notebooks/comet_gymnasium_example.ipynb
- integrations/reinforcement-learning/rllib/notebooks/Comet_and_RLLib.ipynb
- integrations/workflow-orchestration/metaflow/notebooks/metaflow_hello_world.ipynb
env:
NOTEBOOK_TO_TEST: ${{ matrix.notebooks }}
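
The matrix entry added above is consumed through the NOTEBOOK_TO_TEST environment variable; the workflow step that actually runs each notebook is collapsed out of this diff. As a rough sketch only, assuming an nbclient-based runner rather than the workflow's real test script:

# Hypothetical runner, not this workflow's actual test step:
# execute the notebook path supplied by the CI matrix.
import os

import nbformat
from nbclient import NotebookClient

path = os.environ["NOTEBOOK_TO_TEST"]  # e.g. the new Comet_and_RLLib.ipynb entry
nb = nbformat.read(path, as_version=4)
NotebookClient(nb, timeout=600).execute()  # raises CellExecutionError if any cell fails
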
SageMaker/Linear_example.ipynb (94 changes: 57 additions & 37 deletions)
@@ -36,9 +36,9 @@
"metadata": {},
"outputs": [],
"source": [
"bucket = 'NAME_YOUR_BUCKET'\n",
"prefix = 'sagemaker/DEMO-linear-mnist'\n",
" \n",
"bucket = \"NAME_YOUR_BUCKET\"\n",
"prefix = \"sagemaker/DEMO-linear-mnist\"\n",
"\n",
"# Define IAM role\n",
"import boto3\n",
"import re\n",
@@ -64,9 +64,11 @@
"import pickle, gzip, numpy, urllib.request, json\n",
"\n",
"# Load the dataset\n",
"urllib.request.urlretrieve(\"http://deeplearning.net/data/mnist/mnist.pkl.gz\", \"mnist.pkl.gz\")\n",
"with gzip.open('mnist.pkl.gz', 'rb') as f:\n",
" train_set, valid_set, test_set = pickle.load(f, encoding='latin1')"
"urllib.request.urlretrieve(\n",
" \"http://deeplearning.net/data/mnist/mnist.pkl.gz\", \"mnist.pkl.gz\"\n",
")\n",
"with gzip.open(\"mnist.pkl.gz\", \"rb\") as f:\n",
" train_set, valid_set, test_set = pickle.load(f, encoding=\"latin1\")"
]
},
{
@@ -84,18 +86,20 @@
"source": [
"%matplotlib inline\n",
"import matplotlib.pyplot as plt\n",
"plt.rcParams[\"figure.figsize\"] = (2,10)\n",
"\n",
"plt.rcParams[\"figure.figsize\"] = (2, 10)\n",
"\n",
"\n",
"def show_digit(img, caption='', subplot=None):\n",
" if subplot==None:\n",
" _,(subplot)=plt.subplots(1,1)\n",
" imgr=img.reshape((28,28))\n",
" subplot.axis('off')\n",
" subplot.imshow(imgr, cmap='gray')\n",
"def show_digit(img, caption=\"\", subplot=None):\n",
" if subplot == None:\n",
" _, (subplot) = plt.subplots(1, 1)\n",
" imgr = img.reshape((28, 28))\n",
" subplot.axis(\"off\")\n",
" subplot.imshow(imgr, cmap=\"gray\")\n",
" plt.title(caption)\n",
"\n",
"show_digit(train_set[0][30], 'This is a {}'.format(train_set[1][30]))"
"\n",
"show_digit(train_set[0][30], \"This is a {}\".format(train_set[1][30]))"
]
},
{
@@ -115,8 +119,10 @@
"import numpy as np\n",
"import sagemaker.amazon.common as smac\n",
"\n",
"vectors = np.array([t.tolist() for t in train_set[0]]).astype('float32')\n",
"labels = np.where(np.array([t.tolist() for t in train_set[1]]) == 0, 1, 0).astype('float32')\n",
"vectors = np.array([t.tolist() for t in train_set[0]]).astype(\"float32\")\n",
"labels = np.where(np.array([t.tolist() for t in train_set[1]]) == 0, 1, 0).astype(\n",
" \"float32\"\n",
")\n",
"\n",
"buf = io.BytesIO()\n",
"smac.write_numpy_to_dense_tensor(buf, vectors, labels)\n",
@@ -139,10 +145,12 @@
"import boto3\n",
"import os\n",
"\n",
"key = 'recordio-pb-data'\n",
"boto3.resource('s3').Bucket(bucket).Object(os.path.join(prefix, 'train', key)).upload_fileobj(buf)\n",
"s3_train_data = 's3://{}/{}/train/{}'.format(bucket, prefix, key)\n",
"print('uploaded training data location: {}'.format(s3_train_data))"
"key = \"recordio-pb-data\"\n",
"boto3.resource(\"s3\").Bucket(bucket).Object(\n",
" os.path.join(prefix, \"train\", key)\n",
").upload_fileobj(buf)\n",
"s3_train_data = \"s3://{}/{}/train/{}\".format(bucket, prefix, key)\n",
"print(\"uploaded training data location: {}\".format(s3_train_data))"
]
},
{
@@ -158,8 +166,8 @@
"metadata": {},
"outputs": [],
"source": [
"output_location = 's3://{}/{}/output'.format(bucket, prefix)\n",
"print('training artifacts will be uploaded to: {}'.format(output_location))"
"output_location = \"s3://{}/{}/output\".format(bucket, prefix)\n",
"print(\"training artifacts will be uploaded to: {}\".format(output_location))"
]
},
{
@@ -176,7 +184,8 @@
"outputs": [],
"source": [
"from sagemaker.amazon.amazon_estimator import get_image_uri\n",
"container = get_image_uri(boto3.Session().region_name, 'linear-learner')"
"\n",
"container = get_image_uri(boto3.Session().region_name, \"linear-learner\")"
]
},
{
@@ -190,17 +199,19 @@
"\n",
"sess = sagemaker.Session()\n",
"\n",
"linear = sagemaker.estimator.Estimator(container,\n",
" role, \n",
" train_instance_count=1, \n",
" train_instance_type='ml.c4.xlarge',\n",
" output_path=output_location,\n",
" sagemaker_session=sess)\n",
"linear.set_hyperparameters(feature_dim=784,\n",
" predictor_type='binary_classifier',\n",
" mini_batch_size=200)\n",
"linear = sagemaker.estimator.Estimator(\n",
" container,\n",
" role,\n",
" train_instance_count=1,\n",
" train_instance_type=\"ml.c4.xlarge\",\n",
" output_path=output_location,\n",
" sagemaker_session=sess,\n",
")\n",
"linear.set_hyperparameters(\n",
" feature_dim=784, predictor_type=\"binary_classifier\", mini_batch_size=200\n",
")\n",
"\n",
"linear.fit({'train': s3_train_data})"
"linear.fit({\"train\": s3_train_data})"
]
},
{
@@ -214,7 +225,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"Define your Comet [REST API](https://www.comet.ml/docs/rest-api/getting-started/) and your [workspace](https://www.comet.ml/docs/user-interface/#workspaces). See the [configuration documentation](http://docs.comet.ml/python-sdk/advanced/#python-configuration) for info on both specifications."
"Define your Comet [REST API](https://www.comet.com/docs/rest-api/getting-started/) and your [workspace](https://www.comet.com/docs/user-interface/#workspaces). See the [configuration documentation](http://docs.comet.ml/python-sdk/advanced/#python-configuration) for info on both specifications."
]
},
{
@@ -264,7 +275,9 @@
"source": [
"# .log_sagemaker_job(regressor/estimator object from Sagemaker SDK, Comet Rest API key (optional, can be taken from usual config source), workspace (comet), project (comet))\n",
"# I have used the Sagemaker SDK to train a model. I have the estimator/regressor object. I want to log whatever I just trained\n",
"experiment = comet_ml_sagemaker.log_sagemaker_job(linear, api_key=COMET_REST_API, workspace=COMET_WORKSPACE, project_name=\"sagemaker\")\n",
"experiment = comet_ml_sagemaker.log_sagemaker_job(\n",
" linear, api_key=COMET_REST_API, workspace=COMET_WORKSPACE, project_name=\"sagemaker\"\n",
")\n",
"print(experiment.url)"
]
},
@@ -290,7 +303,12 @@
"# I have the name of a completed training job I want to lob\n",
"# Same as .log_sagemaker_job, except instead of passing the regressor/estimator object, you pass the job name\n",
"SAGEMAKER_TRAINING_JOB_NAME = \"SAGEMAKER_TRAINING_JOB_NAME\"\n",
"experiment = comet_ml_sagemaker.log_sagemaker_job_by_name(SAGEMAKER_TRAINING_JOB_NAME, api_key=COMET_REST_API, workspace=COMET_WORKSPACE, project_name=\"sagemaker\")\n",
"experiment = comet_ml_sagemaker.log_sagemaker_job_by_name(\n",
" SAGEMAKER_TRAINING_JOB_NAME,\n",
" api_key=COMET_REST_API,\n",
" workspace=COMET_WORKSPACE,\n",
" project_name=\"sagemaker\",\n",
")\n",
"print(experiment.url)"
]
},
@@ -313,7 +331,9 @@
"outputs": [],
"source": [
"# Logs the last job for your current Amazon Region / S3\n",
"experiment = comet_ml_sagemaker.log_last_sagemaker_job(api_key=COMET_REST_API, workspace=COMET_WORKSPACE, project_name=\"sagemaker\")\n",
"experiment = comet_ml_sagemaker.log_last_sagemaker_job(\n",
" api_key=COMET_REST_API, workspace=COMET_WORKSPACE, project_name=\"sagemaker\"\n",
")\n",
"print(experiment.url)"
]
},
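
The logging cells above rely on COMET_REST_API, COMET_WORKSPACE, and the comet_ml_sagemaker module, all defined in a setup cell that is collapsed in this diff. A minimal sketch of that setup, with placeholder values rather than the notebook's actual contents:

# Hypothetical configuration cell (collapsed in the diff above).
# Placeholder values only; substitute your own Comet credentials.
import comet_ml_sagemaker  # Comet's SageMaker integration module

COMET_REST_API = "YOUR_COMET_REST_API_KEY"  # REST API key from your Comet account settings
COMET_WORKSPACE = "YOUR_WORKSPACE"  # Comet workspace that receives the experiments
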
SageMaker/random_forest.ipynb (59 changes: 36 additions & 23 deletions)
@@ -41,26 +41,28 @@
"import sys\n",
"\n",
"\n",
"bucket = 'NAME_YOUR_BUCKET' # <--- specify a bucket you have access to\n",
"prefix = 'sagemaker/rcf-benchmarks'\n",
"bucket = \"NAME_YOUR_BUCKET\" # <--- specify a bucket you have access to\n",
"prefix = \"sagemaker/rcf-benchmarks\"\n",
"execution_role = sagemaker.get_execution_role()\n",
"\n",
"\n",
"# check if the bucket exists\n",
"try:\n",
" boto3.Session().client('s3').head_bucket(Bucket=bucket)\n",
" boto3.Session().client(\"s3\").head_bucket(Bucket=bucket)\n",
"except botocore.exceptions.ParamValidationError as e:\n",
" print('Hey! You either forgot to specify your S3 bucket'\n",
" ' or you gave your bucket an invalid name!')\n",
" print(\n",
" \"Hey! You either forgot to specify your S3 bucket\"\n",
" \" or you gave your bucket an invalid name!\"\n",
" )\n",
"except botocore.exceptions.ClientError as e:\n",
" if e.response['Error']['Code'] == '403':\n",
" if e.response[\"Error\"][\"Code\"] == \"403\":\n",
" print(\"Hey! You don't have permission to access the bucket, {}.\".format(bucket))\n",
" elif e.response['Error']['Code'] == '404':\n",
" elif e.response[\"Error\"][\"Code\"] == \"404\":\n",
" print(\"Hey! Your bucket, {}, doesn't exist!\".format(bucket))\n",
" else:\n",
" raise\n",
"else:\n",
" print('Training input/output will be stored in: s3://{}/{}'.format(bucket, prefix))"
" print(\"Training input/output will be stored in: s3://{}/{}\".format(bucket, prefix))"
]
},
{
@@ -81,11 +83,11 @@
"import pandas as pd\n",
"import urllib.request\n",
"\n",
"data_filename = 'nyc_taxi.csv'\n",
"data_source = 'https://raw.githubusercontent.com/numenta/NAB/master/data/realKnownCause/nyc_taxi.csv'\n",
"data_filename = \"nyc_taxi.csv\"\n",
"data_source = \"https://raw.githubusercontent.com/numenta/NAB/master/data/realKnownCause/nyc_taxi.csv\"\n",
"\n",
"urllib.request.urlretrieve(data_source, data_filename)\n",
"taxi_data = pd.read_csv(data_filename, delimiter=',')"
"taxi_data = pd.read_csv(data_filename, delimiter=\",\")"
]
},
{
@@ -108,16 +110,18 @@
"session = sagemaker.Session()\n",
"\n",
"# specify general training job information\n",
"rcf = RandomCutForest(role=execution_role,\n",
" train_instance_count=1,\n",
" train_instance_type='ml.m4.xlarge',\n",
" data_location='s3://{}/{}/'.format(bucket, prefix),\n",
" output_path='s3://{}/{}/output'.format(bucket, prefix),\n",
" num_samples_per_tree=512,\n",
" num_trees=50)\n",
"rcf = RandomCutForest(\n",
" role=execution_role,\n",
" train_instance_count=1,\n",
" train_instance_type=\"ml.m4.xlarge\",\n",
" data_location=\"s3://{}/{}/\".format(bucket, prefix),\n",
" output_path=\"s3://{}/{}/output\".format(bucket, prefix),\n",
" num_samples_per_tree=512,\n",
" num_trees=50,\n",
")\n",
"\n",
"# automatically upload the training data to S3 and run the training job\n",
"rcf.fit(rcf.record_set(taxi_data.value.as_matrix().reshape(-1,1)))"
"rcf.fit(rcf.record_set(taxi_data.value.as_matrix().reshape(-1, 1)))"
]
},
{
@@ -131,7 +135,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"Define your Comet [REST API](https://www.comet.ml/docs/rest-api/getting-started/) and your [workspace](https://www.comet.ml/docs/user-interface/#workspaces). See the [configuration documentation](http://docs.comet.ml/python-sdk/advanced/#python-configuration) for info on both specifications."
"Define your Comet [REST API](https://www.comet.com/docs/rest-api/getting-started/) and your [workspace](https://www.comet.com/docs/user-interface/#workspaces). See the [configuration documentation](http://docs.comet.ml/python-sdk/advanced/#python-configuration) for info on both specifications."
]
},
{
@@ -181,7 +185,9 @@
"source": [
"# .log_sagemaker_job(regressor/estimator object from Sagemaker SDK, Comet Rest API key (optional, can be taken from usual config source), workspace (comet), project (comet))\n",
"# I have used the Sagemaker SDK to train a model. I have the estimator/regressor object. I want to log whatever I just trained\n",
"experiment = comet_ml_sagemaker.log_sagemaker_job(rcf, api_key=COMET_REST_API, workspace=COMET_WORKSPACE, project_name=\"sagemaker\")\n",
"experiment = comet_ml_sagemaker.log_sagemaker_job(\n",
" rcf, api_key=COMET_REST_API, workspace=COMET_WORKSPACE, project_name=\"sagemaker\"\n",
")\n",
"print(experiment.url)\n",
"experiment.add_tags([\"random_forest\"])"
]
@@ -208,7 +214,12 @@
"# I have the name of a completed training job I want to lob\n",
"# Same as .log_sagemaker_job, except instead of passing the regressor/estimator object, you pass the job name\n",
"SAGEMAKER_TRAINING_JOB_NAME = \"SAGEMAKER_TRAINING_JOB_NAME\"\n",
"experiment = comet_ml_sagemaker.log_sagemaker_job_by_name(SAGEMAKER_TRAINING_JOB_NAME, api_key=COMET_REST_API, workspace=COMET_WORKSPACE, project_name=\"sagemaker\")\n",
"experiment = comet_ml_sagemaker.log_sagemaker_job_by_name(\n",
" SAGEMAKER_TRAINING_JOB_NAME,\n",
" api_key=COMET_REST_API,\n",
" workspace=COMET_WORKSPACE,\n",
" project_name=\"sagemaker\",\n",
")\n",
"print(experiment.url)"
]
},
@@ -231,7 +242,9 @@
"outputs": [],
"source": [
"# Logs the last job for your current Amazon Region / S3\n",
"experiment = comet_ml_sagemaker.log_last_sagemaker_job(api_key=COMET_REST_API, workspace=COMET_WORKSPACE, project_name=\"sagemaker\")\n",
"experiment = comet_ml_sagemaker.log_last_sagemaker_job(\n",
" api_key=COMET_REST_API, workspace=COMET_WORKSPACE, project_name=\"sagemaker\"\n",
")\n",
"print(experiment.url)\n",
"experiment.add_tags([\"random_forest\"])"
]
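
One caveat a reviewer might flag in this file: Series.as_matrix(), used in the rcf.fit(...) call above, was deprecated in pandas 0.23 and removed in pandas 1.0. On a current pandas install, the equivalent call (an adaptation, not what this PR ships) would be:

# Equivalent on modern pandas, where Series.as_matrix() no longer exists:
rcf.fit(rcf.record_set(taxi_data.value.to_numpy().reshape(-1, 1)))
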
catalyst/comet_catalyst_example.py (38 changes: 22 additions & 16 deletions)
@@ -1,27 +1,33 @@
import comet_ml
# coding: utf-8

import os

import comet_ml

from torch import nn, optim
from torch.utils.data import DataLoader

from catalyst import dl
from catalyst.contrib.datasets import MNIST
from catalyst.data import ToTensor
from catalyst.loggers.comet import CometLogger
from catalyst.contrib.datasets import MNIST

comet_ml.init()
comet_ml.login()
logger = CometLogger(logging_frequency=10)

model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.02)



loaders = {
"train": DataLoader(
MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()), batch_size=32
MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()),
batch_size=32,
),
"valid": DataLoader(
MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()), batch_size=32
MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()),
batch_size=32,
),
}

@@ -37,14 +43,16 @@
loaders=loaders,
num_epochs=1,
hparams={
'lr': 0.02,
'betas': (0.9, 0.999),
'eps': 1e-08,
'weight_decay': 0,
'amsgrad': False
"lr": 0.02,
"betas": (0.9, 0.999),
"eps": 1e-08,
"weight_decay": 0,
"amsgrad": False,
},
callbacks=[
dl.AccuracyCallback(input_key="logits", target_key="targets", topk_args=(1, 3, 5)),
dl.AccuracyCallback(
input_key="logits", target_key="targets", topk_args=(1, 3, 5)
),
dl.PrecisionRecallF1SupportCallback(
input_key="logits", target_key="targets", num_classes=10
),
@@ -55,7 +63,5 @@
minimize_valid_metric=True,
verbose=True,
load_best_on_end=True,
loggers={
"comet": logger
}
)
loggers={"comet": logger},
)
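
The main non-formatting change in this file is comet_ml.init() giving way to comet_ml.login(). A minimal sketch of how the new entry point is typically configured, assuming the standard environment-variable setup (not shown in this script):

# Hypothetical setup; comet_ml.login() resolves credentials from the
# environment or ~/.comet.config and prompts interactively if none are found.
import os

os.environ["COMET_API_KEY"] = "YOUR_API_KEY"  # placeholder, not a real key

import comet_ml

comet_ml.login()  # replaces the older comet_ml.init() entry point
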
guides/manage_data/Introduction_to_Artifacts.ipynb (2 changes: 1 addition & 1 deletion)
@@ -534,7 +534,7 @@
"\n",
"We hope you enjoyed this introductory guide to Artifacts, a simple, light weight way to version your datasets and models, while providing information about the lineage of your data through your experiments. \n",
"\n",
"Interested in learning more about Artifacts? Check out the [docs](https://www.comet.ml/docs/user-interface/artifacts/?utm_campaign-artifacts-launch&utm_source=colab-example&utm_medium=additional-resources)"
"Interested in learning more about Artifacts? Check out the [docs](https://www.comet.com/docs/v2/guides/artifacts/using-artifacts/?utm_campaign-artifacts-launch&utm_source=colab-example&utm_medium=additional-resources)"
]
}
],