Skip to content

Commit 1a0976f

Browse files
authored
Added CPU support for pod creation from Python SDK (#403)
1 parent 9f994ac commit 1a0976f

File tree

4 files changed

+138
-68
lines changed

4 files changed

+138
-68
lines changed

README.md

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -137,9 +137,12 @@ pods = runpod.get_pods()
137137
# Get a specific pod
138138
pod = runpod.get_pod(pod.id)
139139

140-
# Create a pod
140+
# Create a pod with GPU
141141
pod = runpod.create_pod("test", "runpod/stack", "NVIDIA GeForce RTX 3070")
142142

143+
# Create a pod with CPU
144+
pod = runpod.create_pod("test", "runpod/stack", instance_id="cpu3c-2-4")
145+
143146
# Stop the pod
144147
runpod.stop_pod(pod.id)
145148

runpod/api/ctl_commands.py

Lines changed: 20 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -89,7 +89,7 @@ def get_pod(pod_id: str):
8989
def create_pod(
9090
name: str,
9191
image_name: str,
92-
gpu_type_id: str,
92+
gpu_type_id: Optional[str] = None,
9393
cloud_type: str = "ALL",
9494
support_public_ip: bool = True,
9595
start_ssh: bool = True,
@@ -109,17 +109,18 @@ def create_pod(
109109
allowed_cuda_versions: Optional[list] = None,
110110
min_download = None,
111111
min_upload = None,
112+
instance_id: Optional[str] = None,
112113
) -> dict:
113114
"""
114115
Create a pod
115116
116117
:param name: the name of the pod
117118
:param image_name: the name of the docker image to be used by the pod
118-
:param gpu_type_id: the gpu type wanted by the pod (retrievable by get_gpus)
119+
:param gpu_type_id: the gpu type wanted by the pod (retrievable by get_gpus). If None, creates a CPU-only pod
119120
:param cloud_type: if secure cloud, community cloud or all is wanted
120121
:param data_center_id: the id of the data center
121122
:param country_code: the code for country to start the pod in
122-
:param gpu_count: how many gpus should be attached to the pod
123+
:param gpu_count: how many gpus should be attached to the pod (ignored for CPU-only pods)
123124
:param volume_in_gb: how big should the pod volume be
124125
:param ports: the ports to open in the pod, example format - "8888/http,666/tcp"
125126
:param volume_mount_path: where to mount the volume?
@@ -129,12 +130,19 @@ def create_pod(
129130
:param template_id: the id of the template to use for the pod
130131
:param min_download: minimum download speed in Mbps
131132
:param min_upload: minimum upload speed in Mbps
133+
:param instance_id: the id of a specific instance to deploy to (for CPU pods)
132134
:example:
133135
136+
>>> # Create GPU pod
134137
>>> pod_id = runpod.create_pod("test", "runpod/stack", "NVIDIA GeForce RTX 3070")
138+
>>> # Create CPU pod
139+
>>> pod_id = runpod.create_pod("test", "runpod/stack")
140+
>>> # Create CPU pod on specific instance
141+
>>> pod_id = runpod.create_pod("test", "runpod/stack", instance_id="cpu3c-2-4")
135142
"""
136143
# Input Validation
137-
get_gpu(gpu_type_id) # Check if GPU exists, will raise ValueError if not.
144+
if gpu_type_id is not None:
145+
get_gpu(gpu_type_id) # Check if GPU exists, will raise ValueError if not.
138146
if cloud_type not in ["ALL", "COMMUNITY", "SECURE"]:
139147
raise ValueError("cloud_type must be one of ALL, COMMUNITY or SECURE")
140148

@@ -158,7 +166,7 @@ def create_pod(
158166
start_ssh,
159167
data_center_id,
160168
country_code,
161-
gpu_count,
169+
gpu_count if gpu_type_id is not None else None,
162170
volume_in_gb,
163171
container_disk_in_gb,
164172
min_vcpu_count,
@@ -172,10 +180,16 @@ def create_pod(
172180
allowed_cuda_versions,
173181
min_download,
174182
min_upload,
183+
instance_id,
175184
)
176185
)
177186

178-
cleaned_response = raw_response["data"]["podFindAndDeployOnDemand"]
187+
print(f"raw_response: {raw_response}")
188+
189+
if gpu_type_id is not None:
190+
cleaned_response = raw_response["data"]["podFindAndDeployOnDemand"]
191+
else:
192+
cleaned_response = raw_response["data"]["deployCpuPod"]
179193
return cleaned_response
180194

181195

runpod/api/mutations/pods.py

Lines changed: 85 additions & 56 deletions
Original file line numberDiff line numberDiff line change
@@ -10,104 +10,133 @@
1010
def generate_pod_deployment_mutation(
1111
name: str,
1212
image_name: str,
13-
gpu_type_id: str,
13+
gpu_type_id: Optional[str] = None,
1414
cloud_type: str = "ALL",
1515
support_public_ip: bool = True,
1616
start_ssh: bool = True,
17-
data_center_id=None,
18-
country_code=None,
19-
gpu_count=None,
20-
volume_in_gb=None,
21-
container_disk_in_gb=None,
22-
min_vcpu_count=None,
23-
min_memory_in_gb=None,
24-
docker_args=None,
25-
ports=None,
26-
volume_mount_path=None,
27-
env: dict = None,
28-
template_id=None,
29-
network_volume_id=None,
17+
data_center_id: Optional[str] = None,
18+
country_code: Optional[str] = None,
19+
gpu_count: Optional[int] = None,
20+
volume_in_gb: Optional[int] = None,
21+
container_disk_in_gb: Optional[int] = None,
22+
min_vcpu_count: Optional[int] = None,
23+
min_memory_in_gb: Optional[int] = None,
24+
docker_args: Optional[str] = None,
25+
ports: Optional[str] = None,
26+
volume_mount_path: Optional[str] = None,
27+
env: Optional[dict] = None,
28+
template_id: Optional[str] = None,
29+
network_volume_id: Optional[str] = None,
3030
allowed_cuda_versions: Optional[List[str]] = None,
31-
min_download=None,
32-
min_upload=None,
33-
):
31+
min_download: Optional[int] = None,
32+
min_upload: Optional[int] = None,
33+
instance_id: Optional[str] = None,
34+
) -> str:
3435
"""
3536
Generates a mutation to deploy a pod on demand.
37+
38+
Args:
39+
name: Name of the pod
40+
image_name: Docker image name
41+
gpu_type_id: GPU type ID for GPU pods, None for CPU pods
42+
cloud_type: Cloud type (ALL, COMMUNITY, or SECURE)
43+
support_public_ip: Whether to support public IP
44+
start_ssh: Whether to start SSH service
45+
data_center_id: Data center ID
46+
country_code: Country code for pod location
47+
gpu_count: Number of GPUs (for GPU pods)
48+
volume_in_gb: Volume size in GB
49+
container_disk_in_gb: Container disk size in GB
50+
min_vcpu_count: Minimum vCPU count
51+
min_memory_in_gb: Minimum memory in GB
52+
docker_args: Docker arguments
53+
ports: Port mappings (e.g. "8080/tcp,22/tcp")
54+
volume_mount_path: Volume mount path
55+
env: Environment variables dict
56+
template_id: Template ID
57+
network_volume_id: Network volume ID
58+
allowed_cuda_versions: List of allowed CUDA versions
59+
min_download: Minimum download speed in Mbps
60+
min_upload: Minimum upload speed in Mbps
61+
instance_id: Instance ID for CPU pods
62+
63+
Returns:
64+
str: GraphQL mutation string
3665
"""
3766
input_fields = []
3867

39-
# ------------------------------ Required Fields ----------------------------- #
40-
input_fields.append(f'name: "{name}"')
41-
input_fields.append(f'imageName: "{image_name}"')
42-
input_fields.append(f'gpuTypeId: "{gpu_type_id}"')
43-
44-
# ------------------------------ Default Fields ------------------------------ #
45-
input_fields.append(f"cloudType: {cloud_type}")
68+
# Required Fields
69+
input_fields.extend([
70+
f'name: "{name}"',
71+
f'imageName: "{image_name}"',
72+
f"cloudType: {cloud_type}"
73+
])
4674

4775
if start_ssh:
4876
input_fields.append("startSsh: true")
4977

50-
if support_public_ip:
51-
input_fields.append("supportPublicIp: true")
78+
# GPU Pod Fields
79+
if gpu_type_id is not None:
80+
input_fields.append(f'gpuTypeId: "{gpu_type_id}"')
81+
input_fields.append(f"supportPublicIp: {str(support_public_ip).lower()}")
82+
83+
if gpu_count is not None:
84+
input_fields.append(f"gpuCount: {gpu_count}")
85+
if volume_in_gb is not None:
86+
input_fields.append(f"volumeInGb: {volume_in_gb}")
87+
if min_vcpu_count is not None:
88+
input_fields.append(f"minVcpuCount: {min_vcpu_count}")
89+
if min_memory_in_gb is not None:
90+
input_fields.append(f"minMemoryInGb: {min_memory_in_gb}")
91+
if docker_args is not None:
92+
input_fields.append(f'dockerArgs: "{docker_args}"')
93+
if allowed_cuda_versions is not None:
94+
cuda_versions = ", ".join(f'"{v}"' for v in allowed_cuda_versions)
95+
input_fields.append(f"allowedCudaVersions: [{cuda_versions}]")
96+
97+
# CPU Pod Fields
5298
else:
53-
input_fields.append("supportPublicIp: false")
99+
if instance_id is not None:
100+
input_fields.append(f'instanceId: "{instance_id}"')
101+
template_id = template_id or "runpod-ubuntu"
54102

55-
# ------------------------------ Optional Fields ----------------------------- #
103+
# Optional Fields
56104
if data_center_id is not None:
57105
input_fields.append(f'dataCenterId: "{data_center_id}"')
106+
else:
107+
input_fields.append("dataCenterId: null")
108+
58109
if country_code is not None:
59110
input_fields.append(f'countryCode: "{country_code}"')
60-
if gpu_count is not None:
61-
input_fields.append(f"gpuCount: {gpu_count}")
62-
if volume_in_gb is not None:
63-
input_fields.append(f"volumeInGb: {volume_in_gb}")
64111
if container_disk_in_gb is not None:
65112
input_fields.append(f"containerDiskInGb: {container_disk_in_gb}")
66-
if min_vcpu_count is not None:
67-
input_fields.append(f"minVcpuCount: {min_vcpu_count}")
68-
if min_memory_in_gb is not None:
69-
input_fields.append(f"minMemoryInGb: {min_memory_in_gb}")
70-
if docker_args is not None:
71-
input_fields.append(f'dockerArgs: "{docker_args}"')
72113
if ports is not None:
73-
ports = ports.replace(" ", "")
74-
input_fields.append(f'ports: "{ports}"')
114+
input_fields.append(f'ports: "{ports.replace(" ", "")}"')
75115
if volume_mount_path is not None:
76116
input_fields.append(f'volumeMountPath: "{volume_mount_path}"')
77117
if env is not None:
78-
env_string = ", ".join(
79-
[f'{{ key: "{key}", value: "{value}" }}' for key, value in env.items()]
80-
)
81-
input_fields.append(f"env: [{env_string}]")
118+
env_items = [f'{{ key: "{k}", value: "{v}" }}' for k, v in env.items()]
119+
input_fields.append(f"env: [{', '.join(env_items)}]")
82120
if template_id is not None:
83121
input_fields.append(f'templateId: "{template_id}"')
84-
85122
if network_volume_id is not None:
86123
input_fields.append(f'networkVolumeId: "{network_volume_id}"')
87-
88-
if allowed_cuda_versions is not None:
89-
allowed_cuda_versions_string = ", ".join(
90-
[f'"{version}"' for version in allowed_cuda_versions]
91-
)
92-
input_fields.append(f"allowedCudaVersions: [{allowed_cuda_versions_string}]")
93-
94124
if min_download is not None:
95125
input_fields.append(f'minDownload: {min_download}')
96-
97126
if min_upload is not None:
98127
input_fields.append(f'minUpload: {min_upload}')
99128

100-
# Format input fields
129+
mutation_type = "podFindAndDeployOnDemand" if gpu_type_id else "deployCpuPod"
101130
input_string = ", ".join(input_fields)
131+
102132
return f"""
103133
mutation {{
104-
podFindAndDeployOnDemand(
134+
{mutation_type}(
105135
input: {{
106136
{input_string}
107137
}}
108138
) {{
109139
id
110-
desiredStatus
111140
imageName
112141
env
113142
machineId

tests/test_api/test_mutations_pods.py

Lines changed: 29 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -10,11 +10,12 @@ class TestPodMutations(unittest.TestCase):
1010

1111
def test_generate_pod_deployment_mutation(self):
1212
"""
13-
Test generate_pod_deployment_mutation
13+
Test generate_pod_deployment_mutation for both GPU and CPU pods
1414
"""
15-
result = pods.generate_pod_deployment_mutation(
15+
# Test GPU pod deployment
16+
gpu_result = pods.generate_pod_deployment_mutation(
1617
name="test",
17-
image_name="test_image",
18+
image_name="test_image",
1819
gpu_type_id="1",
1920
cloud_type="cloud",
2021
data_center_id="1",
@@ -33,8 +34,31 @@ def test_generate_pod_deployment_mutation(self):
3334
allowed_cuda_versions=["11.8", "12.0"],
3435
)
3536

36-
# Here you should check the correct structure of the result
37-
self.assertIn("mutation", result)
37+
# Test CPU pod deployment
38+
cpu_result = pods.generate_pod_deployment_mutation(
39+
name="test-cpu",
40+
image_name="test_image",
41+
cloud_type="cloud",
42+
data_center_id="1",
43+
country_code="US",
44+
volume_in_gb=100,
45+
container_disk_in_gb=10,
46+
min_vcpu_count=2,
47+
min_memory_in_gb=4,
48+
docker_args="args",
49+
ports="8080",
50+
volume_mount_path="/path",
51+
env={"ENV": "test"},
52+
instance_id="cpu3c-2-4"
53+
)
54+
55+
# Check GPU pod mutation structure
56+
self.assertIn("mutation", gpu_result)
57+
self.assertIn("podFindAndDeployOnDemand", gpu_result)
58+
59+
# Check CPU pod mutation structure
60+
self.assertIn("mutation", cpu_result)
61+
self.assertIn("deployCpuPod", cpu_result)
3862

3963
def test_generate_pod_stop_mutation(self):
4064
"""

0 commit comments

Comments (0)