|
10 | 10 | def generate_pod_deployment_mutation(
|
11 | 11 | name: str,
|
12 | 12 | image_name: str,
|
13 |
| - gpu_type_id: str, |
| 13 | + gpu_type_id: Optional[str] = None, |
14 | 14 | cloud_type: str = "ALL",
|
15 | 15 | support_public_ip: bool = True,
|
16 | 16 | start_ssh: bool = True,
|
17 |
| - data_center_id=None, |
18 |
| - country_code=None, |
19 |
| - gpu_count=None, |
20 |
| - volume_in_gb=None, |
21 |
| - container_disk_in_gb=None, |
22 |
| - min_vcpu_count=None, |
23 |
| - min_memory_in_gb=None, |
24 |
| - docker_args=None, |
25 |
| - ports=None, |
26 |
| - volume_mount_path=None, |
27 |
| - env: dict = None, |
28 |
| - template_id=None, |
29 |
| - network_volume_id=None, |
| 17 | + data_center_id: Optional[str] = None, |
| 18 | + country_code: Optional[str] = None, |
| 19 | + gpu_count: Optional[int] = None, |
| 20 | + volume_in_gb: Optional[int] = None, |
| 21 | + container_disk_in_gb: Optional[int] = None, |
| 22 | + min_vcpu_count: Optional[int] = None, |
| 23 | + min_memory_in_gb: Optional[int] = None, |
| 24 | + docker_args: Optional[str] = None, |
| 25 | + ports: Optional[str] = None, |
| 26 | + volume_mount_path: Optional[str] = None, |
| 27 | + env: Optional[dict] = None, |
| 28 | + template_id: Optional[str] = None, |
| 29 | + network_volume_id: Optional[str] = None, |
30 | 30 | allowed_cuda_versions: Optional[List[str]] = None,
|
31 |
| - min_download=None, |
32 |
| - min_upload=None, |
33 |
| -): |
| 31 | + min_download: Optional[int] = None, |
| 32 | + min_upload: Optional[int] = None, |
| 33 | + instance_id: Optional[str] = None, |
| 34 | +) -> str: |
34 | 35 | """
|
35 | 36 | Generates a mutation to deploy a pod on demand.
|
| 37 | + |
| 38 | + Args: |
| 39 | + name: Name of the pod |
| 40 | + image_name: Docker image name |
| 41 | + gpu_type_id: GPU type ID for GPU pods, None for CPU pods |
| 42 | + cloud_type: Cloud type (ALL, COMMUNITY, or SECURE) |
| 43 | + support_public_ip: Whether to support public IP |
| 44 | + start_ssh: Whether to start SSH service |
| 45 | + data_center_id: Data center ID |
| 46 | + country_code: Country code for pod location |
| 47 | + gpu_count: Number of GPUs (for GPU pods) |
| 48 | + volume_in_gb: Volume size in GB |
| 49 | + container_disk_in_gb: Container disk size in GB |
| 50 | + min_vcpu_count: Minimum vCPU count |
| 51 | + min_memory_in_gb: Minimum memory in GB |
| 52 | + docker_args: Docker arguments |
| 53 | + ports: Port mappings (e.g. "8080/tcp,22/tcp") |
| 54 | + volume_mount_path: Volume mount path |
| 55 | + env: Environment variables dict |
| 56 | + template_id: Template ID |
| 57 | + network_volume_id: Network volume ID |
| 58 | + allowed_cuda_versions: List of allowed CUDA versions |
| 59 | + min_download: Minimum download speed in Mbps |
| 60 | + min_upload: Minimum upload speed in Mbps |
| 61 | + instance_id: Instance ID for CPU pods |
| 62 | +
|
| 63 | + Returns: |
| 64 | + str: GraphQL mutation string |
36 | 65 | """
|
37 | 66 | input_fields = []
|
38 | 67 |
|
39 |
| - # ------------------------------ Required Fields ----------------------------- # |
40 |
| - input_fields.append(f'name: "{name}"') |
41 |
| - input_fields.append(f'imageName: "{image_name}"') |
42 |
| - input_fields.append(f'gpuTypeId: "{gpu_type_id}"') |
43 |
| - |
44 |
| - # ------------------------------ Default Fields ------------------------------ # |
45 |
| - input_fields.append(f"cloudType: {cloud_type}") |
| 68 | + # Always-included fields (name/image are required; cloudType falls back to its default) |
| 69 | + input_fields.extend([ |
| 70 | + f'name: "{name}"', |
| 71 | + f'imageName: "{image_name}"', |
| 72 | + f"cloudType: {cloud_type}" |
| 73 | + ]) |
46 | 74 |
|
47 | 75 | if start_ssh:
|
48 | 76 | input_fields.append("startSsh: true")
|
49 | 77 |
|
50 |
| - if support_public_ip: |
51 |
| - input_fields.append("supportPublicIp: true") |
| 78 | + # GPU Pod Fields |
| 79 | + if gpu_type_id is not None: |
| 80 | + input_fields.append(f'gpuTypeId: "{gpu_type_id}"') |
| 81 | + input_fields.append(f"supportPublicIp: {str(support_public_ip).lower()}") |
| 82 | + |
| 83 | + if gpu_count is not None: |
| 84 | + input_fields.append(f"gpuCount: {gpu_count}") |
| 85 | + if volume_in_gb is not None: |
| 86 | + input_fields.append(f"volumeInGb: {volume_in_gb}") |
| 87 | + if min_vcpu_count is not None: |
| 88 | + input_fields.append(f"minVcpuCount: {min_vcpu_count}") |
| 89 | + if min_memory_in_gb is not None: |
| 90 | + input_fields.append(f"minMemoryInGb: {min_memory_in_gb}") |
| 91 | + if docker_args is not None: |
| 92 | + input_fields.append(f'dockerArgs: "{docker_args}"') |
| 93 | + if allowed_cuda_versions is not None: |
| 94 | + cuda_versions = ", ".join(f'"{v}"' for v in allowed_cuda_versions) |
| 95 | + input_fields.append(f"allowedCudaVersions: [{cuda_versions}]") |
| 96 | + |
| 97 | + # CPU Pod Fields |
52 | 98 | else:
|
53 |
| - input_fields.append("supportPublicIp: false") |
| 99 | + if instance_id is not None: |
| 100 | + input_fields.append(f'instanceId: "{instance_id}"') |
| 101 | + template_id = template_id or "runpod-ubuntu" |
54 | 102 |
|
55 |
| - # ------------------------------ Optional Fields ----------------------------- # |
| 103 | + # Optional Fields |
56 | 104 | if data_center_id is not None:
|
57 | 105 | input_fields.append(f'dataCenterId: "{data_center_id}"')
|
| 106 | + else: |
| 107 | + input_fields.append("dataCenterId: null") |
| 108 | + |
58 | 109 | if country_code is not None:
|
59 | 110 | input_fields.append(f'countryCode: "{country_code}"')
|
60 |
| - if gpu_count is not None: |
61 |
| - input_fields.append(f"gpuCount: {gpu_count}") |
62 |
| - if volume_in_gb is not None: |
63 |
| - input_fields.append(f"volumeInGb: {volume_in_gb}") |
64 | 111 | if container_disk_in_gb is not None:
|
65 | 112 | input_fields.append(f"containerDiskInGb: {container_disk_in_gb}")
|
66 |
| - if min_vcpu_count is not None: |
67 |
| - input_fields.append(f"minVcpuCount: {min_vcpu_count}") |
68 |
| - if min_memory_in_gb is not None: |
69 |
| - input_fields.append(f"minMemoryInGb: {min_memory_in_gb}") |
70 |
| - if docker_args is not None: |
71 |
| - input_fields.append(f'dockerArgs: "{docker_args}"') |
72 | 113 | if ports is not None:
|
73 |
| - ports = ports.replace(" ", "") |
74 |
| - input_fields.append(f'ports: "{ports}"') |
| 114 | + input_fields.append(f'ports: "{ports.replace(" ", "")}"') |
75 | 115 | if volume_mount_path is not None:
|
76 | 116 | input_fields.append(f'volumeMountPath: "{volume_mount_path}"')
|
77 | 117 | if env is not None:
|
78 |
| - env_string = ", ".join( |
79 |
| - [f'{{ key: "{key}", value: "{value}" }}' for key, value in env.items()] |
80 |
| - ) |
81 |
| - input_fields.append(f"env: [{env_string}]") |
| 118 | + env_items = [f'{{ key: "{k}", value: "{v}" }}' for k, v in env.items()] |
| 119 | + input_fields.append(f"env: [{', '.join(env_items)}]") |
82 | 120 | if template_id is not None:
|
83 | 121 | input_fields.append(f'templateId: "{template_id}"')
|
84 |
| - |
85 | 122 | if network_volume_id is not None:
|
86 | 123 | input_fields.append(f'networkVolumeId: "{network_volume_id}"')
|
87 |
| - |
88 |
| - if allowed_cuda_versions is not None: |
89 |
| - allowed_cuda_versions_string = ", ".join( |
90 |
| - [f'"{version}"' for version in allowed_cuda_versions] |
91 |
| - ) |
92 |
| - input_fields.append(f"allowedCudaVersions: [{allowed_cuda_versions_string}]") |
93 |
| - |
94 | 124 | if min_download is not None:
|
95 | 125 | input_fields.append(f'minDownload: {min_download}')
|
96 |
| - |
97 | 126 | if min_upload is not None:
|
98 | 127 | input_fields.append(f'minUpload: {min_upload}')
|
99 | 128 |
|
100 |
| - # Format input fields |
| 129 | + mutation_type = "podFindAndDeployOnDemand" if gpu_type_id is not None else "deployCpuPod" |
101 | 130 | input_string = ", ".join(input_fields)
|
| 131 | + |
102 | 132 | return f"""
|
103 | 133 | mutation {{
|
104 |
| - podFindAndDeployOnDemand( |
| 134 | + {mutation_type}( |
105 | 135 | input: {{
|
106 | 136 | {input_string}
|
107 | 137 | }}
|
108 | 138 | ) {{
|
109 | 139 | id
|
110 |
| - desiredStatus |
111 | 140 | imageName
|
112 | 141 | env
|
113 | 142 | machineId
|
|
0 commit comments