@@ -2262,6 +2262,105 @@ typedef enum CUmemPool_attribute_enum {
2262
2262
CU_MEMPOOL_ATTR_RELEASE_THRESHOLD
2263
2263
} CUmemPool_attribute ;
2264
2264
2265
/**
 * Execution Affinity Types
 */
typedef enum CUexecAffinityType_enum {
    CU_EXEC_AFFINITY_TYPE_SM_COUNT = 0, /**< Create a context with limited SMs. */
    CU_EXEC_AFFINITY_TYPE_MAX           /**< Number of affinity types; not a valid type itself. */
} CUexecAffinityType;
2272
+
2273
/**
 * Value for ::CU_EXEC_AFFINITY_TYPE_SM_COUNT
 */
typedef struct CUexecAffinitySmCount_st {
    unsigned int val; /**< The number of SMs the context is limited to use. */
} CUexecAffinitySmCount_v1;
typedef CUexecAffinitySmCount_v1 CUexecAffinitySmCount;
2280
+
2281
+ /**
2282
+ * Execution Affinity Parameters
2283
+ */
2284
+ typedef struct CUexecAffinityParam_st {
2285
+ CUexecAffinityType type ;
2286
+ union {
2287
+ CUexecAffinitySmCount
2288
+ smCount ; /** Value for ::CU_EXEC_AFFINITY_TYPE_SM_COUNT */
2289
+ } param ;
2290
+ } CUexecAffinityParam_v1 ;
2291
+ typedef CUexecAffinityParam_v1 CUexecAffinityParam ;
2292
+
2293
/**
 * Attributes queryable for memory associated with CUDA graphs.
 */
typedef enum CUgraphMem_attribute_enum {
    /**
     * (value type = cuuint64_t)
     * Amount of memory, in bytes, currently associated with graphs
     */
    CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT,

    /**
     * (value type = cuuint64_t)
     * High watermark of memory, in bytes, associated with graphs since the
     * last time it was reset. High watermark can only be reset to zero.
     */
    CU_GRAPH_MEM_ATTR_USED_MEM_HIGH,

    /**
     * (value type = cuuint64_t)
     * Amount of memory, in bytes, currently allocated for use by
     * the CUDA graphs asynchronous allocator.
     */
    CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT,

    /**
     * (value type = cuuint64_t)
     * High watermark of memory, in bytes, currently allocated for use by
     * the CUDA graphs asynchronous allocator.
     */
    CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH
} CUgraphMem_attribute;
2321
+
2322
+ /**
2323
+ * Memory allocation node parameters
2324
+ */
2325
+ typedef struct CUDA_MEM_ALLOC_NODE_PARAMS_st {
2326
+ /**
2327
+ * in: location where the allocation should reside (specified in ::location).
2328
+ * ::handleTypes must be ::CU_MEM_HANDLE_TYPE_NONE. IPC is not supported.
2329
+ */
2330
+ CUmemPoolProps poolProps ;
2331
+ const CUmemAccessDesc
2332
+ * accessDescs ; /**< in: array of memory access descriptors. Used to
2333
+ describe peer GPU access */
2334
+ size_t accessDescCount ; /**< in: number of memory access descriptors. Must
2335
+ not exceed the number of GPUs. */
2336
+ size_t bytesize ; /**< in: size in bytes of the requested allocation */
2337
+ CUdeviceptr dptr ; /**< out: address of the allocation returned by CUDA */
2338
+ } CUDA_MEM_ALLOC_NODE_PARAMS ;
2339
+
2340
/** Opaque handle: CUDA user object for graphs. The struct is never defined
    in client code; the handle is only created/destroyed through the API. */
typedef struct CUuserObject_st *CUuserObject;
2342
+
2343
/**
 * The targets for ::cuFlushGPUDirectRDMAWrites
 */
typedef enum CUflushGPUDirectRDMAWritesTarget_enum {
    CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TARGET_CURRENT_CTX = 0 /**< Sets the target for
        ::cuFlushGPUDirectRDMAWrites() to the currently active CUDA device context. */
} CUflushGPUDirectRDMAWritesTarget;
2351
+
2352
/**
 * The scopes for ::cuFlushGPUDirectRDMAWrites
 */
typedef enum CUflushGPUDirectRDMAWritesScope_enum {
    CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_OWNER = 100,       /**< Blocks until remote writes
        are visible to the CUDA device context owning the data. */
    CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_ALL_DEVICES = 200  /**< Blocks until remote writes
        are visible to all CUDA device contexts. */
} CUflushGPUDirectRDMAWritesScope;
2363
+
2265
2364
#ifdef __cplusplus
2266
2365
}
2267
2366
#endif
0 commit comments