
CUDAWrapNVML.h

#if defined(__cplusplus)
extern "C" {
#endif

/*
 * Ugly hacks to avoid dependencies on the real nvml.h until it starts
 * getting included with the CUDA toolkit or a GDK that's got a known
 * install location, etc.
 */
typedef enum wrap_nvmlReturn_enum {
  WRAPNVML_SUCCESS = 0
} wrap_nvmlReturn_t;

typedef void * wrap_nvmlDevice_t;

/* our own version of the PCI info struct */
typedef struct {
  char bus_id_str[16];             /* string form of bus info */
  unsigned int domain;
  unsigned int bus;
  unsigned int device;
  unsigned int pci_device_id;      /* combined device and vendor id */
  unsigned int pci_subsystem_id;
  unsigned int res0;               /* NVML internal use only */
  unsigned int res1;
  unsigned int res2;
  unsigned int res3;
} wrap_nvmlPciInfo_t;
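/*
 * Illustrative note (not in the original header): NVML fills bus_id_str
 * with the string form of the numeric fields above, in the usual
 * "domain:bus:device.function" PCI notation, e.g. "0000:02:00.0".
 * A hypothetical sketch of the same formatting, assuming function 0:
 *
 *   snprintf(pci.bus_id_str, sizeof(pci.bus_id_str),
 *            "%04x:%02x:%02x.0", pci.domain, pci.bus, pci.device);
 */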

/*
 * Handle to hold the function pointers for the entry points we need,
 * and the shared library itself.
 */
typedef struct {
  void *nvml_dll;
  int nvml_gpucount;
  int cuda_gpucount;
  unsigned int *nvml_pci_domain_id;
  unsigned int *nvml_pci_bus_id;
  unsigned int *nvml_pci_device_id;
  int *nvml_cuda_device_id;          /* map NVML dev to CUDA dev */
  int *cuda_nvml_device_id;          /* map CUDA dev to NVML dev */
  wrap_nvmlDevice_t *devs;
  wrap_nvmlReturn_t (*nvmlInit)(void);
  wrap_nvmlReturn_t (*nvmlDeviceGetCount)(int *);
  wrap_nvmlReturn_t (*nvmlDeviceGetHandleByIndex)(int, wrap_nvmlDevice_t *);
  wrap_nvmlReturn_t (*nvmlDeviceGetPciInfo)(wrap_nvmlDevice_t, wrap_nvmlPciInfo_t *);
  wrap_nvmlReturn_t (*nvmlDeviceGetName)(wrap_nvmlDevice_t, char *, int);
  wrap_nvmlReturn_t (*nvmlDeviceGetTemperature)(wrap_nvmlDevice_t, int, unsigned int *);
  wrap_nvmlReturn_t (*nvmlDeviceGetFanSpeed)(wrap_nvmlDevice_t, unsigned int *);
  wrap_nvmlReturn_t (*nvmlDeviceGetPowerUsage)(wrap_nvmlDevice_t, unsigned int *);
  wrap_nvmlReturn_t (*nvmlDeviceGetCpuAffinity)(wrap_nvmlDevice_t, unsigned int cpuSetSize, unsigned long *cpuSet);
  wrap_nvmlReturn_t (*nvmlDeviceSetCpuAffinity)(wrap_nvmlDevice_t);
  wrap_nvmlReturn_t (*nvmlShutdown)(void);
} wrap_nvml_handle;
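/*
 * Illustrative sketch (not the actual wrap_nvml_create() implementation,
 * which lives in the matching .c file): the point of the handle above is
 * that the NVML entry points can be resolved at runtime with
 * dlopen()/dlsym() on Linux (LoadLibrary()/GetProcAddress() on Windows),
 * so no NVML header or import library is needed at build time.
 * "libnvidia-ml.so" is the usual Linux library name; the sketch is
 * disabled with #if 0 so it serves as documentation only.
 */
#if 0
#include <dlfcn.h>
#include <stdlib.h>

static wrap_nvml_handle * sketch_nvml_create(void) {
  wrap_nvml_handle *nvmlh = (wrap_nvml_handle *) calloc(1, sizeof(wrap_nvml_handle));
  if (nvmlh == NULL)
    return NULL;

  nvmlh->nvml_dll = dlopen("libnvidia-ml.so", RTLD_NOW);
  if (nvmlh->nvml_dll == NULL) {
    free(nvmlh);                     /* no NVML on this machine */
    return NULL;
  }

  /* resolve the real NVML entry points into the wrapper's pointers */
  nvmlh->nvmlInit = (wrap_nvmlReturn_t (*)(void))
    dlsym(nvmlh->nvml_dll, "nvmlInit");
  nvmlh->nvmlShutdown = (wrap_nvmlReturn_t (*)(void))
    dlsym(nvmlh->nvml_dll, "nvmlShutdown");
  /* ... and likewise for the remaining entry points ... */

  if (nvmlh->nvmlInit == NULL || nvmlh->nvmlInit() != WRAPNVML_SUCCESS) {
    dlclose(nvmlh->nvml_dll);
    free(nvmlh);
    return NULL;
  }
  return nvmlh;
}
#endif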

wrap_nvml_handle * wrap_nvml_create(void);
int wrap_nvml_destroy(wrap_nvml_handle *nvmlh);
/*
 * Query the number of GPUs seen by NVML
 */
int wrap_nvml_get_gpucount(wrap_nvml_handle *nvmlh, int *gpucount);

/*
 * Query the number of GPUs seen by CUDA
 */
int wrap_cuda_get_gpucount(wrap_nvml_handle *nvmlh, int *gpucount);

/*
 * Query the name of the GPU model from the CUDA device ID
 */
int wrap_nvml_get_gpu_name(wrap_nvml_handle *nvmlh,
                           int gpuindex,
                           char *namebuf,
                           int bufsize);

/*
 * Query the current GPU temperature (Celsius) from the CUDA device ID
 */
int wrap_nvml_get_tempC(wrap_nvml_handle *nvmlh,
                        int gpuindex, unsigned int *tempC);

/*
 * Query the current GPU fan speed (percent) from the CUDA device ID
 */
int wrap_nvml_get_fanpcnt(wrap_nvml_handle *nvmlh,
                          int gpuindex, unsigned int *fanpcnt);

/*
 * Query the current GPU power usage in milliwatts from the CUDA device ID
 *
 * This feature is only available on recent GPU generations and may in
 * some cases be limited to Tesla-series GPUs.
 * If the query is run on an unsupported GPU, this routine will return -1.
 */
int wrap_nvml_get_power_usage(wrap_nvml_handle *nvmlh,
                              int gpuindex,
                              unsigned int *milliwatts);
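/*
 * Illustrative caller-side sketch (hypothetical code, not part of this
 * header), assuming a zero return on success since the comments above
 * only pin down the -1 failure case: NVML reports power in milliwatts,
 * so divide by 1000.0 to get watts.
 *
 *   unsigned int mw;
 *   if (wrap_nvml_get_power_usage(nvmlh, devid, &mw) == 0)
 *     printf("GPU %d draws %.1f W\n", devid, mw / 1000.0);
 */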

/*
 * Query the current GPU's CPU affinity mask
 *
 * If the query is run on an unsupported GPU, this routine will return -1.
 */
int wrap_nvml_get_cpu_affinity(wrap_nvml_handle *nvmlh,
                               int gpuindex, unsigned int cpuSetSize,
                               unsigned long *cpuSet);

/*
 * Set the CPU affinity mask for best access to the indexed GPU
 *
 * The routine returns -1 on failure.
 */
int wrap_nvml_set_cpu_affinity(wrap_nvml_handle *nvmlh, int gpuindex);

#if defined(__cplusplus)
}
#endif
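/*
 * End-to-end usage sketch (hypothetical caller code, not part of the
 * original header): create the wrapper, walk the CUDA-ordered GPU list,
 * and print name/temperature/fan speed for each device.  Error-return
 * conventions (0 on success, -1 on failure) are assumed from the
 * comments above; disabled with #if 0 so it serves as documentation only.
 */
#if 0
#include <stdio.h>

static void sketch_report_gpus(void) {
  wrap_nvml_handle *nvmlh = wrap_nvml_create();
  if (nvmlh == NULL) {
    printf("NVML unavailable, skipping GPU telemetry\n");
    return;
  }

  int gpucount = 0;
  wrap_cuda_get_gpucount(nvmlh, &gpucount);   /* CUDA-ordered device count */

  for (int i = 0; i < gpucount; i++) {
    char name[64];
    unsigned int tempC = 0, fanpcnt = 0;

    /* optionally pin this thread near GPU i before heavy traffic */
    wrap_nvml_set_cpu_affinity(nvmlh, i);

    if (wrap_nvml_get_gpu_name(nvmlh, i, name, (int) sizeof(name)) == 0 &&
        wrap_nvml_get_tempC(nvmlh, i, &tempC) == 0 &&
        wrap_nvml_get_fanpcnt(nvmlh, i, &fanpcnt) == 0) {
      printf("GPU %d: %s, %u C, fan %u%%\n", i, name, tempC, fanpcnt);
    }
  }

  wrap_nvml_destroy(nvmlh);
}
#endif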
