import pycuda
import pycuda.driver as drv

# Initialize the CUDA driver API before making any device queries.
drv.init()

print('CUDA device query (PyCUDA version) \n')

print('Detected {} CUDA Capable device(s) \n'.format(drv.Device.count()))

for i in range(drv.Device.count()):

    gpu_device = drv.Device(i)
    print('Device {}: {}'.format(i, gpu_device.name()))

    # Compute capability as a float (e.g. 6.1), so it can be used as a
    # key into the cores-per-multiprocessor lookup table below.
    compute_capability = float('%d.%d' % gpu_device.compute_capability())
    print('\t Compute Capability: {}'.format(compute_capability))
    print('\t Total Memory: {} megabytes'.format(gpu_device.total_memory() // (1024**2)))

    # The following will give us all remaining device attributes, as seen
    # in the original deviceQuery. We store them in a dictionary so that
    # we can easily index the values using a string descriptor.

    device_attributes_tuples = gpu_device.get_attributes().items()
    device_attributes = {}

    for k, v in device_attributes_tuples:
        device_attributes[str(k)] = v

    num_mp = device_attributes['MULTIPROCESSOR_COUNT']

    # Cores per multiprocessor is not reported by the GPU!
    # We must use a lookup table based on compute capability.
    # See the following:
    # http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#compute-capabilities
    cuda_cores_per_mp = {3.0: 192, 3.2: 192, 3.5: 192, 3.7: 192,
                         5.0: 128, 5.1: 128, 5.2: 128, 5.3: 128,
                         6.0: 64, 6.1: 128, 6.2: 128,
                         7.0: 64, 7.1: 64, 7.2: 64, 7.3: 64, 7.4: 64,
                         7.5: 64}[compute_capability]
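    # Note: the table above only lists compute capabilities up to 7.5 (Turing).
    # A device with a newer compute capability is not covered, and the direct
    # indexing would raise a KeyError, so newer architectures would need an
    # entry added from the same table in the CUDA C Programming Guide.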

    print('\t ({}) Multiprocessors, ({}) CUDA Cores / Multiprocessor: {} CUDA Cores'.format(num_mp, cuda_cores_per_mp, num_mp * cuda_cores_per_mp))

    # MULTIPROCESSOR_COUNT has already been reported above, so remove it
    # before listing the remaining attributes.
    device_attributes.pop('MULTIPROCESSOR_COUNT')

    for k in device_attributes.keys():
        print('\t {}: {}'.format(k, device_attributes[k]))
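
# Optional extension (not part of the original listing): PyCUDA can also
# report the CUDA version it was compiled against and the installed driver
# version, much like the header of NVIDIA's deviceQuery. A minimal sketch,
# assuming pycuda.driver.get_version() and pycuda.driver.get_driver_version()
# are available in the installed PyCUDA build:
#
#   print('PyCUDA built against CUDA: {}'.format(drv.get_version()))
#   print('CUDA driver version: {}'.format(drv.get_driver_version()))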