OpenCL limit on loop size?

UPDATE: clEnqueueReadBuffer(command_queue, c_mem_obj, CL_TRUE, 0, LIST_SIZE * sizeof(double), C, 0, NULL, NULL); is returning -5, CL_OUT_OF_RESOURCES. This function/call should never return that!

I've started using OpenCL and have run into a problem. If I allow the for loop (in the kernel) to run 10000 times, all of C comes back as 0s; if I allow the loop to run 8000 times, the results are all correct.

I have added waits around the kernel to make sure it completes, thinking I was pulling the data out before it had finished, and have tried both clWaitForEvents and clFinish. No error is signalled by any of the calls. When I use ints the for loop works up to a size of 4000000. Floats and doubles have the same problem, although floats work at 10000 but not at 20000; when I used floats I removed #pragma OPENCL EXTENSION cl_khr_fp64 : enable to check that wasn't the problem.
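For reference, one way to catch a kernel that dies on the device (rather than failing at enqueue time) is to inspect the execution status of the event returned by clEnqueueNDRangeKernel after waiting on it; the enqueue call itself can still return CL_SUCCESS. A minimal sketch, reusing the event variable from the host code further down:

    // Wait for the kernel, then query the event's execution status directly.
    // A kernel killed on the device shows up here as a negative status even
    // though clEnqueueNDRangeKernel itself returned CL_SUCCESS.
    cl_int exec_status;
    ret = clWaitForEvents(1, &event);
    ret = clGetEventInfo(event, CL_EVENT_COMMAND_EXECUTION_STATUS,
                         sizeof(cl_int), &exec_status, NULL);
    if (ret != CL_SUCCESS || exec_status < 0) {
        printf("Kernel did not complete cleanly (status %d)\n", exec_status);
    }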

Is this some weird memory thing, or am I using OpenCL wrong? I realise that in most kernels I wouldn't implement a loop like this, but it still seems like a problem. I also removed __private to see if that was the issue; no change. So is there a limit on the size of for loops in an OpenCL kernel? Is it hardware specific? Or is this a bug?

The kernel is a simple one that adds two arrays (A + B) together and outputs a third (C). To get a feel for performance I put a for loop around each computation to slow it down / increase the number of operations per run.

The code for the kernel is as follows:

    #pragma OPENCL EXTENSION cl_khr_fp64 : enable

    __kernel void vector_add(__global double *A, __global double *B, __global double *C) {
        // Get the index of the current element
        int i = get_global_id(0);

        // Do the operation
        for (__private unsigned int j = 0; j < 10000; j++) {
            C[i] = A[i] + B[i];
        }
    }

The code I am running is below (when I switch between float and double I make sure the variables are consistent between the two pieces of code):

    #include <stdio.h>
    #include <stdlib.h>
    #include <iostream>

    #ifdef __APPLE__
    #include <OpenCL/opencl.h>
    #else
    #include <CL/cl.h>
    #endif

    #define MAX_SOURCE_SIZE (0x100000)

    int main(void) {
        // Create the two input vectors
        int i;
        const int LIST_SIZE = 4000000;
        double *A = (double*)malloc(sizeof(double)*LIST_SIZE);
        double *B = (double*)malloc(sizeof(double)*LIST_SIZE);
        for(i = 0; i < LIST_SIZE; i++) {
            A[i] = static_cast<double>(i);
            B[i] = static_cast<double>(LIST_SIZE - i);
        }

        // Load the kernel source code into the array source_str
        FILE *fp;
        char *source_str;
        size_t source_size;

        fp = fopen("vector_add_kernel.cl", "r");
        if (!fp) {
            fprintf(stderr, "Failed to load kernel.\n");
            exit(1);
        }
        source_str = (char*)malloc(MAX_SOURCE_SIZE);
        source_size = fread(source_str, 1, MAX_SOURCE_SIZE, fp);
        fclose(fp);

        // Get platform and device information
        cl_platform_id platform_id = NULL;
        cl_device_id device_id = NULL;
        cl_uint ret_num_devices;
        cl_uint ret_num_platforms;

        // clGetPlatformIDs(1, &platform_id, NULL);
        //clGetDeviceIDs(platform_id, CL_DEVICE_TYPE_GPU, 1, &device_id, ret_num_devices);

        cl_int ret = clGetPlatformIDs(1, &platform_id, NULL);
        if (ret != CL_SUCCESS) {
            printf("Error: Failed to get platforms! (%d) \n", ret);
            return EXIT_FAILURE;
        }

        ret = clGetDeviceIDs(platform_id, CL_DEVICE_TYPE_GPU, 1, &device_id, &ret_num_devices);
        if (ret != CL_SUCCESS) {
            printf("Error: Failed to query platforms to get devices! (%d) \n", ret);
            return EXIT_FAILURE;
        }

        /*
        cl_int ret = clGetPlatformIDs(1, &platform_id, NULL);
        if (ret != CL_SUCCESS) {
            printf("Error: Failed to get platforms! (%d) \n", ret);
            return EXIT_FAILURE;
        }

        ret = clGetDeviceIDs( platform_id, CL_DEVICE_TYPE_CPU, 1, &device_id, &ret_num_devices);
        if (ret != CL_SUCCESS) {
            printf("Error: Failed to query platforms to get devices! (%d) \n", ret);
            return EXIT_FAILURE;
        }
        */

        // Create an OpenCL context
        cl_context context = clCreateContext(NULL, 1, &device_id, NULL, NULL, &ret);

        // Create a command queue
        cl_command_queue command_queue = clCreateCommandQueue(context, device_id, 0, &ret);

        // Create memory buffers on the device for each vector
        cl_mem a_mem_obj = clCreateBuffer(context, CL_MEM_READ_ONLY, LIST_SIZE * sizeof(double), NULL, &ret);
        cl_mem b_mem_obj = clCreateBuffer(context, CL_MEM_READ_ONLY, LIST_SIZE * sizeof(double), NULL, &ret);
        cl_mem c_mem_obj = clCreateBuffer(context, CL_MEM_WRITE_ONLY, LIST_SIZE * sizeof(double), NULL, &ret);
        if (ret != CL_SUCCESS) {
            printf("Error: Buffer Fail! (%d) \n", ret);
            return EXIT_FAILURE;
        }

        // Copy the lists A and B to their respective memory buffers
        ret = clEnqueueWriteBuffer(command_queue, a_mem_obj, CL_TRUE, 0, LIST_SIZE * sizeof(double), A, 0, NULL, NULL);
        ret = clEnqueueWriteBuffer(command_queue, b_mem_obj, CL_TRUE, 0, LIST_SIZE * sizeof(double), B, 0, NULL, NULL);

        std::cout << "Begin Compile" << "\n";
        // Create a program from the kernel source
        cl_program program = clCreateProgramWithSource(context, 1, (const char **)&source_str, (const size_t *)&source_size, &ret);
        if (ret != CL_SUCCESS) {
            printf("Error: Program Fail! (%d) \n", ret);
            return EXIT_FAILURE;
        }

        // Build the program
        ret = clBuildProgram(program, 1, &device_id, NULL, NULL, NULL);
        if (ret != CL_SUCCESS) {
            printf("Error: ProgramBuild Fail! (%d) \n", ret);
            return EXIT_FAILURE;
        }

        // Create the OpenCL kernel
        cl_kernel kernel = clCreateKernel(program, "vector_add", &ret);
        if (ret != CL_SUCCESS) {
            printf("Error: Kernel Build Fail! (%d) \n", ret);
            return EXIT_FAILURE;
        }
        std::cout << "End Compile" << "\n";

        std::cout << "Begin Data Move" << "\n";
        // Set the arguments of the kernel
        ret = clSetKernelArg(kernel, 0, sizeof(cl_mem), (void *)&a_mem_obj);
        ret = clSetKernelArg(kernel, 1, sizeof(cl_mem), (void *)&b_mem_obj);
        ret = clSetKernelArg(kernel, 2, sizeof(cl_mem), (void *)&c_mem_obj);
        std::cout << "End Data Move" << "\n";

        // Execute the OpenCL kernel on the list
        size_t global_item_size = LIST_SIZE; // Process the entire lists
        size_t local_item_size = 64;         // Process in groups of 64

        std::cout << "Begin Execute" << "\n";
        cl_event event;
        ret = clEnqueueNDRangeKernel(command_queue, kernel, 1, NULL, &global_item_size, &local_item_size, 0, NULL, &event);
        clFinish(command_queue);
        //clWaitForEvents(1, &event);
        std::cout << "End Execute" << "\n";
        if (ret != CL_SUCCESS) {
            printf("Error: Execute Fail! (%d) \n", ret);
            return EXIT_FAILURE;
        }

        // Read the memory buffer C on the device to the local variable C
        std::cout << "Begin Data Move" << "\n";
        double *C = (double*)malloc(sizeof(double)*LIST_SIZE);
        ret = clEnqueueReadBuffer(command_queue, c_mem_obj, CL_TRUE, 0, LIST_SIZE * sizeof(double), C, 0, NULL, NULL);
        if (ret != CL_SUCCESS) {
            printf("Error: Read Fail! (%d) \n", ret);
            return EXIT_FAILURE;
        }
        clFinish(command_queue);
        std::cout << "End Data Move" << "\n";

        std::cout << "Done" << "\n";
        std::cin.get();

        // Display the result to the screen
        for(i = 0; i < LIST_SIZE; i++)
            printf("%f + %f = %f \n", A[i], B[i], C[i]);

        // Clean up
        ret = clFlush(command_queue);
        ret = clFinish(command_queue);
        ret = clReleaseKernel(kernel);
        ret = clReleaseProgram(program);
        ret = clReleaseMemObject(a_mem_obj);
        ret = clReleaseMemObject(b_mem_obj);
        ret = clReleaseMemObject(c_mem_obj);
        ret = clReleaseCommandQueue(command_queue);
        ret = clReleaseContext(context);
        free(A);
        free(B);
        free(C);
        std::cout << "Number of Devices: " << ret_num_devices << "\n";
        std::cin.get();
        return 0;
    }

I have had a look around the internet and cannot find anyone with a similar problem, which is a concern since it could mean code that works fine until it is scaled up…

I am running Ubuntu 14.04 on a laptop with an RC520 graphics card, which I run with bumblebee/optirun. If this bug is not reproducible on other machines up to a maximum loop size of 4000000, then I will log a bug against bumblebee/optirun.

Cheers

I found the problem: GPUs that are attached to a display / active VGA / etc. have a watchdog timer that times out after roughly 5 seconds. This is the case for non-Tesla cards, although the feature can be turned off. Running on a secondary card is a workaround. This is awful and needs to be fixed ASAP. It is definitely an NVidia issue; I am not sure about AMD either way, but it is still bad.
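If you want to confirm whether the watchdog applies to a given NVidia device, the cl_nv_device_attribute_query extension exposes CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV, which reports whether kernels on that device are subject to the run-time limit. A rough sketch (the constant may need defining by hand if your cl_ext.h does not already provide it):

    // Query whether the NVidia kernel-execution timeout (watchdog) is enabled.
    // Requires the cl_nv_device_attribute_query extension; the enum value is
    // taken from that extension's specification.
    #ifndef CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV
    #define CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV 0x4005
    #endif

    cl_bool has_timeout = CL_FALSE;
    cl_int err = clGetDeviceInfo(device_id, CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV,
                                 sizeof(has_timeout), &has_timeout, NULL);
    if (err == CL_SUCCESS)
        printf("Kernel exec timeout (watchdog) enabled: %s\n",
               has_timeout ? "yes" : "no");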

The workaround is a registry change on Windows; on Linux/Ubuntu, edit the X conf and add:

    Option "Interactive" "0"

in the section for the graphics card. The xorg.conf is no longer generated by default in more recent versions, so it may have to be created manually. If anyone has copy-and-paste console code to fix this, that would be great and would make a better answer.
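As a rough sketch of what that looks like (the identifier is a placeholder and paths/sections will differ per setup), the option goes inside the Device section for the NVidia card in /etc/X11/xorg.conf:

    Section "Device"
        # Placeholder identifier; match your existing Device section if you have one
        Identifier "Device0"
        Driver     "nvidia"
        # The workaround described above: turn off the interactive watchdog
        Option     "Interactive" "0"
    EndSection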