OpenCL.clSetKernelArg returns -51 - c++

I tried to implement a parallel BFS in OpenCL, but I don't have much experience with C++, so this is probably a memory error; I really don't know how to fix it. I also can't find what error value -51 means.
As a result I get "Unhandled exception at 0x00007FFCFB06A549 (amdocl64.dll) in my project.exe: 0xC0000005: Access violation reading location 0xFFFFFFFFFFFFFFFF" on the line marked below.
main
Graph G(AdjacencyList, Directed);
int startVertex;
vector<int> distance;
vector<bool> visited;
distance = vector<int>(G.numVertices);
visited = vector<bool>(G.numVertices);
bool done = false;
const bool true_value = true;
int level = 0;
// Allocation on device
const int size = G.numVertices * sizeof(int);
const int adjacencySize = G.adjacencyList.size() * sizeof(int);
//OpenCL
cl_int status;
cl_int ret;
cl_platform_id platform_id;
clGetPlatformIDs(1, &platform_id, NULL);
cl_device_id device_id;
ret = clGetDeviceIDs(platform_id, CL_DEVICE_TYPE_GPU, 1, &device_id, NULL);
cl_context context = clCreateContext(NULL, 1, &device_id, NULL, NULL, &status);
cl_command_queue command_queue = clCreateCommandQueueWithProperties(context, device_id, NULL, &status);
cl_mem d_adjacencyList = clCreateBuffer(context, CL_MEM_READ_WRITE, adjacencySize, NULL, &status);
cl_mem d_edgesOffset = clCreateBuffer(context, CL_MEM_READ_WRITE, size, NULL, &status);
cl_mem d_edgesSize = clCreateBuffer(context, CL_MEM_READ_WRITE, size, NULL, &status);
cl_mem d_distance = clCreateBuffer(context, CL_MEM_READ_WRITE, size, NULL, &status);
cl_mem d_done = clCreateBuffer(context, CL_MEM_READ_WRITE, sizeof(bool), NULL, &status);
status = clEnqueueWriteBuffer(command_queue, d_adjacencyList, CL_TRUE, 0, adjacencySize, &G.adjacencyList[0], 0, NULL, NULL);
status = clEnqueueWriteBuffer(command_queue, d_edgesOffset, CL_TRUE, 0, size, &G.edgesOffset[0], 0, NULL, NULL);
status = clEnqueueWriteBuffer(command_queue, d_edgesSize, CL_TRUE, 0, size, &G.edgesSize[0], 0, NULL, NULL);
distance = vector<int>(G.numVertices, INT_MAX);
distance[startVertex] = 0;
status = clEnqueueWriteBuffer(command_queue, d_distance, CL_TRUE, 0, size, distance.data(), 0, NULL, NULL);
char* source_str = NULL;
size_t source_size;
FILE* fp;
fp = fopen("bfs.cl", "r");
if (!fp)
{
    cout << "Failed to load Kernel\n";
    exit(1);
}
source_str = (char*)malloc(MAX_SOURCE_SIZE);
source_size = fread(source_str, 1, MAX_SOURCE_SIZE, fp);
cl_program program = clCreateProgramWithSource(context, 1, (const char**)&source_str, (const size_t*)&source_size, &status);
status = clBuildProgram(program, 1, &device_id, NULL, NULL, NULL);
cl_kernel kernel = clCreateKernel(program, "bfs", &status);
status = clSetKernelArg(kernel, 0, sizeof(int), (void*)&G.numVertices);
status = clSetKernelArg(kernel, 1, sizeof(cl_mem), (void*)&d_adjacencyList);
status = clSetKernelArg(kernel, 2, sizeof(cl_mem), (void*)&d_edgesOffset);
status = clSetKernelArg(kernel, 3, sizeof(cl_mem), (void*)&d_edgesOffset);
status = clSetKernelArg(kernel, 4, sizeof(cl_mem), (void*)&d_edgesSize);
status = clSetKernelArg(kernel, 5, sizeof(cl_mem), (void*)&d_distance); // returns -51 here
status = clSetKernelArg(kernel, 6, sizeof(cl_mem), (void*)&level);
status = clSetKernelArg(kernel, 7, sizeof(cl_mem), (void*)&d_done);
kernel
__kernel void bfs(int n, __global int *adjacencyList, __global int *edgesOffset, __global int *edgesSize, __global int *distance, int level, __global bool *done) {
    int tid = get_global_id(0);
    if (tid < n) {
        if (distance[tid] == level) {
            for (int i = edgesOffset[tid]; i < edgesOffset[tid] + edgesSize[tid]; ++i) {
                int v = adjacencyList[i];
                if (distance[v] == INT_MAX) {
                    *done = false;
                    distance[v] = level + 1;
                }
            }
        }
    }
}

Hi @Parrison, welcome to StackOverflow!
All the OpenCL error codes are defined in cl.h. In the latest (version 3) cl.h you will find the error codes defined between lines 194 and 270, where on line 241 you will find:
#define CL_INVALID_ARG_SIZE -51
So the OpenCL ICD reckons that you have passed the wrong variable size for distance.
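Since every cl* call here returns a status code, it is worth failing fast on each one; a small helper along these lines (a sketch, not from the original post) makes the first failing call obvious:
#include <cstdio>
#include <cstdlib>

// Abort with the call text and error code on the first failure.
#define CL_CHECK(call)                                            \
    do {                                                          \
        cl_int _err = (call);                                     \
        if (_err != CL_SUCCESS) {                                 \
            std::fprintf(stderr, "%s failed: %d\n", #call, _err); \
            std::exit(1);                                         \
        }                                                         \
    } while (0)

// e.g. CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_mem), &d_distance));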
However, I can see many other errors before this one. For example, you need to set the size of the OpenCL buffers based on the sizes of the OpenCL types, not the native host types, e.g.:
cl_int instead of int
cl_float instead of float
and especially cl_bool instead of bool.
There is no guarantee that an OpenCL cl_int is the same size as a host int, and an OpenCL cl_bool is defined as an unsigned int, which is highly unlikely to be the same size as a bool!
Ensure that all the parameters to your OpenCL kernel are defined correctly and that you are creating the correct buffers and variables for them in the main program.
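A second thing to check in the posted call sequence: d_edgesOffset is passed twice (argument indices 2 and 3), which shifts every later argument by one, so index 5 lines up with the kernel's int level parameter; passing sizeof(cl_mem) (8 bytes) where a 4-byte int is expected is precisely what CL_INVALID_ARG_SIZE reports. A minimal sketch of a corrected setup (an illustration, assuming the kernel's done flag becomes __global int* on the device side, since bool has no guaranteed size across the host/device boundary):
// Buffer sizes in OpenCL types, not host types
const int size = G.numVertices * sizeof(cl_int);
cl_mem d_done = clCreateBuffer(context, CL_MEM_READ_WRITE,
                               sizeof(cl_int), NULL, &status); // not sizeof(bool)

cl_int n_vertices = (cl_int)G.numVertices;
cl_int cl_level   = (cl_int)level;
status = clSetKernelArg(kernel, 0, sizeof(cl_int), &n_vertices);
status = clSetKernelArg(kernel, 1, sizeof(cl_mem), &d_adjacencyList);
status = clSetKernelArg(kernel, 2, sizeof(cl_mem), &d_edgesOffset);
status = clSetKernelArg(kernel, 3, sizeof(cl_mem), &d_edgesSize);  // was d_edgesOffset twice
status = clSetKernelArg(kernel, 4, sizeof(cl_mem), &d_distance);
status = clSetKernelArg(kernel, 5, sizeof(cl_int), &cl_level);     // scalar value, not a cl_mem
status = clSetKernelArg(kernel, 6, sizeof(cl_mem), &d_done);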

Related

C++ OpenCL Build Error: kernelSource undeclared

I'm trying to run an OpenCL sample from the internet. It looks like this:
VecAdd.c
#define PROGRAM_FILE "vecAdd.cl"
#define KERNEL_FUNC "vecAdd"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#ifdef MAC
#include <OpenCL/cl.h>
#else
#include <CL/cl.h>
#endif
int main( int argc, char* argv[] )
{
// Length of vectors
unsigned int n = 100000;
// Host input vectors
double *h_a;
double *h_b;
// Host output vector
double *h_c;
// Device input buffers
cl_mem d_a;
cl_mem d_b;
// Device output buffer
cl_mem d_c;
cl_platform_id cpPlatform; // OpenCL platform
cl_device_id device_id; // device ID
cl_context context; // context
cl_command_queue queue; // command queue
cl_program program; // program
cl_kernel kernel; // kernel
// Size, in bytes, of each vector
size_t bytes = n*sizeof(double);
// Allocate memory for each vector on host
h_a = (double*)malloc(bytes);
h_b = (double*)malloc(bytes);
h_c = (double*)malloc(bytes);
// Initialize vectors on host
int i;
for( i = 0; i < n; i++ )
{
    h_a[i] = sinf(i)*sinf(i);
    h_b[i] = cosf(i)*cosf(i);
}
size_t globalSize, localSize;
cl_int err;
// Number of work items in each local work group
localSize = 64;
// Number of total work items - localSize must be a divisor
globalSize = ceil(n/(float)localSize)*localSize;
// Bind to platform
err = clGetPlatformIDs(1, &cpPlatform, NULL);
// Get ID for the device
err = clGetDeviceIDs(cpPlatform, CL_DEVICE_TYPE_GPU, 1, &device_id, NULL);
// Create a context
context = clCreateContext(0, 1, &device_id, NULL, NULL, &err);
// Create a command queue
queue = clCreateCommandQueue(context, device_id, 0, &err);
// Create the compute program from the source buffer
program = clCreateProgramWithSource(context, 1,
(const char **) & kernelSource, NULL, &err);
// Build the program executable
clBuildProgram(program, 0, NULL, NULL, NULL, NULL);
// Create the compute kernel in the program we wish to run
kernel = clCreateKernel(program, "vecAdd", &err);
// Create the input and output arrays in device memory for our calculation
d_a = clCreateBuffer(context, CL_MEM_READ_ONLY, bytes, NULL, NULL);
d_b = clCreateBuffer(context, CL_MEM_READ_ONLY, bytes, NULL, NULL);
d_c = clCreateBuffer(context, CL_MEM_WRITE_ONLY, bytes, NULL, NULL);
// Write our data set into the input array in device memory
err = clEnqueueWriteBuffer(queue, d_a, CL_TRUE, 0,
bytes, h_a, 0, NULL, NULL);
err |= clEnqueueWriteBuffer(queue, d_b, CL_TRUE, 0,
bytes, h_b, 0, NULL, NULL);
// Set the arguments to our compute kernel
err = clSetKernelArg(kernel, 0, sizeof(cl_mem), &d_a);
err |= clSetKernelArg(kernel, 1, sizeof(cl_mem), &d_b);
err |= clSetKernelArg(kernel, 2, sizeof(cl_mem), &d_c);
err |= clSetKernelArg(kernel, 3, sizeof(unsigned int), &n);
// Execute the kernel over the entire range of the data set
err = clEnqueueNDRangeKernel(queue, kernel, 1, NULL, &globalSize, &localSize,
0, NULL, NULL);
// Wait for the command queue to get serviced before reading back results
clFinish(queue);
// Read the results from the device
clEnqueueReadBuffer(queue, d_c, CL_TRUE, 0,
bytes, h_c, 0, NULL, NULL );
//Sum up vector c and print result divided by n, this should equal 1 within error
double sum = 0;
for(i=0; i<n; i++)
    sum += h_c[i];
printf("final result: %f\n", sum/n);
// release OpenCL resources
clReleaseMemObject(d_a);
clReleaseMemObject(d_b);
clReleaseMemObject(d_c);
clReleaseProgram(program);
clReleaseKernel(kernel);
clReleaseCommandQueue(queue);
clReleaseContext(context);
//release host memory
free(h_a);
free(h_b);
free(h_c);
return 0;
}
VecAdd.cl
// OpenCL kernel. Each work item takes care of one element of c
__kernel void vecAdd( __global double *a,
__global double *b,
__global double *c,
const unsigned int n)
{
//Get our global thread ID
int id = get_global_id(0);
//Make sure we do not go out of bounds
if (id < n)
c[id] = a[id] + b[id];
}
When I try to run VecAdd.c with CodeBlocks I get an error on this line:
program = clCreateProgramWithSource(context, 1, (const char **) & kernelSource, NULL, &err);
The Error look like this:
vecAdd.c|79|error: 'kernelSource' undeclared (first use in this function)
I expected no error since the print_info.cpp sample worked fine and printed:
OpenCL Device Info:
Name: Intel(R) UHD Graphics 620
Vendor: Intel(R) Corporation
Version: OpenCL 3.0 NEO
Max size of work-items: (256,256,256)
Max size of work-groups: 256
Number of compute units: 24
Global memory size (bytes): 6762340352
Local memory size per compute unit (bytes): 2730
The sample code is incomplete. It's missing the part that reads the VecAdd.cl file into the string kernelSource. You may add:
#include <iostream> // write to console
#include <fstream>  // read/write files
// ...
int main( int argc, char* argv[] )
{
    // ...
    std::string kernelSource = "";
    {
        std::ifstream file("./VecAdd.cl", std::ios::in); // path might be different for you
        if(file.fail()) std::cout << "Error: File does not exist!\n";
        kernelSource = std::string((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
        file.close();
    }
    // Create the compute program from the source buffer.
    // Note: pass the address of a const char*, not of the std::string object
    // itself; c_str() is NUL-terminated, so the lengths argument may be NULL.
    const char* kernelSourcePtr = kernelSource.c_str();
    program = clCreateProgramWithSource(context, 1, &kernelSourcePtr, NULL, &err);
    // ...
}
For a much easier start with OpenCL, have a look at this OpenCL-Wrapper. This simplifies using the API a lot, without giving up any functionality or performance. By default it comes with a vector addition example. Notice how much shorter and less complicated the code is compared to the regular OpenCL bloat.

Can dedicated memory be released on the GPU using OpenCL?

Is there a way to free up dedicated memory when a function that runs on the GPU using OpenCL ends? I have noticed that if you repeatedly call a program that uses OpenCL on the GPU, a certain amount of dedicated memory is not released when it finishes, which eventually causes an error if the function is called too many times.
UPDATE 22/12/2019:
I enclose a fragment of the code that is within the iteration. The configuration of the cl_program and cl_context is done outside the iteration:
void launch_kernel(float * blockC, float * blockR, float * blockEst, float * blockEstv, float * blockL, cl_program program, cl_context context, std::vector<cl_device_id> deviceIds, int n){
    cl_kernel kernel_function = clCreateKernel(program, "function_example", &error);
    cl_event event;
    cl_command_queue queue = clCreateCommandQueue(context, (deviceIds)[0], 0, &error);
    std::size_t size[1] = { (size_t)n };
    cl_mem blockCBuffer = clCreateBuffer(context, CL_MEM_READ_WRITE, sizeof(float) * (n * s), (void *)&blockC[0], &error);
    cl_mem blockRBuffer = clCreateBuffer(context, CL_MEM_READ_WRITE, sizeof(float) * (n * s), (void *)&blockR[0], &error);
    cl_mem blockEstBuffer = clCreateBuffer(context, CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR, sizeof(float) * n, (void *)&blockEst[0], &error);
    cl_mem blockEstvBuffer = clCreateBuffer(context, CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR, sizeof(float) * n, (void *)&blockEstv[0], &error);
    cl_mem blockLBuffer = clCreateBuffer(context, CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR, sizeof(float) * n, (void *)&blockL[0], &error);
    clSetKernelArg(kernel_function, 0, sizeof(cl_mem), &blockCBuffer);
    clSetKernelArg(kernel_function, 1, sizeof(cl_mem), &blockRBuffer);
    clSetKernelArg(kernel_function, 2, sizeof(cl_mem), &blockEstBuffer);
    clSetKernelArg(kernel_function, 3, sizeof(cl_mem), &blockEstvBuffer);
    clSetKernelArg(kernel_function, 4, sizeof(cl_mem), &blockLBuffer);
    clSetKernelArg(kernel_function, 5, sizeof(int), &s);
    openclSingleton->checkError(clEnqueueNDRangeKernel(queue, kernel_function, 1, nullptr, size, nullptr, 0, nullptr, nullptr));
    clEnqueueMapBuffer(queue, blockEstBuffer, CL_TRUE, CL_MAP_READ, 0, n * sizeof(float), 0, nullptr, nullptr, &error);
    clEnqueueMapBuffer(queue, blockEstvBuffer, CL_TRUE, CL_MAP_READ, 0, n * sizeof(float), 0, nullptr, nullptr, &error);
    clEnqueueMapBuffer(queue, blockLBuffer, CL_TRUE, CL_MAP_READ, 0, n * sizeof(float), 0, nullptr, nullptr, &error);
    clReleaseMemObject(blockCBuffer);
    clReleaseMemObject(blockRBuffer);
    clReleaseMemObject(blockEstBuffer);
    clReleaseMemObject(blockEstvBuffer);
    clReleaseMemObject(blockLBuffer);
    clFlush(queue);
    clFinish(queue);
    clWaitForEvents(1, &event);
    clReleaseCommandQueue(queue);
    clReleaseKernel(kernel_function);
}
UPDATE 23/12/2019
Updated the code with the iterative process that calls the OpenCL function. The problem is that launch_kernel leaves some dedicated memory in use when it returns, so if the variable m is too large the memory fills up and the program crashes for lack of resources.
std::vector<cl_device_id> deviceIds;
cl_program program;
cl_context context;
... //configuration of program and context
int n;
float *blockEst, *blockEstv, *blockL, *blockC, *blockR;
for(int i = 0; i < m; i++){
    blockC = (float*)std::malloc(sizeof(float)*n*s);
    blockR = (float*)std::malloc(sizeof(float)*n*s);
    blockEst = (float*)std::malloc(sizeof(float)*n);
    blockEstv = (float*)std::malloc(sizeof(float)*n);
    blockL = (float*)std::malloc(sizeof(float)*n);
    .... //Set values of blockC and blockR
    launch_kernel(blockC, blockR, blockEst, blockEstv, blockL, program, context, deviceIds, n);
    ... //Save values of blockEst, blockEstv and blockL
    std::free(blockC);
    std::free(blockR);
    std::free(blockEst);
    std::free(blockEstv);
    std::free(blockL);
}
clReleaseProgram(program);
clReleaseContext(context);
clReleaseDevice(deviceIds[0]);
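A possible cause, going by the snippet (a reading of the posted code, not a confirmed answer): the three clEnqueueMapBuffer calls map regions of the buffers, but nothing ever unmaps them, and a mapped region keeps its allocation alive even after clReleaseMemObject; every map must be paired with clEnqueueUnmapMemObject. (clWaitForEvents(1, &event) also waits on an event that no command ever produced.) A sketch of the pairing for one buffer:
float *estPtr = (float*)clEnqueueMapBuffer(queue, blockEstBuffer, CL_TRUE, CL_MAP_READ,
                                           0, n * sizeof(float), 0, nullptr, nullptr, &error);
// ... copy the results out of estPtr ...
clEnqueueUnmapMemObject(queue, blockEstBuffer, estPtr, 0, nullptr, nullptr);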

OpenCL alignment issue

I want to fill an array of glm::vec3 with an OpenCL kernel.
All I want to do is fill the array with [1.0, 2.0, 3.0].
So upon success I should get the triplet repeated 256 times.
[1.0, 2.0, 3.0][1.0, 2.0, 3.0][1.0, 2.0, 3.0] ... [1.0, 2.0, 3.0]
However, the result looks like this:
[1.0, 2.0, 2.0][2.0, 2.0, 2.0] ... [2.0, 2.0, 2.0]
Why?
Here is the code for the kernel
__kernel void fill_array(__global float *output_values)
{
    int i = get_global_id(0);
    float3 pos = (float3)(1.0, 2.0, 3.0);
    vstore3(pos, 0, &(output_values[i]));
}
And here is the code to run it
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include "glm/glm.hpp"
#ifdef __APPLE__
#include <OpenCL/opencl.h>
#else
#include <CL/cl.h>
#endif
#define MAX_SOURCE_SIZE (0x100000)
int main(void)
{
std::vector<glm::vec3> values;
values.resize(256);
// Load the kernel source code into the array source_str
FILE *fp;
char *source_str;
size_t source_size;
fp = fopen("E:/Dev/fill_array_kernel.cl", "r");
if (!fp) {
    fprintf(stderr, "Failed to load kernel.\n");
    exit(1);
}
source_str = (char*)malloc(MAX_SOURCE_SIZE);
source_size = fread( source_str, 1, MAX_SOURCE_SIZE, fp);
fclose( fp );
// Get platform and device information
cl_platform_id platform_id = NULL;
cl_device_id device_id = NULL;
cl_uint ret_num_devices;
cl_uint ret_num_platforms;
cl_int ret = clGetPlatformIDs(1, &platform_id, &ret_num_platforms);
ret = clGetDeviceIDs( platform_id, CL_DEVICE_TYPE_ALL, 1,
&device_id, &ret_num_devices);
// Create an OpenCL context
cl_context context = clCreateContext( NULL, 1, &device_id, NULL, NULL, &ret);
// Create a command queue
cl_command_queue command_queue = clCreateCommandQueue(context, device_id, 0, &ret);
// Create memory buffers on the device for each vector
cl_mem output_mem = clCreateBuffer(context, CL_MEM_WRITE_ONLY, values.size() * sizeof(glm::vec3), NULL, &ret);
// Create a program from the kernel source
cl_program program = clCreateProgramWithSource(context, 1,
(const char **)&source_str, (const size_t *)&source_size, &ret);
// Build the program
ret = clBuildProgram(program, 1, &device_id, NULL, NULL, NULL);
if(ret != CL_SUCCESS)
{
    cl_build_status build_status;
    ret = clGetProgramBuildInfo(program, device_id, CL_PROGRAM_BUILD_STATUS, sizeof(cl_build_status), &build_status, NULL);
    size_t ret_val_size;
    ret = clGetProgramBuildInfo(program, device_id, CL_PROGRAM_BUILD_LOG, 0, NULL, &ret_val_size);
    char *build_log = (char*)malloc(sizeof(char)*(ret_val_size + 1));
    ret = clGetProgramBuildInfo(program, device_id, CL_PROGRAM_BUILD_LOG, ret_val_size, build_log, NULL);
    build_log[ret_val_size] = '\0';
    printf("%s\n", build_log);
    free(build_log);
    return -1;
}
// Create the OpenCL kernel
cl_kernel kernel = clCreateKernel(program, "fill_array", &ret);
// Set the arguments of the kernel
ret = clSetKernelArg(kernel, 0, sizeof(cl_mem), (void *)&output_mem);
// Execute the OpenCL kernel on the list
size_t global_item_size = values.size(); // Process the entire lists
size_t local_item_size = 64; // Process in groups of 64
ret = clEnqueueNDRangeKernel(command_queue, kernel, 1, NULL,
&global_item_size, &local_item_size, 0, NULL, NULL);
// Read the memory buffer C on the device to the local variable C
ret = clEnqueueReadBuffer(command_queue, output_mem, CL_TRUE, 0, values.size() * sizeof(glm::vec3), values.data(), 0, NULL, NULL);
// Clean up
ret = clFlush(command_queue);
ret = clFinish(command_queue);
ret = clReleaseKernel(kernel);
ret = clReleaseProgram(program);
ret = clReleaseMemObject(output_mem);
ret = clReleaseCommandQueue(command_queue);
ret = clReleaseContext(context);
return 0;
}
I was misusing the vstore function.
I should have used the 2nd parameter to specify the index in the array.
https://www.khronos.org/registry/OpenCL/sdk/1.0/docs/man/xhtml/vstoren.html
__kernel void fill_array(__global float *output_values)
{
    int i = get_global_id(0);
    float3 pos = (float3)(1.0, 2.0, 3.0);
    vstore3(pos, i, output_values);
}
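For reference, vstore3(data, offset, p) writes the three components to p[3*offset], p[3*offset+1] and p[3*offset+2] (see the vstoren page linked above), which matches the tight packing of glm::vec3 on the host. A sketch of the explicit-index equivalent, for illustration only:
__kernel void fill_array(__global float *output_values)
{
    int i = get_global_id(0);
    // same effect as vstore3((float3)(1.0f, 2.0f, 3.0f), i, output_values)
    output_values[3 * i]     = 1.0f;
    output_values[3 * i + 1] = 2.0f;
    output_values[3 * i + 2] = 3.0f;
}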

OpenCL - Kernel method returns unexpected results

I am a beginner at OpenCL. I tried to run a very simple kernel, adding 1 to each value of a vector. Everything runs fine and returns no error code (I checked the return value after each step). The source code:
cl_device_id device_id = NULL;
cl_context context = NULL;
cl_command_queue command_queue = NULL;
cl_mem memobj , resobj = NULL;
cl_program program = NULL;
cl_kernel kernel = NULL;
cl_platform_id platform_id = NULL;
cl_uint ret_num_devices;
cl_uint ret_num_platforms;
cl_int ret;
size_t work_units_per_kernels;
int input[10] = {1,2,3,4,5,6,7,8,9,10};
int output[10];
int length = 10 ;
FILE *fp;
char fileName[] = "/home/tuan/OpenCLPlayaround/hello.cl";
char *source_str;
size_t source_size;
/* Load the source code containing the kernel*/
fp = fopen(fileName, "r");
if (!fp) {
fprintf(stderr, "Failed to load kernel.\n");
exit(1);
}
source_str = (char*)malloc(0x100000);
source_size = fread(source_str,1,0x100000, fp);
fclose(fp);
ret = clGetPlatformIDs(1, &platform_id, &ret_num_platforms);
std::cout<<ret<<" code"<<std::endl;
ret = clGetDeviceIDs(platform_id, CL_DEVICE_TYPE_DEFAULT, 1, &device_id, &ret_num_devices);
std::cout<<ret<<" code"<<std::endl;
context = clCreateContext(NULL, 1, &device_id, NULL, NULL, &ret);
std::cout<<ret<<" code"<<std::endl;
command_queue = clCreateCommandQueue(context, device_id, 0, &ret);
//Check Concept of memory
memobj = clCreateBuffer(context, CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR,length * sizeof(int), input, &ret);
resobj = clCreateBuffer(context, CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR, length * sizeof(int), output, &ret);
std::cout<<ret<<" code"<<std::endl;
program = clCreateProgramWithSource(context,1,(const char**)&source_str, (const size_t*)&source_size, &ret);
ret = clBuildProgram(program, 1, &device_id, NULL, NULL, NULL);
kernel = clCreateKernel(program, "hello", &ret);
ret = clSetKernelArg(kernel,0, sizeof(memobj),(void *)&memobj);
ret = clSetKernelArg(kernel,1, sizeof(resobj),(void *)&resobj);
ret = clEnqueueTask(command_queue, kernel, 0, NULL,NULL);
ret = clEnqueueReadBuffer(command_queue, resobj, CL_TRUE, 0, length* sizeof(int),output, 0, NULL, NULL);
for (int i = 0; i < 10; i++) {
    std::cout << output[i] << " " << std::endl;
}
return 0;
The result is bizarre; it should be {2,3,4,5,6,7,8,9,10,11}:
2
-16777216
65535
1
-1242789408
32767
4201449
0
2
0
And my kernel:
__kernel void hello(__global int* a, __global int* b)
{
    int sam = 0;
    int gid = get_global_id(0);
    b[gid] = sam + a[gid] + 1;
}
Can somebody explain why? It's been bursting my head for hours!
clEnqueueTask is equivalent to calling clEnqueueNDRangeKernel with work_dim = 1, global_work_offset = NULL, global_work_size[0] set to 1, and local_work_size[0] set to 1. In other words, only a single work item runs, so only output[0] is ever written; the other nine values are whatever happened to be in the uninitialized output array. Use clEnqueueNDRangeKernel with a global size that covers the whole vector instead.
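A minimal sketch of the replacement (10 matches the vector length in the host code; the local size is left NULL so the runtime picks one):
size_t global_work_size = 10;  // one work item per element
ret = clEnqueueNDRangeKernel(command_queue, kernel, 1, NULL,
                             &global_work_size, NULL, 0, NULL, NULL);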

OpenCL: clSetKernelArg returns CL_INVALID_ARG_SIZE

I'm a newbie at OpenCL. I'm trying to pass 5 arguments into a kernel: an input buffer, an output buffer, an integer, and 2 local arrays the size of the input buffer.
//Create input/output cl_mem objects
cl_mem inputBuffer = clCreateBuffer(context, CL_MEM_READ_ONLY |
CL_MEM_COPY_HOST_PTR, inputVector.size(), (void *)inputVector.data(), NULL);
cl_mem outputBuffer = clCreateBuffer(context, CL_MEM_WRITE_ONLY,
inputVector.size(), NULL, NULL);
//get iterator from command line
cl_uint iterations = (cl_uint) atoi(argv[3]);
//std::cout << "iterations: " << iterations << std::endl
//cout confirms I'm getting this correctly
//Set kernel arguments
bool argOK = (CL_SUCCESS == clSetKernelArg(kernel, 0, sizeof(cl_mem), (void*)&inputBuffer));
argOK = argOK && (CL_SUCCESS == clSetKernelArg(kernel, 1, sizeof(cl_mem), (void *)&outputBuffer));
argOK = argOK && (CL_SUCCESS == clSetKernelArg(kernel, 2, sizeof(cl_uint), &iterations));
argOK = argOK && (CL_SUCCESS == clSetKernelArg(kernel, 3, sizeof(inputBuffer), NULL));
argOK = argOK && (CL_SUCCESS == clSetKernelArg(kernel, 4, sizeof(inputBuffer), NULL));
//Check for failure
if(!argOK) {
std::cerr << "Error: clSetKernelArg failed\n";
return SDK_FAILURE;
}
When I run the program, it prints:
Error: clSetKernelArg failed
I did some digging/debugging, and eventually found that this line:
argOK = argOK && (CL_SUCCESS == clSetKernelArg(kernel, 2, sizeof(cl_uint), &iterations));
was the culprit. Changing it to:
argOK = argOK && (CL_INVALID_ARG_SIZE == clSetKernelArg(kernel, 2, sizeof(cl_uint), &iterations));
Lets the program continue successfully. Therefore, the clSetKernelArg(kernel, 2, ...) statement is returning CL_INVALID_ARG_SIZE. It's weird though, since it looks like I'm passing in the correct size for the iterations variable.
Here's my kernel for reference:
__kernel void do_the_thing (__global uchar* in, __global uchar* out,
                            __global uint * numIterations, __local uchar* arr1, __local uchar* arr2)
{
    //Do stuff that I haven't written yet.
}
TL;DR: Why is the setKernelArg call returning CL_INVALID_ARG_SIZE?
The third argument of your kernel (index = 2) is declared as a buffer (__global uint *), not a cl_uint scalar, so clSetKernelArg expects a cl_mem handle of size sizeof(cl_mem) there; passing sizeof(cl_uint) is exactly what CL_INVALID_ARG_SIZE complains about. Change your kernel to take the scalar directly:
__kernel void do_the_thing (__global uchar* in, __global uchar* out,
                            uint numIterations, __local uchar* arr1, __local uchar* arr2)
{
    //Do stuff that I haven't written yet.
}
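One more thing worth checking in the posted host code (an observation, not part of the original answer): for the two __local arguments, the size passed to clSetKernelArg should be the number of bytes of local scratch you want, with a NULL value pointer. sizeof(inputBuffer) is just the size of the cl_mem handle (typically 8 bytes), so, assuming inputVector holds single-byte elements as the clCreateBuffer size suggests, something like this is probably intended:
// Local scratch the size of the input data, not the size of the cl_mem handle
argOK = argOK && (CL_SUCCESS == clSetKernelArg(kernel, 3, inputVector.size() * sizeof(cl_uchar), NULL));
argOK = argOK && (CL_SUCCESS == clSetKernelArg(kernel, 4, inputVector.size() * sizeof(cl_uchar), NULL));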