
Does "more threads" mean more speed?
I've got a Jacobi implementation in CUDA, but the problem is:

I assign threads this way:

```
#define imin(a,b) (a < b ? a : b)

int dimBlocks, dimThreads;

dimThreads = 256;
dimBlocks = imin(32, (dimThreads + dim - 1)/dimThreads);
```

But if I use 32 threads it is faster than using 256 threads or even more.

These are my results:

```
Sequential times:
9900 5.882000
9900 6.071000

Parallel times:
9900 1.341000 //using 32
9900 1.626000 //using 256
```

where 9900 is the matrix WIDTH. From those numbers the speedups are:

```
5.882 / 1.34 = 4.39
6.07 / 1.62 = 3.74
```

So are 32 threads more efficient than 256?

Sorry, I don't know whether I should post the code (it is a bit long); if you request it I will do it.
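To make the launch configuration above concrete (this is only an illustrative calculation added for clarity, not part of the solver): with dim = 9900 the block count is capped at 32 in both runs, so 32 threads per block gives 1024 threads in total and 256 gives 8192, and the grid-stride loops inside the kernels cover the remaining elements.

```
// Illustration only: evaluating the launch configuration above for dim = 9900.
// (Not part of the original solver.)
#include <cstdio>

#define imin(a,b) (a < b ? a : b)

int main()
{
    const int dim = 9900;                   // matrix WIDTH used in the timings
    const int threadCounts[] = { 32, 256 }; // the two configurations compared

    for (int i = 0; i < 2; ++i) {
        int dimThreads = threadCounts[i];
        int dimBlocks  = imin(32, (dimThreads + dim - 1) / dimThreads);
        printf("dimThreads = %3d -> dimBlocks = %d, total threads = %d\n",
               dimThreads, dimBlocks, dimBlocks * dimThreads);
    }
    return 0;
}
```

This prints 32 blocks in both cases, i.e. 1024 vs. 8192 threads in total.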
EDIT:

```
//Based on doubletony algorithm

#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "Jacobi.cuh"

#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/extrema.h>

#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <ctime>

#define imin(a,b) (a < b ? a : b)

// NAME OF FUNCTION: __copy_vector
// PURPOSE:    copy a vector
// PARAMETERS:
//   source   double*   value       vector to be copied
//   dest     double*   reference   vector copied
//   dim      int       value       vector dimension
// RETURN VALUE: none
__global__ void __copy_vector(double *source, double *dest, const int dim)
{
    int tIdx = blockDim.x * blockIdx.x + threadIdx.x;
    while(tIdx < dim){
        dest[tIdx] = source[tIdx];
        tIdx += gridDim.x * blockDim.x;   // grid-stride loop
    }
}

// NAME OF FUNCTION: __Jacobi_sum
// PURPOSE:    matrix-vector multiplication, skipping the diagonal term
// PARAMETERS:
//   A        double*   value       matrix A
//   B        double*   value       vector B
//   resul    double*   reference   A*B (without diagonal)
//   dim      int       value       vector dimension
// RETURN VALUE: none
__global__ void __Jacobi_sum(const double *A, const double *B,
                             double *resul, const int dim)
{
    int tIdx = blockIdx.x * blockDim.x + threadIdx.x;
    while(tIdx < dim){
        resul[tIdx] = 0;
        for(int i = 0; i < dim; i++)
            if(tIdx != i)
                resul[tIdx] += A[tIdx * dim + i] * B[i];
        tIdx += gridDim.x * blockDim.x;
    }
    __syncthreads();   // was "__syncthreads;" in the original, which did nothing
}

// NAME OF FUNCTION: __substract
// PURPOSE:    A - B = resul
// PARAMETERS:
//   A        double*   value       A
//   B        double*   value       B
//   C        double*   reference   A - B
//   dim      int       value       vector dimension
// RETURN VALUE: none
__global__ void __substract(const double *A, const double *B,
                            double *C, const int dim)
{
    int tIdx = blockIdx.x * blockDim.x + threadIdx.x;
    while(tIdx < dim){
        C[tIdx] = A[tIdx] - B[tIdx];
        tIdx += gridDim.x * blockDim.x;
    }
}

// NAME OF FUNCTION: __divide
// PURPOSE:    the Jacobi division, that is, (B - sum) / A[i,i]
// PARAMETERS:
//   A        double*   value       matrix A
//   B        double*   reference   (B - sum) / A[i,i]
//   dim      int       value       vector dimension
// RETURN VALUE: none
__global__ void __divide(const double *A, double *B, const int dim)
{
    int tIdx = blockIdx.x * blockDim.x + threadIdx.x;
    while(tIdx < dim){
        //if(A[tIdx * dim + tIdx] != 0)
        B[tIdx] /= A[tIdx * dim + tIdx];
        tIdx += blockDim.x * gridDim.x;
    }
}

// NAME OF FUNCTION: __absolute
// PURPOSE:    calculate the absolute value of each element of an array
// PARAMETERS:
//   A        double*   reference   |A[i]|
//   dim      int       value       vector dimension
// RETURN VALUE: none
__global__ void __absolute(double *A, const int dim)
{
    int tIdx = blockIdx.x * blockDim.x + threadIdx.x;
    while(tIdx < dim){
        if(A[tIdx] < 0)
            A[tIdx] = -A[tIdx];
        tIdx += blockDim.x * gridDim.x;
    }
}

// NAME OF FUNCTION: Jacobi_Cuda
// PURPOSE:    calculate an X solution for a linear system using Jacobi's method
// PARAMETERS:
//   Matrix_A   double*   value       matrix A (coefficients)
//   Vector_B   double*   value       vector B
//   Vector_X   double*   reference   solution
//   dim        int       value       matrix dimension
//   e          double    value       error allowed
//   maxIter    int       value       maximum iterations allowed
//   t          double*   reference   elapsed time in seconds
// RETURN VALUE: none
void Jacobi_Cuda(const double *Matrix_A, const double *Vector_B,
                 double *Vector_X, const int dim, const double e,
                 const int maxIter, double *t)
{
    /** Host variables **/
    int iter = 0;      // iteration counter
    double err = 1;    // error between X^k and X^(k-1)
    double *tmp;       // temporary buffer for the thrust norm
    double *norm;      // vector norm (allocated but not used below)

    tmp  = (double *) malloc(sizeof(double) * dim);
    norm = (double *) malloc(sizeof(double));

    int dimBlocks, dimThreads;
    dimThreads = 64;
    dimBlocks = imin(32, (dim + dimThreads - 1)/dimThreads);
    /** ************** **/

    /** Device variables **/
    double *d_Matrix_A, *d_Vector_B, *d_Vector_X, *d_Vector_Y, *d_Vector_Resul;

    cudaMalloc((void**)&d_Matrix_A, sizeof(double) * dim * dim);
    cudaMalloc((void**)&d_Vector_B, sizeof(double) * dim);
    cudaMalloc((void**)&d_Vector_X, sizeof(double) * dim);
    cudaMalloc((void**)&d_Vector_Y, sizeof(double) * dim);
    cudaMalloc((void**)&d_Vector_Resul, sizeof(double) * dim);
    /** **************** **/

    /** Initialize **/
    cudaMemcpy(d_Matrix_A, Matrix_A, sizeof(double) * dim * dim, cudaMemcpyHostToDevice);
    cudaMemcpy(d_Vector_B, Vector_B, sizeof(double) * dim, cudaMemcpyHostToDevice);
    cudaMemcpy(d_Vector_X, Vector_X, sizeof(double) * dim, cudaMemcpyHostToDevice);
    /** ********** **/

    clock_t start, finish;
    double totaltime;
    start = clock();

    /** Jacobi **/
    while(err > e && iter < maxIter){
        __copy_vector<<<dimBlocks, dimThreads>>>(d_Vector_X, d_Vector_Y, dim);

        __Jacobi_sum<<<dimBlocks, dimThreads>>>(d_Matrix_A, d_Vector_Y, d_Vector_Resul, dim);

        __substract<<<dimBlocks, dimThreads>>>(d_Vector_B, d_Vector_Resul, d_Vector_X, dim);

        __divide<<<dimBlocks, dimThreads>>>(d_Matrix_A, d_Vector_X, dim);

        __substract<<<dimBlocks, dimThreads>>>(d_Vector_Y, d_Vector_X, d_Vector_Resul, dim);

        __absolute<<<dimBlocks, dimThreads>>>(d_Vector_Resul, dim);

        cudaMemcpy(tmp, d_Vector_Resul, sizeof(double) * dim, cudaMemcpyDeviceToHost);

        // max |X^k - X^(k-1)|; note this local t shadows the function parameter t
        double *t = thrust::max_element(tmp, tmp + dim);

        err = *t;
        iter++;
    }

    finish = clock();
    totaltime = (double)(finish - start) / CLOCKS_PER_SEC;
    *t = totaltime;

    cudaMemcpy(Vector_X, d_Vector_X, sizeof(double) * dim, cudaMemcpyDeviceToHost);

    if(iter == maxIter)
        puts("Jacobi has reached maxIter!");
    /** ****** **/

    /** Free memory **/
    cudaFree(d_Matrix_A);
    cudaFree(d_Vector_B);
    cudaFree(d_Vector_X);
    cudaFree(d_Vector_Y);
    cudaFree(d_Vector_Resul);

    free(tmp);
    free(norm);
    /** *********** **/
}
```
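In case it helps, this is roughly how Jacobi_Cuda gets called from the host. It is only a minimal sketch with a made-up diagonally dominant test system; the dimension, tolerance and iteration limit here are example values, not the ones from the timings above.

```
// Minimal host-side driver sketch (illustrative values only).
#include <cstdio>
#include <cstdlib>

// Prototype as defined in the solver above (normally pulled in via Jacobi.cuh).
void Jacobi_Cuda(const double *Matrix_A, const double *Vector_B,
                 double *Vector_X, const int dim, const double e,
                 const int maxIter, double *t);

int main()
{
    const int    dim     = 1024;   // example size, not the 9900 case above
    const double e       = 1e-6;   // allowed error
    const int    maxIter = 1000;   // iteration limit
    double       t       = 0.0;    // Jacobi_Cuda writes the elapsed time here

    double *A = (double *) malloc(sizeof(double) * dim * dim);
    double *B = (double *) malloc(sizeof(double) * dim);
    double *X = (double *) malloc(sizeof(double) * dim);

    // Build a diagonally dominant system so Jacobi converges, start from X = 0.
    for (int i = 0; i < dim; ++i) {
        for (int j = 0; j < dim; ++j)
            A[i * dim + j] = (i == j) ? 2.0 * dim : 1.0;
        B[i] = 1.0;
        X[i] = 0.0;
    }

    Jacobi_Cuda(A, B, X, dim, e, maxIter, &t);

    printf("dim = %d, time = %f s, X[0] = %f\n", dim, t, X[0]);

    free(A); free(B); free(X);
    return 0;
}
```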
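And since the comparison is about speed: the times above come from clock() around the whole loop (kernels plus the cudaMemcpy of tmp). A sketch of timing just the GPU part with CUDA events would look roughly like this; someKernel is a stand-in for any of the launches above, not code from my solver.

```
// Sketch: timing a region of GPU work with CUDA events instead of clock().
// (Illustration only; someKernel stands for any of the kernels above.)
#include <cstdio>
#include "cuda_runtime.h"

__global__ void someKernel(double *v, int dim)
{
    int tIdx = blockIdx.x * blockDim.x + threadIdx.x;
    while (tIdx < dim) {
        v[tIdx] *= 2.0;
        tIdx += gridDim.x * blockDim.x;
    }
}

int main()
{
    const int dim = 9900;
    double *d_v;
    cudaMalloc((void**)&d_v, sizeof(double) * dim);
    cudaMemset(d_v, 0, sizeof(double) * dim);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    someKernel<<<32, 256>>>(d_v, dim);   // the region being measured
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);          // wait until the kernel has finished

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    printf("kernel time: %f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_v);
    return 0;
}
```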