I want to replace the call to cblas_dgemm() with cublasDgemm(). Here is the original wrapper from the Shark machine learning library:
inline void gemm(
    CBLAS_ORDER const Order, CBLAS_TRANSPOSE TransA, CBLAS_TRANSPOSE TransB,
    int M, int N, int K,
    double alpha, double const *A, int lda,
    double const *B, int ldb,
    double beta, double *C, int ldc
){
    cblas_dgemm(
        Order, TransA, TransB,
        M, N, K,
        alpha,
        A, lda,
        B, ldb,
        beta,
        C, ldc
    );
}
And here is the modified code using OpenACC pragmas:
inline void gemm(
    CBLAS_ORDER const Order, CBLAS_TRANSPOSE TransA, CBLAS_TRANSPOSE TransB,
    int M, int N, int K,
    double alpha, double const *A, int lda,
    double const *B, int ldb,
    double beta, double *C, int ldc
){
#ifdef _OPENACC
    cublasOperation_t OpT_A, OpT_B;
    switch (TransA)
    {
    case CblasNoTrans:
        OpT_A = CUBLAS_OP_N;
        break;
    case CblasTrans:
        OpT_A = CUBLAS_OP_T;
        break;
    case CblasConjTrans:
        OpT_A = CUBLAS_OP_C;
        break;
    default:
        OpT_A = CUBLAS_OP_N;
    }
    switch (TransB)
    {
    case CblasNoTrans:
        OpT_B = CUBLAS_OP_N;
        break;
    case CblasTrans:
        OpT_B = CUBLAS_OP_T;
        break;
    case CblasConjTrans:
        OpT_B = CUBLAS_OP_C;
        break;
    default:
        OpT_B = CUBLAS_OP_N;
    }
    cublasHandle_t handle;
    #pragma acc data copyin(OpT_A, OpT_B, M, N, K, alpha, A[0:M][0:K], lda, B[0:K][0:N], ldb, beta, ldc) copy(C[0:M][0:N])
    {
        #pragma acc host_data use_device(handle,OpT_A, OpT_B, A, B, C, M, N, K, lda, ldb, ldc, alpha, beta)
        {
            cublasDgemm(handle,OpT_A,OpT_B,M,N,K,&alpha,A,lda,B,ldb,&beta,C,ldc);
        }
    }
#else
    cblas_dgemm(
        Order, TransA, TransB,
        M, N, K,
        alpha,
        A, lda,
        B, ldb,
        beta,
        C, ldc
    );
#endif
}
The problem is that when I compile the code with the OpenACC flag, the elements of the result matrix C are all zeros both before and after the kernel execution. I am not sure what I am missing here. I appreciate any help.
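
To make the symptom easier to reproduce, here is a minimal harness of the kind I use to check C. The sizes, values, and the CblasColMajor order are only illustrative (my real call sites are inside Shark and may differ), and it assumes the gemm() wrapper above and <cblas.h> are in scope:

#include <cblas.h>
#include <cstdio>

int main(){
    const int M = 2, N = 2, K = 3;
    // Column-major layout: A is M x K with lda = M, B is K x N with ldb = K,
    // and C is M x N with ldc = M.
    double A[6] = {1, 2, 3, 4, 5, 6};
    double B[6] = {1, 0, 0, 0, 1, 0};
    double C[4] = {0, 0, 0, 0};

    gemm(CblasColMajor, CblasNoTrans, CblasNoTrans,
         M, N, K,
         1.0, A, M,
         B, K,
         0.0, C, M);

    // The plain cblas build prints the expected product (1, 2, 3, 4 in
    // column-major order); the OpenACC build leaves every element of C at 0.
    for(int i = 0; i != M * N; ++i)
        std::printf("C[%d] = %f\n", i, C[i]);
    return 0;
}

The cblas path behaves as expected, so the issue seems to be confined to the OpenACC branch of the wrapper.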