Compute Library Layer
clBLAS is an open-source, high-performance linear algebra library designed for the OpenCL platform. It supports the basic BLAS operations, such as matrix-matrix and matrix-vector multiplication. By exploiting OpenCL's parallel computing capabilities, clBLAS provides flexible memory management and well-tuned kernels that significantly improve the performance of linear algebra computations.
Reference repository: clBLAS (https://github.com/clMathLibraries/clBLAS)
clblasChemm
clblasChemm is the single-precision complex Hermitian matrix-matrix multiplication routine (HEMM). The example below uses it to multiply a Hermitian matrix A by a general complex matrix B.
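With side = clblasLeft and uplo = clblasLower, the routine performs the standard BLAS HEMM update

C \leftarrow \alpha A B + \beta C

where A is an M×M Hermitian matrix of which only the lower triangle is read (the {{-1, -1}} entries above the diagonal in the listing are placeholders that are never referenced), and B and C are M×N complex matrices.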
The example code is as follows:
/* ************************************************************************
* Copyright 2013 Advanced Micro Devices, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ************************************************************************/
#include <sys/types.h>
#include <stdio.h>
#include <string.h>
/* Include CLBLAS header. It automatically includes needed OpenCL header,
* so we can drop out explicit inclusion of cl.h header.
*/
#include <clBLAS.h>
/* Helper macros for accessing the real and imaginary parts of a cl_float2;
 * they are needed by printResult() below.
 */
#define CREAL(v) ((v).s[0])
#define CIMAG(v) ((v).s[1])
/* This example uses predefined matrices and their characteristics for
* simplicity purpose.
*/
static const clblasOrder order = clblasRowMajor;
#define M 4
#define N 3
static const cl_float2 alpha = {{10, 10}};
static const clblasSide side = clblasLeft;
static const clblasUplo uplo = clblasLower;
static const cl_float2 A[M*M] = {
{{11, 12}}, {{-1, -1}}, {{-1, -1}}, {{-1, -1}},
{{21, 22}}, {{22, 23}}, {{-1, -1}}, {{-1, -1}},
{{31, 32}}, {{32, 33}}, {{33, 34}}, {{-1, -1}},
{{41, 61}}, {{42, 62}}, {{43, 73}}, {{44, 23}}
};
static const size_t lda = M;
static const cl_float2 B[M*N] = {
{{11, -21}}, {{-12, 23}}, {{13, 33}},
{{21, 12}}, {{22, -10}}, {{23, 5}},
{{31, 1}}, {{-32, 65}}, {{33, -1}},
{{1, 41}}, {{-33, 42}}, {{12, 43}},
};
static const size_t ldb = N;
static const cl_float2 beta = {{20, 20}};
static cl_float2 C[M*N] = {
{{11, 11}}, {{-12, 12}}, {{13, 33}},
{{21, -32}}, {{22, -1}}, {{23, 0}},
{{31, 13}}, {{-32, 78}}, {{33, 45}},
{{41, 14}}, {{0, 42}}, {{43, -1}},
};
static const size_t ldc = N;
static void
printResult(void)
{
size_t i, j, nrows;
printf("Result:\n");
nrows = (sizeof(C) / sizeof(cl_float2)) / ldc;
for (i = 0; i < nrows; i++) {
for (j = 0; j < ldc; j++) {
printf("<%9.2f, %-9.2f> ", CREAL(C[i * ldc + j]), CIMAG(C[i*ldc + j]));
}
printf("\n");
}
}
int
main(void)
{
cl_int err;
cl_platform_id platform = 0;
cl_device_id device = 0;
cl_context_properties props[3] = { CL_CONTEXT_PLATFORM, 0, 0 };
cl_context ctx = 0;
cl_command_queue queue = 0;
cl_mem bufA, bufB, bufC;
cl_event event = NULL;
int ret = 0;
/* Setup OpenCL environment. */
err = clGetPlatformIDs(1, &platform, NULL);
if (err != CL_SUCCESS) {
printf( "clGetPlatformIDs() failed with %d\n", err );
return 1;
}
err = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 1, &device, NULL);
if (err != CL_SUCCESS) {
printf( "clGetDeviceIDs() failed with %d\n", err );
return 1;
}
props[1] = (cl_context_properties)platform;
ctx = clCreateContext(props, 1, &device, NULL, NULL, &err);
if (err != CL_SUCCESS) {
printf( "clCreateContext() failed with %d\n", err );
return 1;
}
queue = clCreateCommandQueue(ctx, device, 0, &err);
if (err != CL_SUCCESS) {
printf( "clCreateCommandQueue() failed with %d\n", err );
clReleaseContext(ctx);
return 1;
}
/* Setup clblas. */
err = clblasSetup();
if (err != CL_SUCCESS) {
printf("clblasSetup() failed with %d\n", err);
clReleaseCommandQueue(queue);
clReleaseContext(ctx);
return 1;
}
/* Prepare OpenCL memory objects and place matrices inside them. */
bufA = clCreateBuffer(ctx, CL_MEM_READ_ONLY, M * M * sizeof(*A),
NULL, &err);
bufB = clCreateBuffer(ctx, CL_MEM_READ_ONLY, M * N * sizeof(*B),
NULL, &err);
bufC = clCreateBuffer(ctx, CL_MEM_READ_WRITE, M * N * sizeof(*C),
NULL, &err);
err = clEnqueueWriteBuffer(queue, bufA, CL_TRUE, 0,
M * M * sizeof(*A), A, 0, NULL, NULL);
err = clEnqueueWriteBuffer(queue, bufB, CL_TRUE, 0,
M * N * sizeof(*B), B, 0, NULL, NULL);
err = clEnqueueWriteBuffer(queue, bufC, CL_TRUE, 0,
M * N * sizeof(*C), C, 0, NULL, NULL);
/* Call clblas function. */
err = clblasChemm(order, side, uplo, M, N, alpha, bufA,
0, lda, bufB, 0, ldb, beta, bufC, 0, ldc, 1, &queue,
0, NULL, &event);
if (err != CL_SUCCESS) {
printf("clblasSsymm() failed with %d\n", err);
ret = 1;
}
else {
/* Wait for calculations to be finished. */
err = clWaitForEvents(1, &event);
/* Fetch results of calculations from GPU memory. */
err = clEnqueueReadBuffer(queue, bufC, CL_TRUE, 0, M * N * sizeof(*C),
C, 0, NULL, NULL);
/* At this point you will get the result of HEMM placed in C array. */
printResult();
}
/* Release OpenCL events. */
clReleaseEvent(event);
/* Release OpenCL memory objects. */
clReleaseMemObject(bufC);
clReleaseMemObject(bufB);
clReleaseMemObject(bufA);
/* Finalize work with clblas. */
clblasTeardown();
/* Release OpenCL working objects. */
clReleaseCommandQueue(queue);
clReleaseContext(ctx);
return ret;
}
Output:
Result:
< 41430.00, 46230.00 > <-39740.00, 87400.00 > < 48960.00, 48400.00 >
< 41360.00, 54760.00 > <-48340.00, 90520.00 > < 32620.00, 53220.00 >
< 28830.00, 79370.00 > <-67980.00, 77040.00 > < 13400.00, 81160.00 >
<-24980.00, 90100.00 > <-114700.00, -43780.00> <-67560.00, 93200.00 >
clblasScopy
clblasScopy is the OpenCL version of the BLAS scopy routine. scopy copies one single-precision floating-point vector into another; in clBLAS the two vectors are OpenCL buffers and may reside in different memory regions.
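With the non-negative increments used in this example, the call reduces to the standard BLAS copy

Y[i \cdot incy] \leftarrow X[i \cdot incx], \quad i = 0, 1, \dots, N-1

so after the call Y contains a copy of X while X itself is unchanged, which is exactly what the output below shows.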
The example code is as follows:
/* ************************************************************************
* Copyright 2013 Advanced Micro Devices, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ************************************************************************/
#include <sys/types.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <stdlib.h>     /* for abs() */
/* Include CLBLAS header. It automatically includes needed OpenCL header,
* so we can drop out explicit inclusion of cl.h header.
*/
#include <clBLAS.h>
/* This example uses predefined vectors and their characteristics for
* simplicity purpose.
*/
static const size_t N = 7;
static cl_float X[] = {
11,
21,
31,
41,
51,
61,
71,
};
static const int incx = 1;
static cl_float Y[] = {
0,
2,
0,
0,
0,
5,
0,
};
static const int incy = 1;
static void
printResult(void)
{
size_t i;
printf("\nResult:\n");
printf(" X\n");
for (i = 0; i < N; i++) {
printf("\t%f\n", X[i]);
}
printf("Y\n");
for (i = 0; i < N; i++) {
printf("\t%f\n", Y[i]);
}
}
int
main(void)
{
cl_int err;
cl_platform_id platform = 0;
cl_device_id device = 0;
cl_context_properties props[3] = { CL_CONTEXT_PLATFORM, 0, 0 };
cl_context ctx = 0;
cl_command_queue queue = 0;
cl_mem bufX, bufY;
cl_event event = NULL;
int ret = 0;
int lenX = 1 + (N-1)*abs(incx);
int lenY = 1 + (N-1)*abs(incy);
/* Setup OpenCL environment. */
err = clGetPlatformIDs(1, &platform, NULL);
if (err != CL_SUCCESS) {
printf( "clGetPlatformIDs() failed with %d\n", err );
return 1;
}
err = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 1, &device, NULL);
if (err != CL_SUCCESS) {
printf( "clGetDeviceIDs() failed with %d\n", err );
return 1;
}
props[1] = (cl_context_properties)platform;
ctx = clCreateContext(props, 1, &device, NULL, NULL, &err);
if (err != CL_SUCCESS) {
printf( "clCreateContext() failed with %d\n", err );
return 1;
}
queue = clCreateCommandQueue(ctx, device, 0, &err);
if (err != CL_SUCCESS) {
printf( "clCreateCommandQueue() failed with %d\n", err );
clReleaseContext(ctx);
return 1;
}
/* Setup clblas. */
err = clblasSetup();
if (err != CL_SUCCESS) {
printf("clblasSetup() failed with %d\n", err);
clReleaseCommandQueue(queue);
clReleaseContext(ctx);
return 1;
}
/* Prepare OpenCL memory objects and place vectors inside them. */
bufX = clCreateBuffer(ctx, CL_MEM_READ_ONLY, (lenX*sizeof(cl_float)), NULL, &err);
bufY = clCreateBuffer(ctx, CL_MEM_READ_WRITE, (lenY*sizeof(cl_float)), NULL, &err);
err = clEnqueueWriteBuffer(queue, bufX, CL_TRUE, 0, (lenX*sizeof(cl_float)), X, 0, NULL, NULL);
err = clEnqueueWriteBuffer(queue, bufY, CL_TRUE, 0, (lenY*sizeof(cl_float)), Y, 0, NULL, NULL);
/* Call clblas function. */
err = clblasScopy( N, bufX, 0, incx, bufY, 0, incy, 1, &queue, 0, NULL, &event);
if (err != CL_SUCCESS) {
printf("clblasScopy() failed with %d\n", err);
ret = 1;
}
else {
/* Wait for calculations to be finished. */
err = clWaitForEvents(1, &event);
/* Fetch results of calculations from GPU memory. */
err = clEnqueueReadBuffer(queue, bufX, CL_TRUE, 0, (lenX*sizeof(cl_float)),
X, 0, NULL, NULL);
err = clEnqueueReadBuffer(queue, bufY, CL_TRUE, 0, (lenY*sizeof(cl_float)),
Y, 0, NULL, NULL);
/* At this point you will get the result of SCOPY placed in vector Y. */
printResult();
}
/* Release OpenCL events. */
clReleaseEvent(event);
/* Release OpenCL memory objects. */
clReleaseMemObject(bufY);
clReleaseMemObject(bufX);
/* Finalize work with clblas. */
clblasTeardown();
/* Release OpenCL working objects. */
clReleaseCommandQueue(queue);
clReleaseContext(ctx);
return ret;
}
Output:
Result:
X
11.000000
21.000000
31.000000
41.000000
51.000000
61.000000
71.000000
Y
11.000000
21.000000
31.000000
41.000000
51.000000
61.000000
71.000000
clblasSgemm
clblasSgemm performs single-precision general matrix-matrix multiplication. The name stands for Single-precision GEneral Matrix-Matrix multiplication (GEMM), one of the most fundamental BLAS routines, widely used in scientific computing, engineering simulation, data analysis, and machine learning.
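The routine performs the general update

C \leftarrow \alpha \, op(A) \, op(B) + \beta C

where op(X) is X or X^T depending on transA and transB. In the example below both are clblasNoTrans, alpha = 1 and beta = 0, so each iteration simply computes C = A B for 320×320 matrices and the loop is used purely to measure throughput.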
The example code is as follows:
#include <stdio.h>
#include <stdlib.h>
#include <CL/cl.h>
#include <clBLAS.h>
#include <sys/time.h>
#define M 320
#define N 320
#define K 320
#define ITERATIONS 300
static const clblasOrder order = clblasRowMajor;
static const cl_float alpha = 1.0f;
static const clblasTranspose transA = clblasNoTrans;
static const clblasTranspose transB = clblasNoTrans;
static const cl_float beta = 0.0f;
static cl_float A[M*K];
static cl_float B[K*N];
static cl_float C[M*N];
static cl_float result[M*N];
void initMatrix(cl_float *mat, size_t size, cl_float value) {
for (size_t i = 0; i < size; i++) {
mat[i] = value;
}
}
double getCurrentTimeInMilliseconds() {
struct timeval time;
gettimeofday(&time, NULL);
return time.tv_sec * 1000.0 + time.tv_usec / 1000.0;
}
int main(void) {
cl_int err;
cl_platform_id platform = 0;
cl_device_id device = 0;
cl_context_properties props[3] = { CL_CONTEXT_PLATFORM, 0, 0 };
cl_context ctx = 0;
cl_command_queue queue = 0;
cl_mem bufA, bufB, bufC;
cl_event event = NULL;
printf("[Matrix Multiply Using clBLAS] - Starting...\n");
// Initialize matrices
initMatrix(A, M * K, 1.0f);
initMatrix(B, K * N, 0.01f);
initMatrix(C, M * N, 0.0f);
// Setup OpenCL environment
err = clGetPlatformIDs(1, &platform, NULL);
err = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 1, &device, NULL);
// Create OpenCL context and command queue
props[1] = (cl_context_properties)platform;
ctx = clCreateContext(props, 1, &device, NULL, NULL, &err);
queue = clCreateCommandQueue(ctx, device, 0, &err);
// Setup clBLAS
clblasSetup();
// Prepare OpenCL memory objects
bufA = clCreateBuffer(ctx, CL_MEM_READ_ONLY, M * K * sizeof(*A), NULL, &err);
bufB = clCreateBuffer(ctx, CL_MEM_READ_ONLY, K * N * sizeof(*B), NULL, &err);
bufC = clCreateBuffer(ctx, CL_MEM_READ_WRITE, M * N * sizeof(*C), NULL, &err);
clEnqueueWriteBuffer(queue, bufA, CL_TRUE, 0, M * K * sizeof(*A), A, 0, NULL, NULL);
clEnqueueWriteBuffer(queue, bufB, CL_TRUE, 0, K * N * sizeof(*B), B, 0, NULL, NULL);
clEnqueueWriteBuffer(queue, bufC, CL_TRUE, 0, M * N * sizeof(*C), C, 0, NULL, NULL);
// Perform gemm and time it
double startTime = getCurrentTimeInMilliseconds();
for (int i = 0; i < ITERATIONS; i++) {
err = clblasSgemm(order, transA, transB, M, N, K,
alpha, bufA, 0, K,
bufB, 0, N, beta,
bufC, 0, N,
1, &queue, 0, NULL, &event);
clWaitForEvents(1, &event);
}
double endTime = getCurrentTimeInMilliseconds();
// Calculate performance metrics
double elapsedTimeMs = endTime - startTime;
double timePerIterationMs = elapsedTimeMs / ITERATIONS;
double flops = 2.0 * M * N * K; // 2 * M * N * K floating-point operations per matrix multiplication
double gflops = (flops / (timePerIterationMs / 1000.0)) / 1e9;
// Fetch results of calculations from GPU memory
clEnqueueReadBuffer(queue, bufC, CL_TRUE, 0, M * N * sizeof(*result), result, 0, NULL, NULL);
// Print performance results
printf("MatrixA(%dx%d), MatrixB(%dx%d)\n", M, K, K, N);
printf("clBLAS Performance = %.2f GFlop/s, Time = %.3f msec\n", gflops, timePerIterationMs);
// Cleanup
clReleaseEvent(event);
clReleaseMemObject(bufC);
clReleaseMemObject(bufB);
clReleaseMemObject(bufA);
clblasTeardown();
clReleaseCommandQueue(queue);
clReleaseContext(ctx);
return 0;
}
Output:
[Matrix Multiply Using clBLAS] - Starting...
MatrixA(320x320), MatrixB(320x320)
clBLAS Performance = 972.25 GFlop/s, Time = 0.067 msec
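The reported number can be sanity-checked by hand: one iteration performs 2·M·N·K = 2·320^3 ≈ 6.55×10^7 floating-point operations, and dividing that by the measured per-iteration time of about 0.067 ms gives roughly 9.7×10^11 FLOP/s, on the order of the printed 972 GFlop/s. Note that the measured time also includes the blocking clWaitForEvents() call in every iteration, so it is a conservative estimate of the kernel-only throughput.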