{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
// (each block covers a tile of C that is 2 * BLOCK_SIZE columns wide)
int bBegin = 2 * BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// CSub is used to store the two elements of the block sub-matrix
// that are computed by the thread
float CSub[2] = { 0, 0 };
// Declaration of the shared memory arrays used to
// store the sub-matrices of A (Aa holds the current tile,
// Ab the prefetched one)
__shared__ float Aa[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Ab[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory arrays used to
// store the sub-matrices of B (each tile is 2 * BLOCK_SIZE
// columns wide, so each thread computes two elements of C)
__shared__ float Ba[BLOCK_SIZE][2 * BLOCK_SIZE];
__shared__ float Bb[BLOCK_SIZE][2 * BLOCK_SIZE];
// Initial load: the first sub-matrices of A and B go into Aa and Ba
Aa[ty][tx] = A[aBegin + wA * ty + tx];
Ba[ty][tx] = B[bBegin + wB * ty + tx];
Ba[ty][tx + BLOCK_SIZE] = B[bBegin + wB * ty + tx + BLOCK_SIZE];
// Synchronize to make sure that initial matrices are loaded
__syncthreads();
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
// (each iteration handles two sub-matrices, one from each buffer pair)
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += 2 * aStep, b += 2 * bStep)
{
    // Load the next sub-matrices of A and B from device memory into the
    // second buffers (Ab, Bb) while the current buffers (Aa, Ba) are
    // consumed below; each thread loads one element of A and two of B
    if (a + aStep <= aEnd)
    {
        Ab[ty][tx] = A[a + aStep + wA * ty + tx];
        Bb[ty][tx] = B[b + bStep + wB * ty + tx];
        Bb[ty][tx + BLOCK_SIZE] = B[b + bStep + wB * ty + tx + BLOCK_SIZE];
    }
    // Multiply the sub-matrices currently held in Aa and Ba; each thread
    // accumulates two elements of the block sub-matrix
    #pragma unroll
    for (int k = 0; k < BLOCK_SIZE; ++k)
    {
        CSub[0] += Aa[ty][k] * Ba[k][tx];
        CSub[1] += Aa[ty][k] * Ba[k][tx + BLOCK_SIZE];
    }
    // Synchronize to make sure the loads into Ab and Bb are visible to
    // all threads and the computation above is done before Aa and Ba
    // are overwritten
    __syncthreads();
    // Load the sub-matrices for the next loop iteration from device
    // memory back into Aa and Ba; again each thread loads one element
    // of A and two elements of B
    if (a + 2 * aStep <= aEnd)
    {
        Aa[ty][tx] = A[a + 2 * aStep + wA * ty + tx];
        Ba[ty][tx] = B[b + 2 * bStep + wB * ty + tx];
        Ba[ty][tx + BLOCK_SIZE] = B[b + 2 * bStep + wB * ty + tx + BLOCK_SIZE];
    }
    // Multiply the sub-matrices held in Ab and Bb, if they were
    // loaded above
    if (a + aStep <= aEnd)
    {
        #pragma unroll
        for (int k = 0; k < BLOCK_SIZE; ++k)
        {
            CSub[0] += Ab[ty][k] * Bb[k][tx];
            CSub[1] += Ab[ty][k] * Bb[k][tx + BLOCK_SIZE];
        }
    }
    // Synchronize to make sure that the preceding
    // computation is done before loading two new
    // sub-matrices of A and B in the next iteration
    __syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes two elements
int c = wB * BLOCK_SIZE * by + 2 * BLOCK_SIZE * bx;
C[c + wB * ty + tx] = CSub[0];
C[c + wB * ty + tx + BLOCK_SIZE] = CSub[1];
}
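// For reference, a host-side launch consistent with the indexing above would
// use half as many blocks in x as the basic one-element-per-thread kernel,
// since each block now covers a BLOCK_SIZE x (2 * BLOCK_SIZE) tile of C. The
// sketch below is illustrative only: the kernel name matrixMulPrefetch, the
// helper launchMatrixMul, and the parameter order (C, A, B, wA, wB) are
// assumptions, not part of the original listing.
void launchMatrixMul(float *d_C, float *d_A, float *d_B, int hA, int wA, int wB)
{
    // BLOCK_SIZE is assumed to be the same compile-time constant used by the
    // kernel; hA and wA must be multiples of BLOCK_SIZE, and wB a multiple of
    // 2 * BLOCK_SIZE.
    dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid(wB / (2 * BLOCK_SIZE), hA / BLOCK_SIZE);
    matrixMulPrefetch<<<grid, threads>>>(d_C, d_A, d_B, wA, wB);
}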