Usage Examples

This section contains practical examples and usage patterns for SKaiNET operators.

Basic Operations

Linear Algebra

Matrix Multiplication Examples

Basic Usage

Simple Matrix Multiplication
// Create two matrices
val a = tensor(shape = intArrayOf(3, 2)) {
    floatArrayOf(
        1.0f, 2.0f,   // row 0
        3.0f, 4.0f,   // row 1
        5.0f, 6.0f,   // row 2
    )
}

val b = tensor(shape = intArrayOf(2, 4)) {
    floatArrayOf(
        1.0f, 0.0f, 1.0f, 0.0f,   // row 0
        0.0f, 1.0f, 0.0f, 1.0f,   // row 1
    )
}

// Perform matrix multiplication: [3, 2] × [2, 4] → [3, 4]
val result = a.matmul(b)
println("Result shape: ${result.shape.contentToString()}") // [3, 4]

Batch Operations

Batch Matrix Multiplication
// Batch of matrices: [batch_size, m, k] × [batch_size, k, n] → [batch_size, m, n]
val batchA = tensor(shape = intArrayOf(2, 3, 2)) {
    floatArrayOf(
        // batch 0: 3×2
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        // batch 1: 3×2
        2.0f, 1.0f,
        4.0f, 3.0f,
        6.0f, 5.0f,
    )
}

val batchB = tensor(shape = intArrayOf(2, 2, 3)) {
    floatArrayOf(
        // batch 0: 2×3
        1.0f, 0.0f, 1.0f,
        0.0f, 1.0f, 0.0f,
        // batch 1: 2×3
        0.0f, 1.0f, 0.0f,
        1.0f, 0.0f, 1.0f,
    )
}

// Each batch slice is multiplied independently
val batchResult = batchA.matmul(batchB)
println("Batch result shape: ${batchResult.shape.contentToString()}") // [2, 3, 3]

Neural Network Applications

Linear Layer Implementation
/**
 * A fully-connected (dense) layer: `output = input × weights (+ bias)`.
 *
 * @property weights weight matrix of shape `[in_features, out_features]`
 * @property bias optional bias added to every row of the output
 *                (broadcast over the batch dimension); `null` disables it
 */
class LinearLayer(
    private val weights: Tensor<Float>,
    private val bias: Tensor<Float>? = null
) {
    /**
     * Applies the affine transform to a batch of inputs.
     *
     * @param input activations of shape `[batch_size, in_features]`
     * @return activations of shape `[batch_size, out_features]`
     */
    fun forward(input: Tensor<Float>): Tensor<Float> {
        // [batch_size, in_features] × [in_features, out_features]
        //   → [batch_size, out_features]
        val projected = input.matmul(weights)
        // Expression form replaces the original `var` + let-mutation pattern;
        // `+ it` relies on broadcasting addition over the batch dimension.
        return bias?.let { projected + it } ?: projected
    }
}

// Usage example: one hidden layer sized for flattened MNIST images
val batchSize = 32
val inputSize = 784   // 28×28 MNIST image, flattened
val hiddenSize = 256

// Xavier/Glorot initialization: std = sqrt(2 / (fan_in + fan_out))
val xavierStd = sqrt(2.0f / (inputSize + hiddenSize))
val weights = tensor(shape = intArrayOf(inputSize, hiddenSize)) {
    randomNormal(0.0f, xavierStd)
}
val bias = zeros(shape = intArrayOf(hiddenSize))

val layer = LinearLayer(weights, bias)
val input = randomNormal(shape = intArrayOf(batchSize, inputSize))
val output = layer.forward(input)  // shape [batchSize, hiddenSize]

Performance Considerations

Memory Layout Optimization
// Prefer row-major order for better cache locality
// (each row is stored contiguously, so row-wise traversal during matmul stays cache-friendly)
val a = tensor(shape = intArrayOf(1000, 500), layout = TensorLayout.RowMajor)
val b = tensor(shape = intArrayOf(500, 200), layout = TensorLayout.RowMajor)

// For very large matrices, consider blocking/tiling
// blockSize = 64 tiles the multiplication to bound the working set per tile
// NOTE(review): the optimal blockSize is hardware-dependent — confirm against the target backend
val result = a.matmul(b, blockSize = 64)

Common Patterns

Matrix-Vector Multiplication
// Matrix–vector product expressed with a [50, 1] column vector
val matrix = randomNormal(shape = intArrayOf(100, 50))
val vector = randomNormal(shape = intArrayOf(50, 1))

// Equivalent operations:
val result1 = matrix.matmul(vector)           // [100, 1] — keeps the column axis
val result2 = matrix.dot(vector.squeeze())    // [100] - squeezed result
Transpose Patterns
// a must be [4, 3] so that a.T is [3, 4] and b @ a.T conforms:
// [5, 3] × [3, 4] → [5, 4]  (the original [3, 4] shape for `a` would not conform)
val a = randomNormal(shape = intArrayOf(4, 3))
val b = randomNormal(shape = intArrayOf(5, 3))

// Compute b @ a.T without explicit transpose
// (transposeB = true transposes the second operand, `a`, inside the kernel)
val result = b.matmul(a, transposeB = true)  // [5, 4]

Tensor Creation and Manipulation

Broadcasting Operations

Neural Network Examples

Layer Implementations

Training Loops

Model Architectures

Performance Optimization

Memory Management

Backend-Specific Optimizations

Cross-References