|
static uint8_t | nn_equal_f32 (float golden, float actual, float rel_err) |
|
Tensor0D_F32 * | nn_tensor0d_f32 (float data) |
|
Tensor1D_F32 * | nn_tensor1d_f32 (size_t shape[1], const float *data) |
|
Tensor2D_F32 * | nn_tensor2d_f32 (size_t shape[2], const float *data) |
|
Tensor3D_F32 * | nn_tensor3d_f32 (size_t shape[3], const float *data) |
|
Tensor4D_F32 * | nn_tensor4d_f32 (size_t shape[4], const float *data) |
|
Tensor1D_F32 * | nn_as_tensor1d_f32 (size_t shape[1], float *data) |
|
Tensor2D_F32 * | nn_as_tensor2d_f32 (size_t shape[2], float *data) |
|
Tensor3D_F32 * | nn_as_tensor3d_f32 (size_t shape[3], float *data) |
|
Tensor4D_F32 * | nn_as_tensor4d_f32 (size_t shape[4], float *data) |
|
Tensor0D_F32 * | nn_zeros0d_f32 (void) |
|
Tensor1D_F32 * | nn_zeros1d_f32 (size_t shape[1]) |
|
Tensor2D_F32 * | nn_zeros2d_f32 (size_t shape[2]) |
|
Tensor3D_F32 * | nn_zeros3d_f32 (size_t shape[3]) |
|
Tensor4D_F32 * | nn_zeros4d_f32 (size_t shape[4]) |
|
Tensor0D_F32 * | nn_ones0d_f32 (void) |
|
Tensor1D_F32 * | nn_ones1d_f32 (size_t shape[1]) |
|
Tensor2D_F32 * | nn_ones2d_f32 (size_t shape[2]) |
|
Tensor0D_F32 * | nn_full0d_f32 (float data) |
|
Tensor1D_F32 * | nn_full1d_f32 (size_t shape[1], float data) |
|
Tensor2D_F32 * | nn_full2d_f32 (size_t shape[2], float data) |
|
Tensor0D_F32 * | nn_rand0d_f32 (void) |
|
Tensor1D_F32 * | nn_rand1d_f32 (size_t shape[1]) |
|
Tensor2D_F32 * | nn_rand2d_f32 (size_t shape[2]) |
|
void | nn_print_tensor1d_f32 (const Tensor1D_F32 *tensor) |
|
void | nn_print_tensor2d_f32 (const Tensor2D_F32 *tensor) |
|
void | nn_print_tensor3d_f32 (const Tensor3D_F32 *tensor) |
|
void | nn_print_tensor4d_f32 (const Tensor4D_F32 *tensor) |
|
uint8_t | nn_equals0d_f32 (const Tensor0D_F32 *a, const Tensor0D_F32 *b, float rel_err) |
|
uint8_t | nn_equals1d_f32 (const Tensor1D_F32 *a, const Tensor1D_F32 *b, float rel_err) |
|
uint8_t | nn_equals2d_f32 (const Tensor2D_F32 *a, const Tensor2D_F32 *b, float rel_err) |
|
uint8_t | nn_equals3d_f32 (const Tensor3D_F32 *a, const Tensor3D_F32 *b, float rel_err) |
|
uint8_t | nn_equals4d_f32 (const Tensor4D_F32 *a, const Tensor4D_F32 *b, float rel_err) |
|
void | nn_max1d_f32 (Tensor0D_F32 *y, const Tensor1D_F32 *x) |
|
void | nn_max2d_f32 (Tensor0D_F32 *y, const Tensor2D_F32 *x) |
|
void | nn_min1d_f32 (Tensor0D_F32 *y, const Tensor1D_F32 *x) |
|
void | nn_min2d_f32 (Tensor0D_F32 *y, const Tensor2D_F32 *x) |
|
void | nn_add1d_f32 (Tensor1D_F32 *y, const Tensor1D_F32 *x1, const Tensor1D_F32 *x2) |
|
void | nn_add2d_f32 (Tensor2D_F32 *y, const Tensor2D_F32 *x1, const Tensor2D_F32 *x2) |
|
void | nn_addscalar1d_f32 (Tensor1D_F32 *y, const Tensor1D_F32 *x, float scalar) |
|
void | nn_addscalar2d_f32 (Tensor2D_F32 *y, const Tensor2D_F32 *x, float scalar) |
|
void | nn_mul1d_f32 (Tensor1D_F32 *y, const Tensor1D_F32 *x1, const Tensor1D_F32 *x2) |
|
void | nn_mul2d_f32 (Tensor2D_F32 *y, const Tensor2D_F32 *x1, const Tensor2D_F32 *x2) |
|
void | nn_mulscalar1d_f32 (Tensor1D_F32 *y, const Tensor1D_F32 *x, float scalar) |
|
void | nn_mulscalar2d_f32 (Tensor2D_F32 *y, const Tensor2D_F32 *x, float scalar) |
|
void | nn_dot_f32 (Tensor1D_F32 *y, const Tensor1D_F32 *x1, const Tensor1D_F32 *x2) |
|
void | nn_mm_f32 (Tensor2D_F32 *y, const Tensor2D_F32 *x1, const Tensor2D_F32 *x2) |
|
void | nn_addmm_f32 (Tensor2D_F32 *y, const Tensor2D_F32 *c, const Tensor2D_F32 *x1, const Tensor2D_F32 *x2) |
|
void | nn_linear_f32 (Tensor2D_F32 *y, const Tensor2D_F32 *x, const Tensor2D_F32 *weight, const Tensor1D_F32 *bias) |
|
void | nn_elu2d_f32 (Tensor2D_F32 *y, const Tensor2D_F32 *x, float alpha) |
|
void | nn_relu2d_f32 (Tensor2D_F32 *y, const Tensor2D_F32 *x) |
|
void | nn_softmax1d_f32 (Tensor1D_F32 *y, const Tensor1D_F32 *x) |
|
void | nn_softmax2d_f32 (Tensor2D_F32 *y, const Tensor2D_F32 *x, size_t dim) |
|
void | nn_tanh2d_f32 (Tensor2D_F32 *y, const Tensor2D_F32 *x) |
|
void | nn_scaled_dot_product_attention_f32 (Tensor4D_F32 *y, const Tensor4D_F32 *query, const Tensor4D_F32 *key, const Tensor4D_F32 *value) |
|