Fix build errors and warnings

saharNooby 2023-04-02 17:23:39 +04:00
parent f2b1dad22b
commit 1262ad0456
2 changed files with 19 additions and 22 deletions

ggml.c

@@ -1646,7 +1646,7 @@ inline static void ggml_vec_exp_f32 (const int n, float * y, const float * x)
inline static void ggml_vec_1_minus_x_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = 1 - x[i]; }
inline static void ggml_vec_mul_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]*y[i]; }
inline static void ggml_vec_div_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]/y[i]; }
-inline static void ggml_vec_element_wise_max_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = max(x[i], y[i]); }
+inline static void ggml_vec_element_wise_max_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = fmaxf(x[i], y[i]); }
inline static void ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y) {
#ifdef GGML_SIMD
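
Note: `max` is not a function in standard C (it exists only as `std::max` in C++, or as a macro on some platforms), so the old line fails to compile as plain C; `fmaxf` from `<math.h>` is the C99 single-precision maximum. A minimal standalone sketch of the replacement:

#include <math.h>
#include <stdio.h>

int main(void) {
    float a = 1.5f, b = 2.5f;
    /* fmaxf is the float variant of fmax; the double version fmax would
       silently round-trip the operands through double. */
    printf("%f\n", fmaxf(a, b)); /* prints 2.500000 */
    return 0;
}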
@@ -10832,11 +10832,11 @@ int ggml_cpu_has_vsx(void) {
#define GGML_TEST_ASSERT_ELEMENT_F32(tensor, i, expected_value) do {\
float actual = *(float *) ((char *) tensor->data + 4 * i);\
-GGML_TEST_ASSERT(fabs(actual - expected_value) <= 0.0001F, "At %s[%d]: expected %f, actual %f", #tensor, i, expected_value, actual);\
+GGML_TEST_ASSERT(fabsf(actual - expected_value) <= 0.0001F, "At %s[%d]: expected %f, actual %f", #tensor, i, expected_value, actual);\
} while (0)
// Copied from https://github.com/ggerganov/llama.cpp/blob/6e7801d08d81c931a5427bae46f00763e993f54a/tests/test-quantize.c
-void ggml_test_quantization() {
+void ggml_test_quantization(void) {
#define QK 32
float src[QK];
uint8_t dst[24];
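
The `fabsf` change is the same precision issue: `fabs` takes and returns `double`, so calling it on a `float` difference promotes to double and can trigger implicit-conversion warnings; `fabsf` keeps the check in single precision, matching the `0.0001F` tolerance literal. A standalone sketch of the fixed comparison:

#include <math.h>
#include <stdio.h>

int main(void) {
    float actual = 1.00005f, expected = 1.0f;
    /* fabsf avoids the float -> double promotion that fabs would cause. */
    if (fabsf(actual - expected) <= 0.0001F) {
        printf("within tolerance\n");
    }
    return 0;
}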
@@ -10872,7 +10872,7 @@ void ggml_test_quantization() {
}
}
-void ggml_run_test_suite() {
+void ggml_run_test_suite(void) {
ggml_test_quantization();
struct ggml_init_params params;
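
The `(void)` changes fix a C-specific pitfall: in C, `void f()` declares a function with an *unspecified* parameter list, not an empty one, so compilers warn about it under flags such as `-Wstrict-prototypes`; `void f(void)` is the true empty prototype. A two-line illustration:

void old_style();      /* in C: parameters unspecified; old_style(1, 2) compiles */
void prototyped(void); /* explicit empty prototype; prototyped(1) is an error    */

(C++ treats the two declarations identically, and C23 finally makes `()` mean `(void)` too, but this file builds as older C where the distinction matters.)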

rwkv.cpp

@@ -206,12 +206,13 @@ struct rwkv_context * rwkv_init_from_file(const char * file_path, uint32_t n_thr
while (true) {
int32_t dim_count;
-fread(&dim_count, 4, 1, file);
+size_t elements_read = fread(&dim_count, 4, 1, file);
if (feof(file)) {
break;
}
+RWKV_ASSERT_NULL(elements_read == 1, "Failed to read dimension count");
RWKV_ASSERT_NULL(dim_count == 1 || dim_count == 2, "Unsupported dimension count %d", dim_count);
int32_t key_length;
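
`fread` is commonly declared with the `warn_unused_result` attribute (e.g. by glibc with `_FORTIFY_SOURCE`), so discarding its return value draws a warning. The fix captures the element count and asserts on it after the EOF check, since a zero return at end of file is the expected loop exit. A simplified stand-in for this read loop in plain C (`read_field` and its return convention are illustrative, not from the project):

#include <stdio.h>
#include <stdint.h>

/* Returns 0 on clean EOF, 1 on success, -1 on a short or failed read. */
static int read_field(FILE * file, int32_t * out) {
    size_t elements_read = fread(out, sizeof(*out), 1, file);
    if (feof(file) && elements_read == 0) {
        return 0;  /* clean end of file between records */
    }
    if (elements_read != 1) {
        return -1; /* truncated or corrupt file */
    }
    return 1;
}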
@@ -243,23 +244,20 @@ struct rwkv_context * rwkv_init_from_file(const char * file_path, uint32_t n_thr
int32_t x = -1;
int32_t y = -1;
-int32_t element_count;
if (dim_count == 1) {
read_int32(file, &x);
-element_count = x;
tensor = ggml_new_tensor_1d(ctx, ggml_data_type, x);
} else if (dim_count == 2) {
read_int32(file, &x);
read_int32(file, &y);
-element_count = x * y;
tensor = ggml_new_tensor_2d(ctx, ggml_data_type, x, y);
} else {
abort();
}
std::string key(key_length, 0);
-RWKV_ASSERT_NULL(fread(&key[0], 1, key_length, file) == key_length, "Failed to read parameter key");
+RWKV_ASSERT_NULL(fread(&key[0], 1, key_length, file) == uint32_t(key_length), "Failed to read parameter key");
RWKV_ASSERT_NULL(fread(tensor->data, 1, ggml_nbytes(tensor), file) == ggml_nbytes(tensor), "Failed to read parameter data");
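
Two warnings are addressed in this hunk. The now-unused `element_count` locals are deleted outright, since the tensor constructors consume `x` and `y` directly. The `uint32_t(key_length)` cast fixes a signed/unsigned mismatch: `fread` returns `size_t`, and comparing it against the signed `int32_t` length triggers `-Wsign-compare`. The commit uses a C++ functional-style cast; a sketch of the equivalent in plain C, with a hypothetical pre-validated `key_length`:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

int main(void) {
    FILE * file = fopen("model.bin", "rb"); /* hypothetical path */
    if (file == NULL) return EXIT_FAILURE;

    int32_t key_length = 16; /* assume already validated as positive */
    char key[16];

    /* fread returns size_t (unsigned); comparing it to int32_t mixes
       signedness. Casting the signed side is safe once key_length is
       known to be non-negative. */
    if (fread(key, 1, (size_t) key_length, file) != (size_t) key_length) {
        fclose(file);
        return EXIT_FAILURE;
    }
    fclose(file);
    return EXIT_SUCCESS;
}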
@@ -314,7 +312,6 @@ struct rwkv_context * rwkv_init_from_file(const char * file_path, uint32_t n_thr
RWKV_ASSERT_NULL(emb->ne[0] == model->n_embed, "Unexpected dimension of embedding matrix %d", emb->ne[0]);
RWKV_ASSERT_NULL(emb->ne[1] == model->n_vocab, "Unexpected dimension of embedding matrix %d", emb->ne[1]);
-int32_t n_vocab = model->n_vocab;
int32_t n_embed = model->n_embed;
int32_t n_layer = model->n_layer;
@@ -542,7 +539,7 @@ bool rwkv_eval(struct rwkv_context * ctx, int32_t token, float * state_in, float
ggml_graph_compute(ctx->ctx, ctx->graph);
-for (size_t i = 0; i < n_layer * 5; i++) {
+for (size_t i = 0; i < size_t(n_layer * 5); i++) {
struct ggml_tensor * part = ctx->state_parts[i];
memcpy(state_out + i * n_embed, part->data, part->ne[0] * FP32_SIZE);
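
The final change is the same `-Wsign-compare` class: `i` is `size_t` while `n_layer * 5` is a signed integer expression. The commit casts the bound inline; an equivalent (hypothetical) variant hoists it into a named local so the conversion happens once:

#include <stddef.h>
#include <stdint.h>

static void copy_state_parts(int32_t n_layer) {
    /* One signed -> unsigned conversion, outside the loop header. */
    const size_t part_count = (size_t) n_layer * 5;
    for (size_t i = 0; i < part_count; i++) {
        /* ... copy state part i, as rwkv_eval does ... */
    }
}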