diff --git a/compression/distortion.h b/compression/distortion.h
index 8c0742a..5fd778f 100644
--- a/compression/distortion.h
+++ b/compression/distortion.h
@@ -25,6 +25,8 @@ namespace gcpp {
 class DistortionStats {
  public:
   void Notify(float original, float distorted) {
+    (void)padding_;  // prevent unused member warning
+
     const double l1 = hwy::ScalarAbs(original - distorted);
 
     if (l1 > max_l1_) {
diff --git a/gemma.cc b/gemma.cc
index 70777ac..4775f89 100644
--- a/gemma.cc
+++ b/gemma.cc
@@ -633,30 +633,32 @@ void ForEachTensor(const Weights* weights,
        c_weights.c_final_norm_scale);
 
   char name[16];
-  for (size_t layer_idx = 0; layer_idx < TConfig::kLayers; ++layer_idx) {
-    Layer* layer = weights ? &weights->layers[layer_idx] : nullptr;
-    CompressedLayer* c_layer = c_weights.CLayer(layer_idx);
+  for (int layer_idx = 0; layer_idx < static_cast<int>(TConfig::kLayers);
+       ++layer_idx) {
+    const size_t idx = static_cast<size_t>(layer_idx);
+    Layer* layer = weights ? &weights->layers[idx] : nullptr;
+    CompressedLayer* c_layer = c_weights.CLayer(idx);
 
-    snprintf(name, sizeof(name), "pre_ff_ns_%lu", layer_idx);
+    snprintf(name, sizeof(name), "pre_ff_ns_%d", layer_idx);
     func(name, layer ? layer->pre_ffw_norm_scale.data() : nullptr,
          c_layer->c_pre_ffw_norm_scale);
-    snprintf(name, sizeof(name), "gating_ein_%lu", layer_idx);
+    snprintf(name, sizeof(name), "gating_ein_%d", layer_idx);
     func(name, layer ? layer->gating_einsum_w.data() : nullptr,
          c_layer->c_gating_einsum_w);
-    snprintf(name, sizeof(name), "linear_w_%lu", layer_idx);
+    snprintf(name, sizeof(name), "linear_w_%d", layer_idx);
     func(name, layer ? layer->linear_w.data() : nullptr,
          c_layer->c_linear_w);
-    snprintf(name, sizeof(name), "qkv_ein_%lu", layer_idx);
+    snprintf(name, sizeof(name), "qkv_ein_%d", layer_idx);
     func(name, layer ? layer->qkv_einsum_w.data() : nullptr,
          c_layer->c_qkv_einsum_w);
-    snprintf(name, sizeof(name), "att_ein_%lu", layer_idx);
+    snprintf(name, sizeof(name), "att_ein_%d", layer_idx);
     func(name, layer ? layer->attn_vec_einsum_w.data() : nullptr,
          c_layer->c_attn_vec_einsum_w);
-    snprintf(name, sizeof(name), "pre_att_ns_%lu", layer_idx);
+    snprintf(name, sizeof(name), "pre_att_ns_%d", layer_idx);
     func(name, layer ? layer->pre_attention_norm_scale.data() : nullptr,
          c_layer->c_pre_attention_norm_scale);
   }
diff --git a/util/args.h b/util/args.h
index ce03ef2..b9ab985 100644
--- a/util/args.h
+++ b/util/args.h
@@ -204,7 +204,7 @@ class ArgsBase {
   }
 };
 
-static bool HasHelp(int argc, char* argv[]) {
+static inline HWY_MAYBE_UNUSED bool HasHelp(int argc, char* argv[]) {
   // TODO(austinvhuang): handle case insensitivity
   if (argc == 1) {  // no arguments - print help