Clamp values

Ed Addario 2025-10-28 23:10:44 +00:00
parent ab015065b8
commit 86fabce58d
GPG Key ID: E7875815A3230993 (no known key found for this signature in database)
1 changed file with 32 additions and 19 deletions


@@ -298,12 +298,15 @@ static void compute_tensor_statistics(std::vector<tensor_statistics> & tstats) {
}
// Compute Cosine Similarity
float cs = 0.0f;
if (norm1_sq > 0.0f && norm2_sq > 0.0f) {
float cs = dot_prod / (std::sqrt(norm1_sq) * std::sqrt(norm2_sq));
cs = dot_prod / (std::sqrt(norm1_sq) * std::sqrt(norm2_sq));
cs = std::min(cs, 1.0f);
cs = std::max(cs, -1.0f);
ts.cossim = cs;
} else if (norm1_sq == 0.0f && norm2_sq == 0.0f) {
cs = 1.0f;
}
ts.cossim = cs;
// Compute L2 Norm (Euclidean Distance)
ts.l2_norm = std::sqrt(l2_dist_sq);
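
For context, a minimal standalone sketch of the pattern this hunk introduces (assumed helper name, not the tool's actual code): the cosine-similarity ratio is clamped into [-1, 1] to absorb floating-point rounding, and two all-zero vectors are treated as identical (similarity 1) rather than dissimilar.

    // Sketch only: clamped cosine similarity between two activation vectors,
    // following the zero-vector convention used in the hunk above.
    #include <algorithm>
    #include <cmath>
    #include <cstddef>
    #include <vector>

    static float clamped_cossim(const std::vector<float> & a, const std::vector<float> & b) {
        float dot_prod = 0.0f;
        float norm1_sq = 0.0f;
        float norm2_sq = 0.0f;
        for (size_t i = 0; i < a.size() && i < b.size(); ++i) {
            dot_prod += a[i] * b[i];
            norm1_sq += a[i] * a[i];
            norm2_sq += b[i] * b[i];
        }
        float cs = 0.0f;
        if (norm1_sq > 0.0f && norm2_sq > 0.0f) {
            cs = dot_prod / (std::sqrt(norm1_sq) * std::sqrt(norm2_sq));
            cs = std::min(cs, 1.0f);   // rounding can produce e.g. 1.0000001f
            cs = std::max(cs, -1.0f);
        } else if (norm1_sq == 0.0f && norm2_sq == 0.0f) {
            cs = 1.0f;                 // two all-zero vectors are treated as identical
        }
        return cs;
    }

With this convention a tensor whose activations are all zero compares as fully similar to another all-zero tensor, instead of reporting a similarity of 0.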
@@ -332,14 +335,19 @@ static void compute_layer_statistics(const std::vector<tensor_statistics> & tsta
const int blk = std::stoi(match[1]);
if (blk <= 0) { continue; }
std::string prev_lyr(ts.tensor);
prev_lyr.replace(match.position(1), match.length(1), std::to_string(blk-1));
if (auto it_prev = tidx.find(prev_lyr); it_prev == tidx.end()) { continue; }
const auto curr_avg = compute_tensor_averages(stats_map.at(ts.tensor));
const auto prev_avg = compute_tensor_averages(stats_map.at(prev_lyr));
prev_lyr.replace(match.position(1), match.length(1), std::to_string(blk - 1));
if (tidx.find(prev_lyr) == tidx.end()) { continue; }
auto it_curr = stats_map.find(ts.tensor);
auto it_prev = stats_map.find(prev_lyr);
if (it_curr == stats_map.end() || it_prev == stats_map.end()) { continue; }
const auto curr_avg = compute_tensor_averages(it_curr->second);
const auto prev_avg = compute_tensor_averages(it_prev->second);
if (curr_avg.empty() || prev_avg.empty() || curr_avg.size() != prev_avg.size()) { continue; }
auto & [curr, prev] = agr[blk];
curr.insert(curr.end(), curr_avg.begin(), curr_avg.end());
prev.insert(prev.end(), prev_avg.begin(), prev_avg.end());
auto & entry = agr[blk];
entry.curr_avg.insert(entry.curr_avg.end(), curr_avg.begin(), curr_avg.end());
entry.prev_avg.insert(entry.prev_avg.end(), prev_avg.begin(), prev_avg.end());
}
for (auto & kv : agr) {
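
The reworked lookup above avoids std::map::at(), which throws std::out_of_range on a missing key; entries without recorded statistics are now skipped via find() checks instead. A hedged sketch of that pattern (the mapped type is assumed to be std::vector<float>; the real stats_map type may differ):

    // Sketch of the defensive lookup; names mirror the diff, types are assumed.
    #include <map>
    #include <string>
    #include <vector>

    using stats_map_t = std::map<std::string, std::vector<float>>;

    // Returns true only when both the current and the previous layer's data exist,
    // so the caller can simply `continue;` past incomplete blocks.
    static bool find_layer_pair(const stats_map_t & stats_map,
                                const std::string & curr_name, const std::string & prev_name,
                                stats_map_t::const_iterator & it_curr,
                                stats_map_t::const_iterator & it_prev) {
        it_curr = stats_map.find(curr_name);
        it_prev = stats_map.find(prev_name);
        return it_curr != stats_map.end() && it_prev != stats_map.end();
    }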
@@ -347,18 +355,18 @@ static void compute_layer_statistics(const std::vector<tensor_statistics> & tsta
const auto & prev = kv.second.prev_avg;
if (curr.size() != prev.size() || curr.empty()) { continue; }
float dot_prod = 0.0f;
float norm1_sq = 0.0f;
float norm2_sq = 0.0f;
float l2_dist_sq = 0.0f;
double dot_prod = 0.0;
double norm1_sq = 0.0;
double norm2_sq = 0.0;
double l2_dist_sq = 0.0;
for (size_t i = 0; i < curr.size(); ++i) {
const float c_val = curr[i];
const float p_val = prev[i];
const double c_val = curr[i];
const double p_val = prev[i];
dot_prod += c_val * p_val;
norm1_sq += c_val * c_val;
norm2_sq += p_val * p_val;
const float diff = c_val - p_val;
const double diff = c_val - p_val;
l2_dist_sq += diff * diff;
}
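
The accumulators in this hunk move from float to double: when many small products are summed into a single-precision total, low-order bits are lost once the running sum grows large. A small self-contained illustration of the effect (the constants are arbitrary, chosen only to make the drift visible):

    // Illustration of why the running sums are kept in double:
    // adding many small terms to a large float total drops low-order bits.
    #include <cstdio>

    int main() {
        float  sum_f = 0.0f;
        double sum_d = 0.0;
        for (int i = 0; i < 10000000; ++i) {
            sum_f += 0.1f;   // rounding error accumulates in the float sum
            sum_d += 0.1f;   // the same terms, summed in double
        }
        // the float sum drifts far from the expected ~1000000, the double sum does not
        std::printf("float: %.1f  double: %.1f\n", sum_f, sum_d);
        return 0;
    }

Only the final results are narrowed back to float, as in the (float) cast on std::sqrt(l2_dist_sq) further down.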
@@ -366,11 +374,15 @@ static void compute_layer_statistics(const std::vector<tensor_statistics> & tsta
float cossim = 0.0f;
if (norm1_sq > 0.0f && norm2_sq > 0.0f) {
cossim = dot_prod / (std::sqrt(norm1_sq) * std::sqrt(norm2_sq));
cossim = std::min(cossim, 1.0f);
cossim = std::max(cossim, -1.0f);
} else if (norm1_sq == 0.0f && norm2_sq == 0.0f) {
cossim = 1.0f;
}
layer_cossim[kv.first] = cossim;
// Compute aggregated L2 Norm (Euclidean Distance)
layer_l2_norm[kv.first] = std::sqrt(l2_dist_sq);
layer_l2_norm[kv.first] = (float)std::sqrt(l2_dist_sq);
}
}
@@ -1309,8 +1321,8 @@ static bool show_statistics(const common_params & params) {
float layer_zd = 0.0f;
int n = 0;
};
std::map<int, layer_stats> ls;
std::map<int, layer_stats> ls;
LOG_INF("\nComputing tensor statistics for %s (%d tensors)\n", params.in_files[0].c_str(), static_cast<int>(ts.size()));
LOG_INF("\n%6s\t%18s\t%13s\t%8s\t%8s\t%7s\t%15s\t%13s\t%11s\t%8s\t%5s\t%10s\n",
"Layer",
@@ -1330,7 +1342,8 @@ static bool show_statistics(const common_params & params) {
"=============================================================\n");
for (const auto & tstat : ts) {
std::string layer, name;
std::string layer;
std::string name;
process_tensor_name(tstat.tensor, layer, name);
int blk;