mirror of https://github.com/google/gemma.cpp.git
[WIP] Remove InferenceArgs from hello_world example, fix ordering of LoaderArgs validation, revert ReplGemma EOT token behavior
This commit is contained in:
parent 7042316013
commit e781007836
examples/hello_world/CMakeLists.txt

@@ -27,7 +27,7 @@ FetchContent_MakeAvailable(highway)
 FetchContent_Declare(sentencepiece GIT_REPOSITORY https://github.com/google/sentencepiece GIT_TAG 53de76561cfc149d3c01037f0595669ad32a5e7c)
 FetchContent_MakeAvailable(sentencepiece)
 
-FetchContent_Declare(gemma GIT_REPOSITORY https://github.com/google/gemma.cpp.git GIT_TAG 808dbdc42b216c3ac1f1c40dfa638bcff24bbd2b)
+FetchContent_Declare(gemma GIT_REPOSITORY https://github.com/google/gemma.cpp.git GIT_TAG 7042316013d7e2ad06532f420b551802ec114f89)
 FetchContent_MakeAvailable(gemma)
 
 if(NOT CMAKE_BUILD_TYPE)
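For context, a downstream target in this CMakeLists.txt would link against the targets the FetchContent calls above make available. A minimal sketch; the hello_world executable name and the libgemma/hwy/hwy_contrib/sentencepiece target names follow the gemma.cpp README and are assumptions here, not part of this commit:

  # Sketch of a consumer of the fetched content above; target names are
  # assumed from the gemma.cpp README, not taken from this commit.
  add_executable(hello_world run.cc)
  target_link_libraries(hello_world libgemma hwy hwy_contrib sentencepiece)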
examples/hello_world/run.cc

@@ -7,9 +7,6 @@
 #include "gemma.h" // Gemma
 // copybara:end
 // copybara:import_next_line:gemma_cpp
-#include "util/app.h"
-// copybara:end
-// copybara:import_next_line:gemma_cpp
 #include "util/args.h" // HasHelp
 // copybara:end
 // copybara:import_next_line:gemma_cpp
@@ -22,30 +19,35 @@
 #include "hwy/profiler.h"
 #include "hwy/timer.h"
 
-std::vector<int> tokenize(std::string prompt_string, const sentencepiece::SentencePieceProcessor& tokenizer) {
+std::vector<int> tokenize(
+    std::string prompt_string,
+    const sentencepiece::SentencePieceProcessor* tokenizer) {
   prompt_string = "<start_of_turn>user\n" + prompt_string +
                   "<end_of_turn>\n<start_of_turn>model\n";
   std::vector<int> tokens;
-  HWY_ASSERT(tokenizer.Encode(prompt_string, &tokens).ok());
+  HWY_ASSERT(tokenizer->Encode(prompt_string, &tokens).ok());
   tokens.insert(tokens.begin(), 2);  // BOS token
   return tokens;
 }
 
 int main(int argc, char** argv) {
-  gcpp::InferenceArgs inference(argc, argv);
   gcpp::LoaderArgs loader(argc, argv);
-  gcpp::AppArgs app(argc, argv);
-  hwy::ThreadPool pool(app.num_threads);
+  // A rough heuristic for a reasonable number of threads given hardware
+  // concurrency estimate
+  size_t num_threads = static_cast<size_t>(std::clamp(
+      static_cast<int>(std::thread::hardware_concurrency()) - 2, 1, 18));
+  hwy::ThreadPool pool(num_threads);
   hwy::ThreadPool inner_pool(0);
   gcpp::Gemma model(loader, pool);
   std::mt19937 gen;
   std::random_device rd;
   gen.seed(rd());
-  std::vector<int> tokens = tokenize("Hello, how are you?", model.Tokenizer());
+  std::vector<int> tokens =
+      tokenize("Write a greeting to the world.", model.Tokenizer());
   size_t ntokens = tokens.size();
   size_t pos = 0;
-  auto stream_token = [&pos, &gen, &ntokens, tokenizer = &model.Tokenizer()](int token, float) {
+  auto stream_token = [&pos, &gen, &ntokens, tokenizer = model.Tokenizer()](
+                          int token, float) {
     ++pos;
     if (pos < ntokens) {
       // print feedback
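The new thread-count heuristic above is self-contained; here is a minimal runnable sketch of the same clamp logic (it needs <algorithm>, <cstddef>, and <thread>, which the example file must also pull in):

  #include <algorithm>  // std::clamp
  #include <cstddef>    // std::size_t
  #include <cstdio>
  #include <thread>     // std::thread::hardware_concurrency

  int main() {
    // Leave two hardware threads for the rest of the system, but stay in
    // [1, 18]. hardware_concurrency() may return 0 when unknown; the lower
    // clamp bound of 1 covers that case too (0 - 2 clamps up to 1).
    const std::size_t num_threads = static_cast<std::size_t>(std::clamp(
        static_cast<int>(std::thread::hardware_concurrency()) - 2, 1, 18));
    std::printf("using %zu threads\n", num_threads);
    return 0;
  }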
@@ -60,14 +62,9 @@ int main(int argc, char** argv) {
     }
     return true;
   };
 
-  inference.temperature = 1.0f;
-  inference.deterministic = true;
-  inference.multiturn = false;
-
   GenerateGemma(
-      model, /*max_tokens=*/2048, /*max_generated_tokens=*/1024, /*temperature=*/1.0, tokens, 0, pool, inner_pool, stream_token,
+      model, /*max_tokens=*/2048, /*max_generated_tokens=*/1024,
+      /*temperature=*/1.0, tokens, 0, pool, inner_pool, stream_token,
       [](int) { return true; }, gen, 0);
 
   std::cout << std::endl;
 }
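With InferenceArgs removed, the generation settings are now passed positionally. A sketch of the same call with the constants named locally, as a drop-in for the body of main() above; the kMaxTokens-style names are illustrative, not part of the commit:

  // Values match the GenerateGemma call in the diff; names are hypothetical.
  constexpr size_t kMaxTokens = 2048;
  constexpr size_t kMaxGeneratedTokens = 1024;
  constexpr float kTemperature = 1.0f;
  GenerateGemma(model, kMaxTokens, kMaxGeneratedTokens, kTemperature, tokens,
                /*start_pos=*/0, pool, inner_pool, stream_token,
                /*accept_token=*/[](int) { return true; }, gen,
                /*verbosity=*/0);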
gemma.cc (8 changed lines)
@@ -782,6 +782,8 @@ void GemmaImpl<ConfigGemma7B>::Generate(
       pool, inner_pool, stream_token, accept_token, gen, verbosity);
 }
 
+// TODO: Make Gemma type independent of LoaderArgs, create a factory function
+// that takes LoaderArgs and creates a Gemma instance.
 Gemma::Gemma(const LoaderArgs& args, hwy::ThreadPool& pool) {
   const Model model_type = args.ModelType();
   model_training = args.ModelTraining();
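The new TODO suggests decoupling the Gemma type from LoaderArgs. A hypothetical shape for such a factory (the name and signature are assumptions; nothing like this exists in the commit):

  #include <memory>

  // Hypothetical factory per the TODO above: validate the args first, then
  // own construction so callers never touch the LoaderArgs constructor.
  std::unique_ptr<Gemma> CreateGemma(const LoaderArgs& args,
                                     hwy::ThreadPool& pool) {
    if (const char* error = args.Validate()) {
      HWY_ABORT("Invalid LoaderArgs: %s", error);
    }
    return std::make_unique<Gemma>(args, pool);
  }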
@@ -817,9 +819,9 @@ void GenerateGemma(Gemma& gemma, size_t max_tokens, size_t max_generated_tokens,
                    const AcceptFunc& accept_token, std::mt19937& gen,
                    int verbosity) {
   pool.SetWaitMode(hwy::PoolWaitMode::kSpin);
-  gemma.impl_->Generate(max_tokens, max_generated_tokens,
-                        temperature, prompt, start_pos, pool, inner_pool,
-                        stream_token, accept_token, gen, verbosity);
+  gemma.impl_->Generate(max_tokens, max_generated_tokens, temperature, prompt,
+                        start_pos, pool, inner_pool, stream_token, accept_token,
+                        gen, verbosity);
   pool.SetWaitMode(hwy::PoolWaitMode::kBlock);
 }
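GenerateGemma brackets generation with SetWaitMode(kSpin)/SetWaitMode(kBlock) so worker threads spin only while tokens are being produced. A hypothetical RAII guard (not in the commit; gemma.cpp uses the explicit calls shown above) would make the same pattern exception-safe:

  // Hypothetical helper: spin while alive, restore blocking waits on exit.
  class SpinWaitGuard {
   public:
    explicit SpinWaitGuard(hwy::ThreadPool& pool) : pool_(pool) {
      pool_.SetWaitMode(hwy::PoolWaitMode::kSpin);
    }
    ~SpinWaitGuard() { pool_.SetWaitMode(hwy::PoolWaitMode::kBlock); }

   private:
    hwy::ThreadPool& pool_;
  };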
gemma.h (8 changed lines)
@@ -104,6 +104,10 @@ struct LoaderArgs : public ArgsBase<LoaderArgs> {
   // Returns error string or nullptr if OK.
   const char* Validate() const {
     const std::string model_type_lc = ToLower(model_type);
+    if (model_type.empty()) {
+      return "Missing --model flag, need to specify either 2b-pt, 7b-pt, "
+             "2b-it, or 7b-it.";
+    }
     if (model_type_lc != "2b-pt" && model_type_lc != "7b-pt" &&
         model_type_lc != "2b-it" && model_type_lc != "7b-it") {
       return "Model type must be 2b-pt, 7b-pt, 2b-it, or "
@@ -112,10 +116,6 @@ struct LoaderArgs : public ArgsBase<LoaderArgs> {
     if (tokenizer.path.empty()) {
       return "Missing --tokenizer flag, a file for the tokenizer is required.";
     }
-    if (model_type.empty()) {
-      return "Missing --model flag, need to specify either 2b-pt, 7b-pt, "
-             "2b-it, or 7b-it.";
-    }
     if (cache.path.empty()) {
       return "Missing --compressed_weights flag, a file for the compressed "
              "model.";
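Moving the empty-string check to the front means a missing --model flag is now reported before the spelling check runs against an empty model type. A sketch of a typical call site, relying only on the nullptr-or-error-string contract documented above (the surrounding code is hypothetical):

  #include <cstdio>

  // Inside main(), after constructing the args from argv:
  gcpp::LoaderArgs loader(argc, argv);
  if (const char* error = loader.Validate()) {
    std::fprintf(stderr, "%s\n", error);
    return 1;
  }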
run.cc (2 changed lines)
@@ -186,7 +186,7 @@ void ReplGemma(gcpp::Gemma& model, hwy::ThreadPool& pool,
       if (abs_pos > 0) {
         // Prepend "<end_of_turn>" token if this is a multi-turn dialogue
         // continuation.
-        prompt_string = "<end_of_turn>model\n" + prompt_string;
+        prompt_string = "<end_of_turn>\n" + prompt_string;
       }
     }
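The revert drops the stray "model" text, so a continuation turn is framed exactly as the Gemma chat template expects. A small runnable illustration of the resulting prompt string, with the wrapping copied from the hello_world tokenize() above (the user text is made up):

  #include <iostream>
  #include <string>

  int main() {
    // Second user turn in a dialogue: first close the model's previous turn
    // with "<end_of_turn>\n" (the fixed prefix), then wrap the new turn the
    // same way tokenize() does.
    std::string prompt_string = "And why is that?";
    prompt_string = "<end_of_turn>\n" + prompt_string;
    prompt_string = "<start_of_turn>user\n" + prompt_string +
                    "<end_of_turn>\n<start_of_turn>model\n";
    std::cout << prompt_string;
    return 0;
  }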