[WIP] Remove InferenceArgs from hello_world example, fix ordering of LoaderArgs validation, revert ReplGemma EOT token behavior

austinvhuang 2024-03-06 23:21:13 -05:00
parent 7042316013
commit e781007836
5 changed files with 28 additions and 29 deletions

examples/hello_world/CMakeLists.txt

@@ -27,7 +27,7 @@ FetchContent_MakeAvailable(highway)
 FetchContent_Declare(sentencepiece GIT_REPOSITORY https://github.com/google/sentencepiece GIT_TAG 53de76561cfc149d3c01037f0595669ad32a5e7c)
 FetchContent_MakeAvailable(sentencepiece)
-FetchContent_Declare(gemma GIT_REPOSITORY https://github.com/google/gemma.cpp.git GIT_TAG 808dbdc42b216c3ac1f1c40dfa638bcff24bbd2b)
+FetchContent_Declare(gemma GIT_REPOSITORY https://github.com/google/gemma.cpp.git GIT_TAG 7042316013d7e2ad06532f420b551802ec114f89)
 FetchContent_MakeAvailable(gemma)

 if(NOT CMAKE_BUILD_TYPE)

examples/hello_world/run.cc

@@ -7,9 +7,6 @@
 #include "gemma.h"  // Gemma
 // copybara:end
-// copybara:import_next_line:gemma_cpp
-#include "util/app.h"
-// copybara:end
 // copybara:import_next_line:gemma_cpp
 #include "util/args.h"  // HasHelp
 // copybara:end
 // copybara:import_next_line:gemma_cpp
@@ -22,30 +19,35 @@
 #include "hwy/profiler.h"
 #include "hwy/timer.h"

-std::vector<int> tokenize(std::string prompt_string, const sentencepiece::SentencePieceProcessor& tokenizer) {
+std::vector<int> tokenize(
+    std::string prompt_string,
+    const sentencepiece::SentencePieceProcessor* tokenizer) {
   prompt_string = "<start_of_turn>user\n" + prompt_string +
                   "<end_of_turn>\n<start_of_turn>model\n";
   std::vector<int> tokens;
-  HWY_ASSERT(tokenizer.Encode(prompt_string, &tokens).ok());
-  tokens.insert(tokens.begin(), 2); // BOS token
+  HWY_ASSERT(tokenizer->Encode(prompt_string, &tokens).ok());
+  tokens.insert(tokens.begin(), 2);  // BOS token
   return tokens;
 }

 int main(int argc, char** argv) {
-  gcpp::InferenceArgs inference(argc, argv);
   gcpp::LoaderArgs loader(argc, argv);
-  gcpp::AppArgs app(argc, argv);
-  hwy::ThreadPool pool(app.num_threads);
+  // A rough heuristic for a reasonable number of threads given hardware
+  // concurrency estimate
+  size_t num_threads = static_cast<size_t>(std::clamp(
+      static_cast<int>(std::thread::hardware_concurrency()) - 2, 1, 18));
+  hwy::ThreadPool pool(num_threads);
   hwy::ThreadPool inner_pool(0);
   gcpp::Gemma model(loader, pool);
   std::mt19937 gen;
   std::random_device rd;
   gen.seed(rd());
-  std::vector<int> tokens = tokenize("Hello, how are you?", model.Tokenizer());
+  std::vector<int> tokens =
+      tokenize("Write a greeting to the world.", model.Tokenizer());
   size_t ntokens = tokens.size();
   size_t pos = 0;
-  auto stream_token = [&pos, &gen, &ntokens, tokenizer = &model.Tokenizer()](int token, float) {
+  auto stream_token = [&pos, &gen, &ntokens, tokenizer = model.Tokenizer()](
+                          int token, float) {
     ++pos;
     if (pos < ntokens) {
       // print feedback
@@ -60,14 +62,9 @@ int main(int argc, char** argv) {
     }
     return true;
   };
-  inference.temperature = 1.0f;
-  inference.deterministic = true;
-  inference.multiturn = false;
   GenerateGemma(
-      model, /*max_tokens=*/2048, /*max_generated_tokens=*/1024, /*temperature=*/1.0, tokens, 0, pool, inner_pool, stream_token,
-      [](int) {return true;}, gen, 0);
+      model, /*max_tokens=*/2048, /*max_generated_tokens=*/1024,
+      /*temperature=*/1.0, tokens, 0, pool, inner_pool, stream_token,
+      [](int) { return true; }, gen, 0);
   std::cout << std::endl;
 }
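Note: the clamp heuristic above relies on std::clamp (from <algorithm>) and std::thread::hardware_concurrency (from <thread>). A self-contained sketch of the same logic, with a hypothetical function name:

#include <algorithm>  // std::clamp
#include <cstddef>
#include <thread>     // std::thread::hardware_concurrency

// Reserve roughly two cores for other work. hardware_concurrency() may
// return 0 on some platforms; the clamp then still yields 1 thread.
// The upper bound of 18 mirrors the example's cap.
static size_t ReasonableThreadCount() {
  const int hw = static_cast<int>(std::thread::hardware_concurrency());
  return static_cast<size_t>(std::clamp(hw - 2, 1, 18));
}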

gemma.cc

@@ -782,6 +782,8 @@ void GemmaImpl<ConfigGemma7B>::Generate(
       pool, inner_pool, stream_token, accept_token, gen, verbosity);
 }

+// TODO: Make Gemma type independent of LoaderArgs, create a factory function
+// that takes LoaderArgs and creates a Gemma instance.
 Gemma::Gemma(const LoaderArgs& args, hwy::ThreadPool& pool) {
   const Model model_type = args.ModelType();
   model_training = args.ModelTraining();
@@ -817,9 +819,9 @@ void GenerateGemma(Gemma& gemma, size_t max_tokens, size_t max_generated_tokens,
                    const AcceptFunc& accept_token, std::mt19937& gen,
                    int verbosity) {
   pool.SetWaitMode(hwy::PoolWaitMode::kSpin);
-  gemma.impl_->Generate(max_tokens, max_generated_tokens,
-                        temperature, prompt, start_pos, pool, inner_pool,
-                        stream_token, accept_token, gen, verbosity);
+  gemma.impl_->Generate(max_tokens, max_generated_tokens, temperature, prompt,
+                        start_pos, pool, inner_pool, stream_token, accept_token,
+                        gen, verbosity);
   pool.SetWaitMode(hwy::PoolWaitMode::kBlock);
 }
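Note: the TODO above suggests decoupling Gemma from LoaderArgs. One hypothetical shape for such a factory (CreateGemma is not part of this commit; it assumes only the Gemma(LoaderArgs&, ThreadPool&) constructor and the LoaderArgs::Validate() shown in this diff):

#include <memory>

// Hypothetical: reject unvalidated arguments before constructing the model.
std::unique_ptr<gcpp::Gemma> CreateGemma(const gcpp::LoaderArgs& args,
                                         hwy::ThreadPool& pool) {
  if (const char* error = args.Validate()) {
    HWY_ABORT("%s", error);  // Validate() returns nullptr on success.
  }
  return std::make_unique<gcpp::Gemma>(args, pool);
}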

util/app.h

@@ -104,6 +104,10 @@ struct LoaderArgs : public ArgsBase<LoaderArgs> {
   // Returns error string or nullptr if OK.
   const char* Validate() const {
     const std::string model_type_lc = ToLower(model_type);
+    if (model_type.empty()) {
+      return "Missing --model flag, need to specify either 2b-pt, 7b-pt, "
+             "2b-it, or 7b-it.";
+    }
     if (model_type_lc != "2b-pt" && model_type_lc != "7b-pt" &&
         model_type_lc != "2b-it" && model_type_lc != "7b-it") {
       return "Model type must be 2b-pt, 7b-pt, 2b-it, or "
@@ -112,10 +116,6 @@ struct LoaderArgs : public ArgsBase<LoaderArgs> {
     if (tokenizer.path.empty()) {
       return "Missing --tokenizer flag, a file for the tokenizer is required.";
     }
-    if (model_type.empty()) {
-      return "Missing --model flag, need to specify either 2b-pt, 7b-pt, "
-             "2b-it, or 7b-it.";
-    }
     if (cache.path.empty()) {
       return "Missing --compressed_weights flag, a file for the compressed "
              "model.";

run.cc

@@ -186,7 +186,7 @@ void ReplGemma(gcpp::Gemma& model, hwy::ThreadPool& pool,
       if (abs_pos > 0) {
         // Prepend "<end_of_turn>" token if this is a multi-turn dialogue
         // continuation.
-        prompt_string = "<end_of_turn>model\n" + prompt_string;
+        prompt_string = "<end_of_turn>\n" + prompt_string;
       }
     }
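Note: after the revert, a continuation turn is closed by a bare "<end_of_turn>\n" before the next wrapped user turn. An illustrative second-turn prompt string (turn content hypothetical), assuming the surrounding code wraps user turns with the <start_of_turn>user ... <start_of_turn>model markers as in the hello_world example:

// Illustrative only: prompt_string for a second user turn after the
// prepend above plus the usual turn wrapping.
const char* second_turn =
    "<end_of_turn>\n"
    "<start_of_turn>user\n"
    "And in French?<end_of_turn>\n"
    "<start_of_turn>model\n";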