import Foundation

// import llama

enum LlamaError: Error {
    case couldNotInitializeContext
}

func llama_batch_clear(_ batch: inout llama_batch) {
    batch.n_tokens = 0
}

func llama_batch_add(_ batch: inout llama_batch, _ id: llama_token, _ pos: llama_pos, _ seq_ids: [llama_seq_id], _ logits: Bool) {
    batch.token   [Int(batch.n_tokens)] = id
    batch.pos     [Int(batch.n_tokens)] = pos
    batch.n_seq_id[Int(batch.n_tokens)] = Int32(seq_ids.count)
    for i in 0..<seq_ids.count {
        batch.seq_id[Int(batch.n_tokens)]![Int(i)] = seq_ids[i]
    }
    batch.logits  [Int(batch.n_tokens)] = logits ? 1 : 0

    batch.n_tokens += 1
}

actor LlamaContext {
    private var model: OpaquePointer
    private var context: OpaquePointer
    private var batch: llama_batch
    private var tokens_list: [llama_token]

    /// Buffers the bytes of a partially decoded multi-byte UTF-8 character between calls
    private var temporary_invalid_cchars: [CChar]

    var n_len: Int32 = 64
    var n_cur: Int32 = 0
    var n_decode: Int32 = 0

    init(model: OpaquePointer, context: OpaquePointer) {
        self.model = model
        self.context = context
        self.tokens_list = []
        self.batch = llama_batch_init(512, 0, 1)
        self.temporary_invalid_cchars = []
    }

    deinit {
        llama_batch_free(batch)
        llama_free(context)
        llama_free_model(model)
        llama_backend_free()
    }

    static func create_context(path: String) throws -> LlamaContext {
        llama_backend_init(false)

        var model_params = llama_model_default_params()

#if targetEnvironment(simulator)
        model_params.n_gpu_layers = 0
        print("Running on simulator, force use n_gpu_layers = 0")
#endif
        let model = llama_load_model_from_file(path, model_params)
        guard let model else {
            print("Could not load model at \(path)")
            throw LlamaError.couldNotInitializeContext
        }

        let n_threads = max(1, min(8, ProcessInfo.processInfo.processorCount - 2))
        print("Using \(n_threads) threads")

        var ctx_params = llama_context_default_params()
        ctx_params.seed            = 1234
        ctx_params.n_ctx           = 2048
        ctx_params.n_threads       = UInt32(n_threads)
        ctx_params.n_threads_batch = UInt32(n_threads)

        let context = llama_new_context_with_model(model, ctx_params)
        guard let context else {
            print("Could not load context!")
            throw LlamaError.couldNotInitializeContext
        }

        return LlamaContext(model: model, context: context)
    }

    func model_info() -> String {
        let result = UnsafeMutablePointer<Int8>.allocate(capacity: 256)
        result.initialize(repeating: Int8(0), count: 256)
        defer {
            result.deallocate()
        }

        // TODO: there is surely a more idiomatic way to copy a C string into a Swift String
        let nChars = llama_model_desc(model, result, 256)
        let bufferPointer = UnsafeBufferPointer(start: result, count: Int(nChars))

        var swiftString = ""
        for char in bufferPointer {
            swiftString.append(Character(UnicodeScalar(UInt8(char))))
        }

        return swiftString
    }

    func get_n_tokens() -> Int32 {
        return batch.n_tokens
    }

    func completion_init(text: String) {
        print("attempting to complete \"\(text)\"")

        tokens_list = tokenize(text: text, add_bos: true)
        temporary_invalid_cchars = []

        let n_ctx = llama_n_ctx(context)
        let n_kv_req = tokens_list.count + (Int(n_len) - tokens_list.count)

        print("\n n_len = \(n_len), n_ctx = \(n_ctx), n_kv_req = \(n_kv_req)")

        if n_kv_req > Int(n_ctx) {
            print("error: n_kv_req > n_ctx, the required KV cache size is not big enough")
        }

        for id in tokens_list {
            print(String(cString: token_to_piece(token: id) + [0]))
        }

        llama_batch_clear(&batch)

        for i1 in 0..<tokens_list.count {
            let i = Int(i1)
            llama_batch_add(&batch, tokens_list[i], Int32(i), [0], false)
        }
        batch.logits[Int(batch.n_tokens) - 1] = 1 // true

        if llama_decode(context, batch) != 0 {
            print("llama_decode() failed")
        }

        n_cur = batch.n_tokens
    }

    func completion_loop() -> String {
        var new_token_id: llama_token = 0

        let n_vocab = llama_n_vocab(model)
        let logits = llama_get_logits_ith(context, batch.n_tokens - 1)

        var candidates = Array<llama_token_data>()
        candidates.reserveCapacity(Int(n_vocab))

        for token_id in 0..<n_vocab {
            candidates.append(llama_token_data(id: token_id, logit: logits![Int(token_id)], p: 0.0))
        }

        // greedy sampling: pick the token with the highest logit
        candidates.withUnsafeMutableBufferPointer() { buffer in
            var candidates_p = llama_token_data_array(data: buffer.baseAddress, size: buffer.count, sorted: false)

            new_token_id = llama_sample_token_greedy(context, &candidates_p)
        }

        if new_token_id == llama_token_eos(model) || n_cur == n_len {
            print("\n")
            let new_token_str = String(cString: temporary_invalid_cchars + [0])
            temporary_invalid_cchars.removeAll()
            return new_token_str
        }

        let new_token_cchars = token_to_piece(token: new_token_id)
        temporary_invalid_cchars.append(contentsOf: new_token_cchars)
        let new_token_str: String
        if let string = String(validatingUTF8: temporary_invalid_cchars + [0]) {
            temporary_invalid_cchars.removeAll()
            new_token_str = string
        } else if (0 ..< temporary_invalid_cchars.count).contains(where: { $0 != 0 && String(validatingUTF8: Array(temporary_invalid_cchars.suffix($0)) + [0]) != nil }) {
            // at least the suffix of temporary_invalid_cchars can be interpreted as a UTF-8 string
            let string = String(cString: temporary_invalid_cchars + [0])
            temporary_invalid_cchars.removeAll()
            new_token_str = string
        } else {
            new_token_str = ""
        }
        print(new_token_str)

        llama_batch_clear(&batch)
        llama_batch_add(&batch, new_token_id, n_cur, [0], true)

        n_decode += 1
        n_cur    += 1

        if llama_decode(context, batch) != 0 {
            print("failed to evaluate llama!")
        }

        return new_token_str
    }

    func bench() -> String {
        let pp = 512
        let tg = 128
        let pl = 1

        // bench prompt processing

        llama_batch_clear(&batch)

        let n_tokens = pp

        for i in 0..<n_tokens {
            llama_batch_add(&batch, 0, Int32(i), [0], false)
        }
        batch.logits[Int(batch.n_tokens) - 1] = 1 // true

        llama_kv_cache_clear(context)

        let t_pp_start = ggml_time_us()

        if llama_decode(context, batch) != 0 {
            print("llama_decode() failed during prompt processing")
        }

        let t_pp_end = ggml_time_us()

        // bench text generation

        llama_kv_cache_clear(context)

        let t_tg_start = ggml_time_us()

        for i in 0..<tg {
            llama_batch_clear(&batch)

            for j in 0..<pl {
                llama_batch_add(&batch, 0, Int32(i), [llama_seq_id(j)], true)
            }

            if llama_decode(context, batch) != 0 {
                print("llama_decode() failed during text generation")
            }
        }

        let t_tg_end = ggml_time_us()

        llama_kv_cache_clear(context)

        let t_pp = Double(t_pp_end - t_pp_start) / 1_000_000.0
        let t_tg = Double(t_tg_end - t_tg_start) / 1_000_000.0

        let speed_pp = Double(pp)      / t_pp
        let speed_tg = Double(pl * tg) / t_tg

        // NOTE: the exact reporting format is an assumption; it summarizes the measured
        // prompt-processing (PP) and text-generation (TG) speeds in tokens per second
        return String(format: "PP: %.2f t/s, TG: %.2f t/s", speed_pp, speed_tg)
    }

    func clear() {
        tokens_list.removeAll()
        temporary_invalid_cchars.removeAll()
    }

    private func tokenize(text: String, add_bos: Bool) -> [llama_token] {
        let utf8Count = text.utf8.count
        // +1 for the optional BOS token, +1 spare slot for the tokenizer
        let n_tokens = utf8Count + (add_bos ? 1 : 0) + 1
        let tokens = UnsafeMutablePointer<llama_token>.allocate(capacity: n_tokens)
        let tokenCount = llama_tokenize(model, text, Int32(utf8Count), tokens, Int32(n_tokens), add_bos, false)

        var swiftTokens: [llama_token] = []
        for i in 0..<tokenCount {
            swiftTokens.append(tokens[Int(i)])
        }

        tokens.deallocate()

        return swiftTokens
    }
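
    // The helper below converts a single token id to its raw byte sequence. It first
    // tries a small fixed-size buffer; llama_token_to_piece returns a negative count
    // whose magnitude is the required size, in which case the call is retried once
    // with a buffer of exactly that size.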
    /// - note: The result does not contain a null-terminator
    private func token_to_piece(token: llama_token) -> [CChar] {
        let result = UnsafeMutablePointer<Int8>.allocate(capacity: 8)
        result.initialize(repeating: Int8(0), count: 8)
        defer {
            result.deallocate()
        }
        let nTokens = llama_token_to_piece(model, token, result, 8)

        if nTokens < 0 {
            let newResult = UnsafeMutablePointer<Int8>.allocate(capacity: Int(-nTokens))
            newResult.initialize(repeating: Int8(0), count: Int(-nTokens))
            defer {
                newResult.deallocate()
            }
            let nNewTokens = llama_token_to_piece(model, token, newResult, -nTokens)
            let bufferPointer = UnsafeBufferPointer(start: newResult, count: Int(nNewTokens))
            return Array(bufferPointer)
        } else {
            let bufferPointer = UnsafeBufferPointer(start: result, count: Int(nTokens))
            return Array(bufferPointer)
        }
    }
}
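
// Usage sketch, not part of the example itself: one way a caller might drive the
// actor end-to-end. `modelPath` and the prompt are illustrative placeholders.
func exampleCompletion(modelPath: String) async throws {
    let llamaContext = try LlamaContext.create_context(path: modelPath)

    print(await llamaContext.model_info())

    // feed the prompt once, then sample one token per completion_loop() call
    await llamaContext.completion_init(text: "The quick brown fox")
    while await llamaContext.n_cur < llamaContext.n_len {
        let piece = await llamaContext.completion_loop()
        print(piece, terminator: "")
    }
    await llamaContext.clear()
}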