From 8f644a0a859938c787d329d27f98e03c58d7df27 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov <ggerganov@gmail.com>
Date: Tue, 21 Mar 2023 17:32:14 +0200
Subject: [PATCH] Change default repeat_penalty to 1.0

I feel this penalty is not really helping. For the example from the
README in particular, it makes the results noticeably worse.
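
For context, this is roughly how the penalty is applied during sampling
(paraphrased from the sampling code in utils.cpp; not an exact copy),
which is why a value of 1.0 turns it into a no-op:

    // for each candidate token that appears in the last repeat_last_n tokens:
    if (logit < 0.0f) {
        logit *= repeat_penalty; // push an already-unlikely token further down
    } else {
        logit /= repeat_penalty; // shrink positive logits
    }
    // with repeat_penalty == 1.0f both branches leave the logit unchanged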
---
 utils.h | 31 +++++++++++++++----------------
 1 file changed, 15 insertions(+), 16 deletions(-)

diff --git a/utils.h b/utils.h
index 971cc0e..4aa7c63 100644
--- a/utils.h
+++ b/utils.h
@@ -13,33 +13,32 @@
 //
 
 struct gpt_params {
-    int32_t seed      = -1; // RNG seed
-    int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
-    int32_t n_predict = 128; // new tokens to predict
+    int32_t seed          = -1; // RNG seed
+    int32_t n_threads     = std::min(4, (int32_t) std::thread::hardware_concurrency());
+    int32_t n_predict     = 128; // new tokens to predict
     int32_t repeat_last_n = 64;  // last n tokens to penalize
-    int32_t n_ctx = 512; //context size
-    bool memory_f16 = false; // use f16 instead of f32 for memory kv
+    int32_t n_ctx         = 512; // context size
 
     // sampling parameters
     int32_t top_k = 40;
     float   top_p = 0.95f;
     float   temp  = 0.80f;
-    float   repeat_penalty  = 1.30f;
+    float   repeat_penalty = 1.00f;
 
     int32_t n_batch = 8; // batch size for prompt processing
 
-    std::string model      = "models/lamma-7B/ggml-model.bin"; // model path
-    std::string prompt     = "";
+    std::string model  = "models/llama-7B/ggml-model.bin"; // model path
+    std::string prompt = "";
 
-    bool random_prompt = false;
-
-    bool use_color = false; // use color to distinguish generations and inputs
-
-    bool interactive = false; // interactive mode
-    bool interactive_start = false; // reverse prompt immediately
     std::vector<std::string> antiprompt; // string upon seeing which more user input is prompted
-    bool instruct    = false; // instruction mode (used for Alpaca models)
-    bool ignore_eos = false; // do not stop generating after eos
+
+    bool memory_f16        = false; // use f16 instead of f32 for memory kv
+    bool random_prompt     = false; // use a random prompt if none is provided
+    bool use_color         = false; // use color to distinguish generations and inputs
+    bool interactive       = false; // interactive mode
+    bool interactive_start = false; // reverse prompt immediately
+    bool instruct          = false; // instruction mode (used for Alpaca models)
+    bool ignore_eos        = false; // do not stop generating after eos
 };
 
 bool gpt_params_parse(int argc, char ** argv, gpt_params & params);