From 167f662abc6ef4da8eac6af43fde9f17f0ee3191 Mon Sep 17 00:00:00 2001
From: ed <ed@brz9.dev>
Date: Tue, 17 Sep 2024 02:02:26 +0200
Subject: [PATCH] vault backup: 2024-09-17 02:02:26

Affected files:
.obsidian/workspace.json
Untitled 3.md
---
 .obsidian/workspace.json |  8 ++++----
 Untitled 3.md            | 15 +++++++++++++++
 2 files changed, 19 insertions(+), 4 deletions(-)

diff --git a/.obsidian/workspace.json b/.obsidian/workspace.json
index 11d005b..7dd3988 100644
--- a/.obsidian/workspace.json
+++ b/.obsidian/workspace.json
@@ -145,7 +145,7 @@
             "state": {
               "type": "markdown",
               "state": {
-                "file": "ComfyUI.md",
+                "file": "Untitled 3.md",
                 "mode": "source",
                 "source": false
               }
@@ -218,7 +218,7 @@
             "state": {
               "type": "backlink",
               "state": {
-                "file": "ComfyUI.md",
+                "file": "Untitled 3.md",
                 "collapseAll": false,
                 "extraContext": false,
                 "sortOrder": "alphabetical",
@@ -235,7 +235,7 @@
             "state": {
               "type": "outgoing-link",
               "state": {
-                "file": "ComfyUI.md",
+                "file": "Untitled 3.md",
                 "linksCollapsed": false,
                 "unlinkedCollapsed": true
               }
@@ -258,7 +258,7 @@
             "state": {
               "type": "outline",
               "state": {
-                "file": "ComfyUI.md"
+                "file": "Untitled 3.md"
               }
             }
           },
diff --git a/Untitled 3.md b/Untitled 3.md
index e69de29..0894bb0 100644
--- a/Untitled 3.md	
+++ b/Untitled 3.md	
@@ -0,0 +1,15 @@
+
+
+
+
+"Conditional instructions: You could modify your system prompt to make the fallacy detection conditional, e.g., "You are a helpful assistant. When asked or when relevant, you can act as a critical thinker good at unveiling logical fallacies." This might help the model understand that fallacy detection is a capability, not a constant requirement." - That's a big no. I want my model to be able to always use critical thinking. If user chat and talk about fake news or dangerous cult like thinking, I want my model to engage in "street epistemology"
+
+"Multi-task training: Instead of focusing solely on fallacy detection, you could include a variety of critical thinking tasks in your dataset. This broader approach might lead to a more balanced model." Yes, there will be multiple dataset and before I'll start training I'll merge all the parts of the system prompt, the goal being to have one fine-tuned model that works well with one specific system prompt, that will surely help to add "normal" examples.
+
+"Adversarial examples: Include some examples where a user incorrectly identifies a fallacy, and the AI correctly points out that there isn't actually a fallacy present." - Great idea!
+
+"Context-aware responses: Train the model to consider the broader context of a conversation before applying fallacy detection. This could help it understand when such analysis is appropriate." - Yes, I will definitely need multi-turn chat examples
+
+"Explicit "no fallacy" examples: Include examples where the AI explicitly states that it doesn't detect any fallacies in a given statement or argument." - Nah
+
+"Gradual fine-tuning: Start with a more general critical thinking dataset, then progressively introduce more specific fallacy detection examples. This might help the model develop a more nuanced understanding." - That's interesting, are you suggesting that splitting the fine-tuning in sets of "difficulty" will help? Like I first fine-tune Llama3.1 with simple example, save the weights, then fine-tune again with medium, then hard?
\ No newline at end of file