From f45f6b41bf1d18ff61346f72b644d272b223d172 Mon Sep 17 00:00:00 2001
From: 谭九鼎 <109224573@qq.com>
Date: Mon, 11 Nov 2024 17:53:11 +0800
Subject: [PATCH] fix some doc issues (#1067)

* llama.cpp doc fixes

* fix some zh translation issues

* Update llama.cpp.po

---------

Co-authored-by: Ren Xuancheng <jklj077@users.noreply.github.com>
---
 docs/locales/zh_CN/LC_MESSAGES/framework/Langchain.po   | 2 +-
 docs/locales/zh_CN/LC_MESSAGES/run_locally/llama.cpp.po | 4 ++--
 docs/locales/zh_CN/LC_MESSAGES/run_locally/ollama.po    | 2 +-
 docs/source/run_locally/llama.cpp.md                    | 6 +++---
 4 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/docs/locales/zh_CN/LC_MESSAGES/framework/Langchain.po b/docs/locales/zh_CN/LC_MESSAGES/framework/Langchain.po
index 856eb3f..6f3e6f6 100644
--- a/docs/locales/zh_CN/LC_MESSAGES/framework/Langchain.po
+++ b/docs/locales/zh_CN/LC_MESSAGES/framework/Langchain.po
@@ -32,7 +32,7 @@ msgstr "基础用法"
 
 #: ../../source/framework/Langchain.rst:11 b93bd8165fbe4340970f3942884a91dd
 msgid "The implementation process of this project includes loading files -> reading text -> segmenting text -> vectorizing text -> vectorizing questions -> matching the top k most similar text vectors with the question vectors -> incorporating the matched text as context along with the question into the prompt -> submitting to the Qwen2.5-7B-Instruct to generate an answer. Below is an example:"
-msgstr "您可以仅使用您的文档配合``langchain``来构建一个问答应用。该项目的实现流程包括加载文件 -> 阅读文本 -> 文本分段 -> 文本向量化 -> 问题向量化 -> 将最相似的前k个文本向量与问题向量匹配 -> 将匹配的文本作为上下文连同问题一起纳入提示 -> 提交给Qwen2.5-7B-Instruct生成答案。以下是一个示例:"
+msgstr "您可以仅使用您的文档配合 ``langchain`` 来构建一个问答应用。该项目的实现流程包括加载文件 -> 阅读文本 -> 文本分段 -> 文本向量化 -> 问题向量化 -> 将最相似的前k个文本向量与问题向量匹配 -> 将匹配的文本作为上下文连同问题一起纳入提示 -> 提交给Qwen2.5-7B-Instruct生成答案。以下是一个示例:"
 
 #: ../../source/framework/Langchain.rst:95 db8fe123a81d481c91f22710ead3993a
 msgid "After loading the Qwen2.5-7B-Instruct model, you should specify the txt file for retrieval."
diff --git a/docs/locales/zh_CN/LC_MESSAGES/run_locally/llama.cpp.po b/docs/locales/zh_CN/LC_MESSAGES/run_locally/llama.cpp.po
index 85df91c..74124d9 100644
--- a/docs/locales/zh_CN/LC_MESSAGES/run_locally/llama.cpp.po
+++ b/docs/locales/zh_CN/LC_MESSAGES/run_locally/llama.cpp.po
@@ -495,8 +495,8 @@ msgid "Enter interactive mode. You can interrupt model generation and append new
 msgstr "进入互动模式。你可以中断模型生成并添加新文本。"
 
 #: ../../source/run_locally/llama.cpp.md:309 fa961800b1584d93b9315ae358c0d70d
-msgid "-i or --interactive-first"
-msgstr "-i 或 --interactive-first"
+msgid "-if or --interactive-first"
+msgstr "-if 或 --interactive-first"
 
 #: ../../source/run_locally/llama.cpp.md:309 ec896aaf5dfc44f99f2033044df8f4a0
 msgid "Immediately wait for user input. Otherwise, the model will run at once and generate based on the prompt."
diff --git a/docs/locales/zh_CN/LC_MESSAGES/run_locally/ollama.po b/docs/locales/zh_CN/LC_MESSAGES/run_locally/ollama.po
index 5d37a41..d268105 100644
--- a/docs/locales/zh_CN/LC_MESSAGES/run_locally/ollama.po
+++ b/docs/locales/zh_CN/LC_MESSAGES/run_locally/ollama.po
@@ -74,7 +74,7 @@ msgstr "用Ollama运行你自己的GGUF文件"
 
 #: ../../source/run_locally/ollama.md:34 a45b6bcaab944f00ae23384aaf4bebfe
 msgid "Sometimes you don't want to pull models and you just want to use Ollama with your own GGUF files. Suppose you have a GGUF file of Qwen2.5, `qwen2.5-7b-instruct-q5_0.gguf`. For the first step, you need to create a file called `Modelfile`. The content of the file is shown below:"
-msgstr "有时您可能不想拉取模型,而是希望直接使用自己的GGUF文件来配合Ollama。假设您有一个名为`qwen2.5-7b-instruct-q5_0.gguf`的Qwen2.5的GGUF文件。在第一步中,您需要创建一个名为`Modelfile``的文件。该文件的内容如下所示:"
+msgstr "有时您可能不想拉取模型,而是希望直接使用自己的GGUF文件来配合Ollama。假设您有一个名为`qwen2.5-7b-instruct-q5_0.gguf`的Qwen2.5的GGUF文件。在第一步中,您需要创建一个名为`Modelfile`的文件。该文件的内容如下所示:"
 
 #: ../../source/run_locally/ollama.md:97 0300ccc8902641e689c5214717fb588d
 msgid "Then create the ollama model by running:"
diff --git a/docs/source/run_locally/llama.cpp.md b/docs/source/run_locally/llama.cpp.md
index 1adb478..9efeb67 100644
--- a/docs/source/run_locally/llama.cpp.md
+++ b/docs/source/run_locally/llama.cpp.md
@@ -175,12 +175,12 @@ We provide a series of GGUF models in our Hugging Face organization, and to sear
 
 Download the GGUF model that you want with `huggingface-cli` (you need to install it first with `pip install huggingface_hub`):
 ```bash
-huggingface-cli download <model_repo> <gguf_file> --local-dir <local_dir> --local-dir-use-symlinks False
+huggingface-cli download <model_repo> <gguf_file> --local-dir <local_dir>
 ```
 
 For example:
 ```bash
-huggingface-cli download Qwen/Qwen2.5-7B-Instruct-GGUF qwen2.5-7b-instruct-q5_k_m.gguf --local-dir . --local-dir-use-symlinks False
+huggingface-cli download Qwen/Qwen2.5-7B-Instruct-GGUF qwen2.5-7b-instruct-q5_k_m.gguf --local-dir .
 ```
 
 This will download the Qwen2.5-7B-Instruct model in GGUF format quantized with the scheme Q5_K_M.
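Context for this change: `--local-dir-use-symlinks` is deprecated in recent `huggingface_hub` releases, where `--local-dir` on its own already places real files (not symlinks) in the target directory, so the simplified command works as-is. A minimal sketch, assuming an up-to-date `huggingface_hub`:

```bash
# Upgrade huggingface_hub so --local-dir downloads real files without the removed flag.
pip install -U huggingface_hub
huggingface-cli download Qwen/Qwen2.5-7B-Instruct-GGUF \
    qwen2.5-7b-instruct-q5_k_m.gguf --local-dir .
```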
@@ -306,7 +306,7 @@ We use some new options here:
 
 :`-sp` or `--special`: Show the special tokens.
 :`-i` or `--interactive`: Enter interactive mode. You can interrupt model generation and append new texts.
-:`-i` or `--interactive-first`: Immediately wait for user input. Otherwise, the model will run at once and generate based on the prompt.
+:`-if` or `--interactive-first`: Immediately wait for user input. Otherwise, the model will run at once and generate based on the prompt.
 :`-p` or `--prompt`: In interactive mode, it is the contexts based on which the model predicts the continuation.
 :`--in-prefix`: String to prefix user inputs with.
 :`--in-suffix`: String to suffix after user inputs with.
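To show how the corrected `-if`/`--interactive-first` option combines with the other flags in this list, here is a hedged sketch of an interactive run; the model path and the ChatML prefix/suffix strings are assumptions based on Qwen's chat format rather than values taken from this patch.

```bash
# Interactive sketch; -e makes llama-cli interpret the \n escapes below.
./llama-cli -m qwen2.5-7b-instruct-q5_k_m.gguf \
    -sp -if -e \
    -p "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n" \
    --in-prefix "<|im_start|>user\n" \
    --in-suffix "<|im_end|>\n<|im_start|>assistant\n"
```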
-- 
GitLab