Skip to content
Snippets Groups Projects
Unverified Commit 1b98c109 authored by Patrick von Platen's avatar Patrick von Platen Committed by GitHub
Browse files

Merge pull request #199 from mistralai/improve_mistral_inference

Improve mistral inference
parents 80bd6a84 a0e05297
No related branches found
No related tags found
No related merge requests found
[tool.poetry]
name = "mistral_inference"
version = "1.3.0"
version = "1.3.1"
description = ""
authors = ["bam4d <bam4d@mistral.ai>"]
readme = "README.md"
......
__version__ = "1.3.0"
__version__ = "1.3.1"
......@@ -12,6 +12,7 @@ from mistral_common.protocol.instruct.messages import AssistantMessage, UserMess
from mistral_common.protocol.instruct.request import ChatCompletionRequest
from mistral_common.tokens.tokenizers.base import Tokenizer
from mistral_common.tokens.tokenizers.mistral import MistralTokenizer
from mistral_common.tokens.tokenizers.tekken import Tekkenizer, SpecialTokenPolicy
from mistral_common.tokens.tokenizers.sentencepiece import is_sentencepiece
from mistral_common.tokens.tokenizers.tekken import is_tekken
......@@ -36,6 +37,9 @@ def load_tokenizer(model_path: Path) -> MistralTokenizer:
mistral_tokenizer = MistralTokenizer.from_file(str(model_path / tokenizer[0]))
if isinstance(mistral_tokenizer.instruct_tokenizer.tokenizer, Tekkenizer):
mistral_tokenizer.instruct_tokenizer.tokenizer.special_token_policy = SpecialTokenPolicy.KEEP
logging.info(f"Loaded tokenizer of type {mistral_tokenizer.instruct_tokenizer.__class__}")
return mistral_tokenizer
......
0% — Loading, or the diff failed to load; try again.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment