Skip to content

Commit 6aade19

Browse files
authored
Add StableLM2 pre-tokenizer (#7349)
* Add StableLM pre-tokenizer * Fix space * Fix trailing whitespace
1 parent ab33f7a commit 6aade19

File tree

4 files changed: +12 −3 lines

convert-hf-to-gguf-update.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -72,6 +72,7 @@ class TOKENIZER_TYPE(IntEnum):
     {"name": "mpt", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/mosaicml/mpt-7b", },
     {"name": "starcoder", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/bigcode/starcoder2-3b", },
     {"name": "gpt-2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/openai-community/gpt2", },
+    {"name": "stablelm", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/stabilityai/stablelm-2-zephyr-1_6b", },
     {"name": "refact", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/smallcloudai/Refact-1_6-base", },
     {"name": "command-r", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/CohereForAI/c4ai-command-r-v01", },
     {"name": "qwen2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/Qwen/Qwen1.5-7B", },

convert-hf-to-gguf.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -446,6 +446,9 @@ def get_vocab_base_pre(self, tokenizer) -> str:
         if chkhsh == "3ce83efda5659b07b1ad37ca97ca5797ea4285d9b9ab0dc679e4a720c9da7454":
             # ref: https://huggingface.co/openai-community/gpt2
             res = "gpt-2"
+        if chkhsh == "32d85c31273f8019248f2559fed492d929ea28b17e51d81d3bb36fff23ca72b3":
+            # ref: https://huggingface.co/stabilityai/stablelm-2-1_6b
+            res = "stablelm2"
         if chkhsh == "6221ad2852e85ce96f791f476e0b390cf9b474c9e3d1362f53a24a06dc8220ff":
             # ref: https://huggingface.co/smallcloudai/Refact-1_6-base
             res = "refact"

llama.cpp

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4463,6 +4463,9 @@ static void llm_load_vocab(
             } else if (
                 tokenizer_pre == "qwen2") {
                 vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_QWEN2;
+            } else if (
+                tokenizer_pre == "stablelm2") {
+                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_STABLELM2;
             } else if (
                 tokenizer_pre == "olmo") {
                 vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_OLMO;
@@ -12363,6 +12366,7 @@ struct llm_tokenizer_bpe {
                     "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
                 });
                 break;
+            case LLAMA_VOCAB_PRE_TYPE_STABLELM2:
             case LLAMA_VOCAB_PRE_TYPE_QWEN2:
                 word_collection = unicode_regex_split(text, {
                     // original regex from tokenizer.json

llama.h

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -81,9 +81,10 @@ extern "C" {
         LLAMA_VOCAB_PRE_TYPE_GPT2      = 7,
         LLAMA_VOCAB_PRE_TYPE_REFACT    = 8,
         LLAMA_VOCAB_PRE_TYPE_COMMAND_R = 9,
-        LLAMA_VOCAB_PRE_TYPE_QWEN2     = 10,
-        LLAMA_VOCAB_PRE_TYPE_OLMO      = 11,
-        LLAMA_VOCAB_PRE_TYPE_DBRX      = 12,
+        LLAMA_VOCAB_PRE_TYPE_STABLELM2 = 10,
+        LLAMA_VOCAB_PRE_TYPE_QWEN2     = 11,
+        LLAMA_VOCAB_PRE_TYPE_OLMO      = 12,
+        LLAMA_VOCAB_PRE_TYPE_DBRX      = 13,
     };

     // note: these values should be synchronized with ggml_rope

0 commit comments

Comments
 (0)