Skip to content

Commit b3de7ca

Browse files
authored
llama : add Trillion 7B model support (#12556)
* Support Trillion 7B
* Update llama.h
* Update llama.h
* Update llama-vocab.cpp for Trillion
* Update llama-vocab.cpp
1 parent 7242dd9 commit b3de7ca

File tree

5 files changed

+11
-0
lines changed

5 files changed

+11
-0
lines changed

README.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -112,6 +112,7 @@ Instructions for adding support for new models: [HOWTO-add-model.md](docs/develo
112112
- [x] [RWKV-6](https://github.com/BlinkDL/RWKV-LM)
113113
- [x] [QRWKV-6](https://huggingface.co/recursal/QRWKV6-32B-Instruct-Preview-v0.1)
114114
- [x] [GigaChat-20B-A3B](https://huggingface.co/ai-sage/GigaChat-20B-A3B-instruct)
115+
- [x] [Trillion-7B-preview](https://huggingface.co/trillionlabs/Trillion-7B-preview)
115116

116117
#### Multimodal
117118

convert_hf_to_gguf.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -708,6 +708,9 @@ def get_vocab_base_pre(self, tokenizer) -> str:
708708
if chkhsh == "7dec86086fcc38b66b7bc1575a160ae21cf705be7718b9d5598190d7c12db76f":
709709
# ref: https://huggingface.co/UW/OLMo2-8B-SuperBPE-t180k
710710
res = "superbpe"
711+
if chkhsh == "1994ffd01900cfb37395608534236ecd63f2bd5995d6cb1004dda1af50240f15":
712+
# ref: https://huggingface.co/trillionlabs/Trillion-7B-preview
713+
res = "trillion"
711714

712715
if res is None:
713716
logger.warning("\n")

convert_hf_to_gguf_update.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -111,6 +111,7 @@ class TOKENIZER_TYPE(IntEnum):
111111
{"name": "deepseek-r1-qwen", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"},
112112
{"name": "gpt-4o", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/Xenova/gpt-4o", },
113113
{"name": "superbpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/UW/OLMo2-8B-SuperBPE-t180k", },
114+
{"name": "trillion", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/trillionlabs/Trillion-7B-preview", },
114115
]
115116

116117

include/llama.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -108,6 +108,7 @@ extern "C" {
108108
LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM = 28,
109109
LLAMA_VOCAB_PRE_TYPE_GPT4O = 29,
110110
LLAMA_VOCAB_PRE_TYPE_SUPERBPE = 30,
111+
LLAMA_VOCAB_PRE_TYPE_TRILLION = 31,
111112
};
112113

113114
enum llama_rope_type {

src/llama-vocab.cpp

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -342,6 +342,7 @@ struct llm_tokenizer_bpe : llm_tokenizer {
342342
case LLAMA_VOCAB_PRE_TYPE_MPT:
343343
case LLAMA_VOCAB_PRE_TYPE_OLMO:
344344
case LLAMA_VOCAB_PRE_TYPE_JAIS:
345+
case LLAMA_VOCAB_PRE_TYPE_TRILLION:
345346
regex_exprs = {
346347
"'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
347348
};
@@ -1614,6 +1615,10 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
16141615
tokenizer_pre == "superbpe") {
16151616
pre_type = LLAMA_VOCAB_PRE_TYPE_SUPERBPE;
16161617
clean_spaces = false;
1618+
} else if (
1619+
tokenizer_pre == "trillion") {
1620+
pre_type = LLAMA_VOCAB_PRE_TYPE_TRILLION;
1621+
clean_spaces = false;
16171622
} else {
16181623
throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
16191624
}

0 commit comments

Comments (0)