
llama.cpp LLM_ARCH_DEEPSEEK and LLM_ARCH_DEEPSEEK2

  • 1. `LLM_ARCH_DEEPSEEK` and `LLM_ARCH_DEEPSEEK2` (`enum llm_arch` / `LLM_ARCH_NAMES`)
  • 2. `LLM_ARCH_DEEPSEEK` and `LLM_ARCH_DEEPSEEK2` (`LLM_TENSOR_NAMES`)
  • 3. `struct ggml_cgraph * build_deepseek()` and `struct ggml_cgraph * build_deepseek2()`
  • References

It is not appropriate to hype up Chinese large language models while at the same time disparaging American large language models.

Water is the main chemical component of the human body, accounting for roughly 50% to 70% of body weight. Large language models carry a fair amount of "water" (hype) as well.

llama.cpp
https://github.com/ggerganov/llama.cpp

1. LLM_ARCH_DEEPSEEK and LLM_ARCH_DEEPSEEK2 (enum llm_arch / LLM_ARCH_NAMES)

/home/yongqiang/llm_work/llama_cpp_25_01_05/llama.cpp/src/llama-arch.h
/home/yongqiang/llm_work/llama_cpp_25_01_05/llama.cpp/src/llama-arch.cpp

  • LLM_ARCH_DEEPSEEK and LLM_ARCH_DEEPSEEK2
//
// gguf constants (sync with gguf.py)
//

enum llm_arch {
    LLM_ARCH_LLAMA,
    LLM_ARCH_DECI,
    LLM_ARCH_FALCON,
    LLM_ARCH_BAICHUAN,
    LLM_ARCH_GROK,
    LLM_ARCH_GPT2,
    LLM_ARCH_GPTJ,
    LLM_ARCH_GPTNEOX,
    LLM_ARCH_MPT,
    LLM_ARCH_STARCODER,
    LLM_ARCH_REFACT,
    LLM_ARCH_BERT,
    LLM_ARCH_NOMIC_BERT,
    LLM_ARCH_JINA_BERT_V2,
    LLM_ARCH_BLOOM,
    LLM_ARCH_STABLELM,
    LLM_ARCH_QWEN,
    LLM_ARCH_QWEN2,
    LLM_ARCH_QWEN2MOE,
    LLM_ARCH_QWEN2VL,
    LLM_ARCH_PHI2,
    LLM_ARCH_PHI3,
    LLM_ARCH_PHIMOE,
    LLM_ARCH_PLAMO,
    LLM_ARCH_CODESHELL,
    LLM_ARCH_ORION,
    LLM_ARCH_INTERNLM2,
    LLM_ARCH_MINICPM,
    LLM_ARCH_MINICPM3,
    LLM_ARCH_GEMMA,
    LLM_ARCH_GEMMA2,
    LLM_ARCH_STARCODER2,
    LLM_ARCH_MAMBA,
    LLM_ARCH_XVERSE,
    LLM_ARCH_COMMAND_R,
    LLM_ARCH_COHERE2,
    LLM_ARCH_DBRX,
    LLM_ARCH_OLMO,
    LLM_ARCH_OLMO2,
    LLM_ARCH_OLMOE,
    LLM_ARCH_OPENELM,
    LLM_ARCH_ARCTIC,
    LLM_ARCH_DEEPSEEK,
    LLM_ARCH_DEEPSEEK2,
    LLM_ARCH_CHATGLM,
    LLM_ARCH_BITNET,
    LLM_ARCH_T5,
    LLM_ARCH_T5ENCODER,
    LLM_ARCH_JAIS,
    LLM_ARCH_NEMOTRON,
    LLM_ARCH_EXAONE,
    LLM_ARCH_RWKV6,
    LLM_ARCH_RWKV6QWEN2,
    LLM_ARCH_GRANITE,
    LLM_ARCH_GRANITE_MOE,
    LLM_ARCH_CHAMELEON,
    LLM_ARCH_WAVTOKENIZER_DEC,
    LLM_ARCH_UNKNOWN,
};
  • { LLM_ARCH_DEEPSEEK, "deepseek" } and { LLM_ARCH_DEEPSEEK2, "deepseek2" }
static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
    { LLM_ARCH_LLAMA,            "llama"            },
    { LLM_ARCH_DECI,             "deci"             },
    { LLM_ARCH_FALCON,           "falcon"           },
    { LLM_ARCH_GROK,             "grok"             },
    { LLM_ARCH_GPT2,             "gpt2"             },
    { LLM_ARCH_GPTJ,             "gptj"             },
    { LLM_ARCH_GPTNEOX,          "gptneox"          },
    { LLM_ARCH_MPT,              "mpt"              },
    { LLM_ARCH_BAICHUAN,         "baichuan"         },
    { LLM_ARCH_STARCODER,        "starcoder"        },
    { LLM_ARCH_REFACT,           "refact"           },
    { LLM_ARCH_BERT,             "bert"             },
    { LLM_ARCH_NOMIC_BERT,       "nomic-bert"       },
    { LLM_ARCH_JINA_BERT_V2,     "jina-bert-v2"     },
    { LLM_ARCH_BLOOM,            "bloom"            },
    { LLM_ARCH_STABLELM,         "stablelm"         },
    { LLM_ARCH_QWEN,             "qwen"             },
    { LLM_ARCH_QWEN2,            "qwen2"            },
    { LLM_ARCH_QWEN2MOE,         "qwen2moe"         },
    { LLM_ARCH_QWEN2VL,          "qwen2vl"          },
    { LLM_ARCH_PHI2,             "phi2"             },
    { LLM_ARCH_PHI3,             "phi3"             },
    { LLM_ARCH_PHIMOE,           "phimoe"           },
    { LLM_ARCH_PLAMO,            "plamo"            },
    { LLM_ARCH_CODESHELL,        "codeshell"        },
    { LLM_ARCH_ORION,            "orion"            },
    { LLM_ARCH_INTERNLM2,        "internlm2"        },
    { LLM_ARCH_MINICPM,          "minicpm"          },
    { LLM_ARCH_MINICPM3,         "minicpm3"         },
    { LLM_ARCH_GEMMA,            "gemma"            },
    { LLM_ARCH_GEMMA2,           "gemma2"           },
    { LLM_ARCH_STARCODER2,       "starcoder2"       },
    { LLM_ARCH_MAMBA,            "mamba"            },
    { LLM_ARCH_XVERSE,           "xverse"           },
    { LLM_ARCH_COMMAND_R,        "command-r"        },
    { LLM_ARCH_COHERE2,          "cohere2"          },
    { LLM_ARCH_DBRX,             "dbrx"             },
    { LLM_ARCH_OLMO,             "olmo"             },
    { LLM_ARCH_OLMO2,            "olmo2"            },
    { LLM_ARCH_OLMOE,            "olmoe"            },
    { LLM_ARCH_OPENELM,          "openelm"          },
    { LLM_ARCH_ARCTIC,           "arctic"           },
    { LLM_ARCH_DEEPSEEK,         "deepseek"         },
    { LLM_ARCH_DEEPSEEK2,        "deepseek2"        },
    { LLM_ARCH_CHATGLM,          "chatglm"          },
    { LLM_ARCH_BITNET,           "bitnet"           },
    { LLM_ARCH_T5,               "t5"               },
    { LLM_ARCH_T5ENCODER,        "t5encoder"        },
    { LLM_ARCH_JAIS,             "jais"             },
    { LLM_ARCH_NEMOTRON,         "nemotron"         },
    { LLM_ARCH_EXAONE,           "exaone"           },
    { LLM_ARCH_RWKV6,            "rwkv6"            },
    { LLM_ARCH_RWKV6QWEN2,       "rwkv6qwen2"       },
    { LLM_ARCH_GRANITE,          "granite"          },
    { LLM_ARCH_GRANITE_MOE,      "granitemoe"       },
    { LLM_ARCH_CHAMELEON,        "chameleon"        },
    { LLM_ARCH_WAVTOKENIZER_DEC, "wavtokenizer-dec" },
    { LLM_ARCH_UNKNOWN,          "(unknown)"        },
};
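
In gguf model files the architecture is stored as a string (the `general.architecture` key), so loading starts with a reverse lookup from that string to the `llm_arch` value; llama.cpp does this with `llm_arch_from_string()` in src/llama-arch.cpp. Below is a minimal, self-contained sketch of that lookup. The tiny demo enum and map are illustrative stand-ins, not llama.cpp's real tables:

// arch_lookup_sketch.cpp -- illustrative stand-in for llama.cpp's name lookup;
// the real table and llm_arch_from_string() live in src/llama-arch.cpp.
#include <cstdio>
#include <map>
#include <string>

enum llm_arch_demo {          // hypothetical subset of llm_arch, for illustration only
    DEMO_ARCH_DEEPSEEK,
    DEMO_ARCH_DEEPSEEK2,
    DEMO_ARCH_UNKNOWN,
};

static const std::map<llm_arch_demo, const char *> DEMO_ARCH_NAMES = {
    { DEMO_ARCH_DEEPSEEK,  "deepseek"  },
    { DEMO_ARCH_DEEPSEEK2, "deepseek2" },
};

// reverse lookup: map the architecture string from the gguf header back to the enum
static llm_arch_demo demo_arch_from_string(const std::string & name) {
    for (const auto & kv : DEMO_ARCH_NAMES) {
        if (name == kv.second) {
            return kv.first;
        }
    }
    return DEMO_ARCH_UNKNOWN;
}

int main() {
    printf("%d\n", demo_arch_from_string("deepseek2")); // -> DEMO_ARCH_DEEPSEEK2 (1)
    printf("%d\n", demo_arch_from_string("no-such"));   // -> DEMO_ARCH_UNKNOWN (2)
    return 0;
}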

2. LLM_ARCH_DEEPSEEK and LLM_ARCH_DEEPSEEK2 (LLM_TENSOR_NAMES)

/home/yongqiang/llm_work/llama_cpp_25_01_05/llama.cpp/src/llama-arch.cpp

  • LLM_ARCH_DEEPSEEK and LLM_ARCH_DEEPSEEK2
(Only the LLM_ARCH_DEEPSEEK and LLM_ARCH_DEEPSEEK2 entries are shown below; the full table in src/llama-arch.cpp has an entry for every architecture.)

static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_NAMES = {
    // ... entries for LLM_ARCH_LLAMA through LLM_ARCH_ARCTIC elided ...
    {
        LLM_ARCH_DEEPSEEK,
        {
            { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
            { LLM_TENSOR_OUTPUT,             "output" },
            { LLM_TENSOR_ROPE_FREQS,         "rope_freqs" },
            { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
            { LLM_TENSOR_ATTN_ROT_EMBD,      "blk.%d.attn_rot_embd" },
            { LLM_TENSOR_FFN_GATE_INP,       "blk.%d.ffn_gate_inp" },
            { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE,           "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN,           "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP,             "blk.%d.ffn_up" },
            { LLM_TENSOR_FFN_GATE_EXPS,      "blk.%d.ffn_gate_exps" },
            { LLM_TENSOR_FFN_DOWN_EXPS,      "blk.%d.ffn_down_exps" },
            { LLM_TENSOR_FFN_UP_EXPS,        "blk.%d.ffn_up_exps" },
            { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" },
            { LLM_TENSOR_FFN_GATE_SHEXP,     "blk.%d.ffn_gate_shexp" },
            { LLM_TENSOR_FFN_DOWN_SHEXP,     "blk.%d.ffn_down_shexp" },
            { LLM_TENSOR_FFN_UP_SHEXP,       "blk.%d.ffn_up_shexp" },
        },
    },
    {
        LLM_ARCH_DEEPSEEK2,
        {
            { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
            { LLM_TENSOR_OUTPUT,             "output" },
            { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q_A_NORM,      "blk.%d.attn_q_a_norm" },
            { LLM_TENSOR_ATTN_KV_A_NORM,     "blk.%d.attn_kv_a_norm" },
            { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_Q_A,           "blk.%d.attn_q_a" },
            { LLM_TENSOR_ATTN_Q_B,           "blk.%d.attn_q_b" },
            { LLM_TENSOR_ATTN_KV_A_MQA,      "blk.%d.attn_kv_a_mqa" },
            { LLM_TENSOR_ATTN_KV_B,          "blk.%d.attn_kv_b" },
            { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE,           "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_UP,             "blk.%d.ffn_up" },
            { LLM_TENSOR_FFN_DOWN,           "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_GATE_INP,       "blk.%d.ffn_gate_inp" },
            { LLM_TENSOR_FFN_GATE_EXPS,      "blk.%d.ffn_gate_exps" },
            { LLM_TENSOR_FFN_DOWN_EXPS,      "blk.%d.ffn_down_exps" },
            { LLM_TENSOR_FFN_UP_EXPS,        "blk.%d.ffn_up_exps" },
            { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" },
            { LLM_TENSOR_FFN_GATE_SHEXP,     "blk.%d.ffn_gate_shexp" },
            { LLM_TENSOR_FFN_DOWN_SHEXP,     "blk.%d.ffn_down_shexp" },
            { LLM_TENSOR_FFN_UP_SHEXP,       "blk.%d.ffn_up_shexp" },
            { LLM_TENSOR_FFN_EXP_PROBS_B,    "blk.%d.exp_probs_b" },
        },
    },
    // ... entries for LLM_ARCH_CHATGLM through LLM_ARCH_UNKNOWN elided ...
};
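
Note that the per-block names in `LLM_TENSOR_NAMES` are printf-style patterns: the `%d` is filled in with the block index, so one table entry covers every layer. A small standalone sketch of how such a pattern expands (the `tn()` helper here is hypothetical; llama.cpp has its own tensor-name machinery built around this table):

// tensor_name_sketch.cpp -- how the "blk.%d...." patterns expand per layer.
// tn() is a hypothetical helper for this sketch, not llama.cpp's API.
#include <cstdio>
#include <string>

static std::string tn(const char * pattern, int bid) {
    char buf[256];
    snprintf(buf, sizeof(buf), pattern, bid);  // substitute the block index
    return buf;
}

int main() {
    // LLM_ARCH_DEEPSEEK2, block 3: the MLA projection tensors of layer 3
    printf("%s\n", tn("blk.%d.attn_q_a",      3).c_str()); // blk.3.attn_q_a
    printf("%s\n", tn("blk.%d.attn_q_b",      3).c_str()); // blk.3.attn_q_b
    printf("%s\n", tn("blk.%d.attn_kv_a_mqa", 3).c_str()); // blk.3.attn_kv_a_mqa
    printf("%s\n", tn("blk.%d.attn_kv_b",     3).c_str()); // blk.3.attn_kv_b
    return 0;
}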

3. struct ggml_cgraph * build_deepseek() and struct ggml_cgraph * build_deepseek2()

/home/yongqiang/llm_work/llama_cpp_25_01_05/llama.cpp/src/llama.cpp

  • struct ggml_cgraph * build_deepseek()
struct ggml_cgraph * build_deepseek() {
    struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);

    // mutable variable, needed during the last layer of the computation to skip unused tokens
    int32_t n_tokens = this->n_tokens;

    const int64_t n_embd_head = hparams.n_embd_head_v;
    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
    GGML_ASSERT(n_embd_head == hparams.n_rot);

    struct ggml_tensor * cur;
    struct ggml_tensor * inpL;

    inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb);

    // inp_pos - contains the positions
    struct ggml_tensor * inp_pos = build_inp_pos();

    // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
    struct ggml_tensor * KQ_mask = build_inp_KQ_mask();

    const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale;

    for (int il = 0; il < n_layer; ++il) {
        struct ggml_tensor * inpSA = inpL;

        // norm
        cur = llm_build_norm(ctx0, inpL, hparams,
                model.layers[il].attn_norm, NULL,
                LLM_NORM_RMS, cb, il);
        cb(cur, "attn_norm", il);

        // self-attention
        {
            // rope freq factors for llama3; may return nullptr for llama2 and other models
            struct ggml_tensor * rope_factors = build_rope_factors(il);

            // compute Q and K and RoPE them
            struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
            cb(Qcur, "Qcur", il);
            if (model.layers[il].bq) {
                Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
                cb(Qcur, "Qcur", il);
            }

            struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
            cb(Kcur, "Kcur", il);
            if (model.layers[il].bk) {
                Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
                cb(Kcur, "Kcur", il);
            }

            struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
            cb(Vcur, "Vcur", il);
            if (model.layers[il].bv) {
                Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
                cb(Vcur, "Vcur", il);
            }

            Qcur = ggml_rope_ext(
                ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, rope_factors,
                n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                ext_factor, attn_factor, beta_fast, beta_slow);
            cb(Qcur, "Qcur", il);

            Kcur = ggml_rope_ext(
                ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, rope_factors,
                n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                ext_factor, attn_factor, beta_fast, beta_slow);
            cb(Kcur, "Kcur", il);

            cur = llm_build_kv(ctx0, lctx, kv_self, gf,
                    model.layers[il].wo, model.layers[il].bo,
                    Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, kq_scale, cb, il);
        }

        if (il == n_layer - 1) {
            // skip computing output for unused tokens
            struct ggml_tensor * inp_out_ids = build_inp_out_ids();
            n_tokens = n_outputs;
            cur   = ggml_get_rows(ctx0,   cur, inp_out_ids);
            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
        }

        struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
        cb(ffn_inp, "ffn_inp", il);

        cur = llm_build_norm(ctx0, ffn_inp, hparams,
                model.layers[il].ffn_norm, NULL,
                LLM_NORM_RMS, cb, il);
        cb(cur, "ffn_norm", il);

        if ((uint32_t) il < hparams.n_layer_dense_lead) {
            cur = llm_build_ffn(ctx0, lctx, cur,
                    model.layers[il].ffn_up,   NULL, NULL,
                    model.layers[il].ffn_gate, NULL, NULL,
                    model.layers[il].ffn_down, NULL, NULL,
                    NULL,
                    LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
            cb(cur, "ffn_out", il);
        } else {
            // MoE branch
            ggml_tensor * moe_out =
                llm_build_moe_ffn(ctx0, lctx, cur,
                    model.layers[il].ffn_gate_inp,
                    model.layers[il].ffn_up_exps,
                    model.layers[il].ffn_gate_exps,
                    model.layers[il].ffn_down_exps,
                    nullptr,
                    n_expert, n_expert_used,
                    LLM_FFN_SILU, false,
                    false, hparams.expert_weights_scale,
                    LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
                    cb, il);
            cb(moe_out, "ffn_moe_out", il);

            // FFN shared expert
            {
                ggml_tensor * ffn_shexp = llm_build_ffn(ctx0, lctx, cur,
                        model.layers[il].ffn_up_shexp,   NULL, NULL,
                        model.layers[il].ffn_gate_shexp, NULL, NULL,
                        model.layers[il].ffn_down_shexp, NULL, NULL,
                        NULL,
                        LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
                cb(ffn_shexp, "ffn_shexp", il);

                cur = ggml_add(ctx0, moe_out, ffn_shexp);
                cb(cur, "ffn_out", il);
            }
        }

        cur = ggml_add(ctx0, cur, ffn_inp);
        cur = lctx.cvec.apply_to(ctx0, cur, il);
        cb(cur, "l_out", il);

        // input for next layer
        inpL = cur;
    }

    cur = inpL;

    cur = llm_build_norm(ctx0, cur, hparams,
            model.output_norm, NULL,
            LLM_NORM_RMS, cb, -1);
    cb(cur, "result_norm", -1);

    // lm_head
    cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
    cb(cur, "result_output", -1);

    ggml_build_forward_expand(gf, cur);

    return gf;
}
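
Two details of `build_deepseek()` are easy to miss: the first `hparams.n_layer_dense_lead` layers use a plain dense FFN while all later layers take the MoE branch plus the shared expert, and `kq_scale` falls back to `1/sqrt(n_embd_head)` only when `f_attention_scale` is 0. A standalone sketch of just that control flow (all numbers below are illustrative, not taken from any particular checkpoint):

// deepseek_layer_split_sketch.cpp -- the dense-lead / MoE split and the
// kq_scale fallback from build_deepseek(); values below are made up.
#include <cmath>
#include <cstdio>

int main() {
    const int   n_layer            = 8;    // illustrative layer count
    const int   n_layer_dense_lead = 1;    // DeepSeek MoE models keep the leading layer(s) dense
    const float f_attention_scale  = 0.0f; // 0.0f means "use the default 1/sqrt(head dim)"
    const int   n_embd_head        = 128;

    const float kq_scale = f_attention_scale == 0.0f
        ? 1.0f / sqrtf((float) n_embd_head)
        : f_attention_scale;
    printf("kq_scale = %f\n", kq_scale); // 1/sqrt(128) ~= 0.088388

    for (int il = 0; il < n_layer; ++il) {
        if (il < n_layer_dense_lead) {
            printf("layer %d: dense FFN\n", il);
        } else {
            printf("layer %d: MoE FFN + shared expert\n", il);
        }
    }
    return 0;
}
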
  • struct ggml_cgraph * build_deepseek2()
    struct ggml_cgraph * build_deepseek2() {struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);// mutable variable, needed during the last layer of the computation to skip unused tokensint32_t n_tokens = this->n_tokens;bool is_lite = (hparams.n_layer == 27);// We have to pre-scale kq_scale and attn_factor to make the YaRN RoPE work correctly.// See https://github.com/ggerganov/llama.cpp/discussions/7416 for detailed explanation.const float mscale = attn_factor * (1.0f + hparams.rope_yarn_log_mul * logf(1.0f / freq_scale));const float kq_scale = 1.0f*mscale*mscale/sqrtf(float(hparams.n_embd_head_k));const float attn_factor_scaled = 1.0f / (1.0f + 0.1f * logf(1.0f / freq_scale));const uint32_t n_embd_head_qk_rope = hparams.n_rot;const uint32_t n_embd_head_qk_nope = hparams.n_embd_head_k - hparams.n_rot;const uint32_t kv_lora_rank = hparams.n_lora_kv;struct ggml_tensor * cur;struct ggml_tensor * inpL;// {n_embd, n_tokens}inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb);// inp_pos - contains the positionsstruct ggml_tensor * inp_pos = build_inp_pos();// KQ_mask (mask for 1 head, it will be broadcasted to all heads)struct ggml_tensor * KQ_mask = build_inp_KQ_mask();for (int il = 0; il < n_layer; ++il) {struct ggml_tensor * inpSA = inpL;// normcur = llm_build_norm(ctx0, inpL, hparams,model.layers[il].attn_norm, NULL,LLM_NORM_RMS, cb, il);cb(cur, "attn_norm", il);// self_attention{struct ggml_tensor * q = NULL;if (!is_lite) {// {n_embd, q_lora_rank} * {n_embd, n_tokens} -> {q_lora_rank, n_tokens}q = ggml_mul_mat(ctx0, model.layers[il].wq_a, cur);cb(q, "q", il);q = llm_build_norm(ctx0, q, hparams,model.layers[il].attn_q_a_norm, NULL,LLM_NORM_RMS, cb, il);cb(q, "q", il);// {q_lora_rank, n_head * hparams.n_embd_head_k} * {q_lora_rank, n_tokens} -> {n_head * hparams.n_embd_head_k, n_tokens}q = ggml_mul_mat(ctx0, model.layers[il].wq_b, q);cb(q, "q", il);} else {q = ggml_mul_mat(ctx0, model.layers[il].wq, cur);cb(q, "q", il);}// split into {n_head * n_embd_head_qk_nope, n_tokens}struct ggml_tensor * q_nope = ggml_view_3d(ctx0, q, n_embd_head_qk_nope, n_head, n_tokens,ggml_row_size(q->type, hparams.n_embd_head_k),ggml_row_size(q->type, hparams.n_embd_head_k * n_head),0);cb(q_nope, "q_nope", il);// and {n_head * n_embd_head_qk_rope, n_tokens}struct ggml_tensor * q_pe = ggml_view_3d(ctx0, q, n_embd_head_qk_rope, n_head, n_tokens,ggml_row_size(q->type, hparams.n_embd_head_k),ggml_row_size(q->type, hparams.n_embd_head_k * n_head),ggml_row_size(q->type, n_embd_head_qk_nope));cb(q_pe, "q_pe", il);// {n_embd, kv_lora_rank + n_embd_head_qk_rope} * {n_embd, n_tokens} -> {kv_lora_rank + n_embd_head_qk_rope, n_tokens}struct ggml_tensor * kv_pe_compresseed = ggml_mul_mat(ctx0, model.layers[il].wkv_a_mqa, cur);cb(kv_pe_compresseed, "kv_pe_compresseed", il);// split into {kv_lora_rank, n_tokens}struct ggml_tensor * kv_compressed = ggml_view_2d(ctx0, kv_pe_compresseed, kv_lora_rank, n_tokens,kv_pe_compresseed->nb[1],0);cb(kv_compressed, "kv_compressed", il);// and {n_embd_head_qk_rope, n_tokens}struct ggml_tensor * k_pe = ggml_view_3d(ctx0, kv_pe_compresseed, n_embd_head_qk_rope, 1, n_tokens,kv_pe_compresseed->nb[1],kv_pe_compresseed->nb[1],ggml_row_size(kv_pe_compresseed->type, kv_lora_rank));cb(k_pe, "k_pe", il);kv_compressed = ggml_cont(ctx0, kv_compressed); // TODO: the CUDA backend does not support non-contiguous normkv_compressed = llm_build_norm(ctx0, kv_compressed, hparams,model.layers[il].attn_kv_a_norm, NULL,LLM_NORM_RMS, cb, il);cb(kv_compressed, 
"kv_compressed", il);// {kv_lora_rank, n_head * (n_embd_head_qk_nope + n_embd_head_v)} * {kv_lora_rank, n_tokens} -> {n_head * (n_embd_head_qk_nope + n_embd_head_v), n_tokens}struct ggml_tensor * kv = ggml_mul_mat(ctx0, model.layers[il].wkv_b, kv_compressed);cb(kv, "kv", il);// split into {n_head * n_embd_head_qk_nope, n_tokens}struct ggml_tensor * k_nope = ggml_view_3d(ctx0, kv, n_embd_head_qk_nope, n_head, n_tokens,ggml_row_size(kv->type, n_embd_head_qk_nope + hparams.n_embd_head_v),ggml_row_size(kv->type, n_head * (n_embd_head_qk_nope + hparams.n_embd_head_v)),0);cb(k_nope, "k_nope", il);// and {n_head * n_embd_head_v, n_tokens}struct ggml_tensor * v_states = ggml_view_3d(ctx0, kv, hparams.n_embd_head_v, n_head, n_tokens,ggml_row_size(kv->type, (n_embd_head_qk_nope + hparams.n_embd_head_v)),ggml_row_size(kv->type, (n_embd_head_qk_nope + hparams.n_embd_head_v)*n_head),ggml_row_size(kv->type, (n_embd_head_qk_nope)));cb(v_states, "v_states", il);v_states = ggml_cont(ctx0, v_states);cb(v_states, "v_states", il);v_states = ggml_view_2d(ctx0, v_states, hparams.n_embd_head_v * n_head, n_tokens,ggml_row_size(kv->type, hparams.n_embd_head_v * n_head),0);cb(v_states, "v_states", il);q_pe = ggml_cont(ctx0, q_pe); // TODO: the CUDA backend used to not support non-cont. RoPE, investigate removing thisq_pe = ggml_rope_ext(ctx0, q_pe, inp_pos, nullptr,n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,ext_factor, attn_factor_scaled, beta_fast, beta_slow);cb(q_pe, "q_pe", il);// shared RoPE keyk_pe = ggml_cont(ctx0, k_pe); // TODO: the CUDA backend used to not support non-cont. RoPE, investigate removing thisk_pe = ggml_rope_ext(ctx0, k_pe, inp_pos, nullptr,n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,ext_factor, attn_factor_scaled, beta_fast, beta_slow);cb(k_pe, "k_pe", il);struct ggml_tensor * q_states = ggml_concat(ctx0, q_nope, q_pe, 0);cb(q_states, "q_states", il);struct ggml_tensor * k_states = ggml_concat(ctx0, k_nope, ggml_repeat(ctx0, k_pe, q_pe), 0);cb(k_states, "k_states", il);cur = llm_build_kv(ctx0, lctx, kv_self, gf,model.layers[il].wo, NULL,k_states, v_states, q_states, KQ_mask, n_tokens, kv_head, n_kv, kq_scale, cb, il);}if (il == n_layer - 1) {// skip computing output for unused tokensstruct ggml_tensor * inp_out_ids = build_inp_out_ids();n_tokens = n_outputs;cur   = ggml_get_rows(ctx0,   cur, inp_out_ids);inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);}struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);cb(ffn_inp, "ffn_inp", il);cur = llm_build_norm(ctx0, ffn_inp, hparams,model.layers[il].ffn_norm, NULL,LLM_NORM_RMS, cb, il);cb(cur, "ffn_norm", il);if ((uint32_t) il < hparams.n_layer_dense_lead) {cur = llm_build_ffn(ctx0, lctx, cur,model.layers[il].ffn_up,   NULL, NULL,model.layers[il].ffn_gate, NULL, NULL,model.layers[il].ffn_down, NULL, NULL,NULL,LLM_FFN_SILU, LLM_FFN_PAR, cb, il);cb(cur, "ffn_out", il);} else {// MoE branchggml_tensor * moe_out =llm_build_moe_ffn(ctx0, lctx, cur,model.layers[il].ffn_gate_inp,model.layers[il].ffn_up_exps,model.layers[il].ffn_gate_exps,model.layers[il].ffn_down_exps,model.layers[il].ffn_exp_probs_b,n_expert, n_expert_used,LLM_FFN_SILU, hparams.expert_weights_norm,true, hparams.expert_weights_scale,(enum llama_expert_gating_func_type) hparams.expert_gating_func,cb, il);cb(moe_out, "ffn_moe_out", il);// FFN shared expert{ggml_tensor * ffn_shexp = llm_build_ffn(ctx0, lctx, cur,model.layers[il].ffn_up_shexp,   NULL, NULL,model.layers[il].ffn_gate_shexp, NULL, NULL,model.layers[il].ffn_down_shexp, NULL, NULL,NULL,LLM_FFN_SILU, LLM_FFN_PAR, 
cb, il);cb(ffn_shexp, "ffn_shexp", il);cur = ggml_add(ctx0, moe_out, ffn_shexp);cb(cur, "ffn_out", il);}}cur = ggml_add(ctx0, cur, ffn_inp);cur = lctx.cvec.apply_to(ctx0, cur, il);cb(cur, "l_out", il);// input for next layerinpL = cur;}cur = inpL;cur = llm_build_norm(ctx0, cur, hparams,model.output_norm, NULL,LLM_NORM_RMS, cb, -1);cb(cur, "result_norm", -1);// lm_headcur = ggml_mul_mat(ctx0, model.output, cur);cb(cur, "result_output", -1);ggml_build_forward_expand(gf, cur);return gf;}
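The two ggml_view_3d calls above split the decompressed kv tensor into k_nope and v_states without copying any data: each view only changes the logical shape, the byte strides (nb1, nb2), and a starting byte offset into the same buffer, and ggml_concat then glues the no-position and RoPE parts back together along dimension 0. The following standalone C++ sketch reproduces that offset arithmetic. The head sizes are illustrative assumptions (not read from a real GGUF file), and ggml_row_size is approximated as element count times sizeof(float), i.e. an F32 tensor; this is a sketch of the bookkeeping, not llama.cpp code.

// Standalone sketch (not llama.cpp code): the byte-offset arithmetic behind
// the two ggml_view_3d calls that split `kv` into `k_nope` and `v_states`.
#include <cstddef>
#include <cstdio>

int main() {
    // illustrative sizes, assumed for this sketch only
    const size_t n_embd_head_qk_nope = 128; // no-position part of each K head
    const size_t n_embd_head_v       = 128; // V part of each head
    const size_t n_head              = 16;
    const size_t elt                 = sizeof(float); // pretend F32 rows

    // stride between consecutive heads within one token (nb1)
    const size_t nb1 = (n_embd_head_qk_nope + n_embd_head_v) * elt;
    // stride between consecutive tokens (nb2)
    const size_t nb2 = n_head * (n_embd_head_qk_nope + n_embd_head_v) * elt;

    // k_nope view: starts at byte offset 0 inside each head-row
    printf("k_nope  : ne=(%zu, %zu, n_tokens) nb1=%zu nb2=%zu offset=0\n",
           n_embd_head_qk_nope, n_head, nb1, nb2);

    // v_states view: starts right after the qk_nope part of each head-row
    printf("v_states: ne=(%zu, %zu, n_tokens) nb1=%zu nb2=%zu offset=%zu\n",
           n_embd_head_v, n_head, nb1, nb2, n_embd_head_qk_nope * elt);

    // the two views tile each head-row exactly:
    // [0, qk_nope) -> k_nope, [qk_nope, qk_nope + v) -> v_states
    return 0;
}

Note also how the MoE branch combines its two paths: the routed expert output and the shared expert output are summed with a plain element-wise add, cur = ggml_add(ctx0, moe_out, ffn_shexp), before the residual connection is applied.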
  • case LLM_ARCH_DEEPSEEK: and case LLM_ARCH_DEEPSEEK2:
    switch (model.arch) {
        case LLM_ARCH_LLAMA:
        case LLM_ARCH_MINICPM:
        case LLM_ARCH_GRANITE:
        case LLM_ARCH_GRANITE_MOE:
            {
                result = llm.build_llama();
            } break;
        case LLM_ARCH_DECI:
            {
                result = llm.build_deci();
            } break;
        case LLM_ARCH_BAICHUAN:
            {
                result = llm.build_baichuan();
            } break;
        case LLM_ARCH_FALCON:
            {
                result = llm.build_falcon();
            } break;
        case LLM_ARCH_GROK:
            {
                result = llm.build_grok();
            } break;
        case LLM_ARCH_STARCODER:
            {
                result = llm.build_starcoder();
            } break;
        case LLM_ARCH_REFACT:
            {
                result = llm.build_refact();
            } break;
        case LLM_ARCH_BERT:
        case LLM_ARCH_JINA_BERT_V2:
        case LLM_ARCH_NOMIC_BERT:
            {
                result = llm.build_bert();
            } break;
        case LLM_ARCH_BLOOM:
            {
                result = llm.build_bloom();
            } break;
        case LLM_ARCH_MPT:
            {
                result = llm.build_mpt();
            } break;
        case LLM_ARCH_STABLELM:
            {
                result = llm.build_stablelm();
            } break;
        case LLM_ARCH_QWEN:
            {
                result = llm.build_qwen();
            } break;
        case LLM_ARCH_QWEN2:
            {
                result = llm.build_qwen2();
            } break;
        case LLM_ARCH_QWEN2VL:
            {
                lctx.n_pos_per_token = 4;
                result = llm.build_qwen2vl();
            } break;
        case LLM_ARCH_QWEN2MOE:
            {
                result = llm.build_qwen2moe();
            } break;
        case LLM_ARCH_PHI2:
            {
                result = llm.build_phi2();
            } break;
        case LLM_ARCH_PHI3:
        case LLM_ARCH_PHIMOE:
            {
                result = llm.build_phi3();
            } break;
        case LLM_ARCH_PLAMO:
            {
                result = llm.build_plamo();
            } break;
        case LLM_ARCH_GPT2:
            {
                result = llm.build_gpt2();
            } break;
        case LLM_ARCH_CODESHELL:
            {
                result = llm.build_codeshell();
            } break;
        case LLM_ARCH_ORION:
            {
                result = llm.build_orion();
            } break;
        case LLM_ARCH_INTERNLM2:
            {
                result = llm.build_internlm2();
            } break;
        case LLM_ARCH_MINICPM3:
            {
                result = llm.build_minicpm3();
            } break;
        case LLM_ARCH_GEMMA:
            {
                result = llm.build_gemma();
            } break;
        case LLM_ARCH_GEMMA2:
            {
                result = llm.build_gemma2();
            } break;
        case LLM_ARCH_STARCODER2:
            {
                result = llm.build_starcoder2();
            } break;
        case LLM_ARCH_MAMBA:
            {
                result = llm.build_mamba();
            } break;
        case LLM_ARCH_XVERSE:
            {
                result = llm.build_xverse();
            } break;
        case LLM_ARCH_COMMAND_R:
            {
                result = llm.build_command_r();
            } break;
        case LLM_ARCH_COHERE2:
            {
                result = llm.build_cohere2();
            } break;
        case LLM_ARCH_DBRX:
            {
                result = llm.build_dbrx();
            } break;
        case LLM_ARCH_OLMO:
            {
                result = llm.build_olmo();
            } break;
        case LLM_ARCH_OLMO2:
            {
                result = llm.build_olmo2();
            } break;
        case LLM_ARCH_OLMOE:
            {
                result = llm.build_olmoe();
            } break;
        case LLM_ARCH_OPENELM:
            {
                result = llm.build_openelm();
            } break;
        case LLM_ARCH_GPTNEOX:
            {
                result = llm.build_gptneox();
            } break;
        case LLM_ARCH_ARCTIC:
            {
                result = llm.build_arctic();
            } break;
        case LLM_ARCH_DEEPSEEK:
            {
                result = llm.build_deepseek();
            } break;
        case LLM_ARCH_DEEPSEEK2:
            {
                result = llm.build_deepseek2();
            } break;
        case LLM_ARCH_CHATGLM:
            {
                result = llm.build_chatglm();
            } break;
        case LLM_ARCH_BITNET:
            {
                result = llm.build_bitnet();
            } break;
        case LLM_ARCH_T5:
            {
                if (lctx.is_encoding) {
                    result = llm.build_t5_enc();
                } else {
                    result = llm.build_t5_dec();
                }
            } break;
        case LLM_ARCH_T5ENCODER:
            {
                result = llm.build_t5_enc();
            } break;
        case LLM_ARCH_JAIS:
            {
                result = llm.build_jais();
            } break;
        case LLM_ARCH_NEMOTRON:
            {
                result = llm.build_nemotron();
            } break;
        case LLM_ARCH_EXAONE:
            {
                result = llm.build_exaone();
            } break;
        case LLM_ARCH_RWKV6:
            {
                result = llm.build_rwkv6();
            } break;
        case LLM_ARCH_RWKV6QWEN2:
            {
                result = llm.build_rwkv6qwen2();
            } break;
        case LLM_ARCH_CHAMELEON:
            {
                result = llm.build_chameleon();
            } break;
        case LLM_ARCH_WAVTOKENIZER_DEC:
            {
                result = llm.build_wavtokenizer_dec();
            } break;
        default:
            GGML_ABORT("fatal error");
    }
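The switch above is llama.cpp's graph-build dispatch: one plain switch over llm_arch that forwards to the per-architecture builder member function, with GGML_ABORT guarding unknown architectures. The minimal sketch below shows the same pattern in isolation; all names in it are hypothetical (it is not llama.cpp code), and GGML_ABORT is stood in by a C++ exception.

// Minimal sketch of the switch-dispatch pattern (hypothetical names):
// one enum value selects one graph-builder method on a context object.
#include <cstdio>
#include <stdexcept>

enum class arch { deepseek, deepseek2, unknown };

struct builder {
    // stand-ins for build_deepseek() / build_deepseek2(), which in llama.cpp
    // return a struct ggml_cgraph *
    const char * build_deepseek()  { return "deepseek graph";  }
    const char * build_deepseek2() { return "deepseek2 graph"; }
};

static const char * build_graph(builder & llm, arch a) {
    const char * result = nullptr;
    switch (a) {
        case arch::deepseek:
            {
                result = llm.build_deepseek();
            } break;
        case arch::deepseek2:
            {
                result = llm.build_deepseek2();
            } break;
        default:
            throw std::runtime_error("fatal error"); // stands in for GGML_ABORT
    }
    return result;
}

int main() {
    builder llm;
    printf("%s\n", build_graph(llm, arch::deepseek2));
    return 0;
}

Keeping the dispatch as a single exhaustive switch (rather than, say, a registry of function pointers) makes the supported-architecture list greppable in one place and lets the compiler warn when a new llm_arch value is added without a builder.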

References

[1] Yongqiang Cheng, https://yongqiang.blog.csdn.net/
[2] huggingface/gguf, https://github.com/huggingface/huggingface.js/tree/main/packages/gguf

&#x1f449;博__主&#x1f448;&#xff1a;米码收割机 &#x1f449;技__能&#x1f448;&#xff1a;C/Python语言 &#x1f449;专__注&#x1f448;&#xff1a;专注主流机器人、人工智能等相关领域的开发、测试技术。 python基于机器学习与数据分析的二手手机特性关联与…...