
Commit 957670a

llamafile : improve moe prompt eval speed on cpu
This change introduces a llamafile_mixmul() API that allows tinyBLAS to speed up "Mixture of Experts" models. On my Threadripper, Mixtral's 8x7b F16 weights now process prompts 2x faster. I'm also seeing a 60 percent improvement with Mixtral 8x22b Q4_0. The same applies to Q8_0, which is also supported by tinyBLAS. MoE models spend the majority of their time inside MUL_MAT_ID rather than MUL_MAT, which is why llamafile_sgemm was not able to help them before. llamafile_mixmul works by decomposing the mixmul operation into sgemm calls.
Parent: 7bd4ffb
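To illustrate the decomposition the commit message describes, here is a minimal, self-contained C sketch (not the actual llamafile_mixmul code): a Mixture-of-Experts matmul routes each input row to an expert, so the MUL_MAT_ID operation can be rewritten as one dense sgemm per expert over the rows routed to it. The names sketch_sgemm and mixmul_sketch, and the simplification of one expert per row, are assumptions for this sketch; the real tinyBLAS kernel is tiled, vectorized, and multi-threaded.

#include <stddef.h>

/* Toy row-major sgemm: C(m x n) = A(m x k) * B(k x n). Illustrative only. */
static void sketch_sgemm(int m, int n, int k,
                         const float *A, const float *B, float *C) {
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < n; ++j) {
            float acc = 0.0f;
            for (int p = 0; p < k; ++p)
                acc += A[(size_t)i * k + p] * B[(size_t)p * n + j];
            C[(size_t)i * n + j] = acc;
        }
    }
}

/* Decompose a MoE matmul into per-expert sgemm calls: each input row is
 * multiplied only by the weight matrix of the expert the router picked for
 * it. A real mixmul batches all rows routed to an expert into one wide
 * sgemm instead of issuing one call per row. */
void mixmul_sketch(const float *weights,  /* [n_expert][n_out][k]            */
                   const float *input,    /* [n_rows][k]                     */
                   const int   *ids,      /* [n_rows] chosen expert per row  */
                   float       *output,   /* [n_rows][n_out]                 */
                   int n_expert, int n_rows, int n_out, int k) {
    for (int e = 0; e < n_expert; ++e)
        for (int r = 0; r < n_rows; ++r)
            if (ids[r] == e)
                sketch_sgemm(n_out, 1, k,
                             weights + (size_t)e * n_out * k,
                             input   + (size_t)r * k,
                             output  + (size_t)r * n_out);
}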

File tree: 4 files changed (+715, -210 lines)


common/common.cpp (3 additions, 3 deletions)

@@ -74,7 +74,7 @@
 using json = nlohmann::ordered_json;
 
 int32_t get_num_physical_cores() {
-#ifdef __linux__
+#if defined(__linux__) || defined(__COSMOPOLITAN__)
     // enumerate the set of thread siblings, num entries is num cores
     std::unordered_set<std::string> siblings;
     for (uint32_t cpu=0; cpu < UINT32_MAX; ++cpu) {
@@ -109,7 +109,7 @@ int32_t get_num_physical_cores() {
     return n_threads > 0 ? (n_threads <= 4 ? n_threads : n_threads / 2) : 4;
 }
 
-#if defined(__x86_64__) && defined(__linux__) && !defined(__ANDROID__)
+#if defined(__x86_64__) && (defined(__linux__) || defined(__COSMOPOLITAN__)) && !defined(__ANDROID__)
 #include <pthread.h>
 
 static void cpuid(unsigned leaf, unsigned subleaf,
@@ -163,7 +163,7 @@ static int count_math_cpus(int cpu_count) {
  * Returns number of CPUs on system that are useful for math.
  */
 int get_math_cpu_count() {
-#if defined(__x86_64__) && defined(__linux__) && !defined(__ANDROID__)
+#if defined(__x86_64__) && (defined(__linux__) || defined(__COSMOPOLITAN__)) && !defined(__ANDROID__)
     int cpu_count = sysconf(_SC_NPROCESSORS_ONLN);
     if (cpu_count < 1) {
         return get_num_physical_cores();

ggml.c (7 additions, 1 deletion)

@@ -12068,11 +12068,14 @@ static void ggml_compute_forward_mul_mat_id(
     const struct ggml_tensor * src1 = dst->src[1];
     const struct ggml_tensor * ids = dst->src[2];
 
-    GGML_TENSOR_BINARY_OP_LOCALS
+    if (llamafile_mixmul(params, src0, src1, ids, dst))
+        return;
 
     const int ith = params->ith;
     const int nth = params->nth;
 
+    GGML_TENSOR_BINARY_OP_LOCALS
+
     const enum ggml_type type = src0->type;
 
     const bool src1_cont = ggml_is_contiguous(src1);
@@ -19659,6 +19662,7 @@ struct ggml_cplan ggml_graph_plan(const struct ggml_cgraph * cgraph, int n_threa
                     cur = 0;
                     const struct ggml_tensor * src0 = node->src[0];
                     const struct ggml_tensor * src1 = node->src[1];
+                    const struct ggml_tensor * src2 = node->src[2];
                     const enum ggml_type vec_dot_type = type_traits[src0->type].vec_dot_type;
                     if (src1->type != vec_dot_type) {
                         cur += ggml_row_size(vec_dot_type, ggml_nelements(src1));
@@ -19667,6 +19671,8 @@ struct ggml_cplan ggml_graph_plan(const struct ggml_cgraph * cgraph, int n_threa
                     cur += GGML_PAD(cur, sizeof(int64_t)); // align
                     cur += n_as * sizeof(int64_t); // matrix_row_counts
                     cur += n_as * src1->ne[2] * sizeof(int64_t); // matrix_rows
+                    size_t cur2 = llamafile_mixmul_needs(src0, src1, src2);
+                    cur = cur > cur2 ? cur : cur2;
                 } break;
             case GGML_OP_OUT_PROD:
                 {
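
The two ggml.c hunks wire the new kernel in as an opportunistic fast path: ggml_compute_forward_mul_mat_id() first offers the op to llamafile_mixmul() and returns early if it was handled, while ggml_graph_plan() sizes the work buffer for whichever path, generic or tinyBLAS, needs more scratch, via llamafile_mixmul_needs(). Below is a minimal C sketch of that try-then-fall-back pattern in isolation; the fast_kernel_* and generic_* names are placeholders for this sketch, not part of the real API.

#include <stdbool.h>
#include <stddef.h>

/* Placeholders standing in for llamafile_mixmul()/llamafile_mixmul_needs(). */
static bool   fast_kernel_try(const void *op)     { (void)op; return false; }
static size_t fast_kernel_scratch(const void *op) { (void)op; return 0; }

/* Placeholders for the existing generic MUL_MAT_ID path. */
static size_t generic_scratch(const void *op) { (void)op; return 4096; }
static void   generic_compute(const void *op) { (void)op; /* reference path */ }

/* Planner: reserve enough scratch for whichever path needs more. */
size_t plan_scratch(const void *op) {
    size_t cur  = generic_scratch(op);
    size_t cur2 = fast_kernel_scratch(op);
    return cur > cur2 ? cur : cur2;
}

/* Compute: let the fast kernel claim the op, otherwise fall back. */
void compute_mul_mat_id(const void *op) {
    if (fast_kernel_try(op))
        return;
    generic_compute(op);
}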
