@@ -1437,17 +1437,17 @@ static void quantize_row_q8_0(const float * restrict x, void * restrict vy, int
 
 // reference implementation for deterministic creation of model files
 static void quantize_row_q8_0c_reference(const float * restrict x, void * restrict y, int k) {
-    assert(k % QK8_0 == 0);
-    const int nb = k / QK8_0;
+    assert(k % QK8_0C == 0);
+    const int nb = k / QK8_0C;
 
     uint8_t * restrict qs = y;
     float * restrict ds = (float *) ((uint8_t *) y + QK8_0C*nb);
 
     for (int i = 0; i < nb; i++) {
         float amax = 0.0f; // absolute max
 
-        for (int l = 0; l < QK8_0; l++) {
-            const float v = x[i*QK8_0 + l];
+        for (int l = 0; l < QK8_0C; l++) {
+            const float v = x[i*QK8_0C + l];
             amax = MAX(amax, fabsf(v));
         }
 
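For orientation, the pointer arithmetic above fixes the q8_0c row layout: all int8 quants for a row are stored contiguously, followed by all float scales. A minimal sketch of the implied size math, assuming QK8_0C is 32 (this helper is illustrative, not part of the change):

#include <stddef.h>
#include <stdint.h>

#define QK8_0C 32  // assumed block size, matching the two 16-float halves used below

// Illustrative only: bytes needed for one row of k values in q8_0c.
static size_t q8_0c_row_size(int k) {
    const int nb = k / QK8_0C;          // number of blocks
    return (size_t)nb*QK8_0C            // quantized bytes, contiguous
         + (size_t)nb*sizeof(float);    // per-block scales appended after
}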
@@ -1456,17 +1456,46 @@ static void quantize_row_q8_0c_reference(const float * restrict x, void * restri
 
         ds[i] = d;
 
-        for (int l = 0; l < QK8_0; ++l) {
-            const float v = x[i*QK8_0 + l]*id;
-            qs[i*QK8_0 + l] = roundf(v);
+        for (int l = 0; l < QK8_0C; ++l) {
+            const float v = x[i*QK8_0C + l]*id;
+            qs[i*QK8_0C + l] = roundf(v);
         }
     }
 }
 
 static void quantize_row_q8_0c(const float * restrict x, void * restrict vy, int k) {
-    assert(k % QK8_0 == 0);
+    assert(k % QK8_0C == 0);
+    const int nb = k / QK8_0C;
+
+    int8_t * restrict qs = vy;
+    float * restrict ds = (float *) ((uint8_t *) vy + nb*QK8_0C);
+
+#if __AVX512F__
+    for (int i = 0; i < nb; i++) {
+        const __m512 x0 = _mm512_loadu_ps(x + i*QK8_0C);
+        const __m512 x1 = _mm512_loadu_ps(x + i*QK8_0C + QK8_0C/2);
+
+        // Find absolute max
+        const __m512 x0abs = _mm512_abs_ps(x0);
+        const __m512 x1abs = _mm512_abs_ps(x1);
+        const float amax = _mm512_reduce_max_ps(_mm512_max_ps(x0abs, x1abs));
+
+        const float d = amax / ((1 << 7) - 1);
+        const float id = d ? 1.0f/d : 0.0f;
+
+        ds[i] = d;
 
+        const __m512 mul = _mm512_set1_ps(id);
+        const __m512i x0q = _mm512_cvt_roundps_epi32(_mm512_mul_ps(x0, mul), (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
+        const __m512i x1q = _mm512_cvt_roundps_epi32(_mm512_mul_ps(x1, mul), (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
+
+        _mm512_mask_cvtepi32_storeu_epi8(qs + i*QK8_0C, 0xffff, x0q);
+        _mm512_mask_cvtepi32_storeu_epi8(qs + i*QK8_0C + QK8_0C/2, 0xffff, x1q);
+    }
+#else
+    // scalar
     quantize_row_q8_0c_reference(x, vy, k);
+#endif
 }
 
 static void dequantize_row_q4_0(const void * restrict vx, float * restrict y, int k) {
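One behavioral subtlety in the vectorized path above: _mm512_cvt_roundps_epi32 with _MM_FROUND_TO_NEAREST_INT rounds halfway cases to even, while the reference implementation's roundf rounds halfway cases away from zero, so the two paths can differ by one on exact .5 products. A scalar model of what the AVX-512 path computes per block (an illustrative sketch, again assuming QK8_0C == 32):

#include <math.h>
#include <stdint.h>

// Illustrative model of one q8_0c block as quantized by the AVX-512 path.
// nearbyintf() under the default rounding mode is ties-to-even, matching
// _MM_FROUND_TO_NEAREST_INT; the reference uses roundf() (ties away from zero).
static void q8_0c_block_model(const float x[32], int8_t qs[32], float *d_out) {
    float amax = 0.0f;                  // absolute max over the block
    for (int l = 0; l < 32; l++) {
        amax = fmaxf(amax, fabsf(x[l]));
    }
    const float d  = amax / 127.0f;     // (1 << 7) - 1
    const float id = d ? 1.0f/d : 0.0f;
    *d_out = d;
    for (int l = 0; l < 32; l++) {
        qs[l] = (int8_t) nearbyintf(x[l]*id);
    }
}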
@@ -2371,6 +2400,73 @@ inline static void ggml_vec_dot_f32(const int n, float * restrict s, const float
     *s = sumf;
 }
 
+#if __AVX512F__ && QK4_0 == 32
+
+// Dot product of four blocks of q4_0c with four blocks of q8_0c
+static inline __m512 dot_q4_0c_fourblocks_avx512(
+    __m512 acc,
+    const uint8_t * restrict xqs,
+    const float * restrict xds,
+    const int8_t * restrict yqs,
+    const float * restrict yds
+) {
+    // load quantized bytes
+    // TODO: change back to aligned loads
+    const __m512i xqs0123 = _mm512_loadu_epi64(xqs);
+    const __m512i low_nibble_mask = _mm512_set1_epi8(0xf);
+    const __m512i xqs01 = _mm512_and_si512(low_nibble_mask, xqs0123);
+    // TODO: try srlv/i?
+    const __m512i xqs23 = _mm512_and_si512(low_nibble_mask, _mm512_srli_epi32(xqs0123, 4));
+    const __m512i yqs01 = _mm512_loadu_epi64(yqs);
+    const __m512i yqs23 = _mm512_loadu_epi64(yqs + 2*QK8_0C);
+
+    // load scales
+    const __m512i scale_mask0 = _mm512_set_epi32(1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0);
+    const __m512i scale_mask1 = _mm512_set_epi32(3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2);
+    const __m128 xyds = _mm_mul_ps(_mm_load_ps(xds), _mm_load_ps(yds));
+    const __m512 xyds0123 = _mm512_broadcast_f32x4(xyds);
+    const __m512 xyds01 = _mm512_permutevar_ps(xyds0123, scale_mask0);
+    const __m512 xyds23 = _mm512_permutevar_ps(xyds0123, scale_mask1);
+
+    // take dot product of x and y bytes
+    const __m512i plus_8 = _mm512_set1_epi8(8);
+#ifdef __AVX512VNNI__
+    // We have VPDPBUSDS in AVX512-VNNI, which does exactly what we want, but with a catch:
+    // the *left* operand is supposed to be unsigned, while Q4_0 quantization subtracts 8
+    // from each nibble, so they can be negative. So, instead of `(xqs01 - 8) * yqs01`,
+    // we compute `xqs01 * yqs01 - 8 * yqs01`.
+    const __m512i zero = _mm512_setzero_epi32();
+    const __m512i yqs01_mul8 = _mm512_dpbusds_epi32(zero, plus_8, yqs01);
+    const __m512i yqs23_mul8 = _mm512_dpbusds_epi32(zero, plus_8, yqs23);
+    const __m512i xy01 = _mm512_dpbusds_epi32(zero, xqs01, yqs01);
+    const __m512i xy23 = _mm512_dpbusds_epi32(zero, xqs23, yqs23);
+    const __m512i res0_int = _mm512_sub_epi32(xy01, yqs01_mul8);
+    const __m512i res1_int = _mm512_sub_epi32(xy23, yqs23_mul8);
+#else
+    // As a fallback, we have VPMADDUBSW in AVX512-BW, which uses 16-bit products instead of 32-bit ones.
+    // It has the same catch as VPDPBUSDS: the left operand should be unsigned.
+    // This is essentially the AVX-512 version of the AVX-2 trick used by GH user Const-me
+    // ref: https://gist.github.com/Const-me/4d30e1fc767ab314596e16e90f53b6f4#file-matmultest-cpp-L119
+    const __m512i one = _mm512_set1_epi16(1);
+    const __m512i prod_0 = _mm512_maddubs_epi16(xqs01, yqs01);
+    const __m512i prod_1 = _mm512_maddubs_epi16(plus_8, yqs01);
+    const __m512i prod_2 = _mm512_maddubs_epi16(xqs23, yqs23);
+    const __m512i prod_3 = _mm512_maddubs_epi16(plus_8, yqs23);
+    const __m512i diff0 = _mm512_sub_epi16(prod_0, prod_1);
+    const __m512i diff1 = _mm512_sub_epi16(prod_2, prod_3);
+    const __m512i res0_int = _mm512_madd_epi16(diff0, one);
+    const __m512i res1_int = _mm512_madd_epi16(diff1, one);
+#endif
+
+    // Finally, we multiply the permuted scales and the 32-bit dot products, then accumulate.
+    const __m512 res0_float = _mm512_cvtepi32_ps(res0_int);
+    const __m512 res1_float = _mm512_cvtepi32_ps(res1_int);
+
+    return _mm512_fmadd_ps(xyds23, res1_float,
+                           _mm512_fmadd_ps(xyds01, res0_float, acc));
+}
+#endif
+
 inline static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t * restrict x, ggml_fp16_t * restrict y) {
     ggml_float sumf = 0.0;
 
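The unsigned-operand workaround in the new helper deserves a second look, since it appears in both the VNNI branch and the maddubs fallback: the left input holds raw nibbles in [0, 15], the Q4_0 value they encode is nibble - 8, and the hardware multiply treats the left operand as unsigned. The kernel therefore computes x*y - 8*y rather than (x - 8)*y. A one-line scalar sketch of the identity being exploited (illustrative only):

// Illustrative: why the kernel computes x*y - 8*y instead of (x - 8)*y.
// x_nibble is the raw unsigned nibble in [0, 15]; y is a signed q8 value.
static int q4_q8_dot_term(unsigned x_nibble, int y) {
    return (int)x_nibble*y - 8*y;   // equals (x_nibble - 8)*y by distributivity
}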
@@ -2617,6 +2713,15 @@ static void ggml_vec_dot_q4_0c_q8_0c(const int n, float * restrict s, const void
 
     float sumf = 0.0;
 
+#if __AVX512F__
+    // Initialize accumulator with zeros
+    __m512 acc = _mm512_setzero_ps();
+    for (int i = 0; i < nb; i += 4) {
+        acc = dot_q4_0c_fourblocks_avx512(acc, xqs + i*QK4_0/2, xds + i, yqs + i*QK8_0, yds + i);
+    }
+    // Horizontal sum of all lanes of the accumulator
+    sumf = _mm512_reduce_add_ps(acc);
+#else
     // scalar
     for (int i = 0; i < nb/2; i++) {
         const int dst0 = i + i/2*2; // 0, 1, 4, 5, 8, 9, ...
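The dst0 series above encodes the q4_0c interleaving the AVX-512 helper exploits: within each group of four blocks, the low nibbles of 64 consecutive bytes appear to hold the first block pair and the high nibbles the second, so scalar iteration i serves block dst0 and a companion block two positions later. dst1's definition falls outside the shown context, so treat the mapping below as an inference from the index math, not as quoted source:

// Illustrative: block indices served by scalar iteration i.
// i = 0, 1, 2, 3, ...  ->  dst0 = 0, 1, 4, 5, ...  dst1 = 2, 3, 6, 7, ...
static void q4_0c_block_pair(int i, int *dst0, int *dst1) {
    *dst0 = i + i/2*2;   // low-nibble block of the byte range
    *dst1 = *dst0 + 2;   // high-nibble block (inferred from the vector path)
}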
@@ -2627,23 +2732,25 @@ static void ggml_vec_dot_q4_0c_q8_0c(const int n, float * restrict s, const void
         const float dy0 = yds[dst0];
         const float dy1 = yds[dst1];
 
-        int sumi0 = 0;
-        int sumi1 = 0;
+        // NOTE: having these as plain int triggers a bug with AVX512 on GCC 12.2
+        int64_t sumi0 = 0;
+        int64_t sumi1 = 0;
 
         for (int l = 0; l < QK4_0; l++) {
-            const uint8_t v0 = xqs[i*QK4_0 + l];
+            const uint8_t v0 = xqs[i*QK4_0 + l];
 
-            const int i0 = (int8_t) (v0 & 0xf) - 8;
-            const int i1 = (int8_t) (v0 >> 4) - 8;
+            const int i0 = (int) (v0 & 0xf) - 8;
+            const int i1 = (int) (v0 >> 4) - 8;
 
-            const int i2 = yqs[dst0*QK4_0 + l];
-            const int i3 = yqs[dst1*QK4_0 + l];
+            const int i2 = yqs[dst0*QK4_0 + l];
+            const int i3 = yqs[dst1*QK4_0 + l];
 
-            sumi0 += i0*i2;
-            sumi1 += i1*i3;
+            sumi0 += i0*i2;
+            sumi1 += i1*i3;
         }
         sumf += dx0*dy0*sumi0 + dx1*dy1*sumi1;
     }
+#endif
 
     *s = sumf;
 }
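A final note on the AVX-512 dot product wired in above: the loop advances four blocks per iteration, so it implicitly assumes the block count nb is a multiple of four (rows in multiples of 128 values); shorter tails would need the scalar path or masked loads. A guard making that assumption explicit might look like this (hypothetical helper, not in the diff):

#include <assert.h>

// Hypothetical precondition check for the stride-4 AVX-512 path.
static void q4_0c_dot_precondition(int n) {
    assert(n % QK4_0 == 0);          // whole blocks only
    assert((n / QK4_0) % 4 == 0);    // four blocks consumed per iteration
}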