#include <string.h>

#include "cache.h"
+ #include "mpool.h"

#define MIN(a, b) ((a < b) ? a : b)
#define GOLDEN_RATIO_32 0x61C88647
#define HASH(val) \
    (((val) * (GOLDEN_RATIO_32)) >> (32 - (cache_size_bits))) & (cache_size - 1)
+ /* THRESHOLD is set to identify hot spots. Once the frequency of use for a block
+  * exceeds the THRESHOLD, the JIT compiler flow is triggered. */
+ #define THRESHOLD 1000

static uint32_t cache_size, cache_size_bits;
+ static struct mpool *cache_mp;

+ #if RV32_HAS(ARC)
/*
 * Adaptive Replacement Cache (ARC) improves the fundamental LRU strategy
 * by dividing the cache into two lists, T1 and T2. list T1 is for LRU
@@ -37,7 +43,7 @@ typedef enum {
    LFU_ghost_list,
    N_CACHE_LIST_TYPES
} cache_list_t;
-
+ #endif
struct list_head {
    struct list_head *prev, *next;
};
@@ -50,6 +56,7 @@ struct hlist_node {
    struct hlist_node *next, **pprev;
};

+ #if RV32_HAS(ARC)
/*
 * list maintains four cache lists T1, T2, B1, and B2.
 * ht_list maintains hashtable and improves the performance of cache searching.
@@ -61,17 +68,31 @@ typedef struct {
    struct list_head list;
    struct hlist_node ht_list;
} arc_entry_t;
+ #else /* !RV32_HAS(ARC) */
+ typedef struct {
+     void *value;
+     uint32_t key;
+     uint32_t frequency;
+     struct list_head list;
+     struct hlist_node ht_list;
+ } lfu_entry_t;
+ #endif

typedef struct {
    struct hlist_head *ht_list_head;
} hashtable_t;

typedef struct cache {
+ #if RV32_HAS(ARC)
    struct list_head *lists[N_CACHE_LIST_TYPES];
    uint32_t list_size[N_CACHE_LIST_TYPES];
+     uint32_t lru_capacity;
+ #else /* !RV32_HAS(ARC) */
+     struct list_head *lists[THRESHOLD];
+     uint32_t list_size;
+ #endif
    hashtable_t *map;
    uint32_t capacity;
-     uint32_t lru_capacity;
} cache_t;

static inline void INIT_LIST_HEAD(struct list_head *head)
@@ -80,6 +101,13 @@ static inline void INIT_LIST_HEAD(struct list_head *head)
    head->prev = head;
}

+ #if !RV32_HAS(ARC)
+ static inline int list_empty(const struct list_head *head)
+ {
+     return (head->next == head);
+ }
+ #endif
+
static inline void list_add(struct list_head *node, struct list_head *head)
{
    struct list_head *next = head->next;
@@ -107,6 +135,9 @@ static inline void list_del_init(struct list_head *node)

#define list_entry(node, type, member) container_of(node, type, member)

+ #define list_first_entry(head, type, member) \
+     list_entry((head)->next, type, member)
+
#define list_last_entry(head, type, member) \
    list_entry((head)->prev, type, member)

@@ -194,14 +225,15 @@ static inline void hlist_del_init(struct hlist_node *n)
         pos = hlist_entry_safe((pos)->member.next, type, member))
#endif

+
cache_t *cache_create(int size_bits)
{
    cache_t *cache = malloc(sizeof(cache_t));
    if (!cache)
        return NULL;
    cache_size_bits = size_bits;
    cache_size = 1 << size_bits;
-
+ #if RV32_HAS(ARC)
    for (int i = 0; i < N_CACHE_LIST_TYPES; i++) {
        cache->lists[i] = malloc(sizeof(struct list_head));
        INIT_LIST_HEAD(cache->lists[i]);
@@ -224,12 +256,41 @@ cache_t *cache_create(int size_bits)
    for (uint32_t i = 0; i < cache_size; i++) {
        INIT_HLIST_HEAD(&cache->map->ht_list_head[i]);
    }
+     cache->lru_capacity = cache_size / 2;
+     cache_mp =
+         mpool_create(cache_size * 2 * sizeof(arc_entry_t), sizeof(arc_entry_t));
+ #else /* !RV32_HAS(ARC) */
+     for (int i = 0; i < THRESHOLD; i++) {
+         cache->lists[i] = malloc(sizeof(struct list_head));
+         INIT_LIST_HEAD(cache->lists[i]);
+     }

+     cache->map = malloc(sizeof(hashtable_t));
+     if (!cache->map) {
+         free(cache->lists);
+         free(cache);
+         return NULL;
+     }
+     cache->map->ht_list_head = malloc(cache_size * sizeof(struct hlist_head));
+     if (!cache->map->ht_list_head) {
+         free(cache->map);
+         free(cache->lists);
+         free(cache);
+         return NULL;
+     }
+     for (uint32_t i = 0; i < cache_size; i++) {
+         INIT_HLIST_HEAD(&cache->map->ht_list_head[i]);
+     }
+     cache->list_size = 0;
+     cache_mp =
+         mpool_create(cache_size * sizeof(lfu_entry_t), sizeof(lfu_entry_t));
+ #endif
    cache->capacity = cache_size;
-     cache->lru_capacity = cache_size / 2;
    return cache;
}

+
+ #if RV32_HAS(ARC)
/* Rules of ARC
 * 1. size of LRU_list + size of LFU_list <= c
 * 2. size of LRU_list + size of LRU_ghost_list <= c
@@ -273,12 +334,14 @@ static inline void move_to_mru(cache_t *cache,
    list_del_init(&entry->list);
    list_add(&entry->list, cache->lists[type]);
}
+ #endif

void *cache_get(cache_t *cache, uint32_t key)
{
    if (!cache->capacity || hlist_empty(&cache->map->ht_list_head[HASH(key)]))
        return NULL;

+ #if RV32_HAS(ARC)
    arc_entry_t *entry = NULL;
#ifdef __HAVE_TYPEOF
    hlist_for_each_entry(entry, &cache->map->ht_list_head[HASH(key)], ht_list)
@@ -323,13 +386,38 @@ void *cache_get(cache_t *cache, uint32_t key)
    }

    CACHE_ASSERT(cache);
+ #else /* !RV32_HAS(ARC) */
+     lfu_entry_t *entry = NULL;
+ #ifdef __HAVE_TYPEOF
+     hlist_for_each_entry(entry, &cache->map->ht_list_head[HASH(key)], ht_list)
+ #else
+     hlist_for_each_entry(entry, &cache->map->ht_list_head[HASH(key)], ht_list,
+                          lfu_entry_t)
+ #endif
+     {
+         if (entry->key == key)
+             break;
+     }
+     if (!entry || entry->key != key)
+         return NULL;
+
+     /* Once the frequency of use for a specific block exceeds the predetermined
+      * THRESHOLD, we dispatch the block to the code generator for the purpose of
+      * generating C code. Subsequently, the generated C code is compiled into
+      * machine code by the target compiler. */
+     if (entry->frequency < THRESHOLD) {
+         list_del_init(&entry->list);
+         list_add(&entry->list, cache->lists[entry->frequency++]);
+     }
+ #endif
    /* return NULL if cache miss */
    return entry->value;
}
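[Reading aid, not part of the commit] In the LFU path above, cache_put installs a new entry on lists[0] and bumps frequency to 1, and each cache_get hit moves the entry one bucket up while incrementing frequency, saturating once frequency reaches THRESHOLD. So an entry hit n times (n < THRESHOLD) sits on lists[n] with frequency == n + 1. A tiny illustrative trace, assuming a freshly created cache (the key and payload are hypothetical):

    int blk;                          /* placeholder payload */
    cache_t *c = cache_create(10);    /* 1 << 10 hash buckets */
    cache_put(c, 0x100, &blk);        /* frequency = 1, entry on lists[0] */
    cache_get(c, 0x100);              /* frequency = 2, entry on lists[1] */
    cache_get(c, 0x100);              /* frequency = 3, entry on lists[2] */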

void *cache_put(cache_t *cache, uint32_t key, void *value)
{
    void *delete_value = NULL;
+ #if RV32_HAS(ARC)
    assert(cache->list_size[LRU_list] + cache->list_size[LRU_ghost_list] <=
           cache->capacity);
    /* Before adding new element to cache, we should check the status
@@ -343,7 +431,7 @@ void *cache_put(cache_t *cache, uint32_t key, void *value)
            list_del_init(&delete_target->list);
            hlist_del_init(&delete_target->ht_list);
            delete_value = delete_target->value;
-             free(delete_target);
+             mpool_free(cache_mp, delete_target);
            cache->list_size[LRU_ghost_list]--;
            if (cache->list_size[LRU_list] &&
                cache->list_size[LRU_list] >= cache->lru_capacity)
@@ -357,7 +445,7 @@ void *cache_put(cache_t *cache, uint32_t key, void *value)
            list_del_init(&delete_target->list);
            hlist_del_init(&delete_target->ht_list);
            delete_value = delete_target->value;
-             free(delete_target);
+             mpool_free(cache_mp, delete_target);
            cache->list_size[LRU_list]--;
        }
    } else {
@@ -372,12 +460,12 @@ void *cache_put(cache_t *cache, uint32_t key, void *value)
            list_del_init(&delete_target->list);
            hlist_del_init(&delete_target->ht_list);
            delete_value = delete_target->value;
-             free(delete_target);
+             mpool_free(cache_mp, delete_target);
            cache->list_size[LFU_ghost_list]--;
        }
        REPLACE_LIST(>, >=)
    }
-     arc_entry_t *new_entry = malloc(sizeof(arc_entry_t));
+     arc_entry_t *new_entry = mpool_alloc(cache_mp);
    new_entry->key = key;
    new_entry->value = value;
    /* check if all cache become LFU */
@@ -393,21 +481,61 @@ void *cache_put(cache_t *cache, uint32_t key, void *value)
    hlist_add_head(&new_entry->ht_list, &cache->map->ht_list_head[HASH(key)]);

    CACHE_ASSERT(cache);
+ #else /* !RV32_HAS(ARC) */
+     assert(cache->list_size <= cache->capacity);
+     /* check whether the cache is full before adding a new entry */
+     if (cache->list_size == cache->capacity) {
+         for (int i = 0; i < THRESHOLD; i++) {
+             if (list_empty(cache->lists[i]))
+                 continue;
+             lfu_entry_t *delete_target =
+                 list_last_entry(cache->lists[i], lfu_entry_t, list);
+             list_del_init(&delete_target->list);
+             hlist_del_init(&delete_target->ht_list);
+             delete_value = delete_target->value;
+             cache->list_size--;
+             mpool_free(cache_mp, delete_target);
+             break;
+         }
+     }
+     lfu_entry_t *new_entry = mpool_alloc(cache_mp);
+     new_entry->key = key;
+     new_entry->value = value;
+     new_entry->frequency = 0;
+     list_add(&new_entry->list, cache->lists[new_entry->frequency++]);
+     cache->list_size++;
+     hlist_add_head(&new_entry->ht_list, &cache->map->ht_list_head[HASH(key)]);
+     assert(cache->list_size <= cache->capacity);
+ #endif
    return delete_value;
}
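[Reading aid, not part of the commit] The non-ARC eviction above scans lists[0] upward and removes the tail (list_last_entry) of the first non-empty bucket, so the least-frequently-used entry is dropped first, and among entries of equal frequency the one promoted into that bucket longest ago goes first. The same scan, written as a standalone helper sketch under the assumption that the lfu_entry_t and list definitions above are in scope (the helper name is hypothetical):

    /* Return the coldest entry, i.e. the LRU entry of the lowest non-empty
     * frequency bucket, or NULL if every bucket is empty. */
    static lfu_entry_t *find_eviction_candidate(cache_t *cache)
    {
        for (int i = 0; i < THRESHOLD; i++) {
            if (list_empty(cache->lists[i]))
                continue;
            return list_last_entry(cache->lists[i], lfu_entry_t, list);
        }
        return NULL;
    }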

void cache_free(cache_t *cache, void (*callback)(void *))
{
+ #if RV32_HAS(ARC)
    for (int i = 0; i < N_CACHE_LIST_TYPES; i++) {
        arc_entry_t *entry, *safe;
#ifdef __HAVE_TYPEOF
        list_for_each_entry_safe(entry, safe, cache->lists[i], list)
#else
        list_for_each_entry_safe(entry, safe, cache->lists[i], list,
                                 arc_entry_t)
+ #endif
+ #else /* !RV32_HAS(ARC) */
+     for (int i = 0; i < THRESHOLD; i++) {
+         if (list_empty(cache->lists[i]))
+             continue;
+         lfu_entry_t *entry, *safe;
+ #ifdef __HAVE_TYPEOF
+         list_for_each_entry_safe(entry, safe, cache->lists[i], list)
+ #else
+         list_for_each_entry_safe(entry, safe, cache->lists[i], list,
+                                  lfu_entry_t)
+ #endif
#endif
            callback(entry->value);
    }
+     mpool_destory(cache_mp);
    free(cache->map->ht_list_head);
    free(cache->map);
    free(cache);
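[Appendix, not part of the commit] A minimal usage sketch of the cache API touched by this diff, built with RV32_HAS(ARC) disabled so the new LFU path is exercised. It assumes cache.h declares cache_create, cache_put, cache_get, and cache_free with the signatures shown above; the key value and the no-op destructor are illustrative only:

    #include <stdio.h>
    #include "cache.h"

    static void drop(void *value)
    {
        (void) value;   /* the stored value below is not heap-allocated */
    }

    int main(void)
    {
        cache_t *c = cache_create(10);       /* capacity = 1 << 10 entries */
        int block = 42;

        cache_put(c, 0xdeadbeef, &block);    /* insert: frequency = 1, lists[0] */
        int *hit = cache_get(c, 0xdeadbeef); /* hit: promoted to lists[1] */
        printf("hit: %d\n", hit ? *hit : -1);

        cache_free(c, drop);                 /* drop() is called on each stored value */
        return 0;
    }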