#include <string.h>

#include "cache.h"
+ #include "mpool.h"

#define MIN(a, b) ((a < b) ? a : b)
#define GOLDEN_RATIO_32 0x61C88647
#define HASH(val) \
    (((val) * (GOLDEN_RATIO_32)) >> (32 - (cache_size_bits))) & (cache_size - 1)
+ /* If the frequency of a block exceeds THRESHOLD, it will be translated into
+  * machine code */
+ #define THRESHOLD 1000

static uint32_t cache_size, cache_size_bits;
+ static struct mpool *cache_mp;

+ #if RV32_HAS(ARC)
/*
 * Adaptive Replacement Cache (ARC) improves the fundamental LRU strategy
 * by dividing the cache into two lists, T1 and T2. List T1 is for LRU
@@ -30,14 +36,15 @@ static uint32_t cache_size, cache_size_bits;
 * we increase T1's size while decreasing T2. But, if the cache hit occurs in
 * B2, we would increase the size of T2 and decrease the size of T1.
 */
+
typedef enum {
    LRU_list,
    LFU_list,
    LRU_ghost_list,
    LFU_ghost_list,
    N_CACHE_LIST_TYPES
} cache_list_t;
-
+ #endif

struct list_head {
    struct list_head *prev, *next;
};
@@ -50,6 +57,7 @@ struct hlist_node {
    struct hlist_node *next, **pprev;
};

+ #if RV32_HAS(ARC)
/*
 * list maintains four cache lists T1, T2, B1, and B2.
 * ht_list maintains hashtable and improves the performance of cache searching.
@@ -61,17 +69,31 @@ typedef struct {
    struct list_head list;
    struct hlist_node ht_list;
} arc_entry_t;
+ #else
+ typedef struct {
+     void *value;
+     uint32_t key;
+     uint32_t frequency;
+     struct list_head list;
+     struct hlist_node ht_list;
+ } lfu_entry_t;
+ #endif

typedef struct {
    struct hlist_head *ht_list_head;
} hashtable_t;

typedef struct cache {
+ #if RV32_HAS(ARC)
    struct list_head *lists[N_CACHE_LIST_TYPES];
    uint32_t list_size[N_CACHE_LIST_TYPES];
+     uint32_t lru_capacity;
+ #else
+     struct list_head *lists[THRESHOLD];
+     uint32_t list_size;
+ #endif
    hashtable_t *map;
    uint32_t capacity;
-     uint32_t lru_capacity;
} cache_t;

static inline void INIT_LIST_HEAD(struct list_head *head)
@@ -80,6 +102,13 @@ static inline void INIT_LIST_HEAD(struct list_head *head)
    head->prev = head;
}

+ #if !RV32_HAS(ARC)
+ static inline int list_empty(const struct list_head *head)
+ {
+     return (head->next == head);
+ }
+ #endif
+
static inline void list_add(struct list_head *node, struct list_head *head)
{
    struct list_head *next = head->next;
@@ -107,6 +136,9 @@ static inline void list_del_init(struct list_head *node)

#define list_entry(node, type, member) container_of(node, type, member)

+ #define list_first_entry(head, type, member) \
+     list_entry((head)->next, type, member)
+
#define list_last_entry(head, type, member) \
    list_entry((head)->prev, type, member)

@@ -194,14 +226,15 @@ static inline void hlist_del_init(struct hlist_node *n)
         pos = hlist_entry_safe((pos)->member.next, type, member))
#endif

+
cache_t *cache_create(int size_bits)
{
    cache_t *cache = malloc(sizeof(cache_t));
    if (!cache)
        return NULL;
    cache_size_bits = size_bits;
    cache_size = 1 << size_bits;
-
+ #if RV32_HAS(ARC)
    for (int i = 0; i < N_CACHE_LIST_TYPES; i++) {
        cache->lists[i] = malloc(sizeof(struct list_head));
        INIT_LIST_HEAD(cache->lists[i]);
@@ -224,12 +257,41 @@ cache_t *cache_create(int size_bits)
    for (uint32_t i = 0; i < cache_size; i++) {
        INIT_HLIST_HEAD(&cache->map->ht_list_head[i]);
    }
+     cache->lru_capacity = cache_size / 2;
+     cache_mp =
+         mpool_create(cache_size * 2 * sizeof(arc_entry_t), sizeof(arc_entry_t));
+ #else
+     for (int i = 0; i < THRESHOLD; i++) {
+         cache->lists[i] = malloc(sizeof(struct list_head));
+         INIT_LIST_HEAD(cache->lists[i]);
+     }

+     cache->map = malloc(sizeof(hashtable_t));
+     if (!cache->map) {
+         free(cache->lists);
+         free(cache);
+         return NULL;
+     }
+     cache->map->ht_list_head = malloc(cache_size * sizeof(struct hlist_head));
+     if (!cache->map->ht_list_head) {
+         free(cache->map);
+         free(cache->lists);
+         free(cache);
+         return NULL;
+     }
+     for (uint32_t i = 0; i < cache_size; i++) {
+         INIT_HLIST_HEAD(&cache->map->ht_list_head[i]);
+     }
+     cache->list_size = 0;
+     cache_mp =
+         mpool_create(cache_size * sizeof(lfu_entry_t), sizeof(lfu_entry_t));
+ #endif
    cache->capacity = cache_size;
-     cache->lru_capacity = cache_size / 2;
    return cache;
}

+
+ #if RV32_HAS(ARC)
/* Rules of ARC
 * 1. size of LRU_list + size of LFU_list <= c
 * 2. size of LRU_list + size of LRU_ghost_list <= c
@@ -273,12 +335,14 @@ static inline void move_to_mru(cache_t *cache,
    list_del_init(&entry->list);
    list_add(&entry->list, cache->lists[type]);
}
+ #endif

void *cache_get(cache_t *cache, uint32_t key)
{
    if (!cache->capacity || hlist_empty(&cache->map->ht_list_head[HASH(key)]))
        return NULL;

+ #if RV32_HAS(ARC)
    arc_entry_t *entry = NULL;
#ifdef __HAVE_TYPEOF
    hlist_for_each_entry(entry, &cache->map->ht_list_head[HASH(key)], ht_list)
@@ -323,13 +387,35 @@ void *cache_get(cache_t *cache, uint32_t key)
    }

    CACHE_ASSERT(cache);
+ #else
+     lfu_entry_t *entry = NULL;
+ #ifdef __HAVE_TYPEOF
+     hlist_for_each_entry(entry, &cache->map->ht_list_head[HASH(key)], ht_list)
+ #else
+     hlist_for_each_entry(entry, &cache->map->ht_list_head[HASH(key)], ht_list,
+                          lfu_entry_t)
+ #endif
+     {
+         if (entry->key == key)
+             break;
+     }
+     if (!entry || entry->key != key)
+         return NULL;
+
+     /* We translate the block with a frequency of more than THRESHOLD */
+     if (entry->frequency < THRESHOLD) {
+         list_del_init(&entry->list);
+         list_add(&entry->list, cache->lists[entry->frequency++]);
+     }
+ #endif
    /* return NULL if cache miss */
    return entry->value;
}

void *cache_put(cache_t *cache, uint32_t key, void *value)
{
    void *delete_value = NULL;
+ #if RV32_HAS(ARC)
    assert(cache->list_size[LRU_list] + cache->list_size[LRU_ghost_list] <=
           cache->capacity);
    /* Before adding a new element to the cache, we should check the status
@@ -343,7 +429,7 @@ void *cache_put(cache_t *cache, uint32_t key, void *value)
            list_del_init(&delete_target->list);
            hlist_del_init(&delete_target->ht_list);
            delete_value = delete_target->value;
-             free(delete_target);
+             mpool_free(cache_mp, delete_target);
            cache->list_size[LRU_ghost_list]--;
            if (cache->list_size[LRU_list] &&
                cache->list_size[LRU_list] >= cache->lru_capacity)
@@ -357,7 +443,7 @@ void *cache_put(cache_t *cache, uint32_t key, void *value)
            list_del_init(&delete_target->list);
            hlist_del_init(&delete_target->ht_list);
            delete_value = delete_target->value;
-             free(delete_target);
+             mpool_free(cache_mp, delete_target);
            cache->list_size[LRU_list]--;
        }
    } else {
@@ -372,12 +458,12 @@ void *cache_put(cache_t *cache, uint32_t key, void *value)
            list_del_init(&delete_target->list);
            hlist_del_init(&delete_target->ht_list);
            delete_value = delete_target->value;
-             free(delete_target);
+             mpool_free(cache_mp, delete_target);
            cache->list_size[LFU_ghost_list]--;
        }
        REPLACE_LIST(>, >=)
    }
-     arc_entry_t *new_entry = malloc(sizeof(arc_entry_t));
+     arc_entry_t *new_entry = mpool_alloc(cache_mp);
    new_entry->key = key;
    new_entry->value = value;
    /* check if the whole cache has become LFU */
@@ -393,21 +479,63 @@ void *cache_put(cache_t *cache, uint32_t key, void *value)
    hlist_add_head(&new_entry->ht_list, &cache->map->ht_list_head[HASH(key)]);

    CACHE_ASSERT(cache);
+ #else
+     assert(cache->list_size <= cache->capacity);
+     /* Before adding a new element to the cache, we should check the status
+      * of the cache.
+      */
+     if (cache->list_size == cache->capacity) {
+         for (int i = 0; i < THRESHOLD; i++) {
+             if (!list_empty(cache->lists[i])) {
+                 lfu_entry_t *delete_target =
+                     list_last_entry(cache->lists[i], lfu_entry_t, list);
+                 list_del_init(&delete_target->list);
+                 hlist_del_init(&delete_target->ht_list);
+                 delete_value = delete_target->value;
+                 cache->list_size--;
+                 mpool_free(cache_mp, delete_target);
+                 break;
+             }
+         }
+     }
+     lfu_entry_t *new_entry = mpool_alloc(cache_mp);
+     new_entry->key = key;
+     new_entry->value = value;
+     new_entry->frequency = 0;
+     list_add(&new_entry->list, cache->lists[new_entry->frequency++]);
+     cache->list_size++;
+     hlist_add_head(&new_entry->ht_list, &cache->map->ht_list_head[HASH(key)]);
+     assert(cache->list_size <= cache->capacity);
+ #endif
    return delete_value;
}

void cache_free(cache_t *cache, void (*callback)(void *))
{
+ #if RV32_HAS(ARC)
    for (int i = 0; i < N_CACHE_LIST_TYPES; i++) {
        arc_entry_t *entry, *safe;
#ifdef __HAVE_TYPEOF
        list_for_each_entry_safe(entry, safe, cache->lists[i], list)
#else
        list_for_each_entry_safe(entry, safe, cache->lists[i], list,
                                 arc_entry_t)
+ #endif
+ #else
+     for (int i = 0; i < THRESHOLD; i++) {
+         if (list_empty(cache->lists[i]))
+             continue;
+         lfu_entry_t *entry, *safe;
+ #ifdef __HAVE_TYPEOF
+         list_for_each_entry_safe(entry, safe, cache->lists[i], list)
+ #else
+         list_for_each_entry_safe(entry, safe, cache->lists[i], list,
+                                  lfu_entry_t)
+ #endif
#endif
        callback(entry->value);
    }
+     mpool_destory(cache_mp);
    free(cache->map->ht_list_head);
    free(cache->map);
    free(cache);
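
A minimal usage sketch of the new LFU path follows; it is not part of this change. It assumes only the public API visible in this diff — cache_create(), cache_put(), cache_get(), and cache_free() declared in "cache.h" — and that the cached values are ordinary heap blocks; the key and capacity values are made up for illustration.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#include "cache.h"

int main(void)
{
    /* 2^10 = 1024 entries of capacity and hash buckets */
    cache_t *cache = cache_create(10);
    if (!cache)
        return 1;

    uint32_t *block = malloc(sizeof(uint32_t));
    if (!block) {
        cache_free(cache, free);
        return 1;
    }
    *block = 0xdeadbeef;

    /* cache_put() hands back the value it evicted to make room, or NULL */
    void *evicted = cache_put(cache, 0x10000, block);
    if (evicted)
        free(evicted);

    /* Each hit moves the entry to a higher-frequency list; once its
     * frequency reaches THRESHOLD, the caller would hand the block to
     * the machine-code translator. */
    uint32_t *found = cache_get(cache, 0x10000);
    if (found)
        printf("hit: 0x%x\n", (unsigned) *found);

    cache_free(cache, free); /* free() releases each stored block */
    return 0;
}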