
Commit fa04997

Refine comments
1 parent 6cb1663 commit fa04997


8 files changed: +137 -141 lines


src/breakpoint.c (+1 -1)

@@ -25,7 +25,7 @@ bool breakpoint_map_insert(breakpoint_map_t map, riscv_word_t addr)
     breakpoint_t bp = (breakpoint_t){.addr = addr, .orig_insn = 0};
     map_iter_t it;
     map_find(map, &it, &addr);
-    /* We don't expect to set breakpoint at duplicate address */
+    /* breakpoints are not expected to be set at duplicate addresses */
     if (!map_at_end(map, &it))
        return false;
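The rule stated in the refined comment, rejecting a second breakpoint at an address that already has one, can be illustrated with a toy, self-contained version of the insert-if-absent check. A plain array stands in for the real map_t; all names below are made up for this example:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_BP 8

/* Toy stand-in for breakpoint_map_insert(): refuse duplicates so a saved
 * original instruction is never overwritten. */
static uint32_t bp_addrs[MAX_BP];
static int bp_count;

static bool bp_insert(uint32_t addr)
{
    for (int i = 0; i < bp_count; i++)
        if (bp_addrs[i] == addr)
            return false; /* duplicate address: reject, as the comment states */
    if (bp_count == MAX_BP)
        return false;     /* table full */
    bp_addrs[bp_count++] = addr;
    return true;
}

int main(void)
{
    printf("%d\n", bp_insert(0x8000)); /* 1: inserted */
    printf("%d\n", bp_insert(0x8000)); /* 0: rejected as a duplicate */
    return 0;
}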

src/cache.c (+24 -23)

@@ -16,25 +16,27 @@
 #define GOLDEN_RATIO_32 0x61C88647
 #define HASH(val) \
     (((val) * (GOLDEN_RATIO_32)) >> (32 - (cache_size_bits))) & (cache_size - 1)
+
 /* THRESHOLD is set to identify hot spots. Once the frequency of use for a block
- * exceeds the THRESHOLD, the JIT compiler flow is triggered. */
+ * exceeds the THRESHOLD, the JIT compiler flow is triggered.
+ */
 #define THRESHOLD 1000
 
 static uint32_t cache_size, cache_size_bits;
 static struct mpool *cache_mp;
 
 #if RV32_HAS(ARC)
-/*
- * Adaptive Replacement Cache (ARC) improves the fundamental LRU strategy
- * by dividing the cache into two lists, T1 and T2. list T1 is for LRU
- * strategy and list T2 is for LFU strategy. Moreover, it keeps two ghost
- * lists, B1 and B2, with replaced entries from the LRU list going into B1
- * and the LFU list going into B2.
+/* The Adaptive Replacement Cache (ARC) improves the traditional LRU strategy
+ * by dividing the cache into two lists: T1 and T2. T1 follows the LRU
+ * strategy, while T2 follows the LFU strategy. Additionally, ARC maintains two
+ * ghost lists, B1 and B2, which store replaced entries from the LRU and LFU
+ * lists, respectively.
  *
- * Based on B1 and B2, ARC will modify the size of T1 and T2. When a cache
- * hit occurs in B1, it indicates that T1's capacity is too little, therefore
- * we increase T1's size while decreasing T2. But, if the cache hit occurs in
- * B2, we would increase the size of T2 and decrease the size of T1.
+ * Based on the contents of B1 and B2, ARC dynamically adjusts the sizes of T1
+ * and T2. If a cache hit occurs in B1, it indicates that the size of T1 is
+ * insufficient, leading to an increase in T1's size and a decrease in T2's
+ * size. Conversely, if a cache hit occurs in B2, T2's size is increased while
+ * T1's size is decreased.
  */
 typedef enum {
     LRU_list,
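The adaptation rule described in the refined comment can be captured in a few lines. The sketch below is illustrative only: lru_target, capacity, and arc_adapt are assumed names for this example, not cache.c's internals. A ghost hit in B1 enlarges the target size of T1, while a ghost hit in B2 shrinks it, which effectively gives T2 more room.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative sketch of ARC adaptation: 'lru_target' is the desired size of
 * T1, and 'capacity' is the total cache capacity c. */
static unsigned lru_target;

static void arc_adapt(bool hit_in_lru_ghost /* hit in B1? */, unsigned capacity)
{
    if (hit_in_lru_ghost) {
        /* T1 was evicted too eagerly: give the LRU side more room. */
        if (lru_target < capacity)
            lru_target++;
    } else {
        /* Hit in B2: T2 deserves more room, so shrink the LRU target. */
        if (lru_target > 0)
            lru_target--;
    }
}

int main(void)
{
    arc_adapt(true, 4);  /* ghost hit in B1: T1's target grows to 1 */
    arc_adapt(false, 4); /* ghost hit in B2: T1's target shrinks back to 0 */
    printf("lru_target = %u\n", lru_target);
    return 0;
}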
@@ -57,8 +59,7 @@ struct hlist_node {
 };
 
 #if RV32_HAS(ARC)
-/*
- * list maintains four cache lists T1, T2, B1, and B2.
+/* list maintains four cache lists T1, T2, B1, and B2.
  * ht_list maintains hashtable and improves the performance of cache searching.
  */
 typedef struct {

@@ -225,7 +226,6 @@ static inline void hlist_del_init(struct hlist_node *n)
     pos = hlist_entry_safe((pos)->member.next, type, member))
 #endif
 
-
 cache_t *cache_create(int size_bits)
 {
     cache_t *cache = malloc(sizeof(cache_t));
@@ -291,12 +291,12 @@ cache_t *cache_create(int size_bits)
 
 
 #if RV32_HAS(ARC)
-/* Rules of ARC
+/* Rules of ARC:
  * 1. size of LRU_list + size of LFU_list <= c
  * 2. size of LRU_list + size of LRU_ghost_list <= c
  * 3. size of LFU_list + size of LFU_ghost_list <= 2c
  * 4. size of LRU_list + size of LFU_list + size of LRU_ghost_list + size of
- * LFU_ghost_list <= 2c
+ *    LFU_ghost_list <= 2c
  */
 #define CACHE_ASSERT(cache) \
     assert(cache->list_size[LRU_list] + cache->list_size[LFU_list] <= \
@@ -401,10 +401,11 @@ void *cache_get(cache_t *cache, uint32_t key)
     if (!entry || entry->key != key)
         return NULL;
 
-    /* Once the frequency of use for a specific block exceeds the predetermined
-     * THRESHOLD, we dispatch the block to the code generator for the purpose of
-     * generating C code. Subsequently, the generated C code is compiled into
-     * machine code by the target compiler. */
+    /* When the frequency of use for a specific block exceeds the predetermined
+     * THRESHOLD, the block is dispatched to the code generator to generate C
+     * code. The generated C code is then compiled into machine code by the
+     * target compiler.
+     */
     if (entry->frequency < THRESHOLD) {
         list_del_init(&entry->list);
         list_add(&entry->list, cache->lists[entry->frequency++]);
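The refined comment describes a simple counting scheme: every cache hit bumps a block's frequency, and crossing THRESHOLD is the point where the JIT flow would be triggered. Below is a self-contained toy version of that check; block_profile_t and on_cache_hit are made-up names, and only the THRESHOLD value is taken from cache.c.

#include <stdint.h>
#include <stdio.h>

#define THRESHOLD 1000 /* same value as in cache.c */

/* Illustrative sketch: each cache hit bumps a block's frequency; reaching
 * THRESHOLD marks the block as hot, i.e. a candidate for JIT compilation. */
typedef struct {
    uint32_t pc;        /* address of the translated block */
    uint32_t frequency; /* number of times the block has been fetched */
} block_profile_t;

static int on_cache_hit(block_profile_t *blk)
{
    if (blk->frequency < THRESHOLD) {
        blk->frequency++;   /* still warming up: keep interpreting */
        return 0;
    }
    return 1;               /* hot: trigger the JIT compilation flow */
}

int main(void)
{
    block_profile_t blk = {.pc = 0x10000, .frequency = 0};
    for (int i = 0; i < 1500; i++) {
        if (on_cache_hit(&blk)) {
            printf("block 0x%x became hot after %u hits\n",
                   (unsigned) blk.pc, (unsigned) blk.frequency);
            break;
        }
    }
    return 0;
}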
@@ -420,8 +421,8 @@ void *cache_put(cache_t *cache, uint32_t key, void *value)
 #if RV32_HAS(ARC)
     assert(cache->list_size[LRU_list] + cache->list_size[LRU_ghost_list] <=
            cache->capacity);
-    /* Before adding new element to cach, we should check the status
-     * of cache.
+    /* Before adding a new element to the cache, it is necessary to check the
+     * status of the cache.
      */
     if ((cache->list_size[LRU_list] + cache->list_size[LRU_ghost_list]) ==
         cache->capacity) {
@@ -539,4 +540,4 @@ void cache_free(cache_t *cache, void (*callback)(void *))
     free(cache->map->ht_list_head);
     free(cache->map);
     free(cache);
-}
\ No newline at end of file
+}

src/common.h (+4 -4)

@@ -24,9 +24,9 @@
 #define __ALIGNED(x)
 #endif
 
-/* There is no tail-call optimization(TCO) in non-optimized builds. To work
- * around this, we attempts to use a compiler attribute called musttail that
- * forces the compiler to TCO even when optimizations aren't on.
+/* Non-optimized builds do not have tail-call optimization (TCO). To work
+ * around this, the compiler attribute 'musttail' is used, which forces TCO
+ * even without optimizations enabled.
  */
 #if defined(__has_attribute) && __has_attribute(musttail)
 #define MUST_TAIL __attribute__((musttail))
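For context, here is a minimal stand-alone example of how a 'musttail' return is typically written with a macro like MUST_TAIL; sum_to is a made-up function, not part of the emulator. When the attribute is honored, the self-recursive call is emitted as a jump, so recursion depth does not grow the stack even in a non-optimized build.

#include <stdio.h>

#if defined(__has_attribute) && __has_attribute(musttail)
#define MUST_TAIL __attribute__((musttail))
#else
#define MUST_TAIL
#endif

/* The caller and callee share the same prototype, so the compiler can (and,
 * with musttail, must) replace call+return with a direct jump. */
static long sum_to(long n, long acc)
{
    if (n == 0)
        return acc;
    MUST_TAIL return sum_to(n - 1, acc + n); /* reuses this stack frame */
}

int main(void)
{
    /* Deep recursion; relies on TCO, forced by musttail or done by the
     * optimizer, to avoid exhausting the stack. */
    printf("%ld\n", sum_to(1000000, 0));
    return 0;
}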
@@ -73,4 +73,4 @@
 #define container_of(ptr, type, member) \
     ((type *) ((char *) (ptr) - (offsetof(type, member))))
 #endif
-#endif
\ No newline at end of file
+#endif

src/decode.h (+17 -16)

@@ -244,27 +244,28 @@ typedef struct rv_insn {
     /* instruction length */
     uint8_t insn_len;
 
-    /* According to tail-call optimization (TCO), if a C function ends with
-     * a function call to another function or itself and simply returns that
-     * function's result, the compiler can substitute a simple jump to the
-     * other function for the 'call' and 'return' instructions . The self
-     * -recursive function can therefore use the same function stack frame.
+    /* Tail-call optimization (TCO) allows a C function to replace a function
+     * call to another function or itself, followed by a simple return of the
+     * function's result, with a direct jump to the target function. This
+     * optimization enables the self-recursive function to reuse the same
+     * function stack frame.
      *
-     * Using member tailcall, we can tell whether an IR is the final IR in
-     * a basic block. Additionally, member 'impl' allows us to invoke next
-     * instruction emulation directly without computing the jumping address.
-     * In order to enable the compiler to perform TCO, we can use these two
-     * members to rewrite all instruction emulations into a self-recursive
-     * version.
+     * The @tailcall member indicates whether an intermediate representation
+     * (IR) is the final instruction in a basic block. The @impl member
+     * facilitates the direct invocation of the next instruction emulation
+     * without the need to compute the jump address. By utilizing these two
+     * members, all instruction emulations can be rewritten into a
+     * self-recursive version, enabling the compiler to leverage TCO.
      */
     bool tailcall;
     bool (*impl)(riscv_t *, const struct rv_insn *);
 
-    /* We employ two pointers, branch taken and branch untaken, to avoid the
-     * significant overhead resulting from aggressive memory copy. Instead of
-     * copying the entire IR array, these pointers respectively point to the
-     * first IR of the first basic block in the path of the taken and untaken
-     * branches, so we can jump to the specific IR array directly.
+    /* Two pointers, 'branch_taken' and 'branch_untaken', are employed to
+     * avoid the overhead associated with aggressive memory copying. Instead
+     * of copying the entire intermediate representation (IR) array, these
+     * pointers indicate the first IR of the first basic block in the path of
+     * the taken and untaken branches. This allows for direct jumping to the
+     * specific IR array without the need for additional copying.
      */
     struct rv_insn *branch_taken, *branch_untaken;
 } rv_insn_t;
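A compact, self-contained sketch of the self-recursive dispatch pattern these comments describe follows. All types and handlers here (vm_t, insn_t, do_addi) are stand-ins rather than the emulator's real definitions: each handler emulates one instruction and then returns the result of calling the next IR's impl, so a whole basic block can execute in a single stack frame once the compiler applies TCO. The branch_taken and branch_untaken pointers extend the same idea across basic blocks by pointing directly at the next block's first IR instead of copying it.

#include <stdbool.h>
#include <stdio.h>

typedef struct vm vm_t;     /* stand-in for riscv_t */
typedef struct insn insn_t; /* stand-in for rv_insn_t */

struct insn {
    bool (*impl)(vm_t *, const insn_t *); /* emulation of this instruction */
    bool tailcall;                        /* true for the last IR in a block */
    const insn_t *next;                   /* next IR in the decoded block */
};

struct vm {
    int counter; /* toy state so the example does something observable */
};

static bool do_addi(vm_t *vm, const insn_t *ir)
{
    vm->counter++;
    if (ir->tailcall)
        return true;                      /* end of the basic block */
    return ir->next->impl(vm, ir->next);  /* tail call into the next handler */
}

int main(void)
{
    insn_t last = {.impl = do_addi, .tailcall = true, .next = NULL};
    insn_t first = {.impl = do_addi, .tailcall = false, .next = &last};
    vm_t vm = {0};
    first.impl(&vm, &first);
    printf("executed %d instructions\n", vm.counter);
    return 0;
}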
