@@ -30,7 +30,7 @@ internal class RemoteRenderer : HtmlRenderer
30
30
private long _nextRenderId = 1 ;
31
31
private bool _disposing = false ;
32
32
private bool _queueIsFullNotified ;
33
- private ConcurrentQueue < UnacknowledgedRenderBatch > _unacknowledgedRenderBatches = new ConcurrentQueue < UnacknowledgedRenderBatch > ( ) ;
33
+ private readonly ConcurrentQueue < UnacknowledgedRenderBatch > _unacknowledgedRenderBatches = new ConcurrentQueue < UnacknowledgedRenderBatch > ( ) ;
34
34
35
35
/// <summary>
36
36
/// Notifies when a rendering exception occurred.
@@ -92,11 +92,11 @@ protected override void ProcessRenderQueue()
92
92
// If we got here it means we are at max capacity, so we don't want to actually process the queue,
93
93
// as we have a client that is not acknowledging render batches fast enough (something we consider needs
94
94
// to be fast).
95
- // The result is somethign as follows:
95
+ // The result is something as follows:
96
96
// Let's imagine an extreme case where the server produces a new batch every millisecond.
97
97
// Let's say the client is able to ACK a batch every 100 milliseconds.
98
- // When the app starts the client might see the sequence 0.000->0.{MAXUnacknowledgeRenderBatches} and then
99
- // after 100 miliseconds it sees it jump to 0. 1xx, then to 0. 2xx where xx is something between {0..99} the
98
+ // When the app starts the client might see the sequence 0->(MaxUnacknowledgedRenderBatches-1) and then
99
+ // after 100 milliseconds it sees it jump to 1xx, then to 2xx where xx is something between {0..99} the
100
100
// reason for this is that the server slows down rendering new batches to as fast as the client can consume
101
101
// them.
102
102
// Similarly, if a client were to send events at a faster pace than the server can consume them, the server
@@ -287,7 +287,7 @@ public void OnRenderCompleted(long incomingBatchId, string errorMessageOrNull)
287
287
while ( _unacknowledgedRenderBatches . TryPeek ( out nextUnacknowledgedBatch ) && nextUnacknowledgedBatch . BatchId <= incomingBatchId )
288
288
{
289
289
lastBatchId = nextUnacknowledgedBatch . BatchId ;
290
- // At this point the queue is no longer full, we have at least emptied one slot, so we allow a further
290
+ // At this point the queue is definitely not full, we have at least emptied one slot, so we allow a further
291
291
// full queue log entry the next time it fills up.
292
292
_queueIsFullNotified = false ;
293
293
_unacknowledgedRenderBatches . TryDequeue ( out _ ) ;
0 commit comments