@@ -71,31 +71,99 @@ const pollBlockSize = 4 * 1024
71
71
//go:notinheap
type pollDesc struct {
	link *pollDesc // in pollcache, protected by pollcache.lock
	fd   uintptr   // constant for pollDesc usage lifetime

	// atomicInfo holds bits from closing, rd, and wd,
	// which are only ever written while holding the lock,
	// summarized for use by netpollcheckerr,
	// which cannot acquire the lock.
	// After writing these fields under lock in a way that
	// might change the summary, code must call publishInfo
	// before releasing the lock.
	// Code that changes fields and then calls netpollunblock
	// (while still holding the lock) must call publishInfo
	// before calling netpollunblock, because publishInfo is what
	// stops netpollblock from blocking anew
	// (by changing the result of netpollcheckerr).
	// atomicInfo also holds the eventErr bit,
	// recording whether a poll event on the fd got an error;
	// atomicInfo is the only source of truth for that bit.
	atomicInfo atomic.Uint32 // atomic pollInfo

	// rg, wg are accessed atomically and hold g pointers.
	// (Using atomic.Uintptr here is similar to using guintptr elsewhere.)
	rg atomic.Uintptr // pdReady, pdWait, G waiting for read or nil
	wg atomic.Uintptr // pdReady, pdWait, G waiting for write or nil

	lock    mutex // protects the following fields
	closing bool
	user    uint32    // user settable cookie
	rseq    uintptr   // protects from stale read timers
	rt      timer     // read deadline timer (set if rt.f != nil)
	rd      int64     // read deadline (a nanotime in the future, -1 when expired)
	wseq    uintptr   // protects from stale write timers
	wt      timer     // write deadline timer
	wd      int64     // write deadline (a nanotime in the future, -1 when expired)
	self    *pollDesc // storage for indirect interface. See (*pollDesc).makeArg.
}
98
109
110
// pollInfo is the subset of pollDesc state needed by netpollcheckerr,
// packed into a uint32 so it can be read atomically. Every bit except
// pollEventErr mirrors state that is manipulated under pd.lock; the
// pollEventErr bit lives only here and has no locked counterpart.
type pollInfo uint32

const (
	pollClosing = 1 << iota
	pollEventErr
	pollExpiredReadDeadline
	pollExpiredWriteDeadline
)

// closing reports whether the descriptor is being closed.
func (pi pollInfo) closing() bool {
	return pi&pollClosing == pollClosing
}

// eventErr reports whether a poll event on the fd got an error.
func (pi pollInfo) eventErr() bool {
	return pi&pollEventErr == pollEventErr
}

// expiredReadDeadline reports whether the read deadline has passed.
func (pi pollInfo) expiredReadDeadline() bool {
	return pi&pollExpiredReadDeadline == pollExpiredReadDeadline
}

// expiredWriteDeadline reports whether the write deadline has passed.
func (pi pollInfo) expiredWriteDeadline() bool {
	return pi&pollExpiredWriteDeadline == pollExpiredWriteDeadline
}
127
+
128
+ // info returns the pollInfo corresponding to pd.
129
+ func (pd * pollDesc ) info () pollInfo {
130
+ return pollInfo (pd .atomicInfo .Load ())
131
+ }
132
+
133
+ // publishInfo updates pd.atomicInfo (returned by pd.info)
134
+ // using the other values in pd.
135
+ // It must be called while holding pd.lock,
136
+ // and it must be called after changing anything
137
+ // that might affect the info bits.
138
+ // In practice this means after changing closing
139
+ // or changing rd or wd from < 0 to >= 0.
140
+ func (pd * pollDesc ) publishInfo () {
141
+ var info uint32
142
+ if pd .closing {
143
+ info |= pollClosing
144
+ }
145
+ if pd .rd < 0 {
146
+ info |= pollExpiredReadDeadline
147
+ }
148
+ if pd .wd < 0 {
149
+ info |= pollExpiredWriteDeadline
150
+ }
151
+
152
+ // Set all of x except the pollEventErr bit.
153
+ x := pd .atomicInfo .Load ()
154
+ for ! pd .atomicInfo .CompareAndSwap (x , (x & pollEventErr )| info ) {
155
+ x = pd .atomicInfo .Load ()
156
+ }
157
+ }
158
+
159
+ // setEventErr sets the result of pd.info().eventErr() to b.
160
+ func (pd * pollDesc ) setEventErr (b bool ) {
161
+ x := pd .atomicInfo .Load ()
162
+ for (x & pollEventErr != 0 ) != b && ! pd .atomicInfo .CompareAndSwap (x , x ^ pollEventErr ) {
163
+ x = pd .atomicInfo .Load ()
164
+ }
165
+ }
166
+
99
167
type pollCache struct {
100
168
lock mutex
101
169
first * pollDesc
@@ -147,24 +215,25 @@ func poll_runtime_isPollServerDescriptor(fd uintptr) bool {
147
215
func poll_runtime_pollOpen (fd uintptr ) (* pollDesc , int ) {
148
216
pd := pollcache .alloc ()
149
217
lock (& pd .lock )
150
- wg := atomic . Loaduintptr ( & pd .wg )
218
+ wg := pd .wg . Load ( )
151
219
if wg != 0 && wg != pdReady {
152
220
throw ("runtime: blocked write on free polldesc" )
153
221
}
154
- rg := atomic . Loaduintptr ( & pd .rg )
222
+ rg := pd .rg . Load ( )
155
223
if rg != 0 && rg != pdReady {
156
224
throw ("runtime: blocked read on free polldesc" )
157
225
}
158
226
pd .fd = fd
159
227
pd .closing = false
160
- pd .everr = false
228
+ pd .setEventErr ( false )
161
229
pd .rseq ++
162
- atomic . Storeuintptr ( & pd .rg , 0 )
230
+ pd .rg . Store ( 0 )
163
231
pd .rd = 0
164
232
pd .wseq ++
165
- atomic . Storeuintptr ( & pd .wg , 0 )
233
+ pd .wg . Store ( 0 )
166
234
pd .wd = 0
167
235
pd .self = pd
236
+ pd .publishInfo ()
168
237
unlock (& pd .lock )
169
238
170
239
errno := netpollopen (fd , pd )
@@ -180,11 +249,11 @@ func poll_runtime_pollClose(pd *pollDesc) {
180
249
if ! pd .closing {
181
250
throw ("runtime: close polldesc w/o unblock" )
182
251
}
183
- wg := atomic . Loaduintptr ( & pd .wg )
252
+ wg := pd .wg . Load ( )
184
253
if wg != 0 && wg != pdReady {
185
254
throw ("runtime: blocked write on closing polldesc" )
186
255
}
187
- rg := atomic . Loaduintptr ( & pd .rg )
256
+ rg := pd .rg . Load ( )
188
257
if rg != 0 && rg != pdReady {
189
258
throw ("runtime: blocked read on closing polldesc" )
190
259
}
@@ -209,9 +278,9 @@ func poll_runtime_pollReset(pd *pollDesc, mode int) int {
209
278
return errcode
210
279
}
211
280
if mode == 'r' {
212
- atomic . Storeuintptr ( & pd .rg , 0 )
281
+ pd .rg . Store ( 0 )
213
282
} else if mode == 'w' {
214
- atomic . Storeuintptr ( & pd .wg , 0 )
283
+ pd .wg . Store ( 0 )
215
284
}
216
285
return pollNoError
217
286
}
@@ -273,6 +342,7 @@ func poll_runtime_pollSetDeadline(pd *pollDesc, d int64, mode int) {
273
342
if mode == 'w' || mode == 'r' + 'w' {
274
343
pd .wd = d
275
344
}
345
+ pd .publishInfo ()
276
346
combo := pd .rd > 0 && pd .rd == pd .wd
277
347
rtf := netpollReadDeadline
278
348
if combo {
@@ -314,15 +384,13 @@ func poll_runtime_pollSetDeadline(pd *pollDesc, d int64, mode int) {
314
384
}
315
385
}
316
386
// If we set the new deadline in the past, unblock currently pending IO if any.
387
+ // Note that pd.publishInfo has already been called, above, immediately after modifying rd and wd.
317
388
var rg , wg * g
318
- if pd .rd < 0 || pd .wd < 0 {
319
- atomic .StorepNoWB (noescape (unsafe .Pointer (& wg )), nil ) // full memory barrier between stores to rd/wd and load of rg/wg in netpollunblock
320
- if pd .rd < 0 {
321
- rg = netpollunblock (pd , 'r' , false )
322
- }
323
- if pd .wd < 0 {
324
- wg = netpollunblock (pd , 'w' , false )
325
- }
389
+ if pd .rd < 0 {
390
+ rg = netpollunblock (pd , 'r' , false )
391
+ }
392
+ if pd .wd < 0 {
393
+ wg = netpollunblock (pd , 'w' , false )
326
394
}
327
395
unlock (& pd .lock )
328
396
if rg != nil {
@@ -343,7 +411,7 @@ func poll_runtime_pollUnblock(pd *pollDesc) {
343
411
pd .rseq ++
344
412
pd .wseq ++
345
413
var rg , wg * g
346
- atomic . StorepNoWB ( noescape ( unsafe . Pointer ( & rg )), nil ) // full memory barrier between store to closing and read of rg/wg in netpollunblock
414
+ pd . publishInfo ()
347
415
rg = netpollunblock (pd , 'r' , false )
348
416
wg = netpollunblock (pd , 'w' , false )
349
417
if pd .rt .f != nil {
@@ -388,16 +456,17 @@ func netpollready(toRun *gList, pd *pollDesc, mode int32) {
388
456
}
389
457
390
458
func netpollcheckerr (pd * pollDesc , mode int32 ) int {
391
- if pd .closing {
459
+ info := pd .info ()
460
+ if info .closing () {
392
461
return pollErrClosing
393
462
}
394
- if (mode == 'r' && pd . rd < 0 ) || (mode == 'w' && pd . wd < 0 ) {
463
+ if (mode == 'r' && info . expiredReadDeadline ()) || (mode == 'w' && info . expiredWriteDeadline () ) {
395
464
return pollErrTimeout
396
465
}
397
466
// Report an event scanning error only on a read event.
398
467
// An error on a write event will be captured in a subsequent
399
468
// write call that is able to report a more specific error.
400
- if mode == 'r' && pd . everr {
469
+ if mode == 'r' && info . eventErr () {
401
470
return pollErrNotPollable
402
471
}
403
472
return pollNoError
@@ -432,28 +501,28 @@ func netpollblock(pd *pollDesc, mode int32, waitio bool) bool {
432
501
// set the gpp semaphore to pdWait
433
502
for {
434
503
// Consume notification if already ready.
435
- if atomic . Casuintptr ( gpp , pdReady , 0 ) {
504
+ if gpp . CompareAndSwap ( pdReady , 0 ) {
436
505
return true
437
506
}
438
- if atomic . Casuintptr ( gpp , 0 , pdWait ) {
507
+ if gpp . CompareAndSwap ( 0 , pdWait ) {
439
508
break
440
509
}
441
510
442
511
// Double check that this isn't corrupt; otherwise we'd loop
443
512
// forever.
444
- if v := atomic . Loaduintptr ( gpp ); v != pdReady && v != 0 {
513
+ if v := gpp . Load ( ); v != pdReady && v != 0 {
445
514
throw ("runtime: double wait" )
446
515
}
447
516
}
448
517
449
518
// need to recheck error states after setting gpp to pdWait
450
519
// this is necessary because runtime_pollUnblock/runtime_pollSetDeadline/deadlineimpl
451
- // do the opposite: store to closing/rd/wd, membarrier , load of rg/wg
520
+ // do the opposite: store to closing/rd/wd, publishInfo , load of rg/wg
452
521
if waitio || netpollcheckerr (pd , mode ) == pollNoError {
453
522
gopark (netpollblockcommit , unsafe .Pointer (gpp ), waitReasonIOWait , traceEvGoBlockNet , 5 )
454
523
}
455
524
// be careful to not lose concurrent pdReady notification
456
- old := atomic . Xchguintptr ( gpp , 0 )
525
+ old := gpp . Swap ( 0 )
457
526
if old > pdWait {
458
527
throw ("runtime: corrupted polldesc" )
459
528
}
@@ -467,7 +536,7 @@ func netpollunblock(pd *pollDesc, mode int32, ioready bool) *g {
467
536
}
468
537
469
538
for {
470
- old := atomic . Loaduintptr ( gpp )
539
+ old := gpp . Load ( )
471
540
if old == pdReady {
472
541
return nil
473
542
}
@@ -480,7 +549,7 @@ func netpollunblock(pd *pollDesc, mode int32, ioready bool) *g {
480
549
if ioready {
481
550
new = pdReady
482
551
}
483
- if atomic . Casuintptr ( gpp , old , new ) {
552
+ if gpp . CompareAndSwap ( old , new ) {
484
553
if old == pdWait {
485
554
old = 0
486
555
}
@@ -508,7 +577,7 @@ func netpolldeadlineimpl(pd *pollDesc, seq uintptr, read, write bool) {
508
577
throw ("runtime: inconsistent read deadline" )
509
578
}
510
579
pd .rd = - 1
511
- atomic . StorepNoWB ( unsafe . Pointer ( & pd .rt . f ), nil ) // full memory barrier between store to rd and load of rg in netpollunblock
580
+ pd .publishInfo ()
512
581
rg = netpollunblock (pd , 'r' , false )
513
582
}
514
583
var wg * g
@@ -517,7 +586,7 @@ func netpolldeadlineimpl(pd *pollDesc, seq uintptr, read, write bool) {
517
586
throw ("runtime: inconsistent write deadline" )
518
587
}
519
588
pd .wd = - 1
520
- atomic . StorepNoWB ( unsafe . Pointer ( & pd .wt . f ), nil ) // full memory barrier between store to wd and load of wg in netpollunblock
589
+ pd .publishInfo ()
521
590
wg = netpollunblock (pd , 'w' , false )
522
591
}
523
592
unlock (& pd .lock )
0 commit comments