  * virtqueue. Access to each virtqueue is protected by spinlocks.
  */

+#include <linux/completion.h>
 #include <linux/errno.h>
+#include <linux/refcount.h>
 #include <linux/slab.h>
 #include <linux/virtio.h>
 #include <linux/virtio_config.h>

 #include "common.h"

+#define VIRTIO_MAX_RX_TIMEOUT_MS	60000
 #define VIRTIO_SCMI_MAX_MSG_SIZE	128 /* Value may be increased. */
 #define VIRTIO_SCMI_MAX_PDU_SIZE \
 	(VIRTIO_SCMI_MAX_MSG_SIZE + SCMI_MSG_MAX_PROT_OVERHEAD)
  * @cinfo: SCMI Tx or Rx channel
  * @free_list: List of unused scmi_vio_msg, maintained for Tx channels only
  * @is_rx: Whether channel is an Rx channel
- * @ready: Whether transport user is ready to hear about channel
  * @max_msg: Maximum number of pending messages for this channel.
- * @lock: Protects access to all members except ready.
- * @ready_lock: Protects access to ready. If required, it must be taken before
- *		lock.
+ * @lock: Protects access to all members except users.
+ * @shutdown_done: A reference to a completion used when freeing this channel.
+ * @users: A reference count to currently active users of this channel.
  */
 struct scmi_vio_channel {
 	struct virtqueue *vqueue;
 	struct scmi_chan_info *cinfo;
 	struct list_head free_list;
 	bool is_rx;
-	bool ready;
 	unsigned int max_msg;
-	/* lock to protect access to all members except ready. */
+	/* lock to protect access to all members except users. */
 	spinlock_t lock;
-	/* lock to rotects access to ready flag. */
-	spinlock_t ready_lock;
+	struct completion *shutdown_done;
+	refcount_t users;
 };
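
Note: the users refcount and shutdown_done completion added above replace the old ready flag and its dedicated ready_lock. A minimal stand-alone sketch of the idiom, with illustrative names that are not part of the patch: the owner holds the initial reference, transient users take extra references that fail once teardown has begun, and the final put wakes an owner waiting on the completion.

#include <linux/completion.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>

struct lifetime_obj {				/* hypothetical example type */
	spinlock_t lock;
	struct completion *shutdown_done;	/* set by the owner at teardown */
	refcount_t users;			/* 1 while the owner is active */
};

/* A user path: fails once teardown has dropped the owner reference */
static bool lifetime_get(struct lifetime_obj *o)
{
	return refcount_inc_not_zero(&o->users);
}

/* The matching put: the last one wakes the waiting owner, if any */
static void lifetime_put(struct lifetime_obj *o)
{
	if (refcount_dec_and_test(&o->users)) {
		unsigned long flags;

		spin_lock_irqsave(&o->lock, flags);
		if (o->shutdown_done)
			complete(o->shutdown_done);
		spin_unlock_irqrestore(&o->lock, flags);
	}
}

refcount_inc_not_zero() is what makes the acquire side safe without taking a lock: it can never resurrect an object whose count has already reached zero.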

 /**
@@ -76,6 +77,63 @@ struct scmi_vio_msg {
 /* Only one SCMI VirtIO device can possibly exist */
 static struct virtio_device *scmi_vdev;

+static void scmi_vio_channel_ready(struct scmi_vio_channel *vioch,
+				   struct scmi_chan_info *cinfo)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&vioch->lock, flags);
+	cinfo->transport_info = vioch;
+	/* Indirectly setting channel not available any more */
+	vioch->cinfo = cinfo;
+	spin_unlock_irqrestore(&vioch->lock, flags);
+
+	refcount_set(&vioch->users, 1);
+}
+
+static inline bool scmi_vio_channel_acquire(struct scmi_vio_channel *vioch)
+{
+	return refcount_inc_not_zero(&vioch->users);
+}
+
+static inline void scmi_vio_channel_release(struct scmi_vio_channel *vioch)
+{
+	if (refcount_dec_and_test(&vioch->users)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&vioch->lock, flags);
+		if (vioch->shutdown_done) {
+			vioch->cinfo = NULL;
+			complete(vioch->shutdown_done);
+		}
+		spin_unlock_irqrestore(&vioch->lock, flags);
+	}
+}
+
+static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch)
+{
+	unsigned long flags;
+	DECLARE_COMPLETION_ONSTACK(vioch_shutdown_done);
+
+	/*
+	 * Prepare to wait for the last release if not already released
+	 * or in progress.
+	 */
+	spin_lock_irqsave(&vioch->lock, flags);
+	if (!vioch->cinfo || vioch->shutdown_done) {
+		spin_unlock_irqrestore(&vioch->lock, flags);
+		return;
+	}
+	vioch->shutdown_done = &vioch_shutdown_done;
+	virtio_break_device(vioch->vqueue->vdev);
+	spin_unlock_irqrestore(&vioch->lock, flags);
+
+	scmi_vio_channel_release(vioch);
+
+	/* Let any possibly concurrent RX path release the channel */
+	wait_for_completion(vioch->shutdown_done);
+}
+
 static bool scmi_vio_have_vq_rx(struct virtio_device *vdev)
 {
 	return virtio_has_feature(vdev, VIRTIO_SCMI_F_P2A_CHANNELS);
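
With the helpers above in place, every transient user of a channel, on both the TX and RX paths, brackets its work between an acquire and a release. A sketch of the expected calling pattern; scmi_vio_do_work() is a hypothetical placeholder for the real processing:

static void scmi_vio_user_example(struct scmi_vio_channel *vioch)
{
	/* Refuse to start if teardown has already dropped the owner ref */
	if (!scmi_vio_channel_acquire(vioch))
		return;

	scmi_vio_do_work(vioch);	/* hypothetical placeholder */

	/* Possibly the final put: unblocks scmi_vio_channel_cleanup_sync() */
	scmi_vio_channel_release(vioch);
}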
@@ -119,7 +177,7 @@ static void scmi_finalize_message(struct scmi_vio_channel *vioch,

 static void scmi_vio_complete_cb(struct virtqueue *vqueue)
 {
-	unsigned long ready_flags;
+	unsigned long flags;
 	unsigned int length;
 	struct scmi_vio_channel *vioch;
 	struct scmi_vio_msg *msg;
@@ -130,27 +188,24 @@ static void scmi_vio_complete_cb(struct virtqueue *vqueue)
 	vioch = &((struct scmi_vio_channel *)vqueue->vdev->priv)[vqueue->index];

 	for (;;) {
-		spin_lock_irqsave(&vioch->ready_lock, ready_flags);
-
-		if (!vioch->ready) {
-			if (!cb_enabled)
-				(void)virtqueue_enable_cb(vqueue);
-			goto unlock_ready_out;
-		}
+		if (!scmi_vio_channel_acquire(vioch))
+			return;

-		/* IRQs already disabled here no need to irqsave */
-		spin_lock(&vioch->lock);
+		spin_lock_irqsave(&vioch->lock, flags);
 		if (cb_enabled) {
 			virtqueue_disable_cb(vqueue);
 			cb_enabled = false;
 		}
 		msg = virtqueue_get_buf(vqueue, &length);
 		if (!msg) {
-			if (virtqueue_enable_cb(vqueue))
-				goto unlock_out;
+			if (virtqueue_enable_cb(vqueue)) {
+				spin_unlock_irqrestore(&vioch->lock, flags);
+				scmi_vio_channel_release(vioch);
+				return;
+			}
 			cb_enabled = true;
 		}
-		spin_unlock(&vioch->lock);
+		spin_unlock_irqrestore(&vioch->lock, flags);

 		if (msg) {
 			msg->rx_len = length;
@@ -161,19 +216,14 @@ static void scmi_vio_complete_cb(struct virtqueue *vqueue)
 		}

 		/*
-		 * Release ready_lock and re-enable IRQs between loop iterations
-		 * to allow virtio_chan_free() to possibly kick in and set the
-		 * flag vioch->ready to false even in between processing of
-		 * messages, so as to force outstanding messages to be ignored
-		 * when system is shutting down.
+		 * Release vio channel between loop iterations to allow
+		 * virtio_chan_free() to eventually fully release it when
+		 * shutting down; in such a case, any outstanding message will
+		 * be ignored since this loop will bail out at the next
+		 * iteration.
 		 */
-		spin_unlock_irqrestore(&vioch->ready_lock, ready_flags);
+		scmi_vio_channel_release(vioch);
 	}
-
-unlock_out:
-	spin_unlock(&vioch->lock);
-unlock_ready_out:
-	spin_unlock_irqrestore(&vioch->ready_lock, ready_flags);
 }

 static const char *const scmi_vio_vqueue_names[] = { "tx", "rx" };
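
As the rewritten comment explains, the callback now takes and drops a channel reference on every iteration instead of holding ready_lock across the whole drain. Reduced to its referencing logic, the loop has roughly this shape (process_one() is a hypothetical placeholder returning false when the queue is empty):

static void rx_drain_example(struct scmi_vio_channel *vioch)
{
	for (;;) {
		if (!scmi_vio_channel_acquire(vioch))
			return;		/* teardown won the race: stop */

		if (!process_one(vioch)) {	/* hypothetical: one buffer */
			scmi_vio_channel_release(vioch);
			return;		/* queue drained */
		}

		/* Let a concurrent cleanup_sync() slip in between messages */
		scmi_vio_channel_release(vioch);
	}
}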
@@ -273,35 +323,20 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
 		}
 	}

-	spin_lock_irqsave(&vioch->lock, flags);
-	cinfo->transport_info = vioch;
-	/* Indirectly setting channel not available any more */
-	vioch->cinfo = cinfo;
-	spin_unlock_irqrestore(&vioch->lock, flags);
-
-	spin_lock_irqsave(&vioch->ready_lock, flags);
-	vioch->ready = true;
-	spin_unlock_irqrestore(&vioch->ready_lock, flags);
+	scmi_vio_channel_ready(vioch, cinfo);

 	return 0;
 }

 static int virtio_chan_free(int id, void *p, void *data)
 {
-	unsigned long flags;
 	struct scmi_chan_info *cinfo = p;
 	struct scmi_vio_channel *vioch = cinfo->transport_info;

-	spin_lock_irqsave(&vioch->ready_lock, flags);
-	vioch->ready = false;
-	spin_unlock_irqrestore(&vioch->ready_lock, flags);
+	scmi_vio_channel_cleanup_sync(vioch);

 	scmi_free_channel(cinfo, data, id);

-	spin_lock_irqsave(&vioch->lock, flags);
-	vioch->cinfo = NULL;
-	spin_unlock_irqrestore(&vioch->lock, flags);
-
 	return 0;
 }
@@ -316,10 +351,14 @@ static int virtio_send_message(struct scmi_chan_info *cinfo,
 	int rc;
 	struct scmi_vio_msg *msg;

+	if (!scmi_vio_channel_acquire(vioch))
+		return -EINVAL;
+
 	spin_lock_irqsave(&vioch->lock, flags);

 	if (list_empty(&vioch->free_list)) {
 		spin_unlock_irqrestore(&vioch->lock, flags);
+		scmi_vio_channel_release(vioch);
 		return -EBUSY;
 	}
@@ -342,6 +381,8 @@ static int virtio_send_message(struct scmi_chan_info *cinfo,

 	spin_unlock_irqrestore(&vioch->lock, flags);

+	scmi_vio_channel_release(vioch);
+
 	return rc;
 }
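
On the TX side the reference is held across the whole submission, and a failed acquire surfaces as -EINVAL, distinct from the transient -EBUSY returned when no free message slot is available. A hypothetical caller-side sketch of how the two cases might be told apart; the error-handling policy here is illustrative, not part of the patch:

static int scmi_vio_send_example(struct scmi_chan_info *cinfo,
				 struct scmi_xfer *xfer)
{
	int ret = virtio_send_message(cinfo, xfer);

	if (ret == -EBUSY)	/* no free scmi_vio_msg slot */
		return ret;	/* transient: safe to retry later */
	if (ret == -EINVAL)	/* acquire failed: teardown in progress */
		return ret;	/* permanent for this channel: do not retry */
	return ret;
}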
@@ -416,7 +457,6 @@ static int scmi_vio_probe(struct virtio_device *vdev)
 		unsigned int sz;

 		spin_lock_init(&channels[i].lock);
-		spin_lock_init(&channels[i].ready_lock);
 		INIT_LIST_HEAD(&channels[i].free_list);
 		channels[i].vqueue = vqs[i];
@@ -503,7 +543,8 @@ const struct scmi_desc scmi_virtio_desc = {
 	.transport_init = virtio_scmi_init,
 	.transport_exit = virtio_scmi_exit,
 	.ops = &scmi_virtio_ops,
-	.max_rx_timeout_ms = 60000, /* for non-realtime virtio devices */
+	/* for non-realtime virtio devices */
+	.max_rx_timeout_ms = VIRTIO_MAX_RX_TIMEOUT_MS,
 	.max_msg = 0, /* overridden by virtio_get_max_msg() */
 	.max_msg_size = VIRTIO_SCMI_MAX_MSG_SIZE,
 };