Skip to content

Commit 02fed6d

Browse files
Konstantin Taranov authored and davem330 committed
net: mana: add msix index sharing between EQs
This patch allows to assign and poll more than one EQ on the same msix index. It is achieved by introducing a list of attached EQs in each IRQ context. It also removes the existing msix_index map that tried to ensure that there is only one EQ at each msix_index. This patch exports symbols for creating EQs from other MANA kernel modules. Signed-off-by: Konstantin Taranov <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent 10b7572 commit 02fed6d

File tree

4 files changed

+43
-42
lines changed

4 files changed

+43
-42
lines changed

drivers/net/ethernet/microsoft/mana/gdma_main.c

Lines changed: 36 additions & 40 deletions
Original file line number | Diff line number | Diff line change
@@ -414,8 +414,12 @@ static void mana_gd_process_eq_events(void *arg)
414414

415415
old_bits = (eq->head / num_eqe - 1) & GDMA_EQE_OWNER_MASK;
416416
/* No more entries */
417-
if (owner_bits == old_bits)
417+
if (owner_bits == old_bits) {
418+
/* return here without ringing the doorbell */
419+
if (i == 0)
420+
return;
418421
break;
422+
}
419423

420424
new_bits = (eq->head / num_eqe) & GDMA_EQE_OWNER_MASK;
421425
if (owner_bits != new_bits) {
@@ -445,42 +449,29 @@ static int mana_gd_register_irq(struct gdma_queue *queue,
445449
struct gdma_dev *gd = queue->gdma_dev;
446450
struct gdma_irq_context *gic;
447451
struct gdma_context *gc;
448-
struct gdma_resource *r;
449452
unsigned int msi_index;
450453
unsigned long flags;
451454
struct device *dev;
452455
int err = 0;
453456

454457
gc = gd->gdma_context;
455-
r = &gc->msix_resource;
456458
dev = gc->dev;
459+
msi_index = spec->eq.msix_index;
457460

458-
spin_lock_irqsave(&r->lock, flags);
459-
460-
msi_index = find_first_zero_bit(r->map, r->size);
461-
if (msi_index >= r->size || msi_index >= gc->num_msix_usable) {
461+
if (msi_index >= gc->num_msix_usable) {
462462
err = -ENOSPC;
463-
} else {
464-
bitmap_set(r->map, msi_index, 1);
465-
queue->eq.msix_index = msi_index;
466-
}
467-
468-
spin_unlock_irqrestore(&r->lock, flags);
469-
470-
if (err) {
471-
dev_err(dev, "Register IRQ err:%d, msi:%u rsize:%u, nMSI:%u",
472-
err, msi_index, r->size, gc->num_msix_usable);
463+
dev_err(dev, "Register IRQ err:%d, msi:%u nMSI:%u",
464+
err, msi_index, gc->num_msix_usable);
473465

474466
return err;
475467
}
476468

469+
queue->eq.msix_index = msi_index;
477470
gic = &gc->irq_contexts[msi_index];
478471

479-
WARN_ON(gic->handler || gic->arg);
480-
481-
gic->arg = queue;
482-
483-
gic->handler = mana_gd_process_eq_events;
472+
spin_lock_irqsave(&gic->lock, flags);
473+
list_add_rcu(&queue->entry, &gic->eq_list);
474+
spin_unlock_irqrestore(&gic->lock, flags);
484475

485476
return 0;
486477
}
@@ -490,27 +481,29 @@ static void mana_gd_deregiser_irq(struct gdma_queue *queue)
490481
struct gdma_dev *gd = queue->gdma_dev;
491482
struct gdma_irq_context *gic;
492483
struct gdma_context *gc;
493-
struct gdma_resource *r;
494484
unsigned int msix_index;
495485
unsigned long flags;
486+
struct gdma_queue *eq;
496487

497488
gc = gd->gdma_context;
498-
r = &gc->msix_resource;
499489

500490
/* At most num_online_cpus() + 1 interrupts are used. */
501491
msix_index = queue->eq.msix_index;
502492
if (WARN_ON(msix_index >= gc->num_msix_usable))
503493
return;
504494

505495
gic = &gc->irq_contexts[msix_index];
506-
gic->handler = NULL;
507-
gic->arg = NULL;
508-
509-
spin_lock_irqsave(&r->lock, flags);
510-
bitmap_clear(r->map, msix_index, 1);
511-
spin_unlock_irqrestore(&r->lock, flags);
496+
spin_lock_irqsave(&gic->lock, flags);
497+
list_for_each_entry_rcu(eq, &gic->eq_list, entry) {
498+
if (queue == eq) {
499+
list_del_rcu(&eq->entry);
500+
break;
501+
}
502+
}
503+
spin_unlock_irqrestore(&gic->lock, flags);
512504

513505
queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
506+
synchronize_rcu();
514507
}
515508

516509
int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq)
@@ -588,6 +581,7 @@ static int mana_gd_create_eq(struct gdma_dev *gd,
588581
int err;
589582

590583
queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
584+
queue->id = INVALID_QUEUE_ID;
591585

592586
log2_num_entries = ilog2(queue->queue_size / GDMA_EQE_SIZE);
593587

@@ -819,6 +813,7 @@ int mana_gd_create_mana_eq(struct gdma_dev *gd,
819813
kfree(queue);
820814
return err;
821815
}
816+
EXPORT_SYMBOL_NS(mana_gd_create_mana_eq, NET_MANA);
822817

823818
int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
824819
const struct gdma_queue_spec *spec,
@@ -895,6 +890,7 @@ void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
895890
mana_gd_free_memory(gmi);
896891
kfree(queue);
897892
}
893+
EXPORT_SYMBOL_NS(mana_gd_destroy_queue, NET_MANA);
898894

899895
int mana_gd_verify_vf_version(struct pci_dev *pdev)
900896
{
@@ -1217,9 +1213,14 @@ int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe)
12171213
static irqreturn_t mana_gd_intr(int irq, void *arg)
12181214
{
12191215
struct gdma_irq_context *gic = arg;
1216+
struct list_head *eq_list = &gic->eq_list;
1217+
struct gdma_queue *eq;
12201218

1221-
if (gic->handler)
1222-
gic->handler(gic->arg);
1219+
rcu_read_lock();
1220+
list_for_each_entry_rcu(eq, eq_list, entry) {
1221+
gic->handler(eq);
1222+
}
1223+
rcu_read_unlock();
12231224

12241225
return IRQ_HANDLED;
12251226
}
@@ -1271,8 +1272,9 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
12711272

12721273
for (i = 0; i < nvec; i++) {
12731274
gic = &gc->irq_contexts[i];
1274-
gic->handler = NULL;
1275-
gic->arg = NULL;
1275+
gic->handler = mana_gd_process_eq_events;
1276+
INIT_LIST_HEAD(&gic->eq_list);
1277+
spin_lock_init(&gic->lock);
12761278

12771279
if (!i)
12781280
snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_hwc@pci:%s",
@@ -1295,10 +1297,6 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
12951297
irq_set_affinity_and_hint(irq, cpumask_of(cpu));
12961298
}
12971299

1298-
err = mana_gd_alloc_res_map(nvec, &gc->msix_resource);
1299-
if (err)
1300-
goto free_irq;
1301-
13021300
gc->max_num_msix = nvec;
13031301
gc->num_msix_usable = nvec;
13041302

@@ -1329,8 +1327,6 @@ static void mana_gd_remove_irqs(struct pci_dev *pdev)
13291327
if (gc->max_num_msix < 1)
13301328
return;
13311329

1332-
mana_gd_free_res_map(&gc->msix_resource);
1333-
13341330
for (i = 0; i < gc->max_num_msix; i++) {
13351331
irq = pci_irq_vector(pdev, i);
13361332
if (irq < 0)

drivers/net/ethernet/microsoft/mana/hw_channel.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -300,6 +300,7 @@ static int mana_hwc_create_gdma_eq(struct hw_channel_context *hwc,
300300
spec.eq.context = ctx;
301301
spec.eq.callback = cb;
302302
spec.eq.log2_throttle_limit = DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ;
303+
spec.eq.msix_index = 0;
303304

304305
return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
305306
}

drivers/net/ethernet/microsoft/mana/mana_en.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1244,6 +1244,7 @@ static int mana_create_eq(struct mana_context *ac)
12441244
spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
12451245

12461246
for (i = 0; i < gc->max_num_queues; i++) {
1247+
spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
12471248
err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
12481249
if (err)
12491250
goto out;

include/net/mana/gdma.h

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -293,6 +293,7 @@ struct gdma_queue {
293293

294294
u32 head;
295295
u32 tail;
296+
struct list_head entry;
296297

297298
/* Extra fields specific to EQ/CQ. */
298299
union {
@@ -328,6 +329,7 @@ struct gdma_queue_spec {
328329
void *context;
329330

330331
unsigned long log2_throttle_limit;
332+
unsigned int msix_index;
331333
} eq;
332334

333335
struct {
@@ -344,7 +346,9 @@ struct gdma_queue_spec {
344346

345347
struct gdma_irq_context {
346348
void (*handler)(void *arg);
347-
void *arg;
349+
/* Protect the eq_list */
350+
spinlock_t lock;
351+
struct list_head eq_list;
348352
char name[MANA_IRQ_NAME_SZ];
349353
};
350354

@@ -355,7 +359,6 @@ struct gdma_context {
355359
unsigned int max_num_queues;
356360
unsigned int max_num_msix;
357361
unsigned int num_msix_usable;
358-
struct gdma_resource msix_resource;
359362
struct gdma_irq_context *irq_contexts;
360363

361364
/* L2 MTU */

0 commit comments

Comments
 (0)