Skip to content

Commit 176baef

Browse files
committed
cxl/hdm: Commit decoder state to hardware
After all the soft validation of the region has completed, convey the region configuration to hardware while being careful to commit decoders in specification mandated order. In addition to programming the endpoint decoder base-address, interleave ways and granularity, the switch decoder target lists are also established. While the kernel can enforce spec-mandated commit order, it can not enforce spec-mandated reset order. For example, the kernel can't stop someone from removing an endpoint device that is occupying decoderN in a switch decoder where decoderN+1 is also committed. To reset decoderN, decoderN+1 must be torn down first. That "tear down the world" implementation is saved for a follow-on patch. Callback operations are provided for the 'commit' and 'reset' operations. While those callbacks may prove useful for CXL accelerators (Type-2 devices with memory) the primary motivation is to enable a simple way for cxl_test to intercept those operations. Reviewed-by: Jonathan Cameron <[email protected]> Link: https://lore.kernel.org/r/165784338418.1758207.14659830845389904356.stgit@dwillia2-xfh.jf.intel.com Signed-off-by: Dan Williams <[email protected]>
1 parent 27b3f8d commit 176baef

File tree

6 files changed

+486
-11
lines changed

6 files changed

+486
-11
lines changed

Documentation/ABI/testing/sysfs-bus-cxl

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -372,3 +372,19 @@ Description:
372372
not an endpoint decoder. Once all positions have been
373373
successfully written a final validation for decode conflicts is
374374
performed before activating the region.
375+
376+
377+
What:		/sys/bus/cxl/devices/regionZ/commit
Date:		May, 2022
KernelVersion:	v5.20
Description:
		(RW) Write a boolean 'true' string value to this attribute to
		trigger the region to transition from the software programmed
		state to the actively decoding in hardware state. The commit
		operation, in addition to validating that the region is in a
		properly configured state, validates that the decoders are being
		committed in spec-mandated order (last committed decoder id +
		1), and checks that the hardware accepts the commit request.
		Reading this value indicates whether the region is committed or
		not.

drivers/cxl/core/hdm.c

Lines changed: 227 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -129,6 +129,8 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port)
129129
return ERR_PTR(-ENXIO);
130130
}
131131

132+
dev_set_drvdata(dev, cxlhdm);
133+
132134
return cxlhdm;
133135
}
134136
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, CXL);
@@ -466,6 +468,222 @@ int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
466468
return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
467469
}
468470

471+
static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl)
472+
{
473+
u16 eig;
474+
u8 eiw;
475+
476+
/*
477+
* Input validation ensures these warns never fire, but otherwise
478+
* suppress unititalized variable usage warnings.
479+
*/
480+
if (WARN_ONCE(ways_to_cxl(cxld->interleave_ways, &eiw),
481+
"invalid interleave_ways: %d\n", cxld->interleave_ways))
482+
return;
483+
if (WARN_ONCE(granularity_to_cxl(cxld->interleave_granularity, &eig),
484+
"invalid interleave_granularity: %d\n",
485+
cxld->interleave_granularity))
486+
return;
487+
488+
u32p_replace_bits(ctrl, eig, CXL_HDM_DECODER0_CTRL_IG_MASK);
489+
u32p_replace_bits(ctrl, eiw, CXL_HDM_DECODER0_CTRL_IW_MASK);
490+
*ctrl |= CXL_HDM_DECODER0_CTRL_COMMIT;
491+
}
492+
493+
static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl)
494+
{
495+
u32p_replace_bits(ctrl, !!(cxld->target_type == 3),
496+
CXL_HDM_DECODER0_CTRL_TYPE);
497+
}
498+
499+
static void cxld_set_hpa(struct cxl_decoder *cxld, u64 *base, u64 *size)
500+
{
501+
struct cxl_region *cxlr = cxld->region;
502+
struct cxl_region_params *p = &cxlr->params;
503+
504+
cxld->hpa_range = (struct range) {
505+
.start = p->res->start,
506+
.end = p->res->end,
507+
};
508+
509+
*base = p->res->start;
510+
*size = resource_size(p->res);
511+
}
512+
513+
static void cxld_clear_hpa(struct cxl_decoder *cxld)
514+
{
515+
cxld->hpa_range = (struct range) {
516+
.start = 0,
517+
.end = -1,
518+
};
519+
}
520+
521+
static int cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
522+
{
523+
struct cxl_dport **t = &cxlsd->target[0];
524+
int ways = cxlsd->cxld.interleave_ways;
525+
526+
if (dev_WARN_ONCE(&cxlsd->cxld.dev,
527+
ways > 8 || ways > cxlsd->nr_targets,
528+
"ways: %d overflows targets: %d\n", ways,
529+
cxlsd->nr_targets))
530+
return -ENXIO;
531+
532+
*tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id);
533+
if (ways > 1)
534+
*tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id);
535+
if (ways > 2)
536+
*tgt |= FIELD_PREP(GENMASK(23, 16), t[2]->port_id);
537+
if (ways > 3)
538+
*tgt |= FIELD_PREP(GENMASK(31, 24), t[3]->port_id);
539+
if (ways > 4)
540+
*tgt |= FIELD_PREP(GENMASK_ULL(39, 32), t[4]->port_id);
541+
if (ways > 5)
542+
*tgt |= FIELD_PREP(GENMASK_ULL(47, 40), t[5]->port_id);
543+
if (ways > 6)
544+
*tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id);
545+
if (ways > 7)
546+
*tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id);
547+
548+
return 0;
549+
}
550+
551+
/*
552+
* Per CXL 2.0 8.2.5.12.20 Committing Decoder Programming, hardware must set
553+
* committed or error within 10ms, but just be generous with 20ms to account for
554+
* clock skew and other marginal behavior
555+
*/
556+
#define COMMIT_TIMEOUT_MS 20
557+
static int cxld_await_commit(void __iomem *hdm, int id)
558+
{
559+
u32 ctrl;
560+
int i;
561+
562+
for (i = 0; i < COMMIT_TIMEOUT_MS; i++) {
563+
ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
564+
if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMIT_ERROR, ctrl)) {
565+
ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
566+
writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
567+
return -EIO;
568+
}
569+
if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
570+
return 0;
571+
fsleep(1000);
572+
}
573+
574+
return -ETIMEDOUT;
575+
}
576+
577+
static int cxl_decoder_commit(struct cxl_decoder *cxld)
578+
{
579+
struct cxl_port *port = to_cxl_port(cxld->dev.parent);
580+
struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
581+
void __iomem *hdm = cxlhdm->regs.hdm_decoder;
582+
int id = cxld->id, rc;
583+
u64 base, size;
584+
u32 ctrl;
585+
586+
if (cxld->flags & CXL_DECODER_F_ENABLE)
587+
return 0;
588+
589+
if (port->commit_end + 1 != id) {
590+
dev_dbg(&port->dev,
591+
"%s: out of order commit, expected decoder%d.%d\n",
592+
dev_name(&cxld->dev), port->id, port->commit_end + 1);
593+
return -EBUSY;
594+
}
595+
596+
down_read(&cxl_dpa_rwsem);
597+
/* common decoder settings */
598+
ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
599+
cxld_set_interleave(cxld, &ctrl);
600+
cxld_set_type(cxld, &ctrl);
601+
cxld_set_hpa(cxld, &base, &size);
602+
603+
writel(upper_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
604+
writel(lower_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
605+
writel(upper_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
606+
writel(lower_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
607+
608+
if (is_switch_decoder(&cxld->dev)) {
609+
struct cxl_switch_decoder *cxlsd =
610+
to_cxl_switch_decoder(&cxld->dev);
611+
void __iomem *tl_hi = hdm + CXL_HDM_DECODER0_TL_HIGH(id);
612+
void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id);
613+
u64 targets;
614+
615+
rc = cxlsd_set_targets(cxlsd, &targets);
616+
if (rc) {
617+
dev_dbg(&port->dev, "%s: target configuration error\n",
618+
dev_name(&cxld->dev));
619+
goto err;
620+
}
621+
622+
writel(upper_32_bits(targets), tl_hi);
623+
writel(lower_32_bits(targets), tl_lo);
624+
} else {
625+
struct cxl_endpoint_decoder *cxled =
626+
to_cxl_endpoint_decoder(&cxld->dev);
627+
void __iomem *sk_hi = hdm + CXL_HDM_DECODER0_SKIP_HIGH(id);
628+
void __iomem *sk_lo = hdm + CXL_HDM_DECODER0_SKIP_LOW(id);
629+
630+
writel(upper_32_bits(cxled->skip), sk_hi);
631+
writel(lower_32_bits(cxled->skip), sk_lo);
632+
}
633+
634+
writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
635+
up_read(&cxl_dpa_rwsem);
636+
637+
port->commit_end++;
638+
rc = cxld_await_commit(hdm, cxld->id);
639+
err:
640+
if (rc) {
641+
dev_dbg(&port->dev, "%s: error %d committing decoder\n",
642+
dev_name(&cxld->dev), rc);
643+
cxld->reset(cxld);
644+
return rc;
645+
}
646+
cxld->flags |= CXL_DECODER_F_ENABLE;
647+
648+
return 0;
649+
}
/*
 * cxl_decoder_reset() - tear down a committed decoder
 * @cxld: decoder to reset
 *
 * Clears the COMMIT bit and zeroes the decoder's HPA registers, enforcing
 * that decoders are reset in reverse commit order (only the last committed
 * decoder for the port may be reset). Register write order follows the
 * commit sequence in reverse — do not reorder.
 *
 * Returns 0 on success (or if the decoder was never enabled), -EBUSY for
 * an out-of-order reset attempt.
 */
static int cxl_decoder_reset(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	int id = cxld->id;
	u32 ctrl;

	/* nothing to do if the decoder was never committed */
	if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
		return 0;

	/* only the most recently committed decoder on this port may reset */
	if (port->commit_end != id) {
		dev_dbg(&port->dev,
			"%s: out of order reset, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id, port->commit_end);
		return -EBUSY;
	}

	down_read(&cxl_dpa_rwsem);
	/* withdraw the commit first so hardware stops decoding... */
	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
	ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
	writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));

	/* ...then clear the software range and the HPA registers */
	cxld_clear_hpa(cxld);
	writel(0, hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
	up_read(&cxl_dpa_rwsem);

	/* make the slot available for a subsequent commit */
	port->commit_end--;
	cxld->flags &= ~CXL_DECODER_F_ENABLE;

	return 0;
}
686+
469687
static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
470688
int *target_map, void __iomem *hdm, int which,
471689
u64 *dpa_base)
@@ -488,6 +706,8 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
488706
base = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
489707
size = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));
490708
committed = !!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED);
709+
cxld->commit = cxl_decoder_commit;
710+
cxld->reset = cxl_decoder_reset;
491711

492712
if (!committed)
493713
size = 0;
@@ -511,6 +731,13 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
511731
cxld->target_type = CXL_DECODER_EXPANDER;
512732
else
513733
cxld->target_type = CXL_DECODER_ACCELERATOR;
734+
if (cxld->id != port->commit_end + 1) {
735+
dev_warn(&port->dev,
736+
"decoder%d.%d: Committed out of order\n",
737+
port->id, cxld->id);
738+
return -ENXIO;
739+
}
740+
port->commit_end = cxld->id;
514741
} else {
515742
/* unless / until type-2 drivers arrive, assume type-3 */
516743
if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl) == 0) {

drivers/cxl/core/port.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -632,6 +632,7 @@ static struct cxl_port *cxl_port_alloc(struct device *uport,
632632
port->component_reg_phys = component_reg_phys;
633633
ida_init(&port->decoder_ida);
634634
port->hdm_end = -1;
635+
port->commit_end = -1;
635636
xa_init(&port->dports);
636637
xa_init(&port->endpoints);
637638
xa_init(&port->regions);

0 commit comments

Comments
 (0)