Skip to content

Commit c217c16

Browse files
committed
Revert "Revert "Revert "dmaengine: remove DMA_SG as it is dead code in kernel"""
This reverts commit a9401f2. We removed the Xilinx DMA, and when that was done, the Xilinx DMA was brought to the same state as the Xilinx upstream tree. But we had also reverted their revert, which their DMA drivers require in order to build. So, this is a triple revert, to get back closer to the state in the Xilinx tree. This change does not revert the `drivers/dma/dmatest.c` changes, as we want to upstream more of our changes to that module, and being in-sync with DMAEngine upstream helps. Signed-off-by: Alexandru Ardelean <[email protected]>
1 parent 85922ac commit c217c16

File tree

11 files changed

+752
-3
lines changed

11 files changed

+752
-3
lines changed

Documentation/dmaengine/provider.txt

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -181,6 +181,13 @@ Currently, the types available are:
181181
- Used by the client drivers to register a callback that will be
182182
called on a regular basis through the DMA controller interrupt
183183

184+
* DMA_SG
185+
- The device supports memory to memory scatter-gather
186+
transfers.
187+
- Even though a plain memcpy can look like a particular case of a
188+
scatter-gather transfer, with a single chunk to transfer, it's a
189+
distinct transaction type in the mem2mem transfers case
190+
184191
* DMA_PRIVATE
185192
- The devices only supports slave transfers, and as such isn't
186193
available for async transfers.

drivers/crypto/ccp/ccp-dmaengine.c

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -502,6 +502,27 @@ static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy(
502502
return &desc->tx_desc;
503503
}
504504

505+
static struct dma_async_tx_descriptor *ccp_prep_dma_sg(
506+
struct dma_chan *dma_chan, struct scatterlist *dst_sg,
507+
unsigned int dst_nents, struct scatterlist *src_sg,
508+
unsigned int src_nents, unsigned long flags)
509+
{
510+
struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
511+
dma_chan);
512+
struct ccp_dma_desc *desc;
513+
514+
dev_dbg(chan->ccp->dev,
515+
"%s - src=%p, src_nents=%u dst=%p, dst_nents=%u, flags=%#lx\n",
516+
__func__, src_sg, src_nents, dst_sg, dst_nents, flags);
517+
518+
desc = ccp_create_desc(dma_chan, dst_sg, dst_nents, src_sg, src_nents,
519+
flags);
520+
if (!desc)
521+
return NULL;
522+
523+
return &desc->tx_desc;
524+
}
525+
505526
static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt(
506527
struct dma_chan *dma_chan, unsigned long flags)
507528
{
@@ -683,6 +704,7 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
683704
dma_dev->directions = DMA_MEM_TO_MEM;
684705
dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
685706
dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
707+
dma_cap_set(DMA_SG, dma_dev->cap_mask);
686708
dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
687709

688710
/* The DMA channels for this device can be set to public or private,
@@ -718,6 +740,7 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
718740

719741
dma_dev->device_free_chan_resources = ccp_free_chan_resources;
720742
dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;
743+
dma_dev->device_prep_dma_sg = ccp_prep_dma_sg;
721744
dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;
722745
dma_dev->device_issue_pending = ccp_issue_pending;
723746
dma_dev->device_tx_status = ccp_tx_status;

drivers/dma/at_hdmac.c

Lines changed: 139 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1202,6 +1202,138 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
12021202
return NULL;
12031203
}
12041204

1205+
/**
1206+
* atc_prep_dma_sg - prepare memory to memory scather-gather operation
1207+
* @chan: the channel to prepare operation on
1208+
* @dst_sg: destination scatterlist
1209+
* @dst_nents: number of destination scatterlist entries
1210+
* @src_sg: source scatterlist
1211+
* @src_nents: number of source scatterlist entries
1212+
* @flags: tx descriptor status flags
1213+
*/
1214+
static struct dma_async_tx_descriptor *
1215+
atc_prep_dma_sg(struct dma_chan *chan,
1216+
struct scatterlist *dst_sg, unsigned int dst_nents,
1217+
struct scatterlist *src_sg, unsigned int src_nents,
1218+
unsigned long flags)
1219+
{
1220+
struct at_dma_chan *atchan = to_at_dma_chan(chan);
1221+
struct at_desc *desc = NULL;
1222+
struct at_desc *first = NULL;
1223+
struct at_desc *prev = NULL;
1224+
unsigned int src_width;
1225+
unsigned int dst_width;
1226+
size_t xfer_count;
1227+
u32 ctrla;
1228+
u32 ctrlb;
1229+
size_t dst_len = 0, src_len = 0;
1230+
dma_addr_t dst = 0, src = 0;
1231+
size_t len = 0, total_len = 0;
1232+
1233+
if (unlikely(dst_nents == 0 || src_nents == 0))
1234+
return NULL;
1235+
1236+
if (unlikely(dst_sg == NULL || src_sg == NULL))
1237+
return NULL;
1238+
1239+
ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
1240+
| ATC_SRC_ADDR_MODE_INCR
1241+
| ATC_DST_ADDR_MODE_INCR
1242+
| ATC_FC_MEM2MEM;
1243+
1244+
/*
1245+
* loop until there is either no more source or no more destination
1246+
* scatterlist entry
1247+
*/
1248+
while (true) {
1249+
1250+
/* prepare the next transfer */
1251+
if (dst_len == 0) {
1252+
1253+
/* no more destination scatterlist entries */
1254+
if (!dst_sg || !dst_nents)
1255+
break;
1256+
1257+
dst = sg_dma_address(dst_sg);
1258+
dst_len = sg_dma_len(dst_sg);
1259+
1260+
dst_sg = sg_next(dst_sg);
1261+
dst_nents--;
1262+
}
1263+
1264+
if (src_len == 0) {
1265+
1266+
/* no more source scatterlist entries */
1267+
if (!src_sg || !src_nents)
1268+
break;
1269+
1270+
src = sg_dma_address(src_sg);
1271+
src_len = sg_dma_len(src_sg);
1272+
1273+
src_sg = sg_next(src_sg);
1274+
src_nents--;
1275+
}
1276+
1277+
len = min_t(size_t, src_len, dst_len);
1278+
if (len == 0)
1279+
continue;
1280+
1281+
/* take care for the alignment */
1282+
src_width = dst_width = atc_get_xfer_width(src, dst, len);
1283+
1284+
ctrla = ATC_SRC_WIDTH(src_width) |
1285+
ATC_DST_WIDTH(dst_width);
1286+
1287+
/*
1288+
* The number of transfers to set up refer to the source width
1289+
* that depends on the alignment.
1290+
*/
1291+
xfer_count = len >> src_width;
1292+
if (xfer_count > ATC_BTSIZE_MAX) {
1293+
xfer_count = ATC_BTSIZE_MAX;
1294+
len = ATC_BTSIZE_MAX << src_width;
1295+
}
1296+
1297+
/* create the transfer */
1298+
desc = atc_desc_get(atchan);
1299+
if (!desc)
1300+
goto err_desc_get;
1301+
1302+
desc->lli.saddr = src;
1303+
desc->lli.daddr = dst;
1304+
desc->lli.ctrla = ctrla | xfer_count;
1305+
desc->lli.ctrlb = ctrlb;
1306+
1307+
desc->txd.cookie = 0;
1308+
desc->len = len;
1309+
1310+
atc_desc_chain(&first, &prev, desc);
1311+
1312+
/* update the lengths and addresses for the next loop cycle */
1313+
dst_len -= len;
1314+
src_len -= len;
1315+
dst += len;
1316+
src += len;
1317+
1318+
total_len += len;
1319+
}
1320+
1321+
/* First descriptor of the chain embedds additional information */
1322+
first->txd.cookie = -EBUSY;
1323+
first->total_len = total_len;
1324+
1325+
/* set end-of-link to the last link descriptor of list*/
1326+
set_desc_eol(desc);
1327+
1328+
first->txd.flags = flags; /* client is in control of this ack */
1329+
1330+
return &first->txd;
1331+
1332+
err_desc_get:
1333+
atc_desc_put(atchan, first);
1334+
return NULL;
1335+
}
1336+
12051337
/**
12061338
* atc_dma_cyclic_check_values
12071339
* Check for too big/unaligned periods and unaligned DMA buffer
@@ -1801,12 +1933,14 @@ static int __init at_dma_probe(struct platform_device *pdev)
18011933

18021934
/* setup platform data for each SoC */
18031935
dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
1936+
dma_cap_set(DMA_SG, at91sam9rl_config.cap_mask);
18041937
dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask);
18051938
dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
18061939
dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask);
18071940
dma_cap_set(DMA_MEMSET_SG, at91sam9g45_config.cap_mask);
18081941
dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask);
18091942
dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
1943+
dma_cap_set(DMA_SG, at91sam9g45_config.cap_mask);
18101944

18111945
/* get DMA parameters from controller type */
18121946
plat_dat = at_dma_get_driver_data(pdev);
@@ -1944,12 +2078,16 @@ static int __init at_dma_probe(struct platform_device *pdev)
19442078
atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
19452079
}
19462080

2081+
if (dma_has_cap(DMA_SG, atdma->dma_common.cap_mask))
2082+
atdma->dma_common.device_prep_dma_sg = atc_prep_dma_sg;
2083+
19472084
dma_writel(atdma, EN, AT_DMA_ENABLE);
19482085

1949-
dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n",
2086+
dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s%s), %d channels\n",
19502087
dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
19512088
dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "",
19522089
dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
2090+
dma_has_cap(DMA_SG, atdma->dma_common.cap_mask) ? "sg-cpy " : "",
19532091
plat_dat->nr_channels);
19542092

19552093
dma_async_device_register(&atdma->dma_common);

drivers/dma/dmaengine.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -937,6 +937,8 @@ int dma_async_device_register(struct dma_device *device)
937937
!device->device_prep_dma_memset);
938938
BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
939939
!device->device_prep_dma_interrupt);
940+
BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
941+
!device->device_prep_dma_sg);
940942
BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
941943
!device->device_prep_dma_cyclic);
942944
BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&

drivers/dma/fsldma.c

Lines changed: 118 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -825,6 +825,122 @@ fsl_dma_prep_memcpy(struct dma_chan *dchan,
825825
return NULL;
826826
}
827827

828+
static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
829+
struct scatterlist *dst_sg, unsigned int dst_nents,
830+
struct scatterlist *src_sg, unsigned int src_nents,
831+
unsigned long flags)
832+
{
833+
struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
834+
struct fsldma_chan *chan = to_fsl_chan(dchan);
835+
size_t dst_avail, src_avail;
836+
dma_addr_t dst, src;
837+
size_t len;
838+
839+
/* basic sanity checks */
840+
if (dst_nents == 0 || src_nents == 0)
841+
return NULL;
842+
843+
if (dst_sg == NULL || src_sg == NULL)
844+
return NULL;
845+
846+
/*
847+
* TODO: should we check that both scatterlists have the same
848+
* TODO: number of bytes in total? Is that really an error?
849+
*/
850+
851+
/* get prepared for the loop */
852+
dst_avail = sg_dma_len(dst_sg);
853+
src_avail = sg_dma_len(src_sg);
854+
855+
/* run until we are out of scatterlist entries */
856+
while (true) {
857+
858+
/* create the largest transaction possible */
859+
len = min_t(size_t, src_avail, dst_avail);
860+
len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT);
861+
if (len == 0)
862+
goto fetch;
863+
864+
dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
865+
src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;
866+
867+
/* allocate and populate the descriptor */
868+
new = fsl_dma_alloc_descriptor(chan);
869+
if (!new) {
870+
chan_err(chan, "%s\n", msg_ld_oom);
871+
goto fail;
872+
}
873+
874+
set_desc_cnt(chan, &new->hw, len);
875+
set_desc_src(chan, &new->hw, src);
876+
set_desc_dst(chan, &new->hw, dst);
877+
878+
if (!first)
879+
first = new;
880+
else
881+
set_desc_next(chan, &prev->hw, new->async_tx.phys);
882+
883+
new->async_tx.cookie = 0;
884+
async_tx_ack(&new->async_tx);
885+
prev = new;
886+
887+
/* Insert the link descriptor to the LD ring */
888+
list_add_tail(&new->node, &first->tx_list);
889+
890+
/* update metadata */
891+
dst_avail -= len;
892+
src_avail -= len;
893+
894+
fetch:
895+
/* fetch the next dst scatterlist entry */
896+
if (dst_avail == 0) {
897+
898+
/* no more entries: we're done */
899+
if (dst_nents == 0)
900+
break;
901+
902+
/* fetch the next entry: if there are no more: done */
903+
dst_sg = sg_next(dst_sg);
904+
if (dst_sg == NULL)
905+
break;
906+
907+
dst_nents--;
908+
dst_avail = sg_dma_len(dst_sg);
909+
}
910+
911+
/* fetch the next src scatterlist entry */
912+
if (src_avail == 0) {
913+
914+
/* no more entries: we're done */
915+
if (src_nents == 0)
916+
break;
917+
918+
/* fetch the next entry: if there are no more: done */
919+
src_sg = sg_next(src_sg);
920+
if (src_sg == NULL)
921+
break;
922+
923+
src_nents--;
924+
src_avail = sg_dma_len(src_sg);
925+
}
926+
}
927+
928+
new->async_tx.flags = flags; /* client is in control of this ack */
929+
new->async_tx.cookie = -EBUSY;
930+
931+
/* Set End-of-link to the last link descriptor of new list */
932+
set_ld_eol(chan, new);
933+
934+
return &first->async_tx;
935+
936+
fail:
937+
if (!first)
938+
return NULL;
939+
940+
fsldma_free_desc_list_reverse(chan, &first->tx_list);
941+
return NULL;
942+
}
943+
828944
static int fsl_dma_device_terminate_all(struct dma_chan *dchan)
829945
{
830946
struct fsldma_chan *chan;
@@ -1241,10 +1357,12 @@ static int fsldma_of_probe(struct platform_device *op)
12411357
fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);
12421358

12431359
dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
1360+
dma_cap_set(DMA_SG, fdev->common.cap_mask);
12441361
dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
12451362
fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
12461363
fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
12471364
fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
1365+
fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
12481366
fdev->common.device_tx_status = fsl_tx_status;
12491367
fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
12501368
fdev->common.device_config = fsl_dma_device_config;

0 commit comments

Comments
 (0)