
Commit 5c772d4

Fix / Range selector (#620)
* Fix negative offset in range selector
* Do not double pump in streaming encoder base class
1 parent 7c986d0 commit 5c772d4

File tree

2 files changed (+8 −9 lines)

tracdap-libs/tracdap-lib-data/src/main/java/org/finos/tracdap/common/codec/StreamingEncoder.java
tracdap-libs/tracdap-lib-data/src/main/java/org/finos/tracdap/common/data/pipeline/RangeSelector.java


tracdap-libs/tracdap-lib-data/src/main/java/org/finos/tracdap/common/codec/StreamingEncoder.java

Lines changed: 1 addition & 6 deletions

@@ -46,11 +46,6 @@ public boolean isReady() {
 
     @Override
     public void pump() {
-
-        if (context == null)
-            return;
-
-        if (context.readyToUnload() && consumerReady())
-            onBatch();
+        // No-op, streaming encoder pushes batches through directly
     }
 }
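
The deleted pump() body re-emitted a batch whenever the encoder context was ready to unload and the consumer was ready. A streaming encoder already pushes each batch downstream as it arrives, so leaving that logic in pump() risked delivering the same batch twice (the "double pump" named in the commit message). Below is a minimal, self-contained sketch of that pattern, not the TRAC code: PushStage, Consumer and the String batches are hypothetical stand-ins, used only to illustrate why pump() becomes a no-op for a push-based stage.

import java.util.ArrayList;
import java.util.List;

public class PushStageSketch {

    // Hypothetical downstream consumer interface (not the TRAC DataPipeline API)
    interface Consumer { void onBatch(String batch); }

    static class PushStage {

        private final Consumer consumer;

        PushStage(Consumer consumer) { this.consumer = consumer; }

        // Upstream delivers a batch: push it straight through to the consumer
        void onBatch(String batch) {
            consumer.onBatch(batch);
        }

        // Downstream signals readiness. If this method also re-sent the last
        // batch, anything already pushed from onBatch() could arrive twice,
        // so for a push-based stage pump() is a no-op.
        void pump() {
            // No-op, batches are pushed through directly from onBatch()
        }
    }

    public static void main(String[] args) {
        List<String> received = new ArrayList<>();
        PushStage stage = new PushStage(received::add);
        stage.onBatch("batch-0");
        stage.pump();                     // must not deliver batch-0 again
        System.out.println(received);     // prints [batch-0]
    }
}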

tracdap-libs/tracdap-lib-data/src/main/java/org/finos/tracdap/common/data/pipeline/RangeSelector.java

Lines changed: 7 additions & 3 deletions

@@ -23,6 +23,8 @@
 import org.finos.tracdap.common.data.ArrowVsrContext;
 import org.finos.tracdap.common.data.DataPipeline;
 import org.finos.tracdap.common.exception.EUnexpected;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.util.ArrayList;
 
@@ -35,6 +37,7 @@ public class RangeSelector
         DataPipeline.DataConsumer<DataPipeline.ArrowApi>,
         DataPipeline.DataProducer<DataPipeline.ArrowApi> {
 
+    private static final Logger log = LoggerFactory.getLogger(RangeSelector.class);
     private final long offset;
     private final long limit;
     private long currentRow;
@@ -123,24 +126,25 @@ public void onBatch() {
 
             sliceRoot.setRowCount(batchSize);
             sliceRoot.setLoaded();
-            consumer().onBatch();
         }
         else if (batchEndRow >= offset && (batchStartRow < offset + limit || limit == 0)) {
 
-            var sliceStart = (int) (offset - batchStartRow);
+            var sliceStart = (int) Math.max(offset - batchStartRow, 0);
             var sliceEnd = (int) Math.min(offset + limit - batchStartRow, batchSize);
             var sliceLength = sliceEnd - sliceStart;
 
             sliceTransfers.forEach(slice -> slice.splitAndTransfer(sliceStart, sliceLength));
 
             sliceRoot.setRowCount(sliceLength);
             sliceRoot.setLoaded();
-            consumer().onBatch();
         }
 
         // Always consume the incoming data
         incomingRoot.setUnloaded();
         currentRow += batchSize;
+
+        if (sliceRoot.readyToUnload())
+            consumer().onBatch();
     }
 
     @Override
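
Two things change in onBatch(). First, the slice start is clamped at zero: when the requested range begins before the current batch (offset < batchStartRow) but still overlaps it, the old expression offset - batchStartRow went negative. Second, the consumer().onBatch() calls inside the branches are replaced by a single call made after the incoming batch has been released, guarded by sliceRoot.readyToUnload(), so downstream is notified only when a slice was actually produced. The sketch below works through the clamped slice arithmetic in isolation; it assumes a bounded range (limit > 0), and while the names mirror the RangeSelector fields, the class is a hypothetical stand-in, not the TRAC implementation.

public class RangeSliceSketch {

    // Compute the slice of a batch that falls inside the row range
    // [offset, offset + limit), assuming limit > 0.
    static void printSlice(long offset, long limit, long batchStartRow, int batchSize) {

        // Before the fix: sliceStart = (int) (offset - batchStartRow), which
        // goes negative when the batch starts after the requested offset.
        // The fix clamps the value at zero.
        var sliceStart = (int) Math.max(offset - batchStartRow, 0);
        var sliceEnd = (int) Math.min(offset + limit - batchStartRow, batchSize);
        var sliceLength = sliceEnd - sliceStart;

        System.out.println("sliceStart=" + sliceStart
                + ", sliceEnd=" + sliceEnd
                + ", sliceLength=" + sliceLength);
    }

    public static void main(String[] args) {
        // Range starting at row 5 with limit 20 (rows 5..24), batch covering
        // rows 10..19: the unclamped start would be 5 - 10 = -5, the clamped
        // start is 0, so all 10 rows of the batch are selected.
        printSlice(5, 20, 10, 10);  // sliceStart=0, sliceEnd=10, sliceLength=10

        // Batch covering rows 0..9: the range starts inside the batch, so the
        // clamp has no effect and rows 5..9 are selected.
        printSlice(5, 20, 0, 10);   // sliceStart=5, sliceEnd=10, sliceLength=5
    }
}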

0 commit comments