This repository was archived by the owner on Feb 4, 2019. It is now read-only.

Commit 533c7b6 (1 parent: ddb0cfb)

Updates to the parallel examples, using the new code.

2 files changed: +121 −6 lines

rackspace/pom.xml

Lines changed: 16 additions & 1 deletion
@@ -26,13 +26,28 @@
    <name>rackspace-examples</name>
 
    <properties>
-      <jclouds.version>1.9.1</jclouds.version>
+      <jclouds.version>2.0.0-SNAPSHOT</jclouds.version>
    </properties>
 
+
+   <!-- Included in case the examples have to run against a snapshot jclouds version -->
    <repositories>
       <repository>
          <id>apache-snapshots</id>
          <url>https://repository.apache.org/content/repositories/snapshots</url>
+         <releases>
+            <enabled>false</enabled>
+         </releases>
+         <snapshots>
+            <enabled>true</enabled>
+         </snapshots>
+      </repository>
+      <repository>
+         <id>sonatype-nexus-snapshots</id>
+         <url>https://oss.sonatype.org/content/repositories/snapshots</url>
+         <releases>
+            <enabled>false</enabled>
+         </releases>
          <snapshots>
             <enabled>true</enabled>
          </snapshots>

rackspace/src/main/java/org/jclouds/examples/rackspace/cloudfiles/UploadLargeObject.java

Lines changed: 105 additions & 5 deletions
@@ -26,8 +26,15 @@
 import java.io.Closeable;
 import java.io.File;
 import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.MappedByteBuffer;
+import java.nio.channels.FileChannel;
 import java.util.Properties;
+import java.util.Random;
 import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
 
 import org.jclouds.ContextBuilder;
 import org.jclouds.blobstore.BlobStore;
@@ -36,6 +43,7 @@
 import org.jclouds.io.Payloads;
 import org.jclouds.openstack.swift.v1.blobstore.RegionScopedBlobStoreContext;
 
+import com.google.common.hash.Hashing;
 import com.google.common.io.ByteSource;
 import com.google.common.io.Closeables;
 import com.google.common.io.Files;
@@ -51,34 +59,45 @@ public class UploadLargeObject implements Closeable {
     *
     * The first argument (args[0]) must be your username
     * The second argument (args[1]) must be your API key
-    * The third argument (args[2]) must be the absolute path to a large file
     */
    public static void main(String[] args) throws IOException {
       UploadLargeObject createContainer = new UploadLargeObject(args[0], args[1]);
+      File largeFile = new File("largefile.dat");
+      File downloadedFile = new File(largeFile.getName()+".downloaded");
 
       try {
-         createContainer.uploadLargeObjectFromFile(new File(args[2]));
+         // Create a 200MB file for this example
+         createContainer.createRandomFile(200000000, largeFile);
+         createContainer.uploadLargeObjectFromFile(largeFile);
+         createContainer.downloadLargeObjectToFile(largeFile.getName());
+         System.out.println("Random file hash: " + Files.hash(largeFile, Hashing.md5()));
+         System.out.println("Downloaded file hash: " + Files.hash(downloadedFile, Hashing.md5()));
       }
       catch (Exception e) {
         e.printStackTrace();
       }
       finally {
+         createContainer.cleanup();
         createContainer.close();
+         if(largeFile.exists()) largeFile.delete();
+         if(downloadedFile.exists()) downloadedFile.delete();
       }
    }
 
    public UploadLargeObject(String username, String apiKey) {
       Properties overrides = new Properties();
       // This property controls the number of parts being uploaded in parallel, the default is 4
-      overrides.setProperty("jclouds.mpu.parallel.degree", "5");
+      overrides.setProperty("jclouds.user-threads", "14");
       // This property controls the size (in bytes) of parts being uploaded in parallel, the default is 33554432 bytes = 32 MB
-      overrides.setProperty("jclouds.mpu.parts.size", "67108864"); // 64 MB
+      overrides.setProperty("jclouds.mpu.parts.size", "1100000"); // 1 MB
 
       RegionScopedBlobStoreContext context = ContextBuilder.newBuilder(PROVIDER)
             .credentials(username, apiKey)
             .overrides(overrides)
             .buildView(RegionScopedBlobStoreContext.class);
       blobStore = context.getBlobStore(REGION);
+
+      blobStore.createContainerInLocation(null, CONTAINER);
    }
 
    /**
@@ -102,7 +121,24 @@ private void uploadLargeObjectFromFile(File largeFile) throws InterruptedExcepti
       // configure the blobstore to use multipart uploading of the file
       String eTag = blobStore.putBlob(CONTAINER, blob, multipart());
 
-      System.out.format(" Uploaded %s eTag=%s", largeFile.getName(), eTag);
+      System.out.format(" Uploaded %s eTag=%s to %s in %s%n", largeFile.getName(), eTag, REGION, CONTAINER);
+   }
+
+   /**
+    * Download a large object from a File using the BlobStore API.
+    *
+    * @throws ExecutionException
+    * @throws InterruptedException
+    */
+   private void downloadLargeObjectToFile(String blobName) throws InterruptedException, ExecutionException {
+      System.out.format("Download large object to file%n");
+
+      blobStore.downloadBlob(CONTAINER, blobName, new File(blobName + ".downloaded"));
+   }
+
+   private void cleanup() {
+      System.out.format("Cleaning up...%n");
+      blobStore.clearContainer(CONTAINER);
    }
 
    /**
@@ -111,4 +147,68 @@ private void uploadLargeObjectFromFile(File largeFile) throws InterruptedExcepti
    public void close() throws IOException {
       Closeables.close(blobStore.getContext(), true);
    }
+
+   /**
+    * Helper method; so that we don't have to add a large file to the repo
+    * @param size File size
+    * @param file The new random file to generate (will overwrite if it exists)
+    * @throws IOException
+    * @throws InterruptedException
+    */
+   private void createRandomFile(long size, File file) throws IOException, InterruptedException {
+      RandomAccessFile raf = null;
+
+      // Reserve space for performance reasons
+      raf = new RandomAccessFile(file.getAbsoluteFile(), "rw");
+      raf.seek(size - 1);
+      raf.write(0);
+
+      // Loop through ranges within the file
+      long from;
+      long to;
+      long partSize = 1000000;
+
+      ExecutorService threadPool = Executors.newFixedThreadPool(16);
+
+      for (from = 0; from < size; from = from + partSize) {
+         to = (from + partSize >= size) ? size - 1 : from + partSize - 1;
+         RandomFileWriter writer = new RandomFileWriter(raf, from, to);
+         threadPool.submit(writer);
+      }
+
+      threadPool.shutdown();
+      threadPool.awaitTermination(1, TimeUnit.DAYS);
+
+      raf.close();
+   }
+
+   /**
+    * Helper class that runs the random file generation
+    */
+   private final class RandomFileWriter implements Runnable {
+      private final RandomAccessFile raf;
+      private final long begin;
+      private final long end;
+
+      RandomFileWriter(RandomAccessFile raf, long begin, long end) {
+         this.raf = raf;
+         this.begin = begin;
+         this.end = end;
+      }
+
+      @Override
+      public void run() {
+         try {
+            byte[] targetArray = new byte[(int) (end - begin + 1)];
+            Random random = new Random();
+            random.nextBytes(targetArray);
+            // Map file region
+            MappedByteBuffer out = raf.getChannel().map(FileChannel.MapMode.READ_WRITE, begin, end - begin + 1);
+            out.put(targetArray);
+            out.force();
+         } catch (IOException e) {
+            throw new RuntimeException(e);
+         }
+      }
+   }
 }
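
For readers who want the parallel-transfer pattern this commit exercises without the random-file plumbing, here is a minimal standalone sketch distilled from the diff above. It assumes jclouds 2.0.0-SNAPSHOT on the classpath; the provider id ("rackspace-cloudfiles-us"), region ("IAD"), container name, and class name are illustrative placeholders rather than values taken from the commit, and the blob construction (blobBuilder with a ByteSource payload) follows the surrounding example class rather than lines shown in this diff.

import static org.jclouds.blobstore.options.PutOptions.Builder.multipart;

import java.io.File;
import java.util.Properties;

import org.jclouds.ContextBuilder;
import org.jclouds.blobstore.BlobStore;
import org.jclouds.blobstore.domain.Blob;
import org.jclouds.io.Payloads;
import org.jclouds.openstack.swift.v1.blobstore.RegionScopedBlobStoreContext;

import com.google.common.io.Closeables;
import com.google.common.io.Files;

public class ParallelTransferSketch {
   public static void main(String[] args) throws Exception {
      String username = args[0];      // Rackspace username
      String apiKey = args[1];        // Rackspace API key
      File file = new File(args[2]);  // any local file to upload

      // Illustrative values -- adjust to your account; not taken from the commit.
      String provider = "rackspace-cloudfiles-us";
      String region = "IAD";
      String container = "parallel-upload-demo";

      // The same two overrides the commit sets: the user thread pool that the
      // parallel transfer draws from, and the size of each uploaded part.
      Properties overrides = new Properties();
      overrides.setProperty("jclouds.user-threads", "14");
      overrides.setProperty("jclouds.mpu.parts.size", "33554432"); // 32 MB parts

      RegionScopedBlobStoreContext context = ContextBuilder.newBuilder(provider)
            .credentials(username, apiKey)
            .overrides(overrides)
            .buildView(RegionScopedBlobStoreContext.class);
      BlobStore blobStore = context.getBlobStore(region);

      try {
         blobStore.createContainerInLocation(null, container);

         // Build a blob backed by the file and upload it in parallel parts.
         Blob blob = blobStore.blobBuilder(file.getName())
               .payload(Payloads.newByteSourcePayload(Files.asByteSource(file)))
               .contentLength(file.length())
               .build();
         String eTag = blobStore.putBlob(container, blob, multipart());
         System.out.format("Uploaded %s eTag=%s%n", file.getName(), eTag);

         // Download it back to a local file.
         blobStore.downloadBlob(container, file.getName(), new File(file.getName() + ".downloaded"));
      } finally {
         Closeables.close(blobStore.getContext(), true);
      }
   }
}

As the diff's comments note, jclouds.user-threads replaces the old jclouds.mpu.parallel.degree setting for controlling how many parts are in flight at once, while jclouds.mpu.parts.size still sets the size of each part.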
