26
26
import java .io .Closeable ;
27
27
import java .io .File ;
28
28
import java .io .IOException ;
29
+ import java .io .RandomAccessFile ;
30
+ import java .nio .MappedByteBuffer ;
31
+ import java .nio .channels .FileChannel ;
29
32
import java .util .Properties ;
33
+ import java .util .Random ;
30
34
import java .util .concurrent .ExecutionException ;
35
+ import java .util .concurrent .ExecutorService ;
36
+ import java .util .concurrent .Executors ;
37
+ import java .util .concurrent .TimeUnit ;
31
38
32
39
import org .jclouds .ContextBuilder ;
33
40
import org .jclouds .blobstore .BlobStore ;
36
43
import org .jclouds .io .Payloads ;
37
44
import org .jclouds .openstack .swift .v1 .blobstore .RegionScopedBlobStoreContext ;
38
45
46
+ import com .google .common .hash .Hashing ;
39
47
import com .google .common .io .ByteSource ;
40
48
import com .google .common .io .Closeables ;
41
49
import com .google .common .io .Files ;
@@ -51,34 +59,45 @@ public class UploadLargeObject implements Closeable {
51
59
*
52
60
* The first argument (args[0]) must be your username
53
61
* The second argument (args[1]) must be your API key
54
- * The third argument (args[2]) must be the absolute path to a large file
55
62
*/
56
63
public static void main (String [] args ) throws IOException {
57
64
UploadLargeObject createContainer = new UploadLargeObject (args [0 ], args [1 ]);
65
+ File largeFile = new File ("largefile.dat" );
66
+ File downloadedFile = new File (largeFile .getName ()+".downloaded" );
58
67
59
68
try {
60
- createContainer .uploadLargeObjectFromFile (new File (args [2 ]));
69
+ // Create a 200MB file for this example
70
+ createContainer .createRandomFile (200000000 , largeFile );
71
+ createContainer .uploadLargeObjectFromFile (largeFile );
72
+ createContainer .downloadLargeObjectToFile (largeFile .getName ());
73
+ System .out .println ("Random file hash: " + Files .hash (largeFile , Hashing .md5 ()));
74
+ System .out .println ("Downloaded file hash: " + Files .hash (downloadedFile , Hashing .md5 ()));
61
75
}
62
76
catch (Exception e ) {
63
77
e .printStackTrace ();
64
78
}
65
79
finally {
80
+ createContainer .cleanup ();
66
81
createContainer .close ();
82
+ if (largeFile .exists ()) largeFile .delete ();
83
+ if (downloadedFile .exists ()) downloadedFile .delete ();
67
84
}
68
85
}
69
86
70
87
public UploadLargeObject(String username, String apiKey) {
   Properties overrides = new Properties();
   // This property controls the number of user threads jclouds may use;
   // multipart parts are uploaded in parallel on these threads.
   overrides.setProperty("jclouds.user-threads", "14");
   // This property controls the size (in bytes) of parts being uploaded in parallel, the default is 33554432 bytes = 32 MB
   overrides.setProperty("jclouds.mpu.parts.size", "1100000"); // ~1.1 MB

   RegionScopedBlobStoreContext context = ContextBuilder.newBuilder(PROVIDER)
         .credentials(username, apiKey)
         .overrides(overrides)
         .buildView(RegionScopedBlobStoreContext.class);
   blobStore = context.getBlobStore(REGION);

   // Make sure the example container exists before uploading/downloading.
   blobStore.createContainerInLocation(null, CONTAINER);
}
83
102
84
103
/**
@@ -102,7 +121,24 @@ private void uploadLargeObjectFromFile(File largeFile) throws InterruptedExcepti
102
121
// configure the blobstore to use multipart uploading of the file
103
122
String eTag = blobStore .putBlob (CONTAINER , blob , multipart ());
104
123
105
- System .out .format (" Uploaded %s eTag=%s" , largeFile .getName (), eTag );
124
+ System .out .format (" Uploaded %s eTag=%s to %s in %s%n" , largeFile .getName (), eTag , REGION , CONTAINER );
125
+ }
126
+
127
+ /**
128
+ * Download a large object from a File using the BlobStore API.
129
+ *
130
+ * @throws ExecutionException
131
+ * @throws InterruptedException
132
+ */
133
+ private void downloadLargeObjectToFile (String blobName ) throws InterruptedException , ExecutionException {
134
+ System .out .format ("Download large object to file%n" );
135
+
136
+ blobStore .downloadBlob (CONTAINER , blobName , new File (blobName + ".downloaded" ));
137
+ }
138
+
139
+ private void cleanup () {
140
+ System .out .format ("Cleaning up...%n" );
141
+ blobStore .clearContainer (CONTAINER );
106
142
}
107
143
108
144
/**
@@ -111,4 +147,68 @@ private void uploadLargeObjectFromFile(File largeFile) throws InterruptedExcepti
111
147
/**
 * Always close your service when you're done with it.
 *
 * Closes the underlying blobstore context; the {@code true} flag swallows
 * any IOException raised while closing (quiet close).
 */
public void close() throws IOException {
   Closeables.close(blobStore.getContext(), true);
}
150
+
151
+ /**
152
+ * Helper method; so that we don't have to add a large file to the repo
153
+ * @param size File size
154
+ * @param file The new random file to generate (will overwrite if it exists)
155
+ * @throws IOException
156
+ * @throws InterruptedException
157
+ */
158
+ private void createRandomFile (long size , File file ) throws IOException , InterruptedException {
159
+ RandomAccessFile raf = null ;
160
+
161
+ // Reserve space for performance reasons
162
+ raf = new RandomAccessFile (file .getAbsoluteFile (), "rw" );
163
+ raf .seek (size - 1 );
164
+ raf .write (0 );
165
+
166
+ // Loop through ranges within the file
167
+ long from ;
168
+ long to ;
169
+ long partSize = 1000000 ;
170
+
171
+ ExecutorService threadPool = Executors .newFixedThreadPool (16 );
172
+
173
+ for (from = 0 ; from < size ; from = from + partSize ) {
174
+ to = (from + partSize >= size ) ? size - 1 : from + partSize - 1 ;
175
+ RandomFileWriter writer = new RandomFileWriter (raf , from , to );
176
+ threadPool .submit (writer );
177
+ }
178
+
179
+ threadPool .shutdown ();
180
+ threadPool .awaitTermination (1 , TimeUnit .DAYS );
181
+
182
+ raf .close ();
183
+ }
184
+
185
+ /**
186
+ * Helper class that runs the random file generation
187
+ */
188
+ private final class RandomFileWriter implements Runnable {
189
+ private final RandomAccessFile raf ;
190
+ private final long begin ;
191
+ private final long end ;
192
+
193
+ RandomFileWriter (RandomAccessFile raf , long begin , long end ) {
194
+ this .raf = raf ;
195
+ this .begin = begin ;
196
+ this .end = end ;
197
+ }
198
+
199
+ @ Override
200
+ public void run () {
201
+ try {
202
+ byte [] targetArray = new byte [(int ) (end - begin + 1 )];
203
+ Random random = new Random ();
204
+ random .nextBytes (targetArray );
205
+ // Map file region
206
+ MappedByteBuffer out = raf .getChannel ().map (FileChannel .MapMode .READ_WRITE , begin , end - begin + 1 );
207
+ out .put (targetArray );
208
+ out .force ();
209
+ } catch (IOException e ) {
210
+ throw new RuntimeException (e );
211
+ }
212
+ }
213
+ }
114
214
}
0 commit comments