# specific language governing permissions and limitations under the License.

import os
- import tempfile
+ import time

from ds3 import ds3

@@ -36,14 +36,14 @@
    "folder/folder2/ulysses.txt": "resources/ulysses.txt"
}

- # this method is used to get the size of the files
+ # this method is used to map a file path to a Ds3PutObject
# we need two parameters because the S3 API wants the name that the object will take on the server, but the size obviously needs to come from the file on the current file system
- def getSize(fileName, realFileName):
+ def fileNameToDs3PutObject(fileName, realFileName):
    size = os.stat(realFileName).st_size
-     return ds3.FileObject(fileName, size)
+     return ds3.Ds3PutObject(fileName, size)

# get the sizes for each file
- fileList = ds3.FileObjectList([getSize(key, fileListMapping[key]) for key in list(fileListMapping.keys())])
+ fileList = list([fileNameToDs3PutObject(key, fileListMapping[key]) for key in list(fileListMapping.keys())])

# submit the put bulk request to DS3
bulkResult = client.put_bulk_job_spectra_s3(ds3.PutBulkJobSpectraS3Request(bucketName, fileList))
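
For context, here is a minimal, self-contained sketch of the setup the revised helper assumes: a client built from environment variables, a target bucket, and a list of Ds3PutObjects fed to the bulk put. The bucket name and file path below are placeholders for illustration, not values from this commit.

    import os
    from ds3 import ds3

    # the client reads the endpoint and credentials from the DS3_* environment variables
    client = ds3.createClientFromEnv()

    bucketName = "books"  # placeholder bucket name
    client.put_bucket(ds3.PutBucketRequest(bucketName))

    # Ds3PutObject pairs the name the object will take on the server with the local file's size
    putObjects = [ds3.Ds3PutObject("folder/ulysses.txt", os.stat("resources/ulysses.txt").st_size)]
    bulkResult = client.put_bulk_job_spectra_s3(ds3.PutBulkJobSpectraS3Request(bucketName, putObjects))
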
@@ -81,17 +81,21 @@ def getSize(fileName, realFileName):
            # it is possible that if we start resending a chunk, due to the program crashing, that
            # some objects will already be in cache. Check to make sure that they are not, and then
            # send the object to Spectra S3
-             if not obj['InCache']:
-                 client.put_object(PutObjectRequest(bucketName,
-                                                    obj['Name'],
-                                                    obj['Offset'],
-                                                    obj['Length'],
-                                                    bulkResult.result['JobId'],
-                                                    real_file_name=fileListMapping[obj.name]))
+             if obj['InCache'] == 'false':
+                 objectDataStream = open(fileListMapping[obj['Name']], "rb")
+                 objectDataStream.seek(int(obj['Offset']), 0)
+                 putObjectResponse = client.put_object(ds3.PutObjectRequest(bucket_name=bucketName,
+                                                                            object_name=obj['Name'],
+                                                                            offset=obj['Offset'],
+                                                                            length=obj['Length'],
+                                                                            stream=objectDataStream,
+                                                                            job=bulkResult.result['JobId']))
+

# we now verify that all our objects have been sent to DS3
bucketResponse = client.get_bucket(ds3.GetBucketRequest(bucketName))

+ print("\nFiles in bucket:")
for obj in bucketResponse.result['ContentsList']:
    print(obj['Key'])

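
The put_object call above runs inside a chunk-allocation loop that this diff does not show. A hedged sketch of that loop follows, assuming the structure the SDK samples usually use (the 'ObjectsList'/'ObjectList' field names and the fixed sleep interval are assumptions, not part of this commit); it is also what the new `import time` is for.

    # track which chunks of the bulk job still need to be sent
    chunkIds = set([chunk['ChunkId'] for chunk in bulkResult.result['ObjectsList']])

    while len(chunkIds) > 0:
        # ask the server which chunks can be transferred right now
        ready = client.get_job_chunks_ready_for_client_processing_spectra_s3(
            ds3.GetJobChunksReadyForClientProcessingSpectraS3Request(bulkResult.result['JobId']))
        chunks = ready.result['ObjectsList']

        if len(chunks) == 0:
            time.sleep(60)  # nothing ready yet (cache may be full); wait and ask again
            continue

        for chunk in chunks:
            if chunk['ChunkId'] not in chunkIds:
                continue
            chunkIds.remove(chunk['ChunkId'])
            for obj in chunk['ObjectList']:
                pass  # the InCache check and put_object call from the hunk above go here
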
@@ -100,15 +104,15 @@ def getSize(fileName, realFileName):

client.delete_folder_recursively_spectra_s3(ds3.DeleteFolderRecursivelySpectraS3Request(bucketName, "folder/folder2"))

- print("\nAfter deletion number 1:")
+ print("\nAfter deleting 'folder/folder2':")
bucketResponse = client.get_bucket(ds3.GetBucketRequest(bucketName))

for obj in bucketResponse.result['ContentsList']:
    print(obj['Key'])

client.delete_folder_recursively_spectra_s3(ds3.DeleteFolderRecursivelySpectraS3Request(bucketName, "folder"))

- print("\nAfter deletion number 2:")
+ print("\nAfter deleting 'folder':")
bucketResponse = client.get_bucket(ds3.GetBucketRequest(bucketName))

for obj in bucketResponse.result['ContentsList']:
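
After the two folder deletions the sample is essentially done; a typical cleanup, sketched here on the assumption that the bucket should be emptied and removed afterwards (not shown in this diff), looks like:

    # delete any objects still listed, then the now-empty bucket
    bucketResponse = client.get_bucket(ds3.GetBucketRequest(bucketName))
    for obj in bucketResponse.result['ContentsList']:
        client.delete_object(ds3.DeleteObjectRequest(bucketName, obj['Key']))
    client.delete_bucket(ds3.DeleteBucketRequest(bucketName))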