You can use the Python code below to restore Glacier data back to S3.
import argparse
import sys
import time

import boto3

parser = argparse.ArgumentParser()
parser.add_argument('--max-rate-mb', action='store', type=int, default=10000,
                    help='The maximum rate in MB/h to restore files at. Files larger than this will not be restored.')
parser.add_argument('--restore-days', action='store', type=int, default=30,
                    help='How many days restored objects will remain in S3.')
parser.add_argument('--restore-path', action='store',
                    help='The bucket/prefix to restore from.')
parser.add_argument('--pretend', action='store_true',
                    help='Do not execute restores.')
parser.add_argument('--estimate', action='store_true',
                    help='When pretending, do not check for already-restored files.')
args = parser.parse_args()

if not args.restore_path:
    print('No restore path specified.')
    sys.exit(1)

# Split "bucket/prefix" into its parts; a bare bucket name means an empty prefix.
if '/' in args.restore_path:
    BUCKET, PREFIX = args.restore_path.split('/', 1)
else:
    BUCKET = args.restore_path
    PREFIX = ''

RATE_LIMIT_BYTES = args.max_rate_mb * 1024 * 1024

s3 = boto3.Session(aws_access_key_id='<ACCESS_KEY>',
                   aws_secret_access_key='<SECRET_KEY>').resource('s3')
bucket = s3.Bucket(BUCKET)

# List every object under the prefix.
objects = []
for objpage in bucket.objects.filter(Prefix=PREFIX).page_size(100).pages():
    for obj in objpage:
        print(obj)
        objects.append(obj)
print('Found {} objects.'.format(len(objects)))
print()

# Largest first, and keep only objects actually archived in Glacier.
objects.sort(key=lambda x: x.size, reverse=True)
objects = [obj for obj in objects if obj.storage_class == 'GLACIER']

if objects:
    obj = objects[0]
    print('The largest object found is of {} size: {:14,d} {:1s} {}'.format(
        'a restorable' if obj.size <= RATE_LIMIT_BYTES else 'an UNRESTORABLE',
        obj.size, obj.storage_class[0], obj.key))
    print()

while objects:
    # Assemble a batch whose combined size fits within one hour's rate limit.
    current_set = []
    current_set_total = 0
    unreported_unrestorable_objects = []
    i = 0
    while i < len(objects):
        obj = objects[i]
        if obj.size > RATE_LIMIT_BYTES:
            unreported_unrestorable_objects.append(obj)
        elif unreported_unrestorable_objects:
            # No longer accumulating these. Print the ones we found, once.
            print('Some objects could not be restored due to exceeding the hourly rate limit:')
            for unrestorable in unreported_unrestorable_objects:
                print('- {:14,d} {:1s} {}'.format(unrestorable.size,
                                                  unrestorable.storage_class[0],
                                                  unrestorable.key))
            print()
            unreported_unrestorable_objects = []
        if current_set_total + obj.size <= RATE_LIMIT_BYTES:
            # Skip objects that already have a restore requested or completed,
            # unless we are only producing a rough estimate.
            if not (args.pretend and args.estimate):
                if obj.Object().restore is not None:
                    objects.pop(i)
                    continue
            current_set.append(obj)
            current_set_total += obj.size
            objects.pop(i)
            continue
        i += 1

    for obj in current_set:
        print('{:14,d} {:1s} {}'.format(obj.size, obj.storage_class[0], obj.key))
        if not args.pretend:
            obj.restore_object(RestoreRequest={'Days': args.restore_days})

    print('{:s} Requested restore of {:d} objects consisting of {:,d} bytes. '
          '{:d} objects remaining. {:,d} bytes of hourly restore rate wasted.'.format(
              time.strftime('%Y-%m-%d %H:%M:%S'), len(current_set), current_set_total,
              len(objects), RATE_LIMIT_BYTES - current_set_total))
    print()
    # Stop when everything is queued, or when only objects above the
    # rate limit remain (they can never fit into a batch).
    if not objects or not current_set:
        break
    if not args.pretend:
        # Wait just over an hour before requesting the next batch.
        time.sleep(3690)
Command to run the script:
python restore_glacier_data_to_s3.py --restore-path s3-bucket-name/folder-name/
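To preview what would be restored without issuing any restore requests, you can combine the script's own --pretend and --estimate flags (the bucket and folder names are placeholders, as above); --pretend alone lists each batch without restoring, and adding --estimate also skips the per-object check for already-restored files, which is faster on large buckets:

python restore_glacier_data_to_s3.py --restore-path s3-bucket-name/folder-name/ --pretend --estimate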
When you say "this didn't help", what do you mean? Did the command fail? Did it succeed, but you can't find the S3 objects? You should be aware from the documentation that the standard Glacier retrieval time is typically 3-5 hours, unless you request an expedited retrieval (in which case you pay more). – jarmod
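On the comment's point about retrieval times: below is a minimal sketch (the bucket and key names are hypothetical, not from the question) of how you can check a restore's progress with boto3, and how to request the faster Expedited tier that the comment mentions.

import boto3

s3 = boto3.resource('s3')
obj = s3.Object('s3-bucket-name', 'folder-name/some-file')  # hypothetical names

# obj.restore is None if no restore was ever requested,
# 'ongoing-request="true"' while the retrieval is still running, and
# 'ongoing-request="false", expiry-date="..."' once the copy is readable.
print(obj.restore)

# To pay for a faster retrieval instead of the standard 3-5 hour tier,
# restore_object accepts a GlacierJobParameters tier:
obj.restore_object(RestoreRequest={
    'Days': 30,
    'GlacierJobParameters': {'Tier': 'Expedited'},  # or 'Standard' / 'Bulk'
})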