|
@@ -0,0 +1,228 @@
|
|
|
// Shared S3 client instance; created lazily by init() before any upload call.
let bucket;
// Target S3 bucket name used by every request in this module.
let bucketname = "ccrb";
// Fixed multipart chunk size: 1 MiB per part.
const partsize = 1 * 1024 * 1024;
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
// Configure the AWS SDK and create the module-level S3 client bound to
// `bucketname`.
// NOTE(review): a second `init()` is declared later in this file; function
// declarations hoist, so that later definition is the one that actually runs.
// SECURITY: AWS credentials are hard-coded in client-side source. Anyone who
// loads this page can read them — rotate these keys and fetch short-lived
// credentials (STS/Cognito) from a backend instead.
async function init() {
  const credentials = {
    accessKeyId: "AKIATLPEDU37QV5CHLMH",
    secretAccessKey: "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",
  };
  window.AWS.config.update(credentials);
  window.AWS.config.region = "cn-northwest-1";

  // Bind the default bucket so per-call params only need to supply the Key.
  bucket = new window.AWS.S3({
    params: {
      Bucket: bucketname
    }
  });
  return bucket;
}
|
|
|
+
|
|
|
+
|
|
|
/**
 * Start a multipart upload for `file` stored under `folderid/` and return
 * the S3 UploadId that identifies the upload session.
 *
 * @param {File} file browser File being uploaded (its name becomes the key suffix)
 * @param {string} folderid key prefix for the object
 * @returns {Promise<string>} the UploadId from createMultipartUpload
 */
async function initMultipartUpload(file, folderid) {
  const key = folderid + "/" + file.name;
  const response = await bucket
    .createMultipartUpload({ Bucket: bucketname, Key: key })
    .promise();
  return response.UploadId;
}
|
|
|
+
|
|
|
+
|
|
|
/**
 * Upload one slice [start, end) of `file` as part number `pn` of an S3
 * multipart upload.
 *
 * Fixes over the previous version:
 * - The `.then` callback read `result.ETag` while `result` was still in its
 *   temporal dead zone, so every successful call threw, and the function
 *   never returned a value at all (no `return result`).
 * - Removed `ContentType`, `ACL`, and `"Access-Control-Allow-Credentials"`:
 *   none of these are valid `uploadPart` parameters, and the SDK's parameter
 *   validation rejects unexpected keys.
 * - Replaced the mixed `await`/`.then()` chain with plain try/catch.
 *
 * @returns {Promise<{ETag: string, PartNumber: number}|Error>} part descriptor
 *   on success; the error object on failure (callers test `.ETag`).
 */
async function uploadPart(file, keyname, uploadid, pn, start, end) {
  const params = {
    Bucket: bucketname,
    Key: keyname,
    PartNumber: pn,
    UploadId: uploadid,
    Body: file.slice(start, end),
  };
  try {
    const data = await bucket.uploadPart(params).promise();
    return { ETag: data.ETag, PartNumber: pn };
  } catch (err) {
    console.error("uploadPart failed for part", pn, err);
    return err;
  }
}
|
|
|
+
|
|
|
+
|
|
|
/**
 * Finish a multipart upload by telling S3 which parts (ETag + PartNumber)
 * make up the final object.
 *
 * Fix: `uploadid` was a free, undeclared variable (ReferenceError at
 * runtime). It is now an explicit trailing parameter, which is
 * backward-compatible with existing two-argument call sites.
 *
 * @param {Array<{ETag: string, PartNumber: number}>} parts completed parts
 * @param {string} keyname object key of the upload
 * @param {string} uploadid UploadId returned by createMultipartUpload
 */
async function completeMultipartUpload(parts, keyname, uploadid) {
  const params = {
    Bucket: bucketname,
    Key: keyname,
    MultipartUpload: { Parts: parts },
    UploadId: uploadid
  };
  await bucket.completeMultipartUpload(params).promise();
}
|
|
|
+
|
|
|
+
|
|
|
/**
 * Cancel an in-progress multipart upload so S3 discards any parts already
 * stored for it.
 *
 * @param {string} key object key of the upload being aborted
 * @param {string} uploadid UploadId of the multipart upload session
 */
async function abortMultipartUpload(key, uploadid) {
  await bucket
    .abortMultipartUpload({ Bucket: bucketname, Key: key, UploadId: uploadid })
    .promise();
}
|
|
|
+
|
|
|
+
|
|
|
/**
 * Upload `file` to S3 in 1 MiB parts under `folderid/<name>`, aborting the
 * multipart session on any failure.
 *
 * Fixes:
 * - `init()` is now awaited so `bucket` exists before it is used.
 * - Part count used `file.length`, which is undefined on File/Blob (NaN →
 *   zero parts uploaded); `file.size` is the correct property.
 * - S3 part numbers are 1-based; parts were previously numbered from 0.
 * - The completion call passed `uploadid` where the object key was expected;
 *   it now passes the key and the upload id separately.
 * - A failed part now aborts the upload instead of being silently pushed
 *   into the completion list.
 *
 * @param {File} file browser File to upload
 * @param {string} [folderid] key prefix; a fresh GUID when omitted
 * @returns {Promise<string|undefined>} the UploadId on success
 */
async function uploadFile(file, folderid) {
  folderid = folderid || window.Guid.newGuid();
  const keyname = folderid + "/" + file.name;
  let uploadid = "";
  try {
    await init();

    uploadid = await initMultipartUpload(file, folderid);

    const parts = [];
    const len = Math.ceil(file.size / partsize);

    for (let i = 0; i < len; i++) {
      const start = i * partsize;
      const end = (i + 1) * partsize; // Blob.slice clamps past end-of-file
      const part = await uploadPart(file, keyname, uploadid, i + 1, start, end);
      if (part == null || part.ETag == null) {
        throw new Error("part " + (i + 1) + " failed to upload");
      }
      parts.push(part);
    }

    await completeMultipartUpload(parts, keyname, uploadid);
    return uploadid;
  }
  catch (error) {
    console.error('An error occurred during file upload:', error);
    await abortMultipartUpload(keyname, uploadid);
  }
}
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
// Configure the AWS SDK (credentials + region in a single update) and create
// the S3 client bound to `bucketname`.
// NOTE(review): this redeclares the `init()` defined earlier in the file;
// function declarations hoist, so this later definition is the one that
// actually runs — one of the two should be deleted.
// SECURITY: credentials are hard-coded in client-side source; rotate these
// keys and obtain short-lived credentials from a backend instead.
async function init() {
  window.AWS.config.update({
    accessKeyId: "AKIATLPEDU37QV5CHLMH",
    secretAccessKey: "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",
    region: "cn-northwest-1"
  });

  bucket = new window.AWS.S3({ params: { Bucket: bucketname } });
  return bucket;
}
|
|
|
+
|
|
|
+
|
|
|
/**
 * Look up an interrupted multipart upload for `key` and return its UploadId
 * plus the parts already stored on S3 (the resume "checkpoint").
 *
 * Fix: `uploadid` was assigned without ever being declared — an implicit
 * global (or a ReferenceError in strict mode). It is now a local.
 *
 * @param {string} key object key to search for among in-progress uploads
 * @returns {Promise<{uploadid: string|undefined, partsinfo: Object|undefined}>}
 *   both fields undefined when no matching upload exists or listing fails
 */
async function getawscheckpoint(key) {
  let uploadid;
  let partsinfo;
  try {
    const result = await bucket
      .listMultipartUploads({ Bucket: bucketname, Prefix: key })
      .promise();

    if (result.Uploads.length) {
      // Resume the most recently started upload for this key.
      uploadid = result.Uploads[result.Uploads.length - 1].UploadId;
      partsinfo = await bucket
        .listParts({ Bucket: bucketname, Key: key, UploadId: uploadid })
        .promise();
    }
  } catch (err) {
    console.log(err);
  }
  return { uploadid, partsinfo };
}
|
|
|
+
|
|
|
+
|
|
|
/**
 * Resume (or run) a multipart upload: skip parts already stored on S3,
 * upload the missing ones, report progress via `filestate`, then complete
 * the upload.
 *
 * Fixes:
 * - `let parts = []` redeclared the `parts` parameter — a SyntaxError that
 *   prevented the whole script from parsing. Removed (it was never used).
 * - Progress math divided by an undefined `count`; the total part count
 *   `len` is used instead.
 * - `file.length` is undefined on File/Blob; `file.size` is correct.
 * - S3 part numbers are 1-based, but the 0-based loop index was both
 *   compared against server part numbers and sent as PartNumber, so resumed
 *   uploads re-sent the wrong parts.
 * - The completion call now passes the object key and upload id explicitly.
 *
 * @param {Object} filestate mutable progress holder ({status, percent})
 * @param {File} file the file being uploaded
 * @param {string} uploadid UploadId of the (possibly resumed) session
 * @param {Array} parts parts already on S3 (from listParts), may be empty
 * @param {string} key object key
 */
async function awsuploadpart(filestate, file, uploadid, parts, key) {
  const partarr = [];
  const completeparts = parts.map((p) => {
    partarr.push(p.PartNumber);
    return { PartNumber: p.PartNumber, ETag: p.ETag };
  });

  const len = Math.ceil(file.size / partsize);
  if (partarr.length) {
    filestate.status = "processing";
    filestate.percent = parseInt((completeparts.length * 100) / len);
  }

  for (let i = 0; i < len; i++) {
    const pn = i + 1; // S3 part numbers start at 1
    if (partarr.includes(pn)) continue; // already uploaded in a prior run

    const start = i * partsize;
    const end = (i + 1) * partsize;
    const uploadpart = await uploadPart(file, key, uploadid, pn, start, end);
    if (uploadpart != null && uploadpart.ETag != null) {
      completeparts.push(uploadpart);
      filestate.percent = parseInt((completeparts.length * 100) / len);
    } else {
      filestate.status = "fail";
      return;
    }
  }

  await completeMultipartUpload(completeparts, key, uploadid);
}
|
|
|
+
|
|
|
+
|
|
|
/**
 * Entry point: upload `file` to `folderid/<name>` with resume support.
 * If the object already exists the state jumps straight to success;
 * otherwise any interrupted multipart upload is resumed from its
 * checkpoint, or a fresh one is started.
 *
 * Fixes:
 * - `init()` is awaited before the client is used.
 * - `initMultiPartUpload(bucket, params)` called a nonexistent name with
 *   the wrong arguments; it now calls `initMultipartUpload(file, folder)`,
 *   which produces the same `folder/name` key.
 * - The callback-style `headObject` let the function return `filestate`
 *   before any work had happened; it is awaited via `.promise()` so the
 *   returned state reflects the actual outcome, and the inner upload calls
 *   are awaited too.
 *
 * @param {File} file the file to upload
 * @param {string} [folderid] key prefix; a fresh GUID when omitted
 * @param {Object} filestate mutable progress holder ({status, percent})
 * @returns {Promise<Object>} the mutated `filestate`
 */
async function awsupload(file, folderid, filestate) {
  await init();
  const folder = folderid || window.Guid.newGuid();
  const key = folder + "/" + file.name;
  filestate.percent = 0;
  filestate.status = "start";

  try {
    try {
      // Object already fully uploaded? Then there is nothing to do.
      await bucket.headObject({ Bucket: bucketname, Key: key }).promise();
      filestate.percent = 100;
      filestate.status = "success";
    } catch (headErr) {
      // headObject failed (object absent, per the original's err branch):
      // resume a checkpointed upload if one exists, else start fresh.
      const { uploadid, partsinfo } = await getawscheckpoint(key);
      if (uploadid) {
        await awsuploadpart(filestate, file, uploadid, partsinfo.Parts, key);
      } else {
        const newid = await initMultipartUpload(file, folder);
        await awsuploadpart(filestate, file, newid, [], key);
      }
    }
  }
  catch (err) {
    filestate.status = "error";
    console.log(err);
  }
  return filestate;
}
|
|
|
+
|