mirror of
https://gitee.com/lxp135/minio-plus.git
synced 2025-12-07 01:18:22 +08:00
Fix the issue where computing the file MD5 on the frontend freezes the page when uploading large files.
This commit is contained in:
parent 4a0715fa30, commit c2cf723afe
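Summary of the change: the demo page previously hashed the entire file in one FileReader.readAsBinaryString pass, which is what froze the page on large files. This commit switches to a slice-by-slice pass: the page first asks the new /storage/upload/sharding endpoint for the part boundaries, feeds each slice to a per-part SparkMD5 and to a running whole-file SparkMD5, and only then calls /storage/upload/init with the full-file MD5. Below is a minimal sketch of that flow, separate from the Vue demo; the endpoints, field names, and SparkMD5 calls are taken from the diff that follows, while the helper name hashFileInParts is made up for illustration.

// Sketch only: chunked MD5, so no single huge FileReader read blocks the UI.
async function hashFileInParts(file) {
    // 1. Ask the back end how the file will be split (new endpoint in this commit).
    const res = await fetch("/storage/upload/sharding", {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({ fileSize: file.size })
    });
    const { data } = await res.json();

    const md5Total = new SparkMD5();   // digest of the whole file
    const partMd5Map = {};             // per-part digests, keyed by "start_end"

    // 2. Read and hash the file one part at a time.
    for (const part of data.partList) {
        const slice = file.slice(part.startPosition, part.endPosition);
        const binary = await new Promise(resolve => {
            const reader = new FileReader();
            reader.onload = e => resolve(e.target.result);
            reader.readAsBinaryString(slice);
        });
        const md5 = new SparkMD5();
        md5.appendBinary(binary);
        md5Total.appendBinary(binary);
        partMd5Map[part.startPosition + "_" + part.endPosition] = md5.end();
    }

    // 3. The whole-file MD5 is available without ever reading the full file in one go.
    return { fileMd5: md5Total.end(), partMd5Map };
}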
@@ -112,4 +112,4 @@ ...only enhances on top of MinIO, never intrudes into MinIO code, purely to simplify development...

If the QR code has expired, you can add me on WeChat (*movedisk_1*) and I will add you to the group manually.
If the QR code has expired, you can add me on WeChat (*movedisk_1*) and note "minio-plus" in the friend request; I will add you to the group manually.
@@ -5,6 +5,7 @@ import org.liuxp.minioplus.api.model.dto.FileMetadataInfoDTO;
import org.liuxp.minioplus.api.model.vo.CompleteResultVo;
import org.liuxp.minioplus.api.model.vo.FileCheckResultVo;
import org.liuxp.minioplus.api.model.vo.FileMetadataInfoVo;
import org.liuxp.minioplus.api.model.vo.FilePreShardingVo;

import java.io.InputStream;
import java.util.List;
@@ -16,6 +17,13 @@ import java.util.List;
 */
public interface StorageService {

    /**
     * File pre-sharding
     * @param fileSize file size in bytes
     * @return pre-sharding result
     */
    FilePreShardingVo sharding(long fileSize);

    /**
     * Initialize an upload task
     * @param fileMd5 file MD5
@@ -0,0 +1,62 @@
package org.liuxp.minioplus.api.model.vo;

import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Getter;
import lombok.Setter;

import java.util.ArrayList;
import java.util.List;

/**
 * File pre-sharding result
 * @author contact@liuxp.me
 * @since 2024-07-09
 **/
@Getter
@Setter
@ApiModel("File pre-sharding result")
public class FilePreShardingVo {

    /**
     * File size
     */
    @ApiModelProperty("File size")
    private Long fileSize;

    /**
     * Number of parts
     */
    @ApiModelProperty("Number of parts")
    private Integer partCount;

    /**
     * Part size
     */
    @ApiModelProperty("Part size")
    private Integer partSize;

    /**
     * Part information
     */
    @ApiModelProperty("Part information")
    private List<Part> partList = new ArrayList<>();

    @Getter
    @Setter
    public static class Part {

        /**
         * Start position
         */
        @ApiModelProperty("Start position")
        private Long startPosition;

        /**
         * End position
         */
        @ApiModelProperty("End position")
        private Long endPosition;

    }

}
@@ -1,150 +0,0 @@

let partMd5List = new Array();
let partCount = 0;
let partSize = 0;
let fileSize = 0;

/**
 * Note: this test demo is not affected by the order of the parts.
 * Handling after a successful upload: configure MinIO to listen for uploads of the given file types in the given bucket, push a notification to MQ, and let the back-end service listen and consume it.
 * (Uploading an mp4 is recommended, so the result can be seen directly on the page after success.)
 * Testing chunked upload:
 *   open the page > open dev tools > Console tab > choose a file to upload > watch the printed output
 * Testing instant upload (秒传):
 *   after the previous test, refresh the page and choose the file uploaded last time
 * Testing resumable upload:
 *   choose a different file (if you have no spare test file, restart the back-end service) > manually simulate a partially failed upload (close the page before all parts finish, or comment out the merge call and delete a few parts from the MinIO chunk bucket)
 *   > choose the same file again for upload > check whether the printed output resumes from the missing parts
 */
uploadFile = async () => {
    // get the file selected by the user
    const file = document.getElementById("upload").files[0];

    // compute the file md5
    let startTime = new Date();
    const fileMd5 = await getFileMd5(file);

    console.log("file md5:", fileMd5 + ", took " + (new Date() - startTime) + " ms");

    console.log("requesting chunked-upload initialization from the back end")

    $.ajax({
        url: "/storage/upload/init",
        type: 'POST',
        contentType: "application/json",
        dataType: "json",
        data: JSON.stringify({
            fileMd5: fileMd5,
            fullFileName: file.name,
            fileSize: file.size,
        }),
        success: async function (res) {
            partMd5List = new Array();
            console.log("current upload state: first upload or resumed upload")
            document.getElementById("uploadId").value = (res.data.fileKey);
            if (res.isDone) {
                return;
            }
            const chunkUploadUrls = res.data.partList;
            partCount = res.data.partCount;
            partSize = res.data.partSize;
            fileSize = res.data.fileSize;

            // parts are currently uploaded sequentially; to test concurrent upload, just remove the await modifier on line 52
            // with concurrent upload the per-part completion messages are not accurate because ajax requests are asynchronous, but this does not affect the final result

            for (const [i, item] of chunkUploadUrls.entries()) {

                // slice the byte range of the file to get the part data
                let _chunkFile = file.slice(item.startPosition, item.endPosition)
                console.log("start uploading part " + i, _chunkFile)
                $.ajax({
                    url: item.url,
                    type: 'PUT',
                    contentType: false,
                    processData: false,
                    data: _chunkFile,
                    success: function (res) {
                        console.log("part " + i + " uploaded")
                    }
                })
            }
        }
    })
}

calculatePartMd5 = async () => {
    // get the file selected by the user
    const file = document.getElementById("upload").files[0];

    // compute the file md5
    let startTime = new Date();
    const fileMd5 = await getFileMd5(file);

    console.log("file md5:", fileMd5 + ", took " + (new Date() - startTime) + " ms");

    for (let i = 0; i < partCount; i++) {
        console.log(i)
        let _chunkFile;
        if (i == partCount - 1) {
            _chunkFile = file.slice(i * partSize, fileSize)
        } else {
            _chunkFile = file.slice(i * partSize, (i + 1) * partSize)
        }

        let partMd5 = await getFileMd5(_chunkFile);
        partMd5List.push(partMd5);
        console.log(partMd5List)
    }
}

function download() {
    let fileKey = document.getElementById("uploadId").value;
    window.location.href = "/storage/download/" + fileKey;
}

/**
 * Compute the file MD5
 * @param file
 * @returns {Promise<unknown>}
 */
getFileMd5 = (file) => {
    let fileReader = new FileReader()
    fileReader.readAsBinaryString(file)
    let spark = new SparkMD5()
    return new Promise((resolve) => {
        fileReader.onload = (e) => {
            spark.appendBinary(e.target.result)
            resolve(spark.end())
        }
    })
}

/**
 * Ask the back end to merge the file
 * @param fileMd5
 * @param fileName
 */
merge = () => {
    let fileKey = document.getElementById("uploadId").value;
    console.log("asking the back end to merge the file")
    // note: set bucketName to your own bucket name; if you do not have one, create one first and put it here
    $.ajax({
        url: "/storage/upload/complete/" + fileKey,
        type: 'POST',
        contentType: "application/json",
        dataType: "json",
        data: JSON.stringify({
            partMd5List: partMd5List
        }),
        success: function (res) {
            console.log("file merge finished", res.data)
        }
    })
}

removeTaskId = async () => {
    document.getElementById("uploadId").value = '';
}
File diff suppressed because it is too large
@@ -4,9 +4,8 @@
<meta charset="UTF-8">
<title>MinIO Plus Demo</title>
</head>
<script src="https://unpkg.com/vue@3/dist/vue.global.js"></script>
<script src="js/vue.global.js"></script>
<script src="js/spark-md5.js"></script>
<script src="js/upload.js"></script>
<body>
<div id="app">
    <div>
@@ -18,7 +17,7 @@
        <button @click="uploadFile(false)" :disabled="partList.length === 0">Normal upload</button>
        <button @click="uploadFile(true)" :disabled="partList.length < 2">Simulate lost-part upload</button>
        <button @click="recover" :disabled="missChunkNumber == null">Recover lost part</button>
        <button @click="remove" :disabled="uploadId == null">Delete file</button>
        <!-- <button @click="remove" :disabled="uploadId == null">Delete file</button>-->
        <div>
            <input type="text" v-model="uploadId" id="uploadId">
            <button @click="merge" :disabled="uploadId == null">Merge parts</button>
@@ -28,6 +27,9 @@
            <button @click="download" :disabled="uploadId == null">Download file</button>
            <img :src="previewUrl">
        </div>
        <div>
            <span>Execution log:</span><textarea id="logstr" style="height: 300px;width: 600px;"></textarea>
        </div>
        <div>
            <label>Total parts: {{partList.length}}</label>
            <div v-for="(item,index) in partList" :style="{color:missChunkNumber === index ? 'red' : 'black'}">
@@ -48,6 +50,7 @@
        const state = reactive({
            uploadId: null,
            partList: [],
            partMd5Map: {},
            missChunkNumber: null,
            partCount: null,
            partSize: null,
@@ -64,38 +67,80 @@
            const file = document.getElementById("upload").files[0];
            // compute the file md5
            let startTime = new Date();
            const fileMd5 = await getFileMd5(file)
            console.log('start computing the file MD5 value')

            console.log("file md5: " + fileMd5 + ", took " + (new Date() - startTime) + " ms");

            console.log("requesting chunked-upload initialization from the back end")

            fetch("/storage/upload/init", {
            fetch("/storage/upload/sharding", {
                method: "POST",
                headers: {
                    "Content-Type": "application/json",
                    "Authorization": state.loginUser
                    "Content-Type": "application/json"
                },
                body: JSON.stringify({
                    fileMd5: fileMd5,
                    fullFileName: file.name,
                    fileSize: file.size
                })
            }).then(res => res.json()).then(({data}) => {
                console.log(data);
                // file upload id
                state.uploadId = data.fileKey;
                // file part list
                state.partList = data.partList;
                // file size
                state.fileSize = data.fileSize;
                // part size
                state.partSize = data.partSize;
                // number of parts
                state.partCount = data.partCount;

                let fileReader = new FileReader();
                let md5 = new SparkMD5();
                let md5Total = new SparkMD5();
                let currentIndex = 0;

                const loadFile = () => {
                    if (currentIndex >= data.partList.length) {
                        let fileMd5 = md5Total.end();
                        console.log("file md5: " + fileMd5 + ", took " + (new Date() - startTime) + " ms");

                        console.log("requesting chunked-upload initialization from the back end")

                        fetch("/storage/upload/init", {
                            method: "POST",
                            headers: {
                                "Content-Type": "application/json",
                                "Authorization": state.loginUser
                            },
                            body: JSON.stringify({
                                fileMd5: fileMd5,
                                fullFileName: file.name,
                                fileSize: file.size
                            })
                        }).then(res => res.json()).then(({data}) => {
                            console.log(data);
                            // file upload id
                            state.uploadId = data.fileKey;
                            // file part list
                            state.partList = data.partList;
                            // file size
                            state.fileSize = data.fileSize;
                            // part size
                            state.partSize = data.partSize;
                            // number of parts
                            state.partCount = data.partCount;
                        }).catch(err => {
                            console.log(err);
                        })
                        return;
                    }

                    const item = data.partList[currentIndex];
                    const slice = file.slice(item.startPosition, item.endPosition);
                    fileReader.readAsBinaryString(slice);
                    fileReader.onload = e => {
                        md5.appendBinary(e.target.result);
                        md5Total.appendBinary(e.target.result);
                        let partMd5 = md5.end();
                        console.log("computed MD5 of part " + currentIndex + ": " + partMd5);
                        state.partMd5Map[item.startPosition + '_' + item.endPosition] = partMd5;
                        currentIndex++;
                        loadFile();
                    };
                }

                loadFile();
            }).catch(err => {
                console.log(err);
            })

        }
        /**
         * Upload the file
@@ -149,12 +194,11 @@
                continue;
            }
            if (i === state.partCount - 1) {
                _chunkFile = file.slice(i * state.partSize, state.fileSize)
                partMd5List.push(state.partMd5Map[i * state.partSize + '_' + state.fileSize]);
            } else {
                _chunkFile = file.slice(i * state.partSize, (i + 1) * state.partSize)
                partMd5List.push(state.partMd5Map[i * state.partSize + '_' + (i + 1) * state.partSize]);
            }
            let partMd5 = await getFileMd5(_chunkFile);
            partMd5List.push(partMd5);

        }
        //
        fetch(`/storage/upload/complete/${state.uploadId}`, {
@@ -222,16 +266,6 @@
        });

    }
    // delete file
    const remove = () => {
        fetch(`/storage/remove/${state.uploadId}`, {
            method: "POST",
        }).then(res => res.json()).then(({data}) => {
            console.log("file deleted", data)
        }).catch(err => {
            console.log(err);
        });
    }
    /**
     * Compute the file MD5
     * @param file
@@ -249,16 +283,15 @@
        })
    }

    return {
        checkFile,
        removeTaskId,
        // removeTaskId,
        clearState,
        uploadFile,
        merge,
        download,
        preview,
        remove,
        // remove,
        recover,
        ...toRefs(state)
    }
@@ -16,6 +16,14 @@ import java.util.List;
 */
public interface StorageEngineService {

    /**
     * Compute the number of parts
     *
     * @param fileSize file size in bytes
     * @return {@link Integer}
     */
    Integer computeChunkNum(Long fileSize);

    /**
     * Initialize an upload task
     * @param fileMd5 file MD5
@@ -528,7 +528,7 @@ public class StorageEngineServiceImpl implements StorageEngineService {
     * @param fileSize file size in bytes
     * @return {@link Integer}
     */
    private Integer computeChunkNum(Long fileSize) {
    public Integer computeChunkNum(Long fileSize) {
        // compute the number of parts
        double tempNum = (double) fileSize / properties.getPart().getSize();
        // round up
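As a quick sanity check of the division-plus-round-up above, the same computation in the demo's JavaScript, with an assumed 5 MiB part size (the real value comes from the configurable part-size property, so the numbers are illustrative only):

// Illustrative only: the part size is configured on the server side.
const partSize = 5 * 1024 * 1024;      // assumed 5 MiB part size
const fileSize = 12 * 1024 * 1024;     // a 12 MiB file

// mirrors the Java division followed by rounding up
const partCount = Math.ceil(fileSize / partSize);
console.log(partCount);                // 3 -> parts of 5 MiB, 5 MiB, 2 MiB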
@@ -15,6 +15,7 @@ import org.liuxp.minioplus.api.model.dto.FileMetadataInfoSaveDTO;
import org.liuxp.minioplus.api.model.vo.CompleteResultVo;
import org.liuxp.minioplus.api.model.vo.FileCheckResultVo;
import org.liuxp.minioplus.api.model.vo.FileMetadataInfoVo;
import org.liuxp.minioplus.api.model.vo.FilePreShardingVo;
import org.liuxp.minioplus.common.config.MinioPlusProperties;
import org.liuxp.minioplus.common.enums.MinioPlusErrorCode;
import org.liuxp.minioplus.common.enums.StorageBucketEnums;
@@ -27,6 +28,7 @@ import org.springframework.stereotype.Service;

import javax.annotation.Resource;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;

/**
@@ -55,6 +57,39 @@ public class StorageServiceImpl implements StorageService {
    @Resource
    MinioPlusProperties properties;

    @Override
    public FilePreShardingVo sharding(long fileSize) {

        // compute the number of parts
        Integer chunkNum = storageEngineService.computeChunkNum(fileSize);

        List<FilePreShardingVo.Part> partList = new ArrayList<>();

        long start = 0;
        for (int partNumber = 1; partNumber <= chunkNum; partNumber++) {

            long end = Math.min(start + properties.getPart().getSize(), fileSize);

            FilePreShardingVo.Part part = new FilePreShardingVo.Part();
            // start position
            part.setStartPosition(start);
            // end position
            part.setEndPosition(end);

            // move the start position for the next part
            start = start + properties.getPart().getSize();
            partList.add(part);
        }

        FilePreShardingVo filePreShardingVo = new FilePreShardingVo();
        filePreShardingVo.setFileSize(fileSize);
        filePreShardingVo.setPartCount(chunkNum);
        filePreShardingVo.setPartSize(properties.getPart().getSize());
        filePreShardingVo.setPartList(partList);

        return filePreShardingVo;
    }

    @Override
    public FileCheckResultVo init(String fileMd5, String fullFileName, long fileSize, Boolean isPrivate, String userId) {

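The loop above records boundaries that the demo page consumes as half-open ranges via file.slice(startPosition, endPosition): each part's end equals the next part's start, and the last part is clamped to the file size. A worked example of the boundaries, again assuming a 5 MiB part size (the real value comes from properties.getPart().getSize()):

// Illustrative boundary computation for a 12 MiB file and an assumed 5 MiB part size.
const partSize = 5 * 1024 * 1024;
const fileSize = 12 * 1024 * 1024;
const partCount = Math.ceil(fileSize / partSize);

const partList = [];
let start = 0;
for (let i = 0; i < partCount; i++) {
    const end = Math.min(start + partSize, fileSize);
    partList.push({ startPosition: start, endPosition: end });
    start += partSize;
}

console.log(partList);
// [ { startPosition: 0,        endPosition: 5242880  },
//   { startPosition: 5242880,  endPosition: 10485760 },
//   { startPosition: 10485760, endPosition: 12582912 } ]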
@@ -6,10 +6,12 @@ import lombok.extern.slf4j.Slf4j;
import org.liuxp.minioplus.api.StorageService;
import org.liuxp.minioplus.api.model.vo.CompleteResultVo;
import org.liuxp.minioplus.api.model.vo.FileCheckResultVo;
import org.liuxp.minioplus.api.model.vo.FilePreShardingVo;
import org.liuxp.minioplus.extension.context.Response;
import org.liuxp.minioplus.extension.context.UserHolder;
import org.liuxp.minioplus.extension.dto.FileCheckDTO;
import org.liuxp.minioplus.extension.dto.FileCompleteDTO;
import org.liuxp.minioplus.extension.dto.PreShardingDTO;
import org.springframework.stereotype.Controller;
import org.springframework.validation.annotation.Validated;
import org.springframework.web.bind.annotation.*;
@@ -39,6 +41,21 @@ public class StorageController {
    @Resource
    private StorageService storageService;

    /**
     * File pre-sharding endpoint
     * Provided so that the frontend does not have to compute the file MD5 repeatedly when uploading large files
     * @return pre-sharding result
     */
    @ApiOperation(value = "File pre-sharding")
    @PostMapping("/upload/sharding")
    @ResponseBody
    public Response<FilePreShardingVo> sharding(@RequestBody @Validated PreShardingDTO preShardingDTO){

        FilePreShardingVo resultVo = storageService.sharding(preShardingDTO.getFileSize());

        return Response.success(resultVo);
    }

    /**
     * Initialize an upload task
     * Pre-upload check: instant upload, chunked upload, and resumable upload are all built on this method
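The demo page calls this endpoint before hashing anything. A minimal call looks like the following; the /storage prefix and the { data: ... } response envelope match the demo's other requests in this diff, and the sample size is arbitrary:

// Ask the server how a file of this size will be split before computing any MD5.
fetch("/storage/upload/sharding", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ fileSize: 12582912 })   // size in bytes of the file to upload
}).then(res => res.json()).then(({ data }) => {
    // data.fileSize, data.partCount, data.partSize and data.partList[{startPosition, endPosition}]
    // drive the slice-by-slice MD5 computation in the demo page.
    console.log(data.partCount, data.partList);
});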
@@ -0,0 +1,24 @@
package org.liuxp.minioplus.extension.dto;

import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Getter;
import lombok.Setter;
import lombok.ToString;

/**
 * Input DTO for file pre-sharding
 *
 * @author contact@liuxp.me
 * @since 2024/7/9
 */
@Getter
@Setter
@ToString
@ApiModel("Input DTO for file pre-sharding")
public class PreShardingDTO {

    @ApiModelProperty(value = "File size", required = true)
    private Long fileSize;

}