When users upload a large file over a poor network connection, the transfer can take several hours. If the connection drops, a server without resumable uploads can only restart the transfer from the beginning, while resumable (breakpoint) upload lets users continue from where the transfer was interrupted, which removes a lot of frustration. Resumable upload solves the following problems:
It avoids exhausting server memory when uploading large files
It survives an upload being terminated by other factors: the upload can be resumed after refreshing the browser, after closing and reopening the browser, and even after restarting the computer
It detects file content lost to network fluctuations during the upload and automatically re-uploads the affected slices
Front end
The file to be uploaded needs to be split into slices
Each uploaded slice needs a sequence number
Upload progress needs to be monitored to drive the progress bar
After all slices are uploaded, a merge request needs to be sent
The Blob object is used to slice the file (the slice arithmetic is sketched after this list)
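The slicing itself happens in the browser with `Blob.slice` (see the front-end reference code at the end of this article), but the arithmetic behind it is the same on both ends. Below is a minimal Java sketch of how the slice count and each slice's byte range follow from the file size and the block size; the 1 MB block size matches the reference code, while the class name and the example file size are made up for illustration.

```java
public class SliceMath {

    // 1 MB per slice, the block size used by the reference code in this article
    static final long BLOCK_SIZE = 1024 * 1024;

    public static void main(String[] args) {
        long fileSize = 10L * 1024 * 1024 * 1024 + 123; // a hypothetical ~10 GB file

        // total number of slices; equivalent to the Math.ceil(fileSize / blockSize) used by the backend
        long sliceCount = (fileSize + BLOCK_SIZE - 1) / BLOCK_SIZE;

        for (long i = 0; i < sliceCount; i++) {
            long start = i * BLOCK_SIZE;                       // inclusive start offset of slice i
            long end = Math.min(start + BLOCK_SIZE, fileSize); // exclusive end offset; the last slice may be shorter
            // slice i is named "<fileName>-slice-<i>" and covers bytes [start, end)
        }
        System.out.println(sliceCount + " slices of at most " + BLOCK_SIZE + " bytes each");
    }
}
```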
Backend
An interface for uploading slices
An interface for merging slices
An interface for querying the slices already uploaded (used for resuming)
Plus utility classes and helper methods to assist
The front end mainly needs to handle file slicing and the progress bar.
The back end mainly needs to decide where slices are stored and how to merge them. A minimal outline of the three backend interfaces follows; the full reference code is at the end of the article.
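This outline assumes Spring MVC and is only a sketch: the class name, the `ResponseEntity` return values and the `MultipartFile` parameter are placeholders, and the actual implementation with the project's own `Result` type is in the reference code below.

```java
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.multipart.MultipartFile;

@RestController
@RequestMapping("/fileslice")
public class FileSliceOutlineController {

    // Which slices of this file are already on the server? Used by the front end to resume.
    @GetMapping("/testing/{fileName}/{sliceSize}/{fileSize}")
    public ResponseEntity<Void> testing(@PathVariable String fileName,
                                        @PathVariable long sliceSize,
                                        @PathVariable long fileSize) {
        // look up the slice directory derived from fileName + fileSize and
        // report the sequence number of the last slice stored there
        return ResponseEntity.ok().build();
    }

    // Receive one slice together with its name and the total file size.
    @PostMapping("/uploads")
    public ResponseEntity<Void> uploads(@RequestParam("part") MultipartFile part,
                                        @RequestParam String fileSliceName,
                                        @RequestParam long fileSize) {
        // store the slice in the directory derived from the original file name + size
        return ResponseEntity.ok().build();
    }

    // Merge all slices into the final file once the front end reports 100%.
    @GetMapping("/merge-file-slice/{fileSliceName}/{sliceSize}/{fileSize}")
    public ResponseEntity<Void> mergeFileSlice(@PathVariable String fileSliceName,
                                               @PathVariable long sliceSize,
                                               @PathVariable long fileSize) {
        // validate slice count, continuity and size, then concatenate the slices in order
        return ResponseEntity.ok().build();
    }
}
```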
First, select the file that needs to be uploaded.
When the upload starts, the progress bar advances; when we click to stop the upload, the progress bar stops.
The backend generates an MD5 hash from the file name and file size and uses it as the name of the slice directory, so the same file always maps to the same directory.
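The reference code below derives this directory name with its own `HashUtil.md5` helper; the following is a rough equivalent sketch using only JDK classes (the class name and example values are made up):

```java
import java.math.BigInteger;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

public class SliceDirNaming {

    // Derive a slice directory name from the original file name and its total size,
    // mirroring the fileNameMd5Dir helper in the reference code below.
    static String sliceDirName(String fileName, long fileSize) throws NoSuchAlgorithmException {
        MessageDigest md5 = MessageDigest.getInstance("MD5");
        byte[] digest = md5.digest((fileName + fileSize).getBytes(StandardCharsets.UTF_8));
        // format the 16-byte digest as a 32-character lower-case hex string
        return String.format("%032x", new BigInteger(1, digest));
    }

    public static void main(String[] args) throws NoSuchAlgorithmException {
        // the same file name + size always maps to the same directory,
        // which is what makes resuming possible after a restart
        System.out.println(sliceDirName("movie.mp4", 1_073_741_824L));
    }
}
```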
When the front end reaches 100%, it sends a merge request, and the backend merges all the slices into one file.
As the picture below shows, all the slices are gone once they have been merged into one file.
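The reference implementation at the end of the article merges the slices with several threads, each writing one region of the target file. As a simpler illustration of the same idea, a single-threaded sketch that appends the sorted slices one after another might look like this (the `-slice-` naming convention mirrors the article; class name and paths are illustrative only):

```java
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class SimpleSliceMerger {

    // Extract the sequence number from a name like "movie.mp4-slice-17"
    static int sliceNumber(Path slice) {
        String name = slice.getFileName().toString();
        return Integer.parseInt(name.substring(name.lastIndexOf("-slice-") + "-slice-".length()));
    }

    // Concatenate all slices in sliceDir, ordered by sequence number, into targetFile.
    static void merge(Path sliceDir, Path targetFile) throws IOException {
        try (Stream<Path> stream = Files.list(sliceDir);
             OutputStream out = Files.newOutputStream(targetFile)) {
            List<Path> slices = stream
                    .sorted(Comparator.comparingInt(SimpleSliceMerger::sliceNumber))
                    .collect(Collectors.toList());
            for (Path slice : slices) {
                try (InputStream in = Files.newInputStream(slice)) {
                    in.transferTo(out); // append this slice to the merged file
                }
            }
        }
    }
}
```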
Part of the stream was lost due to network fluctuations during the upload (detected by comparing sizes)
The server lost slices during the upload (detected by checking slice-number continuity)
The file has been tampered with (detected by comparing sizes)
Validation core code
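The full validation logic lives in the `fileSliceIsbadAndSort` method of the backend reference code below. The following standalone sketch only illustrates the two checks described above, continuity of the slice numbers and the size of every slice except the last; class and method names here are made up for illustration.

```java
import java.io.File;
import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;

public class SliceValidation {

    // Extract the sequence number from a slice file name such as "movie.mp4-slice-3"
    static int sliceNumber(File slice) {
        String name = slice.getName();
        return Integer.parseInt(name.substring(name.lastIndexOf("-slice-") + "-slice-".length()));
    }

    // Returns true if the slices are consecutive and every slice except the last has the expected size.
    static boolean slicesAreValid(List<File> slices, long sliceSize) {
        List<File> sorted = slices.stream()
                .sorted(Comparator.comparingInt(SliceValidation::sliceNumber))
                .collect(Collectors.toList());
        for (int i = 0; i < sorted.size() - 1; i++) {
            // continuity: a gap in the numbering means a slice was lost
            if (sliceNumber(sorted.get(i + 1)) - sliceNumber(sorted.get(i)) != 1) {
                return false;
            }
            // size: a short or oversized slice means data was lost or tampered with
            if (sorted.get(i).length() != sliceSize) {
                return false;
            }
        }
        return true;
    }
}
```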
## Reference code

Front-end (HTML page):

```html
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Document</title>
</head>
<body>
    <h2>HTML5 large file resumable chunked upload</h2>
    <div id="progressBar"></div>
    <input id="file" name="mov" type="file" />
    <input id="btn" type="button" value="Click to upload" />
    <input id="btn1" type="button" value="Click to stop uploading" />
    <script type="module">
        import FileSliceUpload from '../jsutils/FileSliceUpload.js'

        let testingUrl = "http://localhost:7003/fileslice/testing"
        let uploadUrl = "http://localhost:7003/fileslice/uploads"
        let margeUrl = "http://localhost:7003/fileslice/merge-file-slice"
        let progressUrl = "http://localhost:7003/fileslice/progress"

        let fileSliceUpload = new FileSliceUpload(testingUrl, uploadUrl, margeUrl, progressUrl, "#file")
        fileSliceUpload.addProgress("#progressBar")

        let btn = document.querySelector("#btn")
        let btn1 = document.querySelector("#btn1")
        btn.addEventListener("click", function () {
            fileSliceUpload.startUploadFile()
        })
        btn1.addEventListener("click", function () {
            fileSliceUpload.stopUploadFile()
        })
    </script>
</body>
</html>
```
Front-end slicing class, `FileSliceUpload.js`:

```javascript
// Chunked upload for large files, e.g. a 10 GB archive or a video that is too big to send in one request (requires backend support)
class FileSliceUpload {
    constructor(testingUrl, uploadUrl, margeUrl, progressUrl, fileSelect) {
        this.testingUrl = testingUrl;   // endpoint that checks which slices already exist on the server
        this.uploadUrl = uploadUrl;     // slice upload endpoint
        this.margeUrl = margeUrl;       // merge endpoint
        this.progressUrl = progressUrl; // progress endpoint
        this.fileSelect = fileSelect;
        this.fileObj = null;
        this.totalize = null;
        this.blockSize = 1024 * 1024;   // bytes per slice, 1 MB works well
        this.sta = 0;                   // start offset
        this.end = this.sta + this.blockSize; // end offset
        this.count = 0;                 // slice counter
        this.barId = "bar";             // progress bar element id
        this.progressId = "progress";   // progress value element id
        this.fileSliceName = "";        // current slice name
        this.fileName = "";
        this.uploadFileInterval = null; // upload timer
    }

    /**
     * The style can be customised.
     * @param {*} progressSelect selector of the element the progress bar is appended to
     */
    addProgress(progressSelect) {
        let bar = document.createElement("div")
        bar.setAttribute("id", this.barId);
        let num = document.createElement("div")
        num.setAttribute("id", this.progressId);
        num.innerText = "0%"
        bar.appendChild(num);
        document.querySelector(progressSelect).appendChild(bar)
    }

    // Resume: before uploading, ask the server whether this file has been uploaded before.
    // If it returns the last uploaded slice, continue from there; that last slice is uploaded
    // again in case it was corrupted by a network problem.
    sequelFile() {
        if (this.fileName) {
            var xhr = new XMLHttpRequest();
            // synchronous request
            xhr.open('GET', this.testingUrl + "/" + this.fileName + "/" + this.blockSize + "/" + this.totalize, false);
            xhr.send();
            if (xhr.readyState === 4 && xhr.status === 200) {
                let ret = JSON.parse(xhr.response)
                if (ret.code == 20000) {
                    let data = ret.data
                    this.count = data.code;
                    this.fileSliceName = data.fileSliceName
                    // compute the start offset
                    this.sta = this.blockSize * this.count
                    // compute the end offset
                    this.end = this.sta + this.blockSize
                } else {
                    this.sta = 0; // start from the beginning
                    this.end = this.sta + this.blockSize;
                    this.count = 0; // slice counter
                }
            }
        }
    }

    stopUploadFile() {
        clearInterval(this.uploadFileInterval)
    }

    // Upload a single file
    startUploadFile() {
        // progress bar elements
        let bar = document.getElementById(this.barId)
        let progressEl = document.getElementById(this.progressId)
        this.fileObj = document.querySelector(this.fileSelect).files[0];
        this.totalize = this.fileObj.size;
        this.fileName = this.fileObj.name;
        // check whether this file was partially uploaded before and resume if so
        this.sequelFile()
        let ref = this; // keep a reference: inside the callbacks `this` no longer points at the class instance
        this.uploadFileInterval = setInterval(function () {
            if (ref.sta >= ref.totalize) { // >= avoids one extra empty slice when the size is an exact multiple of blockSize
                // the whole file has been sent: stop the timer
                clearInterval(ref.uploadFileInterval)
                // send the merge request
                ref.margeUploadFile()
                console.log("stop" + ref.sta);
                return;
            }
            // slice name
            ref.fileSliceName = ref.fileName + "-slice-" + ref.count++
            // cut the file
            var blob1 = ref.fileObj.slice(ref.sta, ref.end);
            var fd = new FormData();
            fd.append('part', blob1);
            fd.append('fileSliceName', ref.fileSliceName);
            fd.append('fileSize', ref.totalize);
            var xhr = new XMLHttpRequest();
            xhr.open('POST', ref.uploadUrl, true);
            xhr.send(fd); // send the slice asynchronously; completeness is verified later
            xhr.onreadystatechange = function () {
                if (xhr.readyState === 4 && xhr.status === 200) {
                    let ret = JSON.parse(xhr.response)
                    if (ret.code == 20000) {
                        // compute the progress
                        let percent = Math.ceil((ret.data * ref.blockSize / ref.totalize) * 100)
                        if (percent > 100) {
                            percent = 100
                        }
                        bar.style.width = percent + '%';
                        bar.style.backgroundColor = 'red';
                        progressEl.innerHTML = percent + '%'
                    }
                }
            }
            // the new start offset is the previous end offset
            ref.sta = ref.end;
            // the new end offset is the start offset plus the block size
            ref.end = ref.sta + ref.blockSize;
        }, 5)
    }

    margeUploadFile() {
        console.log("Checking the integrity of the uploaded file..........");
        var xhr = new XMLHttpRequest();
        // slice name / slice size / total size
        xhr.open('GET', this.margeUrl + "/" + this.fileSliceName + "/" + this.blockSize + "/" + this.totalize, true);
        xhr.send(); // send the request
        xhr.onreadystatechange = function () {
            if (xhr.readyState === 4 && xhr.status === 200) {
                let ret = JSON.parse(xhr.response)
                if (ret.code == 20000) {
                    console.log("File upload complete");
                } else {
                    console.log("Upload finished, but an error occurred during the upload", ret);
                }
            }
        }
    }
}

export default FileSliceUpload;
```
Back-end (Java, Spring controller):

```java
package com.controller.commontools.fIleupload;

import com.alibaba.fastjson.JSON;
import com.application.Result;
import com.container.ArrayByteUtil;
import com.encryption.hash.HashUtil;
import com.file.FileUtils;
import com.file.FileWebUpload;
import com.file.ReadWriteFileUtils;
import com.function.impl.ExecutorUtils;
import com.path.ResourceFileUtil;
import com.string.PatternCommon;
import org.springframework.web.bind.annotation.*;

import javax.servlet.http.HttpServletRequest;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Collectors;

@RestController
@RequestMapping("/fileslice")
public class FIleSliceUploadController {

    private final String identification = "-slice-";
    private final String uploadslicedir = "uploads" + File.separator + "slice" + File.separator; // slice directory
    private final String uploaddir = "uploads" + File.separator + "real" + File.separator;       // directory of the merged files

    // Query the slices already on the server
    @GetMapping("/testing/{fileName}/{fileSlicSize}/{fileSize}")
    public Result testing(@PathVariable String fileName, @PathVariable long fileSlicSize, @PathVariable long fileSize) throws Exception {
        String dir = fileNameMd5Dir(fileName, fileSize);
        String absoluteFilePathAndCreate = ResourceFileUtil.getAbsoluteFilePathAndCreate(uploadslicedir) + File.separator + dir;
        File file = new File(absoluteFilePathAndCreate);
        if (file.exists()) {
            List<String> filesAll = FileUtils.getFilesAll(file.getAbsolutePath());
            if (filesAll.size() < 2) {
                // slices missing: delete all slice files and upload from scratch
                FileUtils.delFilesAllReview(absoluteFilePathAndCreate, true);
                return Result.Error();
            }
            // sort the slices by sequence number and check whether any slice is damaged
            List<String> collect = fileSliceIsbadAndSort(file, fileSlicSize);
            // take the last slice
            String fileSliceName = collect.get(collect.size() - 1);
            fileSliceName = new File(fileSliceName).getName();
            int code = fileId(fileSliceName);
            // the total size of the slices on the server must not exceed the total file size
            if ((code * fileSlicSize) <= fileSize) {
                Result result = new Result();
                String finalFileSliceName = fileSliceName;
                String str = PatternCommon.renderString("{\"code\":\"$[code]\",\"fileSliceName\":\"${fileSliceName}\"}", new HashMap<String, String>() {{
                    put("code", String.valueOf(code));
                    put("fileSliceName", finalFileSliceName);
                }});
                result.setData(JSON.parse(str));
                return result;
            } else {
                // slices are inconsistent: delete all slice files and upload from scratch
                FileUtils.delFilesAllReview(absoluteFilePathAndCreate, true);
                return Result.Error();
            }
        }
        // nothing has been uploaded yet
        return Result.Error();
    }

    // Upload a slice
    @PostMapping(value = "/uploads")
    public Result uploads(HttpServletRequest request) {
        String fileSliceName = request.getParameter("fileSliceName");
        long fileSize = Long.parseLong(request.getParameter("fileSize")); // total file size
        String dir = fileSliceMd5Dir(fileSliceName, fileSize);
        String absoluteFilePathAndCreate = ResourceFileUtil.getAbsoluteFilePathAndCreate(uploadslicedir + dir);
        FileWebUpload.fileUpload(absoluteFilePathAndCreate, fileSliceName, request);
        int i = fileId(fileSliceName);
        // return the sequence number of the uploaded slice so the front end can compute the progress
        Result result = new Result();
        result.setData(i);
        return result;
    }

    // Merge the slices
    @GetMapping(value = "/merge-file-slice/{fileSlicNamee}/{fileSlicSize}/{fileSize}")
    public Result mergeFileSlice(@PathVariable String fileSlicNamee, @PathVariable long fileSlicSize, @PathVariable long fileSize) throws Exception {
        int l = (int) Math.ceil((double) fileSize / fileSlicSize); // expected number of slices
        String dir = fileSliceMd5Dir(fileSlicNamee, fileSize);     // directory containing the slices
        String absoluteFilePathAndCreate = ResourceFileUtil.getAbsoluteFilePathAndCreate(uploadslicedir + dir);
        File file = new File(absoluteFilePathAndCreate);
        if (file.exists()) {
            List<String> filesAll = FileUtils.getFilesAll(file.getAbsolutePath());
            // Block and poll to check whether slices are still arriving; this deals with the front end uploading slices asynchronously via ajax
            int beforeSize = filesAll.size();
            while (true) {
                Thread.sleep(1000);
                // if the slice count is still growing, uploads are still in progress, so keep waiting
                filesAll = FileUtils.getFilesAll(file.getAbsolutePath());
                if (filesAll.size() - beforeSize >= 1) {
                    beforeSize = filesAll.size();
                    // keep checking
                    continue;
                }
                // If the slice count has not changed, wait another 2 seconds and check again; if it still has not changed, assume the upload is finished and merging can start.
                // This only covers short network hiccups: the network could be down for a long time and the upload could resume later, which cannot be ruled out,
                // so the slice count is validated again below. We cannot wait forever for the network to recover, so if nothing new arrives within 1-3 seconds we assume the upload is complete.
                if (beforeSize == filesAll.size()) {
                    Thread.sleep(2000);
                    filesAll = FileUtils.getFilesAll(file.getAbsolutePath());
                    if (beforeSize == filesAll.size()) {
                        break;
                    }
                }
            }
            // validate the slice count
            if (filesAll.size() != l) {
                // slices missing: delete all slice files and upload from scratch
                FileUtils.delFilesAllReview(absoluteFilePathAndCreate, true);
                return Result.Error();
            }
            // recover the real file name and build the target path
            String realFileName = realFileName(fileSlicNamee);
            String realFileNamePath = ResourceFileUtil.getAbsoluteFilePathAndCreate(uploaddir + realFileName);
            // sort the slices by sequence number and check whether any slice is damaged
            List<String> collect = fileSliceIsbadAndSort(file, fileSlicSize);
            int fileSliceSize = collect.size();
            List<Future<?>> futures = new ArrayList<>();
            // Merge the slices in order using Runtime.getRuntime().availableProcessors() threads, computing how many slices and how many bytes each thread has to read.
            // Why not one thread per slice? With thousands of slices, creating that many threads wastes time on creation, context switching and destruction.
            // Even a thread pool would not help much: memory is limited, the queue can only hold so much, and high concurrency would blow it up.
            int availableProcessors = Runtime.getRuntime().availableProcessors();
            // how many slices each thread reads
            int readFileSize = (int) Math.ceil((double) fileSliceSize / availableProcessors);
            // how many bytes each thread reads
            long readSliceSize = readFileSize * fileSlicSize;
            for (int i = 0; i < availableProcessors; i++) {
                int finalI = i;
                Future<?> future = ExecutorUtils.createFuture("FIleSliceUploadController", () -> {
                    // bytes this thread is responsible for
                    byte[] bytes = new byte[(int) readSliceSize];
                    int index = 0;
                    for (int i1 = finalI * readFileSize, i2 = readFileSize * (finalI + 1) > fileSliceSize ? fileSliceSize : readFileSize * (finalI + 1); i1 < i2; i1++) {
                        try (RandomAccessFile r = new RandomAccessFile(collect.get(i1), "r")) {
                            r.read(bytes, (int) (index * fileSlicSize), (int) fileSlicSize);
                        } catch (IOException e) {
                            e.printStackTrace();
                        }
                        index++;
                    }
                    if (finalI == availableProcessors - 1) {
                        // the last thread's buffer may not be full, so trim it
                        bytes = ArrayByteUtil.getActualBytes(bytes);
                    }
                    try (RandomAccessFile w = new RandomAccessFile(realFileNamePath, "rw")) {
                        // position in the target file this thread writes to
                        w.seek(finalI * readSliceSize);
                        w.write(bytes);
                    } catch (IOException e) {
                        e.printStackTrace();
                    }
                });
                futures.add(future);
            }
            // block until all threads have finished
            ExecutorUtils.waitComplete(futures);
            // delete all slice files
            FileUtils.delFilesAllReview(absoluteFilePathAndCreate, true);
        } else {
            // there is no slice directory for this file
            return Result.Error();
        }
        return Result.Ok();
    }

    // directory of the slices, derived from a slice name
    private String fileSliceMd5Dir(String fileSliceName, long fileSize) {
        int i = fileSliceName.indexOf(identification);
        String substring = fileSliceName.substring(0, i);
        String dir = HashUtil.md5(substring + fileSize);
        return dir;
    }

    // directory of the slices, derived from the file name
    private String fileNameMd5Dir(String fileName, long fileSize) {
        return HashUtil.md5(fileName + fileSize);
    }

    // real file name extracted from a slice name
    private String realFileName(String fileSliceName) {
        int i = fileSliceName.indexOf(identification);
        String substring = fileSliceName.substring(0, i);
        return substring;
    }

    // sequence number extracted from a slice name
    private int fileId(String fileSliceName) {
        int i = fileSliceName.indexOf(identification) + identification.length();
        String fileId = fileSliceName.substring(i);
        return Integer.parseInt(fileId);
    }

    // Sort the slices by sequence number and check whether any slice is missing or damaged
    private List<String> fileSliceIsbadAndSort(File file, long fileSlicSize) throws Exception {
        String absolutePath = file.getAbsolutePath();
        List<String> filesAll = FileUtils.getFilesAll(absolutePath);
        if (filesAll.size() < 1) {
            // slices missing: delete all slice files and upload from scratch
            FileUtils.delFilesAllReview(absolutePath, true);
            throw new Exception("slice damaged");
        }
        // sort the slices by sequence number, ascending
        List<String> collect = filesAll.stream().sorted((a, b) -> fileId(a) - fileId(b)).collect(Collectors.toList());
        // after sorting, if two consecutive sequence numbers differ by more than 1, a slice is missing
        for (int i = 0; i < collect.size() - 1; i++) {
            // check slice continuity
            if (fileId(collect.get(i)) - fileId(collect.get(i + 1)) != -1) {
                // slice damaged: delete all slice files and upload from scratch
                FileUtils.delFilesAllReview(absolutePath, true);
                throw new Exception("slice damaged");
            }
            // check slice integrity (size)
            if (new File(collect.get(i)).length() != fileSlicSize) {
                // slice damaged: delete all slice files and upload from scratch
                FileUtils.delFilesAllReview(absolutePath, true);
                throw new Exception("slice damaged");
            }
        }
        return collect;
    }
}
```