Back when we discussed Stream/Buffer, we had already started using require("fs")
to pull in the file module for a few operations.
The file module is a wrapper around low-level file operations, such as reading/writing, opening/closing, and deleting/creating files.
Its most distinctive trait is that every method comes in both a synchronous and an asynchronous version: methods with the Sync suffix are synchronous, and the rest are asynchronous.
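A quick illustration of the two flavors (a minimal sketch; the file path is just a placeholder):

const fs = require("fs");

// synchronous: blocks until the whole file has been read
const dataSync = fs.readFileSync("./target.txt", "utf8");

// asynchronous: hands the result to a callback
fs.readFile("./target.txt", "utf8", (err, data) => {
  if (err) throw err;
  console.log(data);
});

// there is also a Promise-based flavor under fs.promises
fs.promises.readFile("./target.txt", "utf8").then((data) => console.log(data));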
On the command line, ll (commonly an alias for ls -l) shows the permissions of the files/folders in a directory.
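Inside Node, a rough equivalent is to inspect a path's permission bits with the file module (a small sketch, again with a placeholder path):

const fs = require("fs");

// stat exposes the permission bits via stats.mode
fs.stat("./target.txt", (err, stats) => {
  if (err) throw err;
  // the lowest 9 bits are the rwx flags for owner/group/others, printed in octal (e.g. 644)
  console.log((stats.mode & 0o777).toString(8));
});

// access checks whether the current process may read and write the file
fs.access("./target.txt", fs.constants.R_OK | fs.constants.W_OK, (err) => {
  console.log(err ? "no read/write permission" : "readable and writable");
});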
const fs = require("fs"); const path = require("path"); const { promisify } = require("util"); const reg = new RegExp("(.ts[x]*|.js[x]*|.json)$"); const targetPath = path.resolve(__dirname, "../mini-proxy-mobx"); const readDir = (targetPath, callback) => { fs.readdir(targetPath, (err, files) => { if (err) callback(err); files.forEach(async (file) => { const filePath = path.resolve(__dirname, `${targetPath}/${file}`); const stats = await promisify(fs.stat)(filePath); if (stats.isDirectory()) { await readDir(filePath); } else { checkFile(filePath); } }); }); }; const checkFile = (file) => { if (reg.test(file)) { console.log(file); } }; readDir(targetPath, (err) => { throw err; });
const fs = require("fs"); const path = require("path"); const sourceFile = path.resolve(__dirname, "../doc/Mobx原理及丐版实现.md"); const targetFile = path.resolve(__dirname, "target.txt"); fs.readFile(sourceFile, (err, data) => { if (err) throw err; const dataStr = data.toString(); fs.writeFile(targetFile, dataStr, (err) => { if (err) throw err; console.log("copy success~"); process.exit(1); }); });
Is there anything wrong with this approach? As we discussed in the Stream section, the data needs to be handled piece by piece; otherwise memory cannot cope with a large file.
Buffer usage:

const copyFile = (source, target, size, callback) => {
  const sourceFile = path.resolve(__dirname, source);
  const targetFile = path.resolve(__dirname, target);
  const buf = Buffer.alloc(size);
  let hasRead = 0; // position of the next read
  let hasWrite = 0; // position of the next write
  fs.open(sourceFile, "r", (err, sourceFd) => {
    if (err) return callback(err);
    fs.open(targetFile, "w", (err, targetFd) => {
      if (err) return callback(err);
      function next() {
        fs.read(sourceFd, buf, 0, size, hasRead, (err, bytesRead) => {
          if (err) return callback(err);
          hasRead += bytesRead;
          if (bytesRead) {
            // only write the bytes that were actually read
            fs.write(targetFd, buf, 0, bytesRead, hasWrite, (err, bytesWritten) => {
              if (err) return callback(err);
              hasWrite += bytesWritten;
              next();
            });
            return;
          }
          fs.close(sourceFd, () => {
            console.log("关闭源文件");
          });
          fs.close(targetFd, () => {
            console.log("关闭目标文件");
          });
        });
      }
      next();
    });
  });
};

Stream usage:
const fs = require("fs"); const path = require("path"); const readStream = fs.createReadStream( path.resolve(__dirname, "../doc/Mobx原理及丐版实现.md") ); const writeStream = fs.createWriteStream(path.resolve("target.txt")); readStream.pipe(writeStream);
Single-file upload on the server side (Koa + multer; PORT, path, multer and router come from the surrounding app setup):

// URL base of uploaded resources
const RESOURCE_URL = `http://localhost:${PORT}`;
// directory where uploaded files are stored
const UPLOAD_DIR = path.join(__dirname, "../public");

const storage = multer.diskStorage({
  destination: async function (req, file, cb) {
    // set the storage directory
    cb(null, UPLOAD_DIR);
  },
  filename: function (req, file, cb) {
    // set the file name
    cb(null, `${file.originalname}`);
  },
});
const multerUpload = multer({ storage });

router.post(
  "/uploadSingle",
  async (ctx, next) => {
    try {
      await next();
      ctx.body = {
        code: 1,
        msg: "文件上传成功",
        url: `${RESOURCE_URL}/${ctx.file.originalname}`,
      };
    } catch (error) {
      console.log(error);
      ctx.body = {
        code: 0,
        msg: "文件上传失败",
      };
    }
  },
  multerUpload.single("file")
);
# Concrete implementation

Large-file upload breaks down into the following steps; the code for each step is shown below.

Front-end slicing: a File is a special kind of Blob object, so slice can be used to split a large file into chunks.

Uploading the slices: build a FormData for each chunk, put the binary data in the slice field, and send the requests separately; onUploadProgress reports the upload progress of each chunk.

Merging the slices: once every chunk has been uploaded successfully, we still want the original file saved as a whole, so the chunks need to be merged on the server.

Upload verification: before uploading a file, first ask the server whether it already exists; if it does, skip the upload, otherwise go through the flow above. Judging purely by the file name is too crude: if the file's contents change but the name does not, the check breaks. A better approach is to compute a hash from the file's contents/metadata and use that for the comparison.
const BIG_FILE_SIZE = 25 * 1024 * 1024;
const SLICE_FILE_SIZE = 5 * 1024 * 1024;
const uploadFile = async () => {
if (!fileList?.length) return alert("请选择文件");
const file = fileList[0];
const shouldUpload = await verifyUpload(file.name);
if (!shouldUpload) return message.success("文件已存在,上传成功");
if (file.size > BIG_FILE_SIZE) {
// big handle
getSliceList(file);
}
// // normal handle
// upload("/uploadSingle", file);
};
const getSliceList = (file: RcFile) => {
const sliceList: ISlice[] = [];
let curSize = 0;
let index = 0;
while (curSize < file.size) {
sliceList.push({
id: shortid.generate(),
slice: new File(
[file.slice(curSize, (curSize += SLICE_FILE_SIZE))],
`${file.name}-${index}`
),
name: file.name,
sliceName: `${file.name}-${index}`,
progress: 0,
});
index++;
}
uploadSlice(sliceList);
setSliceList(sliceList);
};
const uploadSlice = async (sliceList: ISlice[]) => {
const requestList = sliceList
.map(({ slice, sliceName, name }: ISlice, index: number) => {
const formData = new FormData();
formData.append("slice", slice);
formData.append("sliceName", sliceName);
formData.append("name", name);
return { formData, index, sliceName };
})
.map(({ formData }: { formData: FormData }, index: number) =>
request.post("/uploadBig", formData, {
onUploadProgress: (progressEvent: AxiosProgressEvent) =>
sliceUploadProgress(progressEvent, index),
})
);
await Promise.all(requestList);
};
// Server
const storage = multer.diskStorage({
destination: async function (req, file, cb) {
const name = file?.originalname.split(".")?.[0];
const SLICE_DIR = path.join(UPLOAD_DIR, `${name}-slice`);
if (!fs.existsSync(SLICE_DIR)) {
      fs.mkdirSync(SLICE_DIR);
}
    // set the storage directory
cb(null, SLICE_DIR);
},
filename: async function (req, file, cb) {
    // set the file name
cb(null, `${file?.originalname}`);
},
});
// Server
router.post(
"/uploadBig",
async (ctx, next) => {
try {
await next();
      const slice = ctx.files.slice[0]; // the uploaded slice file
ctx.body = {
code: 1,
msg: "文件上传成功",
url: `${RESOURCE_URL}/${slice.originalname}`,
};
} catch (error) {
ctx.body = {
code: 0,
msg: "文件上传失败",
};
}
},
multerUpload.fields([{ name: "slice" }])
);
// Client
const uploadSlice = async (sliceList: ISlice[]) => {
  // ...same as uploadSlice above
mergeSlice();
};
const mergeSlice = () => {
request.post("/mergeSlice", {
size: SLICE_FILE_SIZE,
name: fileList[0].name,
});
};
// Server
router.post("/mergeSlice", async (ctx, next) => {
try {
await next();
const { size, name } = ctx.request.body ?? {};
const sliceName = name.split(".")?.[0];
const filePath = path.join(UPLOAD_DIR, name);
    const sliceDir = path.join(UPLOAD_DIR, `${sliceName}-slice`);
    await mergeSlice(filePath, sliceDir, size);
ctx.body = {
code: 1,
msg: "文件合并成功",
};
} catch (error) {
ctx.body = {
code: 0,
msg: "文件合并失败",
};
}
});
// read each slice with a stream and write its data into the target file
const pipeStream = (path, writeStream) => {
return new Promise((resolve) => {
const readStream = fs.createReadStream(path);
readStream.on("end", () => {
      fs.unlinkSync(path); // delete the slice file once it has been fully read
resolve();
});
readStream.pipe(writeStream);
});
};
const mergeSlice = async (filePath, sliceDir, size) => {
if (!fs.existsSync(sliceDir)) {
throw new Error("当前文件不存在");
}
  const slices = fs.readdirSync(sliceDir);
slices.sort((a, b) => a.split("-")[1] - b.split("-")[1]);
try {
const slicesPipe = slices.map((sliceName, index) => {
return pipeStream(
path.resolve(sliceDir, sliceName),
fs.createWriteStream(filePath, { start: index * size })
);
});
await Promise.all(slicesPipe);
    fs.rmdirSync(sliceDir); // remove the slice directory once merging is done
} catch (error) {
console.log(error);
}
};
// Client
const verifyUpload = async (name: string) => {
const res = await request.post("/verify", { name });
return res?.data?.data;
};
const uploadFile = async () => {
if (!fileList?.length) return alert("请选择文件");
const file = fileList[0];
const shouldUpload = await verifyUpload(file.name);
if (!shouldUpload) return message.success("文件已存在,上传成功");
if (file.size > BIG_FILE_SIZE) {
// big handle
getSliceList(file);
}
// // normal handle
// upload("/uploadSingle", file);
};
// Server
router.post("/verify", async (ctx, next) => {
try {
await next();
const { name } = ctx.request.body ?? {};
const filePath = path.resolve(UPLOAD_DIR, name);
if (fs.existsSync(filePath)) {
ctx.body = {
code: 1,
data: false,
};
} else {
ctx.body = {
code: 1,
data: true,
};
}
} catch (error) {
ctx.body = {
code: 0,
msg: "检测失败",
};
}
});
const calculateMD5 = (file: any) => new Promise((resolve, reject) => {
const chunkSize = SLICE_FILE_SIZE
const fileReader = new FileReader();
const spark = new SparkMD5.ArrayBuffer();
let cursor = 0;
fileReader.onerror = () => {
reject(new Error('Error reading file'));
};
fileReader.onload = (e: any) => {
spark.append(e.target.result);
cursor += e.target.result.byteLength;
if (cursor < file.size) loadNext();
else resolve(spark.end());
};
const loadNext = () => {
const fileSlice = file.slice(cursor, cursor + chunkSize);
fileReader.readAsArrayBuffer(fileSlice);
}
loadNext();
});
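calculateMD5 is not wired into the flow above. One possible way to use it, following the idea described earlier, is to send the content hash to the verification endpoint instead of relying on the file name alone. A hypothetical sketch (the hash field and the corresponding server-side lookup are assumptions, not part of the code above):

const verifyUploadByHash = async (file: RcFile) => {
  const hash = await calculateMD5(file);
  // the server would then check for an existing file by hash rather than by name
  const res = await request.post("/verify", { name: file.name, hash });
  return res?.data?.data;
};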
# Summary

All of the code in this article can be found on github. Starting from file basics and the commonly used file APIs, this article focused on hands-on use of files in Node, and finally put those pieces together to implement large-file upload.