This file records the optimization process, from complex to simple and from slow to fast.
FileReader
Most of the methods found on the Internet use FileReader to implement this.
// NOTE(review): FileReader.readAsText() requires a Blob/File object, not a
// path string — as written, passing '../test.txt' throws a TypeError in the
// browser. Obtain a File from an <input type="file"> (or wrap the text in a
// Blob) before calling readAsText. TODO confirm intended source of `file`.
const file = '../test.txt'
const reader = new FileReader()
reader.readAsText(file, "UTF-8")
let dataArr = []
reader.onload = (evt) => {
const fileString = evt.target.result
// Split ONCE, outside the loop: the original re-split the entire file string
// on every iteration, which is O(n^2) in the number of lines. Also use one
// consistent delimiter ('\r\n') — the original counted lines with '\n' but
// indexed rows split on '\r\n', which can disagree.
const lines = fileString.trim().split('\r\n')
// Start at 1 to skip the header row.
for (let index = 1; index < lines.length; index++) {
// First comma-separated column of this row.
const fileline = lines[index].split(",")[0]
// Parse the whitespace-separated fields of that column as numbers.
const filelineNumber = fileline.split(/\s+/).map(Number)
dataArr.push(filelineNumber)
}
}
ajax+FileReader
This feels essentially no different from the approach above, and it is very slow.
urlToBlob(file_url) {
return new Promise(function (resolve, reject){
let xhr = new XMLHttpRequest();
xhr.open("get", file_url, true);
xhr.responseType = "blob";
xhr.onload = function () {
if (this.status == 200) {
const reader = new FileReader()
reader.onload = function () {
resolve(reader.result)
}
reader.readAsText(this.response);
}else{
console.log('err');
}
};
xhr.send();
})
},
// Load the test file and walk its rows. Split ONCE instead of re-splitting
// the whole string twice per loop iteration (the original was O(n^2) in the
// number of lines, and counted rows with '\n' while indexing rows split on
// '\r\n' — which can disagree).
this.urlToBlob('/static/test.txt').then(res => {
const fileString = res
const lines = fileString.split("\r\n")
// Start at 1 to skip the header row.
for (let index = 1; index < lines.length; index++) {
// Column 0 and column 1 of this comma-separated row.
const [fileline, lineValue] = lines[index].split(",")
// fileline / lineValue are available here for further processing.
}
}).catch(err => {
// The original chain had no rejection handler; surface failures.
console.error('failed to load /static/test.txt', err)
})
fetch
In actual testing this was the fastest and most concise approach: my 150,000 rows of data were processed almost instantly.
// Not cached yet: fetch first, then store, then return.
// Check res.ok so an HTTP error page is not silently parsed as point data.
let resMesh = await fetch(txtUrl).then(res => {
if (!res.ok) throw new Error(`fetch ${txtUrl} failed with status ${res.status}`);
return res.text();
})
// Parse the point file.
let pointRows = resMesh.split("\n");
// Start at 1 to skip the header row; stop at length-1 because a trailing
// newline leaves an empty final entry. NOTE(review): this assumes the file
// always ends with '\n' — confirm, otherwise the last data row is dropped.
for (let i = 1; i < pointRows.length - 1; i++) {
let point = pointRows[i];
// Split each row on its delimiter (space here; use ',' for comma-separated data).
let p = point.split(" ");
// p[0], p[1], p[2], ... are the fields of this row.
}