I'm having trouble figuring this out. I'm using Papa Parse to parse a huge CSV file and feed the resulting data set into d3 and crossfilter. First, I used Node.js just to run a server for EJS (I haven't actually used any embedded JS). Run that way, my application works like a charm: I can see the chunk callback receiving results in pieces based on the 10 MB default, and I get my whole data set.

Then I had to move my code into Eclipse, convert it to JSP/HTML, and run it on my WebSphere server instead of Node.js. Now I notice the chunk callback receives my entire data set in a single call instead of 10 MB at a time, and that crashes my IE11 browser. Is this a bug? I tried setting Papa.LocalChunkSize to the same number of megabytes, but it still loads the whole data set at once. Please, I need help.
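For reference, here is roughly how I set the chunk size before parsing. (I'm not sure whether Papa.LocalChunkSize or Papa.RemoteChunkSize is the right setting for a file fetched with download: true, so treat the second line as a guess on my part.)

// What I tried: force a 10 MB chunk size globally before calling Papa.parse.
// Papa.LocalChunkSize applies to local File objects; Papa.RemoteChunkSize
// may be the one that matters here, since the CSV is downloaded over HTTP.
Papa.LocalChunkSize = 10 * 1024 * 1024;   // 10 MB (what I tried)
Papa.RemoteChunkSize = 10 * 1024 * 1024;  // 10 MB (my assumption that this applies)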
Here is my actual code, just in case:
var chunk1Finish = false;          // flag for another parse elsewhere on the page, not shown here
var chunkMainData1Finish = false;  // true once the first chunk of the main data has arrived
var finishLoadingMainData = false;
var mdata = [];

Papa.parse("data2/week201751custinqfinal_v4.csv", {
    download: true,
    header: true,
    skipEmptyLines: true,
    complete: function() {
        console.log("complete!! the MAIN data length is: " + mdata.length);
        finishLoadingMainData = true;
        // finishLoadingTrendData is set by another parse elsewhere on the page
        buildPage(finishLoadingMainData, finishLoadingTrendData);
    },
    chunk: function(results) {
        if (chunkMainData1Finish) {
            console.log("chunkMainData1Finish is true");
            // append subsequent chunks to the accumulated rows
            Array.prototype.push.apply(mdata, results.data);
        } else {
            console.log("chunkMainData1Finish is false");
            // first chunk: start the accumulator with its rows
            mdata = results.data;
            chunkMainData1Finish = true;
        }
    },
    error: function(error) {
        // log the error object itself rather than "[object Object]"
        console.log("error!!!!", error);
    }
});
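Side note: I realize the first-chunk flag is probably unnecessary, since mdata starts out as an empty array; as far as I can tell, the chunk handler could just always append, like this:

chunk: function(results) {
    // appending works for the first chunk too, because mdata starts as []
    Array.prototype.push.apply(mdata, results.data);
},

But that shouldn't change the chunking behavior I'm asking about.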