More aggressive rageshake log culling

Also bump the client-side timeout on the upload from 3 mins to 5 mins, to see
if it helps people on slower connections.
Richard van der Hoff, 2017-02-23 14:22:03 +00:00
parent 6690df1203
commit e2cec7b69c
1 changed file with 10 additions and 6 deletions

@@ -314,17 +314,20 @@ class IndexedDBLogStore {
         let size = 0;
         for (let i = 0; i < allLogIds.length; i++) {
             let lines = await fetchLogs(allLogIds[i]);
+            if (i > 0 && size + lines.length > MAX_LOG_SIZE) {
+                // the remaining log IDs should be removed. If we go out of
+                // bounds this is just []
+                //
+                // XXX: there's nothing stopping the current session exceeding
+                // MAX_LOG_SIZE. We ought to think about culling it.
+                removeLogIds = allLogIds.slice(i + 1);
+                break;
+            }
             logs.push({
                 lines: lines,
                 id: allLogIds[i],
             });
             size += lines.length;
-            if (size > MAX_LOG_SIZE) {
-                // the remaining log IDs should be removed. If we go out of
-                // bounds this is just []
-                removeLogIds = allLogIds.slice(i + 1);
-                break;
-            }
         }
         if (removeLogIds.length > 0) {
             console.log("Removing logs: ", removeLogIds);
@@ -485,6 +488,7 @@ module.exports = {
                 user_agent: userAgent,
             },
             json: true,
+            timeout: 5 * 60 * 1000,
         }, (err, res) => {
             if (err) {
                 reject(err);
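The added timeout is the HTTP client's per-request timeout in milliseconds; the (err, res) callback style above matches the request API, which takes exactly such an option. A sketch of what the surrounding upload plausibly looks like (the helper name, endpoint, and payload fields are made up for illustration, not lifted from the module):

    // Sketch, assuming the callback-style 'request' HTTP client seen above.
    const request = require("request");

    function submitReport(bugReportEndpoint, logs, userAgent) {
        return new Promise((resolve, reject) => {
            request({
                method: "POST",
                url: bugReportEndpoint,
                body: {
                    logs: logs,
                    user_agent: userAgent,
                },
                json: true,
                timeout: 5 * 60 * 1000, // 5 mins, up from 3, for slow connections
            }, (err, res) => {
                if (err) {
                    reject(err);
                    return;
                }
                if (res.statusCode < 200 || res.statusCode >= 300) {
                    reject(new Error("HTTP " + res.statusCode));
                    return;
                }
                resolve();
            });
        });
    }

If the server still has not answered after five minutes, the callback fires with err set (an ETIMEDOUT-style error from the client) and the upload is reported as failed, rather than hanging indefinitely.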