More aggressive rageshake log culling
Also bump the client-side timeout on the upload from 3 mins to 5 mins, to see if it helps people on slower connections.
parent 6690df1203
commit e2cec7b69c

1 changed file with 10 additions and 6 deletions
@@ -314,17 +314,20 @@ class IndexedDBLogStore {
         let size = 0;
         for (let i = 0; i < allLogIds.length; i++) {
             let lines = await fetchLogs(allLogIds[i]);
+            if (i > 0 && size + lines.length > MAX_LOG_SIZE) {
+                // the remaining log IDs should be removed. If we go out of
+                // bounds this is just []
+                //
+                // XXX: there's nothing stopping the current session exceeding
+                // MAX_LOG_SIZE. We ought to think about culling it.
+                removeLogIds = allLogIds.slice(i + 1);
+                break;
+            }
             logs.push({
                 lines: lines,
                 id: allLogIds[i],
             });
             size += lines.length;
-            if (size > MAX_LOG_SIZE) {
-                // the remaining log IDs should be removed. If we go out of
-                // bounds this is just []
-                removeLogIds = allLogIds.slice(i + 1);
-                break;
-            }
         }
         if (removeLogIds.length > 0) {
             console.log("Removing logs: ", removeLogIds);
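To make the new culling order concrete, here is a minimal runnable sketch of the loop above. MAX_LOG_SIZE, the instance IDs and the in-memory "store" are hypothetical stand-ins for the real IndexedDB-backed rageshake internals; only the loop body mirrors the diff. The first (current) log file is always kept, and any later file that would push the running total over the cap stops the loop and marks the IDs after it for deletion.

    // Sketch only: the cap and store below are assumptions, not rageshake code.
    const MAX_LOG_SIZE = 1024 * 1024; // assumed cap of ~1M characters

    const store = new Map([
        ["instance-0", "x".repeat(800 * 1024)], // current session, always kept
        ["instance-1", "x".repeat(400 * 1024)], // would push the total over the cap
        ["instance-2", "x".repeat(100 * 1024)],
    ]);

    async function fetchLogs(id) {
        return store.get(id);
    }

    async function consumeLogs(allLogIds) {
        const logs = [];
        let removeLogIds = [];
        let size = 0;
        for (let i = 0; i < allLogIds.length; i++) {
            const lines = await fetchLogs(allLogIds[i]);
            // Same check as the diff: never cull the first log file, but stop
            // before adding any later file that would exceed MAX_LOG_SIZE.
            if (i > 0 && size + lines.length > MAX_LOG_SIZE) {
                removeLogIds = allLogIds.slice(i + 1);
                break;
            }
            logs.push({ lines: lines, id: allLogIds[i] });
            size += lines.length;
        }
        return { logs: logs, removeLogIds: removeLogIds };
    }

    consumeLogs(["instance-0", "instance-1", "instance-2"]).then((res) => {
        console.log(res.logs.map((l) => l.id)); // ["instance-0"]
        console.log(res.removeLogIds);          // ["instance-2"]
    });

Note that, as the XXX comment in the diff says, the current session's own log (i === 0) is never culled, so it can still exceed MAX_LOG_SIZE on its own.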
@@ -485,6 +488,7 @@ module.exports = {
                 user_agent: userAgent,
             },
             json: true,
+            timeout: 5 * 60 * 1000,
         }, (err, res) => {
             if (err) {
                 reject(err);
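In request()-style HTTP clients of the shape used here (an options object plus an (err, res) callback, e.g. the request npm package), the timeout option is given in milliseconds, so 5 * 60 * 1000 is the new 5-minute client-side cap. A sketch of the call shape, with a placeholder endpoint and body rather than the real rageshake submission:

    // Sketch assuming a request()-style client; URL and body are placeholders.
    const request = require("browser-request");

    function submitLogs(body, userAgent) {
        return new Promise((resolve, reject) => {
            request({
                method: "POST",
                url: "https://example.com/api/submit", // placeholder endpoint
                body: Object.assign({}, body, { user_agent: userAgent }),
                json: true,
                // timeout is in milliseconds for request-style clients:
                // 5 * 60 * 1000 ms = 5 minutes before the client gives up.
                timeout: 5 * 60 * 1000,
            }, (err, res) => {
                if (err) {
                    reject(err);
                    return;
                }
                resolve(res);
            });
        });
    }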