/*
Copyright 2017 OpenMarket Ltd

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

import Promise from 'bluebird';

// This module contains all the code needed to log the console, persist it to
// disk and submit bug reports. Rationale is as follows:
// - Monkey-patching the console is preferable to having a log library because
//   we can catch logs by other libraries more easily, without having to make
//   them all depend on the same log framework / pass the logger around.
// - We use IndexedDB to persist logs because it has generous disk space
//   limits compared to local storage. IndexedDB does not work in incognito
//   mode, in which case this module will not be able to write logs to disk.
//   However, the logs will still be stored in-memory, so they can still be
//   submitted in a bug report should the user wish to: we can also store more
//   logs in-memory than in local storage, which does work in incognito mode.
//   We also need to handle the case where there are 2+ tabs. Each JS runtime
//   generates a random string which serves as the "ID" for that tab/session.
//   These IDs are stored along with the log lines.
// - Bug reports are sent as a POST over HTTPS: it purposefully does not use
//   Matrix as bug reports may be made when Matrix is not responsive (which may
//   be the cause of the bug). We send the most recent N MB of UTF-8 log data,
//   starting with the most recent, which we know because the "ID"s are
//   actually timestamps. We then purge the remaining logs. We also do this
//   purge on startup to prevent logs from accumulating.
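
// Illustrative usage sketch of this module's exports (not executed here; the
// `rageshake` import name and the `sendBugReport` helper are assumptions about
// how an application might wire this up, not part of this file):
//
//   import rageshake from './rageshake';
//
//   rageshake.init().then(() => {
//       // console.* calls are now captured and periodically flushed to
//       // IndexedDB; purge logs left over from previous sessions.
//       return rageshake.cleanup();
//   });
//
//   // Later, when the user reports a bug:
//   //   const logs = await rageshake.getLogsForReport();
//   //   await sendBugReport(logs); // e.g. POST each {id, lines} over HTTPS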

// the frequency with which we flush to indexeddb
const FLUSH_RATE_MS = 30 * 1000;

// the length of log data we keep in indexeddb (and include in the reports)
const MAX_LOG_SIZE = 1024 * 1024 * 1; // 1 MB

// A class which monkey-patches the global console and stores log lines.
class ConsoleLogger {
    constructor() {
        this.logs = "";
    }

    monkeyPatch(consoleObj) {
        // Monkey-patch console logging
        const consoleFunctionsToLevels = {
            log: "I",
            info: "I",
            warn: "W",
            error: "E",
        };
        Object.keys(consoleFunctionsToLevels).forEach((fnName) => {
            const level = consoleFunctionsToLevels[fnName];
            const originalFn = consoleObj[fnName].bind(consoleObj);
            consoleObj[fnName] = (...args) => {
                this.log(level, ...args);
                originalFn(...args);
            };
        });
    }

    log(level, ...args) {
        // We don't know what locale the user may be running so use ISO strings
        const ts = new Date().toISOString();
        // Some browsers support string formatting which we're not doing here
        // so the lines are a little more ugly but easy to implement / quick to
        // run.
        // Example line:
        // 2017-01-18T11:23:53.214Z W Failed to set badge count
        const line = `${ts} ${level} ${args.join(' ')}\n`;
        // Using + really is the quickest way in JS
        // http://jsperf.com/concat-vs-plus-vs-join
        this.logs += line;
    }

    /**
     * Retrieve log lines to flush to disk.
     * @param {boolean} keepLogs True to not delete logs after flushing.
     * @return {string} \n delimited log lines to flush.
     */
    flush(keepLogs) {
        // The ConsoleLogger doesn't care how these end up on disk, it just
        // flushes them to the caller.
        if (keepLogs) {
            return this.logs;
        }
        const logsToFlush = this.logs;
        this.logs = "";
        return logsToFlush;
    }
}

// A class which stores log lines in an IndexedDB instance.
class IndexedDBLogStore {
    constructor(indexedDB, logger) {
        this.indexedDB = indexedDB;
        this.logger = logger;
        this.id = "instance-" + Math.random() + Date.now();
        this.index = 0;
        this.db = null;
        this.flushPromise = null;
        // set if flush() is called whilst one is ongoing
        this.flushAgainPromise = null;
    }

    /**
     * @return {Promise} Resolves when the store is ready.
     */
    connect() {
        const req = this.indexedDB.open("logs");
        return new Promise((resolve, reject) => {
            req.onsuccess = (event) => {
                this.db = event.target.result;
                // Periodically flush logs to local storage / indexeddb
                setInterval(this.flush.bind(this), FLUSH_RATE_MS);
                resolve();
            };

            req.onerror = (event) => {
                const err = (
                    "Failed to open log database: " + event.target.errorCode
                );
                console.error(err);
                reject(new Error(err));
            };

            // First time: Setup the object store
            req.onupgradeneeded = (event) => {
                const db = event.target.result;
                const logObjStore = db.createObjectStore("logs", {
                    keyPath: ["id", "index"]
                });
                // Keys in the database look like: [ "instance-148938490", 0 ]
                // Later on we need to query everything based on an instance id.
                // In order to do this, we need to set up an index on "id".
                logObjStore.createIndex("id", "id", { unique: false });

                logObjStore.add(
                    this._generateLogEntry(
                        new Date() + " ::: Log database was created."
                    )
                );

                const lastModifiedStore = db.createObjectStore("logslastmod", {
                    keyPath: "id",
                });
                lastModifiedStore.add(this._generateLastModifiedTime());
            };
        });
    }

    /**
     * Flush logs to disk.
     *
     * There are guards to protect against race conditions in order to ensure
     * that all previous flushes have completed before the most recent flush.
     * Consider without guards:
     * - A calls flush() periodically.
     * - B calls flush() and wants to send logs immediately afterwards.
     * - If B doesn't wait for A's flush to complete, B will be missing the
     *   contents of A's flush.
     * To protect against this, we set 'flushPromise' when a flush is ongoing.
     * Subsequent calls to flush() during this period will chain another flush,
     * then keep returning that same chained flush.
     *
     * This guarantees that we will always eventually do a flush when flush()
     * is called.
     *
     * @return {Promise} Resolved when the logs have been flushed.
     */
    flush() {
        // check if a flush() operation is ongoing
        if (this.flushPromise && this.flushPromise.isPending()) {
            if (this.flushAgainPromise && this.flushAgainPromise.isPending()) {
                // this is the 3rd+ time we've called flush() : return the same
                // promise.
                return this.flushAgainPromise;
            }
            // queue up a flush to occur immediately after the pending one
            // completes.
            this.flushAgainPromise = this.flushPromise.then(() => {
                return this.flush();
            });
            return this.flushAgainPromise;
        }
        // there is no flush promise or there was but it has finished, so do
        // a brand new one, destroying the chain which may have been built up.
        this.flushPromise = new Promise((resolve, reject) => {
            if (!this.db) {
                // not connected yet or user rejected access for us to r/w to
                // the db.
                reject(new Error("No connected database"));
                return;
            }
            const lines = this.logger.flush();
            if (lines.length === 0) {
                resolve();
                return;
            }
            const txn = this.db.transaction(
                ["logs", "logslastmod"], "readwrite"
            );
            const objStore = txn.objectStore("logs");
            txn.oncomplete = (event) => {
                resolve();
            };
            txn.onerror = (event) => {
                console.error(
                    "Failed to flush logs : ", event
                );
                reject(
                    new Error("Failed to write logs: " + event.target.errorCode)
                );
            };
            objStore.add(this._generateLogEntry(lines));
            const lastModStore = txn.objectStore("logslastmod");
            lastModStore.put(this._generateLastModifiedTime());
        });
        return this.flushPromise;
    }

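    // A minimal illustration of the guard documented above (not executed here;
    // `store` stands for any IndexedDBLogStore instance):
    //
    //   const a = store.flush(); // no flush ongoing: starts a new one
    //   const b = store.flush(); // `a` still pending: chained to run after it
    //   const c = store.flush(); // `b` still pending: returns `b` again
    //
    //   // Each promise resolves once its flush completes, so awaiting `c`
    //   // guarantees everything logged before the call has been written.
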
    /**
     * Consume the most recent logs and return them. Older logs which are not
     * returned are deleted at the same time, so this can be called at startup
     * to do house-keeping to keep the logs from growing too large.
     *
     * @return {Promise<Object[]>} Resolves to an array of objects. The array is
     * sorted in time (oldest first) based on when the log file was created (the
     * log ID). The objects have said log ID in an "id" field and "lines" which
     * is a big string with all the new-line delimited logs.
     */
    async consume() {
        const db = this.db;

        // Returns: a string representing the concatenated logs for this ID.
        function fetchLogs(id) {
            const o = db.transaction("logs", "readonly").objectStore("logs");
            return selectQuery(o.index("id"), IDBKeyRange.only(id),
                (cursor) => {
                    return {
                        lines: cursor.value.lines,
                        index: cursor.value.index,
                    };
                }).then((linesArray) => {
                    // We have been storing logs periodically, so string them
                    // all together *in order of index* now
                    linesArray.sort((a, b) => {
                        return a.index - b.index;
                    });
                    return linesArray.map((l) => l.lines).join("");
                });
        }

        // Returns: A sorted array of log IDs. (newest first)
        function fetchLogIds() {
            // To gather all the log IDs, query for all records in logslastmod.
            const o = db.transaction("logslastmod", "readonly").objectStore(
                "logslastmod"
            );
            return selectQuery(o, undefined, (cursor) => {
                return {
                    id: cursor.value.id,
                    ts: cursor.value.ts,
                };
            }).then((res) => {
                // Sort IDs by timestamp (newest first)
                return res.sort((a, b) => {
                    return b.ts - a.ts;
                }).map((a) => a.id);
            });
        }

        function deleteLogs(id) {
            return new Promise((resolve, reject) => {
                const txn = db.transaction(
                    ["logs", "logslastmod"], "readwrite"
                );
                const o = txn.objectStore("logs");
                // only load the key path, not the data which may be huge
                const query = o.index("id").openKeyCursor(IDBKeyRange.only(id));
                query.onsuccess = (event) => {
                    const cursor = event.target.result;
                    if (!cursor) {
                        return;
                    }
                    o.delete(cursor.primaryKey);
                    cursor.continue();
                };
                txn.oncomplete = () => {
                    resolve();
                };
                txn.onerror = (event) => {
                    reject(
                        new Error(
                            "Failed to delete logs for " +
                            `'${id}' : ${event.target.errorCode}`
                        )
                    );
                };
                // delete last modified entries
                const lastModStore = txn.objectStore("logslastmod");
                lastModStore.delete(id);
            });
        }

        const allLogIds = await fetchLogIds();
        let removeLogIds = [];
        const logs = [];
        let size = 0;
        for (let i = 0; i < allLogIds.length; i++) {
            const lines = await fetchLogs(allLogIds[i]);

            // always include at least one log file, but only include
            // subsequent ones if they won't take us over the MAX_LOG_SIZE
            if (i > 0 && size + lines.length > MAX_LOG_SIZE) {
                // the remaining log IDs should be removed. If we go out of
                // bounds this is just []
                //
                // XXX: there's nothing stopping the current session exceeding
                // MAX_LOG_SIZE. We ought to think about culling it.
                removeLogIds = allLogIds.slice(i + 1);
                break;
            }

            logs.push({
                lines: lines,
                id: allLogIds[i],
            });
            size += lines.length;
        }
        if (removeLogIds.length > 0) {
            console.log("Removing logs: ", removeLogIds);
            // Don't await this because it's non-fatal if we can't clean up
            // logs.
            Promise.all(removeLogIds.map((id) => deleteLogs(id))).then(() => {
                console.log(`Removed ${removeLogIds.length} old logs.`);
            }, (err) => {
                console.error(err);
            });
        }
        return logs;
    }

    _generateLogEntry(lines) {
        return {
            id: this.id,
            lines: lines,
            index: this.index++,
        };
    }

    _generateLastModifiedTime() {
        return {
            id: this.id,
            ts: Date.now(),
        };
    }
}

/**
 * Helper method to collect results from a Cursor and promiseify it.
 * @param {ObjectStore|Index} store The store to perform openCursor on.
 * @param {IDBKeyRange=} keyRange Optional key range to apply on the cursor.
 * @param {Function} resultMapper A function which is repeatedly called with a
 * Cursor. It should return the data you want to keep from each row.
 * @return {Promise<T[]>} Resolves to an array of whatever you returned from
 * resultMapper.
 */
function selectQuery(store, keyRange, resultMapper) {
    const query = store.openCursor(keyRange);
    return new Promise((resolve, reject) => {
        const results = [];
        query.onerror = (event) => {
            reject(new Error("Query failed: " + event.target.errorCode));
        };
        // collect results
        query.onsuccess = (event) => {
            const cursor = event.target.result;
            if (!cursor) {
                resolve(results);
                return; // end of results
            }
            results.push(resultMapper(cursor));
            cursor.continue();
        };
    });
}

let store = null;
let logger = null;
let initPromise = null;
module.exports = {
    /**
     * Configure rage shaking support for sending bug reports.
     * Modifies globals.
     * @return {Promise} Resolves when set up.
     */
    init: function() {
        if (initPromise) {
            return initPromise;
        }
        logger = new ConsoleLogger();
        logger.monkeyPatch(window.console);

        // just *accessing* indexedDB throws an exception in firefox with
        // indexeddb disabled.
        let indexedDB;
        try {
            indexedDB = window.indexedDB;
        } catch (e) {}

        if (indexedDB) {
            store = new IndexedDBLogStore(indexedDB, logger);
            initPromise = store.connect();
            return initPromise;
        }
        initPromise = Promise.resolve();
        return initPromise;
    },

    /**
     * Force-flush the in-memory console logs to the IndexedDB store, if any.
     */
    flush: function() {
        if (!store) {
            return;
        }
        store.flush();
    },

    /**
     * Clean up old logs.
     * @return {Promise} Resolves if cleaned logs.
     */
    cleanup: async function() {
        if (!store) {
            return;
        }
        await store.consume();
    },

    /**
     * Get a recent snapshot of the logs, ready for attaching to a bug report
     *
     * @return {Array<{lines: string, id: string}>} list of log data
     */
    getLogsForReport: async function() {
        if (!logger) {
            throw new Error(
                "No console logger, did you forget to call init()?"
            );
        }
        // If in incognito mode, store is null, but we still want bug report
        // sending to work going off the in-memory console logs.
        if (store) {
            // flush most recent logs
            await store.flush();
            return await store.consume();
        } else {
            return [{
                lines: logger.flush(true),
                id: "-",
            }];
        }
    },
};
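
// Sketch of how a consumer might submit the data returned by
// getLogsForReport(), per the POST-over-HTTPS rationale at the top of this
// file. Kept as a comment: the real submission code lives outside this
// module, and the endpoint URL and form field names below are assumptions
// for illustration only.
//
//   async function sendBugReport(userText) {
//       const logs = await rageshake.getLogsForReport();
//       const body = new FormData();
//       body.append('text', userText);
//       logs.forEach((log) => body.append('log', log.lines));
//       // Sent directly over HTTPS rather than via Matrix, so reports can
//       // still be filed when the homeserver is unresponsive.
//       await fetch('https://example.com/api/submit', {
//           method: 'POST',
//           body: body,
//       });
//   }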