From e9884768f6fa770d6f33e9ab456688928ba50f39 Mon Sep 17 00:00:00 2001
From: Kegan Dougal <kegan@matrix.org>
Date: Wed, 18 Jan 2017 16:00:13 +0000
Subject: [PATCH 01/20] Persist console logs to an IndexedDB instance

---
 src/vector/index.js     |   7 ++
 src/vector/rageshake.js | 180 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 187 insertions(+)
 create mode 100644 src/vector/rageshake.js

diff --git a/src/vector/index.js b/src/vector/index.js
index 8231950b..b965f01d 100644
--- a/src/vector/index.js
+++ b/src/vector/index.js
@@ -36,6 +36,13 @@ require('gfm.css/gfm.css');
 require('highlight.js/styles/github.css');
 require('draft-js/dist/Draft.css');
 
+const rageshake = require("./rageshake");
+rageshake.init().then(() => {
+    console.log("Initialised rageshake");
+}, (err) => {
+    console.error("Failed to initialise rageshake: " + err);
+});
+
 
  // add React and ReactPerf to the global namespace, to make them easier to
  // access via the console
diff --git a/src/vector/rageshake.js b/src/vector/rageshake.js
new file mode 100644
index 00000000..fe768630
--- /dev/null
+++ b/src/vector/rageshake.js
@@ -0,0 +1,180 @@
+/*
+Copyright 2017 OpenMarket Ltd
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This module contains all the code needed to log the console, persist it to disk and submit bug reports. Rationale is as follows:
+//  - Monkey-patching the console is preferable to having a log library because we can catch logs by other libraries more easily,
+//    without having to all depend on the same log framework / pass the logger around.
+//  - We use IndexedDB to persist logs because it has generous disk space limits compared to local storage. IndexedDB does not work
+//    in incognito mode, in which case this module will not be able to write logs to disk. However, the logs will still be stored
+//    in-memory, so can still be submitted in a bug report should the user wish to: we can also store more logs in-memory than in
+//    local storage, which does work in incognito mode.
+//  - Bug reports are sent as a POST over HTTPS: it purposefully does not use Matrix as bug reports may be made when Matrix is
+//    not responsive (which may be the cause of the bug).
+
+const FLUSH_RATE_MS = 30 * 1000;
+
+// A class which monkey-patches the global console and stores log lines.
+class ConsoleLogger {
+    constructor() {
+        this.logs = "";
+
+        // Monkey-patch console logging
+        const consoleFunctionsToLevels = {
+            log: "I",
+            info: "I",
+            error: "E",
+        };
+        Object.keys(consoleFunctionsToLevels).forEach((fnName) => {
+            const level = consoleFunctionsToLevels[fnName];
+            let originalFn = window.console[fnName].bind(window.console);
+            window.console[fnName] = (...args) => {
+                this.log(level, ...args);
+                originalFn(...args);
+            }
+        });
+    }
+
+    log(level, ...args) {
+        // We don't know what locale the user may be running so use ISO strings
+        const ts = new Date().toISOString();
+        // Some browsers support string formatting which we're not doing here
+        // so the lines are a little more ugly but easy to implement / quick to run.
+        // Example line:
+        // 2017-01-18T11:23:53.214Z W Failed to set badge count: Error setting badge. Message: Too many badges requests in queue.
+        const line = `${ts} ${level} ${args.join(' ')}\n`;
+        // Using + really is the quickest way in JS
+        // http://jsperf.com/concat-vs-plus-vs-join
+        this.logs += line;
+    }
+
+    /**
+     * Retrieve log lines to flush to disk.
+     * @return {string} \n delimited log lines to flush.
+     */
+    flush() {
+        // The ConsoleLogger doesn't care how these end up on disk, it just flushes them to the caller.
+        const logsToFlush = this.logs;
+        this.logs = "";
+        return logsToFlush;
+    }
+}
+
+// A class which stores log lines in an IndexedDB instance.
+class IndexedDBLogStore {
+    constructor(indexedDB, logger) {
+        this.indexedDB = indexedDB;
+        this.logger = logger;
+        this.db = null;
+    }
+
+    /**
+     * @return {Promise} Resolves when the store is ready.
+     */
+    connect() {
+        let req = this.indexedDB.open("logs");
+        return new Promise((resolve, reject) => {
+            req.onsuccess = (event) => {
+                this.db = event.target.result;
+                // Periodically flush logs to local storage / indexeddb
+                setInterval(this.flush.bind(this), FLUSH_RATE_MS);
+                resolve();
+            };
+
+            req.onerror = (event) => {
+                const err = "Failed to open log database: " + event.target.errorCode;
+                console.error(err);
+                reject(new Error(err));
+            };
+
+            // First time: Setup the object store
+            req.onupgradeneeded = (event) => {
+                const db = event.target.result;
+                const objectStore = db.createObjectStore("logs", {
+                    autoIncrement: true
+                })
+                objectStore.transaction.oncomplete = function(event) {
+                    objectStore.add(
+                        new Date() + " ::: Log database was created."
+                    );
+                };
+            }
+        });
+    }
+
+    /**
+     * @return {Promise} Resolved when the logs have been flushed.
+     */
+    flush() {
+        if (!this.db) {
+            // not connected yet or user rejected access for us to r/w to the db
+            return Promise.reject(new Error("No connected database"));
+        }
+        const lines = this.logger.flush();
+        if (lines.length === 0) {
+            return Promise.resolve();
+        }
+        return new Promise((resolve, reject) => {
+            let txn = this.db.transaction("logs", "readwrite");
+            let objStore = txn.objectStore("logs");
+            objStore.add(lines);
+            txn.oncomplete = (event) => {
+                resolve();
+            };
+            txn.onerror = (event) => {
+                console.error("Failed to flush logs : " + event.target.errorCode);
+                reject(new Error("Failed to write logs: " + event.target.errorCode));
+            }
+        });
+    }
+}
+
+
+let store = null;
+let inited = false;
+module.exports = {
+
+    /**
+     * Configure rage shaking support for sending bug reports.
+     * Modifies globals.
+     */
+    init: function() {
+        if (inited || !window.indexedDB) {
+            return;
+        }
+        store = new IndexedDBLogStore(window.indexedDB, new ConsoleLogger());
+        inited = true;
+        return store.connect();
+    },
+
+    /**
+     * Force-flush the logs to storage.
+     * @return {Promise} Resolved when the logs have been flushed.
+     */
+    flush: function() {
+        if (!store) {
+            return;
+        }
+        return store.flush();
+    },
+
+    /**
+     * Send a bug report.
+     * @param {string} userText Any additional user input.
+     * @return {Promise} Resolved when the bug report is sent.
+     */
+    sendBugReport: function(userText) {
+    }
+};
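
A minimal, standalone sketch of the console-wrapping pattern ConsoleLogger relies on, assuming a browser environment (the names capturedLines and patchConsoleLog are illustrative, not part of the patch):

    // Wrap console.log so every call is recorded before being passed through.
    const capturedLines = [];
    function patchConsoleLog(consoleObj) {
        const originalLog = consoleObj.log.bind(consoleObj);
        consoleObj.log = (...args) => {
            // ISO timestamp + level + space-joined args, as in ConsoleLogger.log()
            capturedLines.push(new Date().toISOString() + " I " + args.join(" ") + "\n");
            originalLog(...args);
        };
    }
    patchConsoleLog(window.console);
    console.log("hello"); // still reaches dev tools, and is now in capturedLines too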

From 5828ab107707752c76934484bad96ad8b1ff35f5 Mon Sep 17 00:00:00 2001
From: Kegan Dougal <kegan@matrix.org>
Date: Wed, 18 Jan 2017 16:27:11 +0000
Subject: [PATCH 02/20] Generate unique IDs for each JS runtime to accommodate
 multiple tabs

---
 src/vector/rageshake.js | 31 +++++++++++++++++++++++--------
 1 file changed, 23 insertions(+), 8 deletions(-)

diff --git a/src/vector/rageshake.js b/src/vector/rageshake.js
index fe768630..5739a934 100644
--- a/src/vector/rageshake.js
+++ b/src/vector/rageshake.js
@@ -20,7 +20,8 @@ limitations under the License.
 //  - We use IndexedDB to persist logs because it has generous disk space limits compared to local storage. IndexedDB does not work
 //    in incognito mode, in which case this module will not be able to write logs to disk. However, the logs will still be stored
 //    in-memory, so can still be submitted in a bug report should the user wish to: we can also store more logs in-memory than in
-//    local storage, which does work in incognito mode.
+//    local storage, which does work in incognito mode. We also need to handle the case where there are 2+ tabs. Each JS runtime
+//    generates a random string which serves as the "ID" for that tab/session. These IDs are stored along with the log lines.
 //  - Bug reports are sent as a POST over HTTPS: it purposefully does not use Matrix as bug reports may be made when Matrix is
 //    not responsive (which may be the cause of the bug).
 
@@ -77,6 +78,8 @@ class IndexedDBLogStore {
     constructor(indexedDB, logger) {
         this.indexedDB = indexedDB;
         this.logger = logger;
+        this.id = "instance-" + Date.now();
+        this.index = 0;
         this.db = null;
     }
 
@@ -103,13 +106,13 @@ class IndexedDBLogStore {
             req.onupgradeneeded = (event) => {
                 const db = event.target.result;
                 const objectStore = db.createObjectStore("logs", {
-                    autoIncrement: true
-                })
-                objectStore.transaction.oncomplete = function(event) {
-                    objectStore.add(
+                    keyPath: ["id", "index"]
+                });
+                objectStore.add(
+                    this._generateLogEntry(
                         new Date() + " ::: Log database was created."
-                    );
-                };
+                    )
+                );
             }
         });
     }
@@ -129,7 +132,7 @@ class IndexedDBLogStore {
         return new Promise((resolve, reject) => {
             let txn = this.db.transaction("logs", "readwrite");
             let objStore = txn.objectStore("logs");
-            objStore.add(lines);
+            objStore.add(this._generateLogEntry(lines));
             txn.oncomplete = (event) => {
                 resolve();
             };
@@ -139,6 +142,14 @@ class IndexedDBLogStore {
             }
         });
     }
+
+    _generateLogEntry(lines) {
+        return {
+            id: this.id,
+            lines: lines,
+            index: this.index++
+        };
+    }
 }
 
 
@@ -176,5 +187,9 @@ module.exports = {
      * @return {Promise} Resolved when the bug report is sent.
      */
     sendBugReport: function(userText) {
+        // To gather all the logs, we first query for every log entry with index "0", this will let us
+        // know all the IDs from different tabs/sessions.
+
+        // Send logs grouped by ID
     }
 };
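
A sketch of the record shape the compound keyPath implies (values are illustrative): each flush stores one object per (tab, counter) pair, and IndexedDB derives the primary key from the record itself.

    // With keyPath: ["id", "index"], the primary key of this record would be
    // ["instance-1484755213000", 2].
    const exampleEntry = {
        id: "instance-1484755213000",  // per-tab ID, built from Date.now()
        index: 2,                      // monotonic counter within that tab
        lines: "2017-01-18T16:27:11.000Z I some log line\n",
    };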

From 811086ac8ec1ea1b7cd5d5b475712d5277ce9af3 Mon Sep 17 00:00:00 2001
From: Kegan Dougal <kegan@matrix.org>
Date: Wed, 18 Jan 2017 17:18:02 +0000
Subject: [PATCH 03/20] Comment how we should consume logs

---
 src/vector/rageshake.js | 44 ++++++++++++++++++++++++++++++++++++-----
 1 file changed, 39 insertions(+), 5 deletions(-)

diff --git a/src/vector/rageshake.js b/src/vector/rageshake.js
index 5739a934..9b6106ed 100644
--- a/src/vector/rageshake.js
+++ b/src/vector/rageshake.js
@@ -23,7 +23,9 @@ limitations under the License.
 //    local storage, which does work in incognito mode. We also need to handle the case where there are 2+ tabs. Each JS runtime
 //    generates a random string which serves as the "ID" for that tab/session. These IDs are stored along with the log lines.
 //  - Bug reports are sent as a POST over HTTPS: it purposefully does not use Matrix as bug reports may be made when Matrix is
-//    not responsive (which may be the cause of the bug).
+//    not responsive (which may be the cause of the bug). We send the most recent N MB of UTF-8 log data, starting with the most
+//    recent, which we know because the "ID"s are actually timestamps. We then purge the remaining logs. We also do this check
+//    on startup to prevent logs from accumulating.
 
 const FLUSH_RATE_MS = 30 * 1000;
 
@@ -143,6 +145,39 @@ class IndexedDBLogStore {
         });
     }
 
+    /**
+     * Consume the most recent logs and return them. Older logs which are not returned are deleted at the same time,
+     * so this can be called at startup to do house-keeping to keep the logs from growing too large.
+     *
+     * @param {boolean} clearAll True to clear the most recent logs returned in addition to the
+     * older logs. This is desirable when sending bug reports as we do not want to submit the
+     * same logs twice. This is not desirable when doing house-keeping at startup in case they
+     * want to submit a bug report later.
+     * @return {Promise<Object[]>} Resolves to an array of objects. The array is sorted in time (newest first) based on
+     * when the log file was created (the log ID). The objects have said log ID in an "id" field and "lines" which is a
+     * big string with all the new-line delimited logs.
+     */
+    consume(clearAll) {
+        const MAX_LOG_SIZE = 1024 * 1024 * 50; // 50 MB
+        // To gather all the logs, we first query for every log entry with index "0", this will let us
+        // know all the IDs from different tabs/sessions.
+        return new Promise((resolve, reject) => {
+            // let txn = this.db.transaction("logs", "readonly");
+            // let objectStore = txn.objectStore("logs");
+
+            // we know each entry has a unique ID, and we know IDs are timestamps, so accumulate all the IDs,
+            // ignoring the logs for now, and sort them to work out the correct log ID ordering.
+
+            // Starting with the most recent ID, fetch the logs (all indices) for said ID and accumulate them
+            // in order. After fetching ALL the logs for an ID, recheck the total length of the logs to work out
+            // if we have exceeded the max size cutoff for "recent" logs.
+
+            // Remove all logs that are older than the cutoff (or the entire logs if clearAll is set).
+
+            // Return the logs that are within the cutoff.
+        });
+    }
+
     _generateLogEntry(lines) {
         return {
             id: this.id,
@@ -187,9 +222,8 @@ module.exports = {
      * @return {Promise} Resolved when the bug report is sent.
      */
     sendBugReport: function(userText) {
-        // To gather all the logs, we first query for every log entry with index "0", this will let us
-        // know all the IDs from different tabs/sessions.
-
-        // Send logs grouped by ID
+        return store.consume(true).then((data) => {
+            // Send logs grouped by ID
+        });
     }
 };

From 89d514a532827f5eb576a43138228a96a1606c02 Mon Sep 17 00:00:00 2001
From: Kegan Dougal <kegan@matrix.org>
Date: Thu, 19 Jan 2017 12:02:19 +0000
Subject: [PATCH 04/20] Query IndexedDB for all results with index=0

---
 src/vector/rageshake.js | 46 ++++++++++++++++++++++++++++++++++++-----
 1 file changed, 41 insertions(+), 5 deletions(-)

diff --git a/src/vector/rageshake.js b/src/vector/rageshake.js
index 9b6106ed..65462d15 100644
--- a/src/vector/rageshake.js
+++ b/src/vector/rageshake.js
@@ -24,7 +24,7 @@ limitations under the License.
 //    generates a random string which serves as the "ID" for that tab/session. These IDs are stored along with the log lines.
 //  - Bug reports are sent as a POST over HTTPS: it purposefully does not use Matrix as bug reports may be made when Matrix is
 //    not responsive (which may be the cause of the bug). We send the most recent N MB of UTF-8 log data, starting with the most
-//    recent, which we know because the "ID"s are actually timestamps. We then purge the remaining logs. We also do this check
+//    recent, which we know because the "ID"s are actually timestamps. We then purge the remaining logs. We also do this purge
 //    on startup to prevent logs from accumulating.
 
 const FLUSH_RATE_MS = 30 * 1000;
@@ -38,6 +38,7 @@ class ConsoleLogger {
         const consoleFunctionsToLevels = {
             log: "I",
             info: "I",
+            warn: "W",
             error: "E",
         };
         Object.keys(consoleFunctionsToLevels).forEach((fnName) => {
@@ -110,6 +111,12 @@ class IndexedDBLogStore {
                 const objectStore = db.createObjectStore("logs", {
                     keyPath: ["id", "index"]
                 });
+                // Keys in the database look like: [ "instance-148938490", 0 ]
+                // Later on we need to query for everything with index=0, and query everything for an instance id.
+                // In order to do this, we need to set up indexes on both "id" and "index".
+                objectStore.createIndex("index", "index", { unique: false });
+                objectStore.createIndex("id", "id", { unique: false });
+
                 objectStore.add(
                     this._generateLogEntry(
                         new Date() + " ::: Log database was created."
@@ -161,9 +168,11 @@ class IndexedDBLogStore {
         const MAX_LOG_SIZE = 1024 * 1024 * 50; // 50 MB
         // To gather all the logs, we first query for every log entry with index "0", this will let us
         // know all the IDs from different tabs/sessions.
-        return new Promise((resolve, reject) => {
-            // let txn = this.db.transaction("logs", "readonly");
-            // let objectStore = txn.objectStore("logs");
+        const txn = this.db.transaction("logs", "readonly");
+        const objectStore = txn.objectStore("logs");
+        return selectQuery(objectStore.index("index"), IDBKeyRange.only(0), (cursor) => cursor.value.id).then((res) => {
+            console.log("Instances: ", res);
+        });
 
             // we know each entry has a unique ID, and we know IDs are timestamps, so accumulate all the IDs,
             // ignoring the logs for now, and sort them to work out the correct log ID ordering.
@@ -175,7 +184,6 @@ class IndexedDBLogStore {
             // Remove all logs that are older than the cutoff (or the entire logs if clearAll is set).
 
             // Return the logs that are within the cutoff.
-        });
     }
 
     _generateLogEntry(lines) {
@@ -187,6 +195,34 @@ class IndexedDBLogStore {
     }
 }
 
+/**
+ * Helper method to collect results from a Cursor and promiseify it.
+ * @param {ObjectStore|Index} store The store to perform openCursor on.
+ * @param {IDBKeyRange=} keyRange Optional key range to apply on the cursor.
+ * @param {Function} resultMapper A function which is repeatedly called with a Cursor.
+ * Return the data you want to keep.
+ * @return {Promise<T[]>} Resolves to an array of whatever you returned from resultMapper.
+ */
+function selectQuery(store, keyRange, resultMapper) {
+    const query = store.openCursor(keyRange);
+    return new Promise((resolve, reject) => {
+        let results = [];
+        query.onerror = (event) => {
+            reject(new Error("Query failed: " + event.target.errorCode));
+        };
+        // collect results
+        query.onsuccess = (event) => {
+            const cursor = event.target.result;
+            if (!cursor) {
+                resolve(results);
+                return; // end of results
+            }
+            results.push(resultMapper(cursor));
+            cursor.continue();
+        }
+    });
+}
+
 
 let store = null;
 let inited = false;
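
As a hedged illustration, the two indexes created in onupgradeneeded are meant to be queried through selectQuery roughly as follows (db is assumed to be the open handle from connect(); the instance ID is made up):

    const objectStore = db.transaction("logs", "readonly").objectStore("logs");

    // Every instance writes an entry with index === 0, so this yields all
    // tab/session IDs.
    selectQuery(objectStore.index("index"), IDBKeyRange.only(0),
        (cursor) => cursor.value.id
    ).then((ids) => console.log("Instances:", ids));

    // All stored line chunks for a single instance ID.
    selectQuery(objectStore.index("id"),
        IDBKeyRange.only("instance-1484755213000"),
        (cursor) => cursor.value.lines
    ).then((chunks) => console.log(chunks.join("")));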

From 608c1b02081a75617a9d9a9ed483a962c7f0940a Mon Sep 17 00:00:00 2001
From: Kegan Dougal <kegan@matrix.org>
Date: Thu, 19 Jan 2017 15:03:47 +0000
Subject: [PATCH 05/20] Finish implementing consume()

- Fetches all logs in order and concatenates correctly.
- Purges old logs correctly.
---
 src/vector/index.js     |   1 +
 src/vector/rageshake.js | 133 ++++++++++++++++++++++++++++++++++------
 2 files changed, 116 insertions(+), 18 deletions(-)

diff --git a/src/vector/index.js b/src/vector/index.js
index b965f01d..d773db85 100644
--- a/src/vector/index.js
+++ b/src/vector/index.js
@@ -39,6 +39,7 @@ require('draft-js/dist/Draft.css');
 const rageshake = require("./rageshake");
 rageshake.init().then(() => {
     console.log("Initialised rageshake");
+    rageshake.sendBugReport();
 }, (err) => {
     console.error("Failed to initialise rageshake: " + err);
 });
diff --git a/src/vector/rageshake.js b/src/vector/rageshake.js
index 65462d15..6093515d 100644
--- a/src/vector/rageshake.js
+++ b/src/vector/rageshake.js
@@ -166,24 +166,120 @@ class IndexedDBLogStore {
      */
     consume(clearAll) {
         const MAX_LOG_SIZE = 1024 * 1024 * 50; // 50 MB
-        // To gather all the logs, we first query for every log entry with index "0", this will let us
-        // know all the IDs from different tabs/sessions.
-        const txn = this.db.transaction("logs", "readonly");
-        const objectStore = txn.objectStore("logs");
-        return selectQuery(objectStore.index("index"), IDBKeyRange.only(0), (cursor) => cursor.value.id).then((res) => {
-            console.log("Instances: ", res);
+        const db = this.db;
+
+        // Returns: a string representing the concatenated logs for this ID.
+        function fetchLogs(id) {
+            const o = db.transaction("logs", "readonly").objectStore("logs");
+            return selectQuery(o.index("id"), IDBKeyRange.only(id), (cursor) => {
+                return {
+                    lines: cursor.value.lines,
+                    index: cursor.value.index,
+                }
+            }).then((linesArray) => {
+                // We have been storing logs periodically, so string them all together *in order of index* now
+                linesArray.sort((a, b) => {
+                    return a.index - b.index;
+                })
+                return linesArray.map((l) => l.lines).join("");
+            });
+        }
+
+        // Returns: A sorted array of log IDs. (newest first)
+        function fetchLogIds() {
+            // To gather all the log IDs, query for every log entry with index "0", this will let us
+            // know all the IDs from different tabs/sessions.
+            const o = db.transaction("logs", "readonly").objectStore("logs");
+            return selectQuery(o.index("index"), IDBKeyRange.only(0), (cursor) => cursor.value.id).then((res) => {
+                // we know each entry has a unique ID, and we know IDs are timestamps, so accumulate all the IDs,
+                // ignoring the logs for now, and sort them to work out the correct log ID ordering, newest first.
+                // E.g. [ "instance-1484827160051", "instance-1374827160051", "instance-1000007160051"]
+                return res.sort().reverse();
+            });
+        }
+
+        function deleteLogs(id) {
+            return new Promise((resolve, reject) => {
+                const txn = db.transaction("logs", "readwrite");
+                const o = txn.objectStore("logs");
+                // only load the key path, not the data which may be huge
+                const query = o.index("id").openKeyCursor(IDBKeyRange.only(id));
+                query.onsuccess = (event) => {
+                    const cursor = event.target.result;
+                    if (!cursor) {
+                        return;
+                    }
+                    o.delete(cursor.primaryKey);
+                    cursor.continue();
+                }
+                txn.oncomplete = () => {
+                    resolve();
+                };
+                txn.onerror = (event) => {
+                    reject(new Error(`Failed to delete logs for '${id}' : ${event.target.errorCode}`));
+                }
+            });
+        }
+
+        // Ideally we'd just use coroutines and a for loop but riot-web doesn't support async/await so instead
+        // recursively fetch logs up to the given threshold. We can't cheat and fetch all the logs
+        // from all time, as we may OOM if we do so.
+        // Returns: Promise<Object[]> : Each object having 'id' and 'lines'. Same ordering as logIds.
+        function fetchLogsToThreshold(logIds, threshold, logs) {
+            // Base case: check log size and return if bigger than threshold
+            let size = 0;
+            logs.forEach((l) => {
+                size += l.lines.length;
+            });
+            if (size > threshold) {
+                return Promise.resolve(logs);
+            }
+
+            // fetch logs for the first element
+            let logId = logIds.shift();
+            if (!logId) {
+                // no more entries
+                return Promise.resolve(logs);
+            }
+            return fetchLogs(logId).then((lines) => {
+                // add result to logs
+                logs.push({
+                    lines: lines,
+                    id: logId,
+                });
+                // recurse with the next log ID. TODO: Stack overflow risk?
+                return fetchLogsToThreshold(logIds, threshold, logs);
+            })
+        }
+
+        let allLogIds = [];
+        return fetchLogIds().then((logIds) => {
+            allLogIds = logIds.map((id) => id); // copy the array as we'll modify it when fetching logs
+            return fetchLogsToThreshold(logIds, MAX_LOG_SIZE, []);
+        }).then((logs) => {
+            // Remove all logs that are beyond the threshold (not in logs), or the entire logs if clearAll was set.
+            let removeLogIds = allLogIds;
+            if (!clearAll) {
+                removeLogIds = removeLogIds.filter((id) => {
+                    for (let i = 0; i < logs.length; i++) {
+                        if (logs[i].id === id) {
+                            return false; // do not remove logs that we're about to return to the caller.
+                        }
+                    }
+                    return true;
+                });
+            }
+            if (removeLogIds.length > 0) {
+                console.log("Removing logs: ", removeLogIds);
+                // Don't promise chain this because it's non-fatal if we can't clean up logs.
+                Promise.all(removeLogIds.map((id) => deleteLogs(id))).then(() => {
+                    console.log(`Removed ${removeLogIds.length} old logs.`);
+                }, (err) => {
+                    console.error(err);
+                })
+            }
+            return logs;
         });
-
-            // we know each entry has a unique ID, and we know IDs are timestamps, so accumulate all the IDs,
-            // ignoring the logs for now, and sort them to work out the correct log ID ordering.
-
-            // Starting with the most recent ID, fetch the logs (all indices) for said ID and accumulate them
-            // in order. After fetching ALL the logs for an ID, recheck the total length of the logs to work out
-            // if we have exceeded the max size cutoff for "recent" logs.
-
-            // Remove all logs that are older than the cutoff (or the entire logs if clearAll is set).
-
-            // Return the logs that are within the cutoff.
     }
 
     _generateLogEntry(lines) {
@@ -258,8 +354,9 @@ module.exports = {
      * @return {Promise} Resolved when the bug report is sent.
      */
     sendBugReport: function(userText) {
-        return store.consume(true).then((data) => {
+        return store.consume(false).then((logs) => {
             // Send logs grouped by ID
+            console.log(logs);
         });
     }
 };
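
For concreteness, a sketch of the value consume(false) resolves to after this patch, in the order produced by fetchLogIds (newest instance first); the IDs and lines are illustrative:

    const exampleResult = [
        {
            id: "instance-1484841827000",  // the most recent tab/session
            lines: "2017-01-19T15:03:47.000Z I Initialised rageshake\n" +
                   "2017-01-19T15:04:01.000Z E Failed to fetch something\n",
        },
        {
            id: "instance-1484755213000",  // an older session
            lines: "2017-01-18T16:00:13.000Z I Initialised rageshake\n",
        },
    ];
    // Instances beyond the 50 MB cutoff (or every instance, if clearAll was
    // true) are deleted from the "logs" store as a side effect.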

From 81d437ac1e16104f32fef3c0d694a215fb7129a5 Mon Sep 17 00:00:00 2001
From: Kegan Dougal <kegan@matrix.org>
Date: Thu, 19 Jan 2017 15:47:55 +0000
Subject: [PATCH 06/20] POST reports to localhost for now. Also send live
 console logs

---
 src/vector/index.js     |  2 +-
 src/vector/rageshake.js | 75 +++++++++++++++++++++++++++++++++++------
 2 files changed, 66 insertions(+), 11 deletions(-)

diff --git a/src/vector/index.js b/src/vector/index.js
index d773db85..d1d457e9 100644
--- a/src/vector/index.js
+++ b/src/vector/index.js
@@ -39,7 +39,7 @@ require('draft-js/dist/Draft.css');
 const rageshake = require("./rageshake");
 rageshake.init().then(() => {
     console.log("Initialised rageshake");
-    rageshake.sendBugReport();
+    rageshake.cleanup();
 }, (err) => {
     console.error("Failed to initialise rageshake: " + err);
 });
diff --git a/src/vector/rageshake.js b/src/vector/rageshake.js
index 6093515d..1f3dbc7d 100644
--- a/src/vector/rageshake.js
+++ b/src/vector/rageshake.js
@@ -14,6 +14,8 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
+import request from "browser-request";
+
 // This module contains all the code needed to log the console, persist it to disk and submit bug reports. Rationale is as follows:
 //  - Monkey-patching the console is preferable to having a log library because we can catch logs by other libraries more easily,
 //    without having to all depend on the same log framework / pass the logger around.
@@ -321,20 +323,27 @@ function selectQuery(store, keyRange, resultMapper) {
 
 
 let store = null;
-let inited = false;
+let logger = null;
+let initPromise = null;
 module.exports = {
 
     /**
      * Configure rage shaking support for sending bug reports.
      * Modifies globals.
+     * @return {Promise} Resolves when set up.
      */
     init: function() {
-        if (inited || !window.indexedDB) {
-            return;
+        if (initPromise) {
+            return initPromise;
         }
-        store = new IndexedDBLogStore(window.indexedDB, new ConsoleLogger());
-        inited = true;
-        return store.connect();
+        logger = new ConsoleLogger();
+        if (window.indexedDB) {
+            store = new IndexedDBLogStore(window.indexedDB, logger);
+            initPromise = store.connect();
+            return initPromise;
+        }
+        initPromise = Promise.resolve();
+        return initPromise;
     },
 
     /**
@@ -343,20 +352,66 @@ module.exports = {
      */
     flush: function() {
         if (!store) {
-            return;
+            return Promise.resolve();
         }
         return store.flush();
     },
 
+    /**
+     * Clean up old logs.
+     * @return Promise Resolves if cleaned logs.
+     */
+    cleanup: function() {
+        if (!store) {
+            return Promise.resolve();
+        }
+        return store.consume(false);
+    },
+
     /**
      * Send a bug report.
      * @param {string} userText Any additional user input.
      * @return {Promise} Resolved when the bug report is sent.
      */
     sendBugReport: function(userText) {
-        return store.consume(false).then((logs) => {
-            // Send logs grouped by ID
-            console.log(logs);
+        if (!logger) {
+            return Promise.reject(new Error("No console logger, did you forget to call init()?"));
+        }
+        // If in incognito mode, store is null, but we still want bug report sending to work going off
+        // the in-memory console logs.
+        let promise = Promise.resolve([]);
+        if (store) {
+            promise = store.consume(false); //  TODO Swap to true to remove all logs
+        }
+        return promise.then((logs) => {
+            // and add the most recent console logs which won't be in the store yet.
+            const consoleLogs = logger.flush(); // remove logs from console
+            const currentId = store ? store.id : "-";
+            logs.unshift({
+                lines: consoleLogs,
+                id: currentId,
+            });
+            return new Promise((resolve, reject) => {
+                request({
+                    method: "POST",
+                    url: "http://localhost:1337",
+                    body: {
+                        logs: logs,
+                        text: userText || "User did not supply any additional text.",
+                    },
+                    json: true,
+                }, (err, res) => {
+                    if (err) {
+                        reject(err);
+                        return;
+                    }
+                    if (res.status < 200 || res.status >= 400) {
+                        reject(new Error(`HTTP ${res.status}`));
+                        return;
+                    }
+                    resolve();
+                })
+            });
         });
     }
 };
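
A sketch of the request body the endpoint receives, serialised as JSON by browser-request (the values, including the instance IDs, are illustrative):

    const exampleBody = {
        logs: [
            // unshifted first: the live, not-yet-flushed console logs for this tab
            { id: "instance-1484841827000",
              lines: "2017-01-19T15:47:55.000Z I Initialised rageshake\n" },
            // then whatever consume() pulled out of IndexedDB
            { id: "instance-1484755213000",
              lines: "2017-01-18T16:00:13.000Z I Initialised rageshake\n" },
        ],
        text: "User did not supply any additional text.",
    };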

From bf887e82fec360a237bb1262ed4426311b5f7b18 Mon Sep 17 00:00:00 2001
From: Kegan Dougal <kegan@matrix.org>
Date: Thu, 19 Jan 2017 16:40:54 +0000
Subject: [PATCH 07/20] Swap to async/await rather than promise chains

Since we do in fact support coroutines!
---
 src/vector/rageshake.js | 161 ++++++++++++++++------------------------
 1 file changed, 65 insertions(+), 96 deletions(-)

diff --git a/src/vector/rageshake.js b/src/vector/rageshake.js
index 1f3dbc7d..299a8497 100644
--- a/src/vector/rageshake.js
+++ b/src/vector/rageshake.js
@@ -166,7 +166,7 @@ class IndexedDBLogStore {
      * when the log file was created (the log ID). The objects have said log ID in an "id" field and "lines" which is a
      * big string with all the new-line delimited logs.
      */
-    consume(clearAll) {
+    async consume(clearAll) {
         const MAX_LOG_SIZE = 1024 * 1024 * 50; // 50 MB
         const db = this.db;
 
@@ -223,65 +223,36 @@ class IndexedDBLogStore {
             });
         }
 
-        // Ideally we'd just use coroutines and a for loop but riot-web doesn't support async/await so instead
-        // recursively fetch logs up to the given threshold. We can't cheat and fetch all the logs
-        // from all time, as we may OOM if we do so.
-        // Returns: Promise<Object[]> : Each object having 'id' and 'lines'. Same ordering as logIds.
-        function fetchLogsToThreshold(logIds, threshold, logs) {
-            // Base case: check log size and return if bigger than threshold
-            let size = 0;
-            logs.forEach((l) => {
-                size += l.lines.length;
+        let allLogIds = await fetchLogIds();
+        let removeLogIds = [];
+        let logs = [];
+        let size = 0;
+        for (let i = 0; i < allLogIds.length; i++) {
+            let lines = await fetchLogs(allLogIds[i]);
+            logs.push({
+                lines: lines,
+                id: allLogIds[i],
             });
-            if (size > threshold) {
-                return Promise.resolve(logs);
+            size += lines.length;
+            if (size > MAX_LOG_SIZE) {
+                // the remaining log IDs should be removed. If we go out of bounds this is just []
+                removeLogIds = allLogIds.slice(i + 1);
+                break;
             }
-
-            // fetch logs for the first element
-            let logId = logIds.shift();
-            if (!logId) {
-                // no more entries
-                return Promise.resolve(logs);
-            }
-            return fetchLogs(logId).then((lines) => {
-                // add result to logs
-                logs.push({
-                    lines: lines,
-                    id: logId,
-                });
-                // recurse with the next log ID. TODO: Stack overflow risk?
-                return fetchLogsToThreshold(logIds, threshold, logs);
-            })
         }
-
-        let allLogIds = [];
-        return fetchLogIds().then((logIds) => {
-            allLogIds = logIds.map((id) => id); // deep copy array as we'll modify it when fetching logs
-            return fetchLogsToThreshold(logIds, MAX_LOG_SIZE, []);
-        }).then((logs) => {
-            // Remove all logs that are beyond the threshold (not in logs), or the entire logs if clearAll was set.
-            let removeLogIds = allLogIds;
-            if (!clearAll) {
-                removeLogIds = removeLogIds.filter((id) => {
-                    for (let i = 0; i < logs.length; i++) {
-                        if (logs[i].id === id) {
-                            return false; // do not remove logs that we're about to return to the caller.
-                        }
-                    }
-                    return true;
-                });
-            }
-            if (removeLogIds.length > 0) {
-                console.log("Removing logs: ", removeLogIds);
-                // Don't promise chain this because it's non-fatal if we can't clean up logs.
-                Promise.all(removeLogIds.map((id) => deleteLogs(id))).then(() => {
-                    console.log(`Removed ${removeLogIds.length} old logs.`);
-                }, (err) => {
-                    console.error(err);
-                })
-            }
-            return logs;
-        });
+        if (clearAll) {
+            removeLogIds = allLogIds;
+        }
+        if (removeLogIds.length > 0) {
+            console.log("Removing logs: ", removeLogIds);
+            // Don't await this because it's non-fatal if we can't clean up logs.
+            Promise.all(removeLogIds.map((id) => deleteLogs(id))).then(() => {
+                console.log(`Removed ${removeLogIds.length} old logs.`);
+            }, (err) => {
+                console.error(err);
+            })
+        }console.log("async consumeeeee");
+        return logs;
     }
 
     _generateLogEntry(lines) {
@@ -350,22 +321,22 @@ module.exports = {
      * Force-flush the logs to storage.
      * @return {Promise} Resolved when the logs have been flushed.
      */
-    flush: function() {
+    flush: async function() {
         if (!store) {
-            return Promise.resolve();
+            return;
         }
-        return store.flush();
+        await store.flush();
     },
 
     /**
      * Clean up old logs.
      * @return Promise Resolves if cleaned logs.
      */
-    cleanup: function() {
+    cleanup: async function() {
         if (!store) {
-            return Promise.resolve();
+            return;
         }
-        return store.consume(false);
+        await store.consume(false);
     },
 
     /**
@@ -373,45 +344,43 @@ module.exports = {
      * @param {string} userText Any additional user input.
      * @return {Promise} Resolved when the bug report is sent.
      */
-    sendBugReport: function(userText) {
+    sendBugReport: async function(userText) {
         if (!logger) {
-            return Promise.reject(new Error("No console logger, did you forget to call init()?"));
+            throw new Error("No console logger, did you forget to call init()?");
         }
         // If in incognito mode, store is null, but we still want bug report sending to work going off
         // the in-memory console logs.
-        let promise = Promise.resolve([]);
+        let logs = [];
         if (store) {
-            promise = store.consume(false); //  TODO Swap to true to remove all logs
+            logs = await store.consume(false);
         }
-        return promise.then((logs) => {
-            // and add the most recent console logs which won't be in the store yet.
-            const consoleLogs = logger.flush(); // remove logs from console
-            const currentId = store ? store.id : "-";
-            logs.unshift({
-                lines: consoleLogs,
-                id: currentId,
-            });
-            return new Promise((resolve, reject) => {
-                request({
-                    method: "POST",
-                    url: "http://localhost:1337",
-                    body: {
-                        logs: logs,
-                        text: userText || "User did not supply any additional text.",
-                    },
-                    json: true,
-                }, (err, res) => {
-                    if (err) {
-                        reject(err);
-                        return;
-                    }
-                    if (res.status < 200 || res.status >= 400) {
-                        reject(new Error(`HTTP ${res.status}`));
-                        return;
-                    }
-                    resolve();
-                })
-            });
+        // and add the most recent console logs which won't be in the store yet.
+        const consoleLogs = logger.flush(); // remove logs from console
+        const currentId = store ? store.id : "-";
+        logs.unshift({
+            lines: consoleLogs,
+            id: currentId,
+        });
+        await new Promise((resolve, reject) => {
+            request({
+                method: "POST",
+                url: "http://localhost:1337",
+                body: {
+                    logs: logs,
+                    text: userText || "User did not supply any additional text.",
+                },
+                json: true,
+            }, (err, res) => {
+                if (err) {
+                    reject(err);
+                    return;
+                }
+                if (res.status < 200 || res.status >= 400) {
+                    reject(new Error(`HTTP ${res.status}`));
+                    return;
+                }
+                resolve();
+            })
         });
     }
 };

From 3996d23b1996434723335ed0c3be423ba7298561 Mon Sep 17 00:00:00 2001
From: Kegan Dougal <kegan@matrix.org>
Date: Thu, 19 Jan 2017 16:49:25 +0000
Subject: [PATCH 08/20] Inject bug report endpoint URL from config.json

---
 config.sample.json      |  1 +
 src/vector/index.js     |  1 +
 src/vector/rageshake.js | 12 ++++++++++--
 3 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/config.sample.json b/config.sample.json
index e6384221..a65646ac 100644
--- a/config.sample.json
+++ b/config.sample.json
@@ -4,6 +4,7 @@
     "brand": "Riot",
     "integrations_ui_url": "https://scalar.vector.im/",
     "integrations_rest_url": "https://scalar.vector.im/api",
+    "bug_report_endpoint_url": "https://vector.im/bugs",
     "enableLabs": true,
     "roomDirectory": {
         "servers": [
diff --git a/src/vector/index.js b/src/vector/index.js
index d1d457e9..d1d85e06 100644
--- a/src/vector/index.js
+++ b/src/vector/index.js
@@ -241,6 +241,7 @@ async function loadApp() {
     let configError;
     try {
         configJson = await getConfig();
+        rageshake.setBugReportEndpoint(configJson.bug_report_endpoint_url);
     } catch (e) {
         configError = e;
     }
diff --git a/src/vector/rageshake.js b/src/vector/rageshake.js
index 299a8497..3fb9efdc 100644
--- a/src/vector/rageshake.js
+++ b/src/vector/rageshake.js
@@ -251,7 +251,7 @@ class IndexedDBLogStore {
             }, (err) => {
                 console.error(err);
             })
-        }console.log("async consumeeeee");
+        }
         return logs;
     }
 
@@ -296,6 +296,7 @@ function selectQuery(store, keyRange, resultMapper) {
 let store = null;
 let logger = null;
 let initPromise = null;
+let bugReportEndpoint = null;
 module.exports = {
 
     /**
@@ -339,6 +340,10 @@ module.exports = {
         await store.consume(false);
     },
 
+    setBugReportEndpoint: function(url) {
+        bugReportEndpoint = url;
+    },
+
     /**
      * Send a bug report.
      * @param {string} userText Any additional user input.
@@ -348,6 +353,9 @@ module.exports = {
         if (!logger) {
             throw new Error("No console logger, did you forget to call init()?");
         }
+        if (!bugReportEndpoint) {
+            throw new Error("No bug report endpoint has been set.");
+        }
         // If in incognito mode, store is null, but we still want bug report sending to work going off
         // the in-memory console logs.
         let logs = [];
@@ -364,7 +372,7 @@ module.exports = {
         await new Promise((resolve, reject) => {
             request({
                 method: "POST",
-                url: "http://localhost:1337",
+                url: bugReportEndpoint,
                 body: {
                     logs: logs,
                     text: userText || "User did not supply any additional text.",

From beba4d2ae36c4499e99a27258fb58b9a8e6e12b7 Mon Sep 17 00:00:00 2001
From: Kegan Dougal <kegan@matrix.org>
Date: Thu, 19 Jan 2017 17:40:21 +0000
Subject: [PATCH 09/20] Add version and user agent to bug report

---
 src/vector/rageshake.js | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/src/vector/rageshake.js b/src/vector/rageshake.js
index 3fb9efdc..e3cc6661 100644
--- a/src/vector/rageshake.js
+++ b/src/vector/rageshake.js
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
+import PlatformPeg from 'matrix-react-sdk/lib/PlatformPeg';
 import request from "browser-request";
 
 // This module contains all the code needed to log the console, persist it to disk and submit bug reports. Rationale is as follows:
@@ -356,6 +357,18 @@ module.exports = {
         if (!bugReportEndpoint) {
             throw new Error("No bug report endpoint has been set.");
         }
+
+        let version = "UNKNOWN";
+        try {
+            version = await PlatformPeg.get().getAppVersion();
+        }
+        catch (err) {} // PlatformPeg already logs this.
+
+        let userAgent = "UNKNOWN";
+        if (window.navigator && window.navigator.userAgent) {
+            userAgent = window.navigator.userAgent;
+        }
+
         // If in incognito mode, store is null, but we still want bug report sending to work going off
         // the in-memory console logs.
         let logs = [];
@@ -376,6 +389,8 @@ module.exports = {
                 body: {
                     logs: logs,
                     text: userText || "User did not supply any additional text.",
+                    version: version,
+                    user_agent: userAgent,
                 },
                 json: true,
             }, (err, res) => {

From 53a16158c75ff0b938b2383ad91829c9d23c891a Mon Sep 17 00:00:00 2001
From: Kegan Dougal <kegan@matrix.org>
Date: Thu, 19 Jan 2017 17:41:08 +0000
Subject: [PATCH 10/20] Remove logs when submitting bug reports

---
 src/vector/rageshake.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/vector/rageshake.js b/src/vector/rageshake.js
index e3cc6661..2b4d0b7d 100644
--- a/src/vector/rageshake.js
+++ b/src/vector/rageshake.js
@@ -373,7 +373,7 @@ module.exports = {
         // the in-memory console logs.
         let logs = [];
         if (store) {
-            logs = await store.consume(false);
+            logs = await store.consume(true);
         }
         // and add the most recent console logs which won't be in the store yet.
         const consoleLogs = logger.flush(); // remove logs from console

From ea860807c4fef075767e06aa19c33fd97f702846 Mon Sep 17 00:00:00 2001
From: Kegan Dougal <kegan@matrix.org>
Date: Fri, 20 Jan 2017 11:56:11 +0000
Subject: [PATCH 11/20] Add a monkeyPatch function rather than monkey-patching
 in the constructor

---
 src/vector/rageshake.js | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/src/vector/rageshake.js b/src/vector/rageshake.js
index 2b4d0b7d..e12ee329 100644
--- a/src/vector/rageshake.js
+++ b/src/vector/rageshake.js
@@ -36,7 +36,9 @@ const FLUSH_RATE_MS = 30 * 1000;
 class ConsoleLogger {
     constructor() {
         this.logs = "";
+    }
 
+    monkeyPatch(consoleObj) {
         // Monkey-patch console logging
         const consoleFunctionsToLevels = {
             log: "I",
@@ -46,8 +48,8 @@ class ConsoleLogger {
         };
         Object.keys(consoleFunctionsToLevels).forEach((fnName) => {
             const level = consoleFunctionsToLevels[fnName];
-            let originalFn = window.console[fnName].bind(window.console);
-            window.console[fnName] = (...args) => {
+            let originalFn = consoleObj[fnName].bind(consoleObj);
+            consoleObj[fnName] = (...args) => {
                 this.log(level, ...args);
                 originalFn(...args);
             }
@@ -310,6 +312,7 @@ module.exports = {
             return initPromise;
         }
         logger = new ConsoleLogger();
+        logger.monkeyPatch(window.console);
         if (window.indexedDB) {
             store = new IndexedDBLogStore(window.indexedDB, logger);
             initPromise = store.connect();
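
Taking the console object as a parameter also means the logger can be exercised against a stub rather than window.console; a hedged sketch (the stub and the expected output are illustrative):

    const logger = new ConsoleLogger();
    const fakeConsole = {
        log: () => {},
        info: () => {},
        warn: () => {},
        error: () => {},
    };
    logger.monkeyPatch(fakeConsole);
    fakeConsole.warn("disk", "is", "full");
    // logger.flush() would now return a line like:
    // "2017-01-20T11:56:11.000Z W disk is full\n"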

From ba1e166ac83ae0f6477d617705a3f333f8d4f74e Mon Sep 17 00:00:00 2001
From: Kegan Dougal <kegan@matrix.org>
Date: Fri, 20 Jan 2017 12:02:48 +0000
Subject: [PATCH 12/20] Line length 80 like we're still in the 80s

---
 src/vector/rageshake.js | 133 ++++++++++++++++++++++++++--------------
 1 file changed, 87 insertions(+), 46 deletions(-)

diff --git a/src/vector/rageshake.js b/src/vector/rageshake.js
index e12ee329..a3d39635 100644
--- a/src/vector/rageshake.js
+++ b/src/vector/rageshake.js
@@ -17,18 +17,26 @@ limitations under the License.
 import PlatformPeg from 'matrix-react-sdk/lib/PlatformPeg';
 import request from "browser-request";
 
-// This module contains all the code needed to log the console, persist it to disk and submit bug reports. Rationale is as follows:
-//  - Monkey-patching the console is preferable to having a log library because we can catch logs by other libraries more easily,
-//    without having to all depend on the same log framework / pass the logger around.
-//  - We use IndexedDB to persist logs because it has generous disk space limits compared to local storage. IndexedDB does not work
-//    in incognito mode, in which case this module will not be able to write logs to disk. However, the logs will still be stored
-//    in-memory, so can still be submitted in a bug report should the user wish to: we can also store more logs in-memory than in
-//    local storage, which does work in incognito mode. We also need to handle the case where there are 2+ tabs. Each JS runtime
-//    generates a random string which serves as the "ID" for that tab/session. These IDs are stored along with the log lines.
-//  - Bug reports are sent as a POST over HTTPS: it purposefully does not use Matrix as bug reports may be made when Matrix is
-//    not responsive (which may be the cause of the bug). We send the most recent N MB of UTF-8 log data, starting with the most
-//    recent, which we know because the "ID"s are actually timestamps. We then purge the remaining logs. We also do this purge
-//    on startup to prevent logs from accumulating.
+// This module contains all the code needed to log the console, persist it to
+// disk and submit bug reports. Rationale is as follows:
+//  - Monkey-patching the console is preferable to having a log library because
+//    we can catch logs by other libraries more easily, without having to all
+//    depend on the same log framework / pass the logger around.
+//  - We use IndexedDB to persist logs because it has generous disk space
+//    limits compared to local storage. IndexedDB does not work in incognito
+//    mode, in which case this module will not be able to write logs to disk.
+//    However, the logs will still be stored in-memory, so can still be
+//    submitted in a bug report should the user wish to: we can also store more
+//    logs in-memory than in local storage, which does work in incognito mode.
+//    We also need to handle the case where there are 2+ tabs. Each JS runtime
+//    generates a random string which serves as the "ID" for that tab/session.
+//    These IDs are stored along with the log lines.
+//  - Bug reports are sent as a POST over HTTPS: it purposefully does not use
+//    Matrix as bug reports may be made when Matrix is not responsive (which may
+//    be the cause of the bug). We send the most recent N MB of UTF-8 log data,
+//    starting with the most recent, which we know because the "ID"s are
+//    actually timestamps. We then purge the remaining logs. We also do this
+//    purge on startup to prevent logs from accumulating.
 
 const FLUSH_RATE_MS = 30 * 1000;
 
@@ -60,9 +68,10 @@ class ConsoleLogger {
         // We don't know what locale the user may be running so use ISO strings
         const ts = new Date().toISOString();
         // Some browsers support string formatting which we're not doing here
-        // so the lines are a little more ugly but easy to implement / quick to run.
+        // so the lines are a little more ugly but easy to implement / quick to
+        // run.
         // Example line:
-        // 2017-01-18T11:23:53.214Z W Failed to set badge count: Error setting badge. Message: Too many badges requests in queue.
+        // 2017-01-18T11:23:53.214Z W Failed to set badge count
         const line = `${ts} ${level} ${args.join(' ')}\n`;
         // Using + really is the quickest way in JS
         // http://jsperf.com/concat-vs-plus-vs-join
@@ -74,7 +83,8 @@ class ConsoleLogger {
      * @return {string} \n delimited log lines to flush.
      */
     flush() {
-        // The ConsoleLogger doesn't care how these end up on disk, it just flushes them to the caller.
+        // The ConsoleLogger doesn't care how these end up on disk, it just
+        // flushes them to the caller.
         const logsToFlush = this.logs;
         this.logs = "";
         return logsToFlush;
@@ -105,7 +115,9 @@ class IndexedDBLogStore {
             };
 
             req.onerror = (event) => {
-                const err = "Failed to open log database: " + event.target.errorCode;
+                const err = (
+                    "Failed to open log database: " + event.target.errorCode
+                );
                 console.error(err);
                 reject(new Error(err));
             };
@@ -117,8 +129,10 @@ class IndexedDBLogStore {
                     keyPath: ["id", "index"]
                 });
                 // Keys in the database look like: [ "instance-148938490", 0 ]
-                // Later on we need to query for everything with index=0, and query everything for an instance id.
-                // In order to do this, we need to set up indexes on both "id" and "index".
+                // Later on we need to query for everything with index=0, and
+                // query everything for an instance id.
+                // In order to do this, we need to set up indexes on both "id"
+                // and "index".
                 objectStore.createIndex("index", "index", { unique: false });
                 objectStore.createIndex("id", "id", { unique: false });
 
@@ -151,23 +165,30 @@ class IndexedDBLogStore {
                 resolve();
             };
             txn.onerror = (event) => {
-                console.error("Failed to flush logs : " + event.target.errorCode);
-                reject(new Error("Failed to write logs: " + event.target.errorCode));
+                console.error(
+                    "Failed to flush logs : " + event.target.errorCode
+                );
+                reject(
+                    new Error("Failed to write logs: " + event.target.errorCode)
+                );
             }
         });
     }
 
     /**
-     * Consume the most recent logs and return them. Older logs which are not returned are deleted at the same time,
-     * so this can be called at startup to do house-keeping to keep the logs from growing too large.
+     * Consume the most recent logs and return them. Older logs which are not
+     * returned are deleted at the same time, so this can be called at startup
+     * to do house-keeping to keep the logs from growing too large.
      *
-     * @param {boolean} clearAll True to clear the most recent logs returned in addition to the
-     * older logs. This is desirable when sending bug reports as we do not want to submit the
-     * same logs twice. This is not desirable when doing house-keeping at startup in case they
-     * want to submit a bug report later.
-     * @return {Promise<Object[]>} Resolves to an array of objects. The array is sorted in time (newest first) based on
-     * when the log file was created (the log ID). The objects have said log ID in an "id" field and "lines" which is a
-     * big string with all the new-line delimited logs.
+     * @param {boolean} clearAll True to clear the most recent logs returned in
+     * addition to the older logs. This is desirable when sending bug reports as
+     * we do not want to submit the same logs twice. This is not desirable when
+     * doing house-keeping at startup in case they want to submit a bug report
+     * later.
+     * @return {Promise<Object[]>} Resolves to an array of objects. The array is
+     * sorted in time (newest first) based on when the log file was created (the
+     * log ID). The objects have said log ID in an "id" field and "lines" which
+     * is a big string with all the new-line delimited logs.
      */
     async consume(clearAll) {
         const MAX_LOG_SIZE = 1024 * 1024 * 50; // 50 MB
@@ -176,13 +197,15 @@ class IndexedDBLogStore {
         // Returns: a string representing the concatenated logs for this ID.
         function fetchLogs(id) {
             const o = db.transaction("logs", "readonly").objectStore("logs");
-            return selectQuery(o.index("id"), IDBKeyRange.only(id), (cursor) => {
+            return selectQuery(o.index("id"), IDBKeyRange.only(id),
+            (cursor) => {
                 return {
                     lines: cursor.value.lines,
                     index: cursor.value.index,
                 }
             }).then((linesArray) => {
-                // We have been storing logs periodically, so string them all together *in order of index* now
+                // We have been storing logs periodically, so string them all
+                // together *in order of index* now
                 linesArray.sort((a, b) => {
                     return a.index - b.index;
                 })
@@ -192,13 +215,18 @@ class IndexedDBLogStore {
 
         // Returns: A sorted array of log IDs. (newest first)
         function fetchLogIds() {
-            // To gather all the log IDs, query for every log entry with index "0", this will let us
-            // know all the IDs from different tabs/sessions.
+            // To gather all the log IDs, query for every log entry with index
+            // "0", this will let us know all the IDs from different
+            // tabs/sessions.
             const o = db.transaction("logs", "readonly").objectStore("logs");
-            return selectQuery(o.index("index"), IDBKeyRange.only(0), (cursor) => cursor.value.id).then((res) => {
-                // we know each entry has a unique ID, and we know IDs are timestamps, so accumulate all the IDs,
-                // ignoring the logs for now, and sort them to work out the correct log ID ordering, newest first.
-                // E.g. [ "instance-1484827160051", "instance-1374827160051", "instance-1000007160051"]
+            return selectQuery(o.index("index"), IDBKeyRange.only(0),
+            (cursor) => cursor.value.id).then((res) => {
+                // we know each entry has a unique ID, and we know IDs are
+                // timestamps, so accumulate all the IDs, ignoring the logs for
+                // now, and sort them to work out the correct log ID ordering,
+                // newest first.
+                // E.g. [ "instance-1484827160051", "instance-1374827160051",
+                //        "instance-1000007160051"]
                 return res.sort().reverse();
             });
         }
@@ -221,7 +249,12 @@ class IndexedDBLogStore {
                     resolve();
                 };
                 txn.onerror = (event) => {
-                    reject(new Error(`Failed to delete logs for '${id}' : ${event.target.errorCode}`));
+                    reject(
+                        new Error(
+                            "Failed to delete logs for " +
+                            `'${id}' : ${event.target.errorCode}`
+                        )
+                    );
                 }
             });
         }
@@ -238,7 +271,8 @@ class IndexedDBLogStore {
             });
             size += lines.length;
             if (size > MAX_LOG_SIZE) {
-                // the remaining log IDs should be removed. If we go out of bounds this is just []
+                // the remaining log IDs should be removed. If we go out of
+                // bounds this is just []
                 removeLogIds = allLogIds.slice(i + 1);
                 break;
             }
@@ -248,7 +282,8 @@ class IndexedDBLogStore {
         }
         if (removeLogIds.length > 0) {
             console.log("Removing logs: ", removeLogIds);
-            // Don't await this because it's non-fatal if we can't clean up logs.
+            // Don't await this because it's non-fatal if we can't clean up
+            // logs.
             Promise.all(removeLogIds.map((id) => deleteLogs(id))).then(() => {
                 console.log(`Removed ${removeLogIds.length} old logs.`);
             }, (err) => {
@@ -271,9 +306,11 @@ class IndexedDBLogStore {
  * Helper method to collect results from a Cursor and promiseify it.
  * @param {ObjectStore|Index} store The store to perform openCursor on.
  * @param {IDBKeyRange=} keyRange Optional key range to apply on the cursor.
- * @param {Function} resultMapper A function which is repeatedly called with a Cursor.
+ * @param {Function} resultMapper A function which is repeatedly called with a
+ * Cursor.
  * Return the data you want to keep.
- * @return {Promise<T[]>} Resolves to an array of whatever you returned from resultMapper.
+ * @return {Promise<T[]>} Resolves to an array of whatever you returned from
+ * resultMapper.
  */
 function selectQuery(store, keyRange, resultMapper) {
     const query = store.openCursor(keyRange);
@@ -355,7 +392,9 @@ module.exports = {
      */
     sendBugReport: async function(userText) {
         if (!logger) {
-            throw new Error("No console logger, did you forget to call init()?");
+            throw new Error(
+                "No console logger, did you forget to call init()?"
+            );
         }
         if (!bugReportEndpoint) {
             throw new Error("No bug report endpoint has been set.");
@@ -372,8 +411,8 @@ module.exports = {
             userAgent = window.navigator.userAgent;
         }
 
-        // If in incognito mode, store is null, but we still want bug report sending to work going off
-        // the in-memory console logs.
+        // If in incognito mode, store is null, but we still want bug report
+        // sending to work going off the in-memory console logs.
         let logs = [];
         if (store) {
             logs = await store.consume(true);
@@ -391,7 +430,9 @@ module.exports = {
                 url: bugReportEndpoint,
                 body: {
                     logs: logs,
-                    text: userText || "User did not supply any additional text.",
+                    text: (
+                        userText || "User did not supply any additional text."
+                    ),
                     version: version,
                     user_agent: userAgent,
                 },

From 41c6294be2e3a6b23fad65771173819c74e35f6e Mon Sep 17 00:00:00 2001
From: Kegan Dougal <kegan@matrix.org>
Date: Fri, 20 Jan 2017 13:02:57 +0000
Subject: [PATCH 13/20] Remove clearAll from consume(): we want duplicate logs
 on multiple reports

---
 src/vector/rageshake.js | 14 +++-----------
 1 file changed, 3 insertions(+), 11 deletions(-)

diff --git a/src/vector/rageshake.js b/src/vector/rageshake.js
index a3d39635..a96b1f28 100644
--- a/src/vector/rageshake.js
+++ b/src/vector/rageshake.js
@@ -180,17 +180,12 @@ class IndexedDBLogStore {
      * returned are deleted at the same time, so this can be called at startup
      * to do house-keeping to keep the logs from growing too large.
      *
-     * @param {boolean} clearAll True to clear the most recent logs returned in
-     * addition to the older logs. This is desirable when sending bug reports as
-     * we do not want to submit the same logs twice. This is not desirable when
-     * doing house-keeping at startup in case they want to submit a bug report
-     * later.
      * @return {Promise<Object[]>} Resolves to an array of objects. The array is
      * sorted in time (oldest first) based on when the log file was created (the
      * log ID). The objects have said log ID in an "id" field and "lines" which
      * is a big string with all the new-line delimited logs.
      */
-    async consume(clearAll) {
+    async consume() {
         const MAX_LOG_SIZE = 1024 * 1024 * 50; // 50 MB
         const db = this.db;
 
@@ -277,9 +272,6 @@ class IndexedDBLogStore {
                 break;
             }
         }
-        if (clearAll) {
-            removeLogIds = allLogIds;
-        }
         if (removeLogIds.length > 0) {
             console.log("Removing logs: ", removeLogIds);
             // Don't await this because it's non-fatal if we can't clean up
@@ -378,7 +370,7 @@ module.exports = {
         if (!store) {
             return;
         }
-        await store.consume(false);
+        await store.consume();
     },
 
     setBugReportEndpoint: function(url) {
@@ -415,7 +407,7 @@ module.exports = {
         // sending to work going off the in-memory console logs.
         let logs = [];
         if (store) {
-            logs = await store.consume(true);
+            logs = await store.consume();
         }
         // and add the most recent console logs which won't be in the store yet.
         const consoleLogs = logger.flush(); // remove logs from console

From 378126e746bd78088996a82024ea5c4204955b7c Mon Sep 17 00:00:00 2001
From: Kegan Dougal <kegan@matrix.org>
Date: Fri, 20 Jan 2017 14:00:30 +0000
Subject: [PATCH 14/20] Add another store to keep last modified times

This makes it easier to get a list of all the log IDs. It also makes it
possible to order the logs by the *LAST* log line rather than the first (as
was previously the case), which is important for long-running tabs.
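As a rough sketch of the idea (the { id, ts } record shape matches the diff
below; the sample values here are made up), each instance writes one
last-modified record, and newest-first ordering falls out of a sort on "ts":

    // one record per instance in the new "logslastmod" store
    const records = [
        { id: "instance-0.123-1484827160051", ts: 1484827160051 },
        { id: "instance-0.456-1374827160051", ts: 1484999999999 },
    ];
    // order by last write time, not by when the tab was opened
    const ids = records.sort((a, b) => b.ts - a.ts).map((r) => r.id);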
---
 src/vector/rageshake.js | 69 +++++++++++++++++++++++++----------------
 1 file changed, 43 insertions(+), 26 deletions(-)

diff --git a/src/vector/rageshake.js b/src/vector/rageshake.js
index a96b1f28..f4fe4396 100644
--- a/src/vector/rageshake.js
+++ b/src/vector/rageshake.js
@@ -96,7 +96,7 @@ class IndexedDBLogStore {
     constructor(indexedDB, logger) {
         this.indexedDB = indexedDB;
         this.logger = logger;
-        this.id = "instance-" + Date.now();
+        this.id = "instance-" + Math.random() + Date.now();
         this.index = 0;
         this.db = null;
     }
@@ -125,22 +125,24 @@ class IndexedDBLogStore {
             // First time: Setup the object store
             req.onupgradeneeded = (event) => {
                 const db = event.target.result;
-                const objectStore = db.createObjectStore("logs", {
+                const logObjStore = db.createObjectStore("logs", {
                     keyPath: ["id", "index"]
                 });
                 // Keys in the database look like: [ "instance-148938490", 0 ]
-                // Later on we need to query for everything with index=0, and
-                // query everything for an instance id.
-                // In order to do this, we need to set up indexes on both "id"
-                // and "index".
-                objectStore.createIndex("index", "index", { unique: false });
-                objectStore.createIndex("id", "id", { unique: false });
+                // Later on we need to query everything based on an instance id.
+                // In order to do this, we need to set up an index on "id".
+                logObjStore.createIndex("id", "id", { unique: false });
 
-                objectStore.add(
+                logObjStore.add(
                     this._generateLogEntry(
                         new Date() + " ::: Log database was created."
                     )
                 );
+
+                const lastModifiedStore = db.createObjectStore("logslastmod", {
+                    keyPath: "id",
+                });
+                lastModifiedStore.add(this._generateLastModifiedTime());
             }
         });
     }
@@ -158,15 +160,17 @@ class IndexedDBLogStore {
             return Promise.resolve();
         }
         return new Promise((resolve, reject) => {
-            let txn = this.db.transaction("logs", "readwrite");
+            let txn = this.db.transaction(["logs", "logslastmod"], "readwrite");
             let objStore = txn.objectStore("logs");
             objStore.add(this._generateLogEntry(lines));
+            let lastModStore = txn.objectStore("logslastmod");
+            lastModStore.put(this._generateLastModifiedTime());
             txn.oncomplete = (event) => {
                 resolve();
             };
             txn.onerror = (event) => {
                 console.error(
-                    "Failed to flush logs : " + event.target.errorCode
+                    "Failed to flush logs : ", event
                 );
                 reject(
                     new Error("Failed to write logs: " + event.target.errorCode)
@@ -210,25 +214,28 @@ class IndexedDBLogStore {
 
         // Returns: A sorted array of log IDs. (newest first)
         function fetchLogIds() {
-            // To gather all the log IDs, query for every log entry with index
-            // "0", this will let us know all the IDs from different
-            // tabs/sessions.
-            const o = db.transaction("logs", "readonly").objectStore("logs");
-            return selectQuery(o.index("index"), IDBKeyRange.only(0),
-            (cursor) => cursor.value.id).then((res) => {
-                // we know each entry has a unique ID, and we know IDs are
-                // timestamps, so accumulate all the IDs, ignoring the logs for
-                // now, and sort them to work out the correct log ID ordering,
-                // newest first.
-                // E.g. [ "instance-1484827160051", "instance-1374827160051",
-                //        "instance-1000007160051"]
-                return res.sort().reverse();
+            // To gather all the log IDs, query for all records in logslastmod.
+            const o = db.transaction("logslastmod", "readonly").objectStore(
+                "logslastmod"
+            );
+            return selectQuery(o, undefined, (cursor) => {
+                return {
+                    id: cursor.value.id,
+                    ts: cursor.value.ts,
+                };
+            }).then((res) => {
+                // Sort IDs by timestamp (newest first)
+                return res.sort((a, b) => {
+                    return b.ts - a.ts;
+                }).map((a) => a.id);
             });
         }
 
         function deleteLogs(id) {
             return new Promise((resolve, reject) => {
-                const txn = db.transaction("logs", "readwrite");
+                const txn = db.transaction(
+                    ["logs", "logslastmod"], "readwrite"
+                );
                 const o = txn.objectStore("logs");
                 // only load the key path, not the data which may be huge
                 const query = o.index("id").openKeyCursor(IDBKeyRange.only(id));
@@ -250,7 +257,10 @@ class IndexedDBLogStore {
                             `'${id}' : ${event.target.errorCode}`
                         )
                     );
-                }
+                };
+                // delete last modified entries
+                const lastModStore = txn.objectStore("logslastmod");
+                lastModStore.delete(id);
             });
         }
 
@@ -292,6 +302,13 @@ class IndexedDBLogStore {
             index: this.index++
         };
     }
+
+    _generateLastModifiedTime() {
+        return {
+            id: this.id,
+            ts: Date.now(),
+        };
+    }
 }
 
 /**

From ea063ab8b0b2ce6b2f1a5d36132757e0cb52f8ff Mon Sep 17 00:00:00 2001
From: Kegan Dougal <kegan@matrix.org>
Date: Fri, 20 Jan 2017 14:46:19 +0000
Subject: [PATCH 15/20] Address race conditions when flushing logs

---
 src/vector/rageshake.js | 94 +++++++++++++++++++++++++++++------------
 1 file changed, 68 insertions(+), 26 deletions(-)

diff --git a/src/vector/rageshake.js b/src/vector/rageshake.js
index f4fe4396..7d1a93ef 100644
--- a/src/vector/rageshake.js
+++ b/src/vector/rageshake.js
@@ -99,6 +99,11 @@ class IndexedDBLogStore {
         this.id = "instance-" + Math.random() + Date.now();
         this.index = 0;
         this.db = null;
+        // Promise is not null when a flush is IN PROGRESS
+        this.flushPromise = null;
+        // Promise is not null when flush() is called when one is already in
+        // progress.
+        this.flushAgainPromise = null;
     }
 
     /**
@@ -148,35 +153,80 @@ class IndexedDBLogStore {
     }
 
     /**
+     * Flush logs to disk.
+     *
+     * There are guards to protect against race conditions in order to ensure
+     * that all previous flushes have completed before the most recent flush.
+     * Consider without guards:
+     *  - A calls flush() periodically.
+     *  - B calls flush() and wants to send logs immediately afterwards.
+     *  - If B doesn't wait for A's flush to complete, B will be missing the
+     *    contents of A's flush.
+     * To protect against this, we set 'flushPromise' when a flush is ongoing.
+     * Subsequent calls to flush() during this period return a new promise
+     * 'flushAgainPromise' which is chained off the current 'flushPromise'.
+     * Subsequent calls to flush() when the first flush hasn't completed will
+     * return the same 'flushAgainPromise' as we can guarantee that we WILL
+     * do a brand new flush at some point in the future. Once the first flush
+     * has completed, 'flushAgainPromise' becomes 'flushPromise' and can be
+     * chained again.
+     *
+     * This guarantees that we will always eventually do a flush when flush() is
+     * called.
+     *
      * @return {Promise} Resolved when the logs have been flushed.
      */
     flush() {
-        if (!this.db) {
-            // not connected yet or user rejected access for us to r/w to the db
-            return Promise.reject(new Error("No connected database"));
+        if (this.flushPromise) { // a flush is ongoing
+            if (this.flushAgainPromise) { // a flush is queued up, return that.
+                return this.flushAgainPromise;
+            }
+            // queue up a new flush
+            this.flushAgainPromise = this.flushPromise.then(() => {
+                // the current flush has completed, so shuffle the promises
+                // around:
+                // flushAgainPromise => flushPromise and null flushAgainPromise.
+                // flushPromise has already nulled itself.
+                this.flushAgainPromise = null;
+                return this.flush();
+            });
+            return this.flushAgainPromise;
         }
-        const lines = this.logger.flush();
-        if (lines.length === 0) {
-            return Promise.resolve();
-        }
-        return new Promise((resolve, reject) => {
+
+        this.flushPromise = new Promise((resolve, reject) => {
+            if (!this.db) {
+                // not connected yet or user rejected access for us to r/w to
+                // the db.
+                this.flushPromise = null;
+                reject(new Error("No connected database"));
+                return;
+            }
+            const lines = this.logger.flush();
+            if (lines.length === 0) {
+                this.flushPromise = null;
+                resolve();
+                return;
+            }
             let txn = this.db.transaction(["logs", "logslastmod"], "readwrite");
             let objStore = txn.objectStore("logs");
             objStore.add(this._generateLogEntry(lines));
             let lastModStore = txn.objectStore("logslastmod");
             lastModStore.put(this._generateLastModifiedTime());
             txn.oncomplete = (event) => {
+                this.flushPromise = null;
                 resolve();
             };
             txn.onerror = (event) => {
                 console.error(
                     "Failed to flush logs : ", event
                 );
+                this.flushPromise = null;
                 reject(
                     new Error("Failed to write logs: " + event.target.errorCode)
                 );
             }
         });
+        return this.flushPromise;
     }
 
     /**
@@ -368,17 +418,6 @@ module.exports = {
         return initPromise;
     },
 
-    /**
-     * Force-flush the logs to storage.
-     * @return {Promise} Resolved when the logs have been flushed.
-     */
-    flush: async function() {
-        if (!store) {
-            return;
-        }
-        await store.flush();
-    },
-
     /**
      * Clean up old logs.
      * @return Promise Resolves if cleaned logs.
@@ -422,17 +461,20 @@ module.exports = {
 
         // If in incognito mode, store is null, but we still want bug report
         // sending to work going off the in-memory console logs.
+        console.log("Sending bug report.");
         let logs = [];
         if (store) {
+            // flush most recent logs
+            await store.flush();
             logs = await store.consume();
         }
-        // and add the most recent console logs which won't be in the store yet.
-        const consoleLogs = logger.flush(); // remove logs from console
-        const currentId = store ? store.id : "-";
-        logs.unshift({
-            lines: consoleLogs,
-            id: currentId,
-        });
+        else {
+            logs.push({
+                lines: logger.flush(),
+                id: "-",
+            });
+        }
+
         await new Promise((resolve, reject) => {
             request({
                 method: "POST",

From 2ecf65f05702075175b211161f05b5e2c57d9182 Mon Sep 17 00:00:00 2001
From: Kegan Dougal <kegan@matrix.org>
Date: Mon, 23 Jan 2017 09:28:48 +0000
Subject: [PATCH 16/20] Keep the logs if no store exists

---
 src/vector/rageshake.js | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/src/vector/rageshake.js b/src/vector/rageshake.js
index 7d1a93ef..3cc15886 100644
--- a/src/vector/rageshake.js
+++ b/src/vector/rageshake.js
@@ -80,11 +80,15 @@ class ConsoleLogger {
 
     /**
      * Retrieve log lines to flush to disk.
+     * @param {boolean} keepLogs True to not delete logs after flushing.
      * @return {string} \n delimited log lines to flush.
      */
-    flush() {
+    flush(keepLogs) {
         // The ConsoleLogger doesn't care how these end up on disk, it just
         // flushes them to the caller.
+        if (keepLogs) {
+            return this.logs;
+        }
         const logsToFlush = this.logs;
         this.logs = "";
         return logsToFlush;
@@ -470,7 +474,7 @@ module.exports = {
         }
         else {
             logs.push({
-                lines: logger.flush(),
+                lines: logger.flush(true),
                 id: "-",
             });
         }

From f3df86872b4e4bc7b4e7a489993cf3e73c6a3637 Mon Sep 17 00:00:00 2001
From: Kegan Dougal <kegan@matrix.org>
Date: Mon, 23 Jan 2017 17:37:57 +0000
Subject: [PATCH 17/20] Fix tightlooping when flush()ing without any logs

With no logs to write, the promise resolved immediately and nulled out
`flushPromise` inside the executor. The assignment from
`new Promise((resolve, reject) => {...})` then ran afterwards, making
`flushPromise` non-null again even though it had already resolved. The next
`flush()` therefore saw a non-null `flushPromise` and chained
`flushAgainPromise` off it; because that promise had already resolved, the
chained `flush()` ran straight away, saw the same stale non-null
`flushPromise` (the code relied on it being nulled out after `resolve()`),
and chained yet another flush... ad infinitum.

This PR fixes it by doing the nulling asynchronously, but the fact that it
took me this long to debug indicates that this is a terrible piece of code.
Will re-write.
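As a minimal, standalone illustration of the trap (not the rageshake code
itself): the Promise executor runs synchronously, so nulling a field inside
it is immediately undone by the assignment of the newly constructed promise.

    let flushPromise = null;
    flushPromise = new Promise((resolve) => {
        flushPromise = null; // runs before the outer assignment completes
        resolve();
    });
    console.log(flushPromise === null); // false: the assignment clobbered the null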
---
 src/vector/rageshake.js | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/vector/rageshake.js b/src/vector/rageshake.js
index 3cc15886..a17c829a 100644
--- a/src/vector/rageshake.js
+++ b/src/vector/rageshake.js
@@ -201,13 +201,11 @@ class IndexedDBLogStore {
             if (!this.db) {
                 // not connected yet or user rejected access for us to r/w to
                 // the db.
-                this.flushPromise = null;
                 reject(new Error("No connected database"));
                 return;
             }
             const lines = this.logger.flush();
             if (lines.length === 0) {
-                this.flushPromise = null;
                 resolve();
                 return;
             }
@@ -217,18 +215,20 @@ class IndexedDBLogStore {
             let lastModStore = txn.objectStore("logslastmod");
             lastModStore.put(this._generateLastModifiedTime());
             txn.oncomplete = (event) => {
-                this.flushPromise = null;
                 resolve();
             };
             txn.onerror = (event) => {
                 console.error(
                     "Failed to flush logs : ", event
                 );
-                this.flushPromise = null;
                 reject(
                     new Error("Failed to write logs: " + event.target.errorCode)
                 );
             }
+        }).then(() => {
+            this.flushPromise = null;
+        }, (err) => {
+            this.flushPromise = null;
         });
         return this.flushPromise;
     }

From 6f3b70dbb02bb6edb614209592546fb3ba28854b Mon Sep 17 00:00:00 2001
From: Kegan Dougal <kegan@matrix.org>
Date: Tue, 24 Jan 2017 12:43:18 +0000
Subject: [PATCH 18/20] Use Q promises and isPending to make logic simpler

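A rough sketch of why Q helps here (assuming the "q" package that the diff
imports): Q promises expose isPending(), so the store can ask whether a flush
is still in flight instead of juggling nulled-out fields by hand.

    import q from "q";

    const flushPromise = q.Promise((resolve) => {
        setTimeout(resolve, 1000); // stand-in for the IndexedDB write
    });
    console.log(flushPromise.isPending()); // true while the write is in flight
    flushPromise.then(() => {
        console.log(flushPromise.isPending()); // false once it has settled
    });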
---
 src/vector/rageshake.js | 52 ++++++++++++++++-------------------------
 1 file changed, 20 insertions(+), 32 deletions(-)

diff --git a/src/vector/rageshake.js b/src/vector/rageshake.js
index a17c829a..28f901a3 100644
--- a/src/vector/rageshake.js
+++ b/src/vector/rageshake.js
@@ -16,6 +16,7 @@ limitations under the License.
 
 import PlatformPeg from 'matrix-react-sdk/lib/PlatformPeg';
 import request from "browser-request";
+import q from "q";
 
 // This module contains all the code needed to log the console, persist it to
 // disk and submit bug reports. Rationale is as follows:
@@ -105,9 +106,6 @@ class IndexedDBLogStore {
         this.db = null;
         // Promise is not null when a flush is IN PROGRESS
         this.flushPromise = null;
-        // Promise is not null when flush() is called when one is already in
-        // progress.
-        this.flushAgainPromise = null;
     }
 
     /**
@@ -115,7 +113,7 @@ class IndexedDBLogStore {
      */
     connect() {
         let req = this.indexedDB.open("logs");
-        return new Promise((resolve, reject) => {
+        return q.Promise((resolve, reject) => {
             req.onsuccess = (event) => {
                 this.db = event.target.result;
                 // Periodically flush logs to local storage / indexeddb
@@ -167,13 +165,10 @@ class IndexedDBLogStore {
      *  - If B doesn't wait for A's flush to complete, B will be missing the
      *    contents of A's flush.
      * To protect against this, we set 'flushPromise' when a flush is ongoing.
-     * Subsequent calls to flush() during this period return a new promise
-     * 'flushAgainPromise' which is chained off the current 'flushPromise'.
-     * Subsequent calls to flush() when the first flush hasn't completed will
-     * return the same 'flushAgainPromise' as we can guarantee that we WILL
-     * do a brand new flush at some point in the future. Once the first flush
-     * has completed, 'flushAgainPromise' becomes 'flushPromise' and can be
-     * chained again.
+     * Subsequent calls to flush() during this period will chain another flush.
+     * This guarantees that we WILL do a brand new flush at some point in the
+     * future. Once the flushes have finished, it's  safe to clobber the promise
+     * with a new one to prevent very deep promise chains from building up.
      *
      * This guarantees that we will always eventually do a flush when flush() is
      * called.
@@ -181,23 +176,20 @@ class IndexedDBLogStore {
      * @return {Promise} Resolved when the logs have been flushed.
      */
     flush() {
-        if (this.flushPromise) { // a flush is ongoing
-            if (this.flushAgainPromise) { // a flush is queued up, return that.
-                return this.flushAgainPromise;
-            }
-            // queue up a new flush
-            this.flushAgainPromise = this.flushPromise.then(() => {
-                // the current flush has completed, so shuffle the promises
-                // around:
-                // flushAgainPromise => flushPromise and null flushAgainPromise.
-                // flushPromise has already nulled itself.
-                this.flushAgainPromise = null;
+        // check if a flush() operation is ongoing
+        if (this.flushPromise && this.flushPromise.isPending()) {
+            // chain a flush operation after this one has completed to guarantee
+            // that a complete flush() is done. This does mean that if there are
+            // 3 calls to flush() in one go, the 2nd and 3rd promises will run
+            // concurrently, but this is fine since they can safely race when
+            // collecting logs.
+            return this.flushPromise.then(() => {
                 return this.flush();
             });
-            return this.flushAgainPromise;
         }
-
-        this.flushPromise = new Promise((resolve, reject) => {
+        // there is no flush promise or there was but it has finished, so do
+        // a brand new one, destroying the chain which may have been built up.
+        this.flushPromise = q.Promise((resolve, reject) => {
             if (!this.db) {
                 // not connected yet or user rejected access for us to r/w to
                 // the db.
@@ -225,10 +217,6 @@ class IndexedDBLogStore {
                     new Error("Failed to write logs: " + event.target.errorCode)
                 );
             }
-        }).then(() => {
-            this.flushPromise = null;
-        }, (err) => {
-            this.flushPromise = null;
         });
         return this.flushPromise;
     }
@@ -286,7 +274,7 @@ class IndexedDBLogStore {
         }
 
         function deleteLogs(id) {
-            return new Promise((resolve, reject) => {
+            return q.Promise((resolve, reject) => {
                 const txn = db.transaction(
                     ["logs", "logslastmod"], "readwrite"
                 );
@@ -377,7 +365,7 @@ class IndexedDBLogStore {
  */
 function selectQuery(store, keyRange, resultMapper) {
     const query = store.openCursor(keyRange);
-    return new Promise((resolve, reject) => {
+    return q.Promise((resolve, reject) => {
         let results = [];
         query.onerror = (event) => {
             reject(new Error("Query failed: " + event.target.errorCode));
@@ -479,7 +467,7 @@ module.exports = {
             });
         }
 
-        await new Promise((resolve, reject) => {
+        await q.Promise((resolve, reject) => {
             request({
                 method: "POST",
                 url: bugReportEndpoint,

From e225d3e3708bccf6cd458453206d076841011630 Mon Sep 17 00:00:00 2001
From: Kegan Dougal <kegan@matrix.org>
Date: Tue, 24 Jan 2017 17:05:01 +0000
Subject: [PATCH 19/20] Preserve ordering of flush()es by not letting
 subsequent flush()es race

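The intent, sketched with illustrative names (the real fields are
flushPromise and flushAgainPromise in the diff below): at most one follow-up
flush is queued behind the in-flight one, and any further callers share that
queued flush, so flushes never start out of order.

    import q from "q";

    let current = null; // in-flight flush
    let queued = null;  // at most one queued follow-up

    function flushSerialised(doFlush) {
        if (current && current.isPending()) {
            if (queued && queued.isPending()) {
                return queued; // 3rd+ concurrent callers share the queued flush
            }
            queued = current.then(() => flushSerialised(doFlush));
            return queued;
        }
        current = q(doFlush()); // nothing in flight: start a brand new flush
        return current;
    }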
---
 src/vector/rageshake.js | 24 +++++++++++++-----------
 1 file changed, 13 insertions(+), 11 deletions(-)

diff --git a/src/vector/rageshake.js b/src/vector/rageshake.js
index 28f901a3..086ab155 100644
--- a/src/vector/rageshake.js
+++ b/src/vector/rageshake.js
@@ -104,8 +104,9 @@ class IndexedDBLogStore {
         this.id = "instance-" + Math.random() + Date.now();
         this.index = 0;
         this.db = null;
-        // Promise is not null when a flush is IN PROGRESS
         this.flushPromise = null;
+        // set if flush() is called whilst one is ongoing
+        this.flushAgainPromise = null;
     }
 
     /**
@@ -165,10 +166,8 @@ class IndexedDBLogStore {
      *  - If B doesn't wait for A's flush to complete, B will be missing the
      *    contents of A's flush.
      * To protect against this, we set 'flushPromise' when a flush is ongoing.
-     * Subsequent calls to flush() during this period will chain another flush.
-     * This guarantees that we WILL do a brand new flush at some point in the
-     * future. Once the flushes have finished, it's  safe to clobber the promise
-     * with a new one to prevent very deep promise chains from building up.
+     * Subsequent calls to flush() during this period will chain another flush,
+     * then keep returning that same chained flush.
      *
      * This guarantees that we will always eventually do a flush when flush() is
      * called.
@@ -178,14 +177,17 @@ class IndexedDBLogStore {
     flush() {
         // check if a flush() operation is ongoing
         if (this.flushPromise && this.flushPromise.isPending()) {
-            // chain a flush operation after this one has completed to guarantee
-            // that a complete flush() is done. This does mean that if there are
-            // 3 calls to flush() in one go, the 2nd and 3rd promises will run
-            // concurrently, but this is fine since they can safely race when
-            // collecting logs.
-            return this.flushPromise.then(() => {
+            if (this.flushAgainPromise && this.flushAgainPromise.isPending()) {
+                // this is the 3rd+ time we've called flush() : return the same
+                // promise.
+                return this.flushAgainPromise;
+            }
+            // queue up a flush to occur immediately after the pending one
+            // completes.
+            this.flushAgainPromise = this.flushPromise.then(() => {
                 return this.flush();
             });
+            return this.flushAgainPromise;
         }
         // there is no flush promise or there was but it has finished, so do
         // a brand new one, destroying the chain which may have been built up.

From eaff0abfb07fddc1f657a1c942c72968e7b47619 Mon Sep 17 00:00:00 2001
From: Kegan Dougal <kegan@matrix.org>
Date: Wed, 25 Jan 2017 10:28:04 +0000
Subject: [PATCH 20/20] Add link to Chrome issue status for fixing line numbers

---
 src/vector/index.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/vector/index.js b/src/vector/index.js
index d1d85e06..c40edd7e 100644
--- a/src/vector/index.js
+++ b/src/vector/index.js
@@ -38,7 +38,7 @@ require('draft-js/dist/Draft.css');
 
 const rageshake = require("./rageshake");
 rageshake.init().then(() => {
-    console.log("Initialised rageshake");
+    console.log("Initialised rageshake: See https://bugs.chromium.org/p/chromium/issues/detail?id=583193 to fix line numbers on Chrome.");
     rageshake.cleanup();
 }, (err) => {
     console.error("Failed to initialise rageshake: " + err);