mirror of https://github.com/vector-im/element-web.git
Add mechanism to check only one instance of the app is running (#11416)
* Add mechanism to check only one instance of the app is running

  This isn't used yet, but will form part of the solution to
  https://github.com/vector-im/element-web/issues/25157.

* disable instrumentation for SessionLock
* disable coverage reporting
* exclude SessionLock in sonar.properties
* Revert "disable coverage reporting"

  This reverts commit 80c4336f76ec8e47e376b6744ef612a73299d14f.

* only disable session storage
* use pagehide instead of visibilitychange
* Add `checkSessionLockFree`
* Give up waiting for a lock immediately when someone else claims it
* Update src/utils/SessionLock.ts
parent 4de315fb6c
commit d13b6e1b41
@@ -36,7 +36,12 @@ const config: Config = {
         "RecorderWorklet": "<rootDir>/__mocks__/empty.js",
     },
     transformIgnorePatterns: ["/node_modules/(?!matrix-js-sdk).+$"],
-    collectCoverageFrom: ["<rootDir>/src/**/*.{js,ts,tsx}"],
+    collectCoverageFrom: [
+        "<rootDir>/src/**/*.{js,ts,tsx}",
+        // getSessionLock is piped into a different JS context via stringification, and the coverage functionality is
+        // not available in that context. So, turn off coverage instrumentation for it.
+        "!<rootDir>/src/utils/SessionLock.ts",
+    ],
     coverageReporters: ["text-summary", "lcov"],
     testResultsProcessor: "@casualbot/jest-sonar-reporter",
 };
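The comment in this hunk is worth unpacking: `getSessionLock` is shipped into a second window by stringifying the function and `eval`-ing it there (see the test file below), and istanbul-style coverage instrumentation does not survive that trip. A minimal sketch of the failure mode, assuming istanbul-style counters; the `cov_1abc2` name and the `otherWindow` handle are illustrative, not from this commit:

    import { getSessionLock } from "./src/utils/SessionLock";

    // e.g. an iframe's contentWindow (illustrative)
    declare const otherWindow: Window & { eval(code: string): unknown };

    // Under coverage, the compiled body of getSessionLock has been rewritten to
    // bump counters (roughly `cov_1abc2().f[0]++`) held in the scope where the
    // module was compiled. String(fn) captures the rewritten body, but not the
    // scope holding the counter function...
    const source = String(getSessionLock);

    // ...so the eval'd copy throws a ReferenceError the first time it runs in
    // the other window, where the counter function was never defined. Hence
    // coverage instrumentation is switched off for this one file.
    otherWindow.eval(source);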
@@ -121,6 +121,7 @@
         "sanitize-html": "2.11.0",
         "tar-js": "^0.3.0",
         "ua-parser-js": "^1.0.2",
+        "uuid": "^9.0.0",
         "what-input": "^5.2.10",
         "zxcvbn": "^4.4.2"
     },
@@ -11,5 +11,6 @@ sonar.exclusions=__mocks__,docs
 sonar.cpd.exclusions=src/i18n/strings/*.json
 sonar.typescript.tsconfigPath=./tsconfig.json
 sonar.javascript.lcov.reportPaths=coverage/lcov.info
-sonar.coverage.exclusions=test/**/*,cypress/**/*,src/components/views/dialogs/devtools/**/*
+# instrumentation is disabled on SessionLock
+sonar.coverage.exclusions=test/**/*,cypress/**/*,src/components/views/dialogs/devtools/**/*,src/utils/SessionLock.ts
 sonar.testExecutionReportPaths=coverage/jest-sonar-report.xml
src/utils/SessionLock.ts (new file, 261 lines)
@@ -0,0 +1,261 @@
/*
Copyright 2023 The Matrix.org Foundation C.I.C.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

import { logger } from "matrix-js-sdk/src/logger";
import { v4 as uuidv4 } from "uuid";

/*
 * Functionality for checking that only one instance is running at once
 *
 * The algorithm here is twofold.
 *
 * First, we "claim" a lock by periodically writing to `STORAGE_ITEM_PING`. On shutdown, we clear that item. So,
 * a new instance starting up can check if the lock is free by inspecting `STORAGE_ITEM_PING`. If it is unset,
 * or is stale, the new instance can assume the lock is free and claim it for itself. Otherwise, the new instance
 * has to wait for the ping to be stale, or the item to be cleared.
 *
 * Secondly, we need a mechanism for proactively telling existing instances to shut down. We do this by writing a
 * unique value to `STORAGE_ITEM_CLAIMANT`. Other instances of the app are supposed to monitor for writes to
 * `STORAGE_ITEM_CLAIMANT` and initiate shutdown when it happens.
 *
 * There is slight complexity in `STORAGE_ITEM_CLAIMANT` in that we need to watch out for yet another instance
 * starting up and staking a claim before we even get a chance to take the lock. When that happens we just bail out
 * and let the newer instance get the lock.
 *
 * `STORAGE_ITEM_OWNER` has no functional role in the lock mechanism; it exists solely as a diagnostic indicator
 * of which instance is writing to `STORAGE_ITEM_PING`.
 */
export const SESSION_LOCK_CONSTANTS = {
    /**
     * LocalStorage key for an item which indicates we have the lock.
     *
     * The instance which holds the lock writes the current time to this key every few seconds, to indicate it is still
     * alive and holds the lock.
     */
    STORAGE_ITEM_PING: "react_sdk_session_lock_ping",

    /**
     * LocalStorage key for an item which holds the unique "session ID" of the instance which currently holds the lock.
     *
     * This property doesn't actually form a functional part of the locking algorithm; it is purely diagnostic.
     */
    STORAGE_ITEM_OWNER: "react_sdk_session_lock_owner",

    /**
     * LocalStorage key for the session ID of the most recent claimant to the lock.
     *
     * Each instance writes to this key on startup, so existing instances can detect new ones starting up.
     */
    STORAGE_ITEM_CLAIMANT: "react_sdk_session_lock_claimant",

    /**
     * The number of milliseconds after which we consider a lock claim stale.
     */
    LOCK_EXPIRY_TIME_MS: 30000,
};
/**
 * See if any instances are currently running.
 *
 * @returns true if no other instance is currently active, ie the lock is free
 */
export function checkSessionLockFree(): boolean {
    const lastPingTime = window.localStorage.getItem(SESSION_LOCK_CONSTANTS.STORAGE_ITEM_PING);
    if (lastPingTime === null) {
        // no other holder
        return true;
    }

    // see if it has expired
    const timeAgo = Date.now() - parseInt(lastPingTime);
    return timeAgo > SESSION_LOCK_CONSTANTS.LOCK_EXPIRY_TIME_MS;
}
/**
 * Ensure that only one instance of the application is running at once.
 *
 * If there are any other running instances, tells them to stop, and waits for them to do so.
 *
 * Once we are the sole instance, sets a background job going to service a lock. Then, if another instance starts up,
 * `onNewInstance` is called: it should shut the app down to make sure we aren't doing any more work.
 *
 * @param onNewInstance - callback to handle another instance starting up. NOTE: this may be called before
 *     `getSessionLock` returns if the lock is stolen before we get a chance to start.
 *
 * @returns true if we successfully claimed the lock; false if another instance stole it from under our nose
 *     (in which case `onNewInstance` will have been called)
 */
export async function getSessionLock(onNewInstance: () => Promise<void>): Promise<boolean> {
    /** unique ID for this session */
    const sessionIdentifier = uuidv4();

    const prefixedLogger = logger.withPrefix(`getSessionLock[${sessionIdentifier}]`);

    /** The ID of our regular task to service the lock.
     *
     * Non-null while we hold the lock; null if we have not yet claimed it, or have released it. */
    let lockServicer: number | null = null;

    /**
     * See if the lock is free.
     *
     * @returns
     *  - `>0`: the number of milliseconds before the current claim on the lock can be considered stale.
     *  - `0`: the lock is free for the taking.
     *  - `<0`: someone else has staked a claim for the lock, so we are no longer in line for it.
     */
    function checkLock(): number {
        // first of all, check that we are still the active claimant (ie, another instance hasn't come along while we were waiting).
        const claimant = window.localStorage.getItem(SESSION_LOCK_CONSTANTS.STORAGE_ITEM_CLAIMANT);
        if (claimant !== sessionIdentifier) {
            prefixedLogger.warn(`Lock was claimed by ${claimant} while we were waiting for it: aborting startup`);
            return -1;
        }

        const lastPingTime = window.localStorage.getItem(SESSION_LOCK_CONSTANTS.STORAGE_ITEM_PING);
        const lockHolder = window.localStorage.getItem(SESSION_LOCK_CONSTANTS.STORAGE_ITEM_OWNER);
        if (lastPingTime === null) {
            prefixedLogger.info("No other session has the lock: proceeding with startup");
            return 0;
        }

        const timeAgo = Date.now() - parseInt(lastPingTime);
        const remaining = SESSION_LOCK_CONSTANTS.LOCK_EXPIRY_TIME_MS - timeAgo;
        if (remaining <= 0) {
            // another session claimed the lock, but it is stale.
            prefixedLogger.info(`Last ping (from ${lockHolder}) was ${timeAgo}ms ago: proceeding with startup`);
            return 0;
        }

        prefixedLogger.info(`Last ping (from ${lockHolder}) was ${timeAgo}ms ago, waiting`);
        return remaining;
    }

    function serviceLock(): void {
        window.localStorage.setItem(SESSION_LOCK_CONSTANTS.STORAGE_ITEM_OWNER, sessionIdentifier);
        window.localStorage.setItem(SESSION_LOCK_CONSTANTS.STORAGE_ITEM_PING, Date.now().toString());
    }

    // handler for storage events, used later
    function onStorageEvent(event: StorageEvent): void {
        if (event.key === SESSION_LOCK_CONSTANTS.STORAGE_ITEM_CLAIMANT) {
            // It's possible that the event was delayed, and this update actually predates our claim on the lock.
            // (In particular: suppose tab A and tab B start concurrently and both attempt to set STORAGE_ITEM_CLAIMANT.
            // Each write queues up a `storage` event for all other tabs. So both tabs see the `storage` event from the
            // other, even though by the time it arrives we may have overwritten it.)
            //
            // To resolve any doubt, we check the *actual* state of the storage.
            const claimingSession = window.localStorage.getItem(SESSION_LOCK_CONSTANTS.STORAGE_ITEM_CLAIMANT);
            if (claimingSession === sessionIdentifier) {
                return;
            }
            prefixedLogger.info(`Session ${claimingSession} is waiting for the lock`);
            window.removeEventListener("storage", onStorageEvent);
            releaseLock().catch((err) => {
                prefixedLogger.error("Error releasing session lock", err);
            });
        }
    }

    async function releaseLock(): Promise<void> {
        // tell the app to shut down
        await onNewInstance();

        // and, once it has done so, stop pinging the lock.
        if (lockServicer !== null) {
            clearInterval(lockServicer);
        }
        window.localStorage.removeItem(SESSION_LOCK_CONSTANTS.STORAGE_ITEM_PING);
        window.localStorage.removeItem(SESSION_LOCK_CONSTANTS.STORAGE_ITEM_OWNER);
        lockServicer = null;
    }

    // first of all, stake a claim for the lock. This tells anyone else holding the lock that we want it.
    window.localStorage.setItem(SESSION_LOCK_CONSTANTS.STORAGE_ITEM_CLAIMANT, sessionIdentifier);

    // now, wait for the lock to be free.
    // eslint-disable-next-line no-constant-condition
    while (true) {
        const remaining = checkLock();

        if (remaining == 0) {
            // ok, the lock is free, and nobody else has staked a more recent claim.
            break;
        } else if (remaining < 0) {
            // someone else staked a claim for the lock; we bail out.
            await onNewInstance();
            return false;
        }

        // someone else has the lock.
        // wait for either the ping to expire, or a storage event.
        let onStorageUpdate: (event: StorageEvent) => void;

        const storageUpdatePromise = new Promise((resolve) => {
            onStorageUpdate = (event: StorageEvent) => {
                if (
                    event.key === SESSION_LOCK_CONSTANTS.STORAGE_ITEM_PING ||
                    event.key === SESSION_LOCK_CONSTANTS.STORAGE_ITEM_CLAIMANT
                )
                    resolve(event);
            };
        });

        const sleepPromise = new Promise((resolve) => {
            setTimeout(resolve, remaining, undefined);
        });

        window.addEventListener("storage", onStorageUpdate!);
        await Promise.race([sleepPromise, storageUpdatePromise]);
        window.removeEventListener("storage", onStorageUpdate!);
    }

    // If we get here, we know the lock is ours for the taking.

    // CRITICAL SECTION
    //
    // The following code, up to the end of the function, must all be synchronous (ie, no `await` calls), to ensure that
    // we get our listeners in place and all the writes to localStorage done before other tabs run again.

    // claim the lock, and kick off a background process to service it every 5 seconds
    serviceLock();
    lockServicer = setInterval(serviceLock, 5000);

    // Now add a listener for other claimants to the lock.
    window.addEventListener("storage", onStorageEvent);

    // also add a listener to clear our claims when our tab closes or navigates away
    window.addEventListener("pagehide", (event) => {
        // only remove the ping if we still think we're the owner. Otherwise we could be removing someone else's claim!
        if (lockServicer !== null) {
            prefixedLogger.debug("page hide: clearing our claim");
            window.localStorage.removeItem(SESSION_LOCK_CONSTANTS.STORAGE_ITEM_PING);
            window.localStorage.removeItem(SESSION_LOCK_CONSTANTS.STORAGE_ITEM_OWNER);
        }

        // It's worth noting that, according to the spec, the page might come back to life again after a pagehide.
        //
        // In practice that's unlikely because Element is unlikely to qualify for the bfcache, but if it does,
        // this is probably the best we can do: we certainly don't want to stop the user loading any new tabs because
        // Element happens to be in a bfcache somewhere.
        //
        // So, we just hope that we aren't in the middle of any crypto operations, and rely on `onStorageEvent` kicking
        // in soon enough after we resume to tell us if another tab woke up while we were asleep.
    });

    return true;
}
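The commit message notes the lock is not wired up anywhere yet. For orientation, here is a minimal sketch of the intended call pattern, following the doc comments above; `startApp`, `stopApp`, and `showDuplicateTabDialog` are hypothetical application hooks, not part of this commit:

    import { checkSessionLockFree, getSessionLock } from "./utils/SessionLock";

    // Hypothetical lifecycle hooks supplied by the host application.
    declare function startApp(): Promise<void>;
    declare function stopApp(): Promise<void>;
    declare function showDuplicateTabDialog(): void;

    async function main(): Promise<void> {
        // Optional UX hint: getSessionLock() will wait out a stale lock anyway,
        // so this check is only useful for showing a "waiting" message.
        if (!checkSessionLockFree()) {
            console.log("Another instance appears to be running; waiting for it to stop");
        }

        const gotLock = await getSessionLock(async () => {
            // A newer instance has staked a claim: stop all work, then return
            // so that it can safely start.
            await stopApp();
            showDuplicateTabDialog();
        });

        if (gotLock) {
            await startApp();
        }
        // If gotLock is false, the callback above has already run and the
        // newer tab now owns the session.
    }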
test/utils/SessionLock-test.ts (new file, 275 lines)
@@ -0,0 +1,275 @@
/*
Copyright 2023 The Matrix.org Foundation C.I.C.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

import { checkSessionLockFree, getSessionLock, SESSION_LOCK_CONSTANTS } from "../../src/utils/SessionLock";

describe("SessionLock", () => {
    const otherWindows: Array<Window> = [];
    let windowEventListeners: Array<[string, any]>;
    let documentEventListeners: Array<[string, any]>;

    beforeEach(() => {
        jest.useFakeTimers({ now: 1000 });

        // keep track of the registered event listeners, so that we can unregister them in `afterEach`
        windowEventListeners = [];
        const realWindowAddEventListener = window.addEventListener.bind(window);
        jest.spyOn(window, "addEventListener").mockImplementation((type, listener, options) => {
            const res = realWindowAddEventListener(type, listener, options);
            windowEventListeners.push([type, listener]);
            return res;
        });

        documentEventListeners = [];
        const realDocumentAddEventListener = document.addEventListener.bind(document);
        jest.spyOn(document, "addEventListener").mockImplementation((type, listener, options) => {
            const res = realDocumentAddEventListener(type, listener, options);
            documentEventListeners.push([type, listener]);
            return res;
        });
    });

    afterEach(() => {
        // shut down other windows created by `createWindow`
        otherWindows.forEach((window) => window.close());
        otherWindows.splice(0);

        // remove listeners on our own window
        windowEventListeners.forEach(([type, listener]) => window.removeEventListener(type, listener));
        documentEventListeners.forEach(([type, listener]) => document.removeEventListener(type, listener));

        localStorage.clear();
        jest.restoreAllMocks();
    });

    it("A single instance starts up normally", async () => {
        const onNewInstance = jest.fn();
        const result = await getSessionLock(onNewInstance);
        expect(result).toBe(true);
        expect(onNewInstance).not.toHaveBeenCalled();
    });

    it("A second instance starts up normally when the first shut down cleanly", async () => {
        // first instance starts...
        const onNewInstance1 = jest.fn();
        expect(await getSessionLock(onNewInstance1)).toBe(true);
        expect(onNewInstance1).not.toHaveBeenCalled();

        // ... and navigates away
        window.dispatchEvent(new Event("pagehide", {}));

        // second instance starts as normal
        expect(checkSessionLockFree()).toBe(true);
        const onNewInstance2 = jest.fn();
        expect(await getSessionLock(onNewInstance2)).toBe(true);

        expect(onNewInstance1).not.toHaveBeenCalled();
        expect(onNewInstance2).not.toHaveBeenCalled();
    });

    it("A second instance starts up *eventually* when the first terminated uncleanly", async () => {
        // first instance starts...
        const onNewInstance1 = jest.fn();
        expect(await getSessionLock(onNewInstance1)).toBe(true);
        expect(onNewInstance1).not.toHaveBeenCalled();
        expect(checkSessionLockFree()).toBe(false);

        // and pings the timer after 5 seconds
        jest.advanceTimersByTime(5000);
        expect(checkSessionLockFree()).toBe(false);

        // oops, now it dies. We simulate this by forcibly clearing the timers.
        // For some reason `jest.clearAllTimers` also resets the simulated time, so preserve that
        const time = Date.now();
        jest.clearAllTimers();
        jest.setSystemTime(time);
        expect(checkSessionLockFree()).toBe(false);

        // time advances a bit more
        jest.advanceTimersByTime(5000);
        expect(checkSessionLockFree()).toBe(false);

        // second instance tries to start. This should block for 25 more seconds
        const onNewInstance2 = jest.fn();
        let session2Result: boolean | undefined;
        getSessionLock(onNewInstance2).then((res) => {
            session2Result = res;
        });

        // after another 24.5 seconds, we are still waiting
        jest.advanceTimersByTime(24500);
        expect(session2Result).toBe(undefined);
        expect(checkSessionLockFree()).toBe(false);

        // another 500ms and we get the lock
        await jest.advanceTimersByTimeAsync(500);
        expect(session2Result).toBe(true);
        expect(checkSessionLockFree()).toBe(false); // still false, because the new session has claimed it

        expect(onNewInstance1).not.toHaveBeenCalled();
        expect(onNewInstance2).not.toHaveBeenCalled();
    });

    it("A second instance waits for the first to shut down", async () => {
        // first instance starts. Once it gets the shutdown signal, it will wait two seconds and then release the lock.
        await getSessionLock(
            () =>
                new Promise<void>((resolve) => {
                    setTimeout(resolve, 2000, 0);
                }),
        );

        // second instance tries to start, but should block
        const { window: window2, getSessionLock: getSessionLock2 } = buildNewContext();
        let session2Result: boolean | undefined;
        getSessionLock2(async () => {}).then((res) => {
            session2Result = res;
        });
        await jest.advanceTimersByTimeAsync(100);
        // should still be blocking
        expect(session2Result).toBe(undefined);

        await jest.advanceTimersByTimeAsync(2000);
        await jest.advanceTimersByTimeAsync(0);

        // session 2 now gets the lock
        expect(session2Result).toBe(true);
        window2.close();
    });

    it("If a third instance starts while we are waiting, we give up immediately", async () => {
        // first instance starts. It will never release the lock.
        await getSessionLock(() => new Promise(() => {}));

        // first instance should ping the timer after 5 seconds
        jest.advanceTimersByTime(5000);

        // second instance starts
        const { getSessionLock: getSessionLock2 } = buildNewContext();
        let session2Result: boolean | undefined;
        const onNewInstance2 = jest.fn();
        getSessionLock2(onNewInstance2).then((res) => {
            session2Result = res;
        });

        await jest.advanceTimersByTimeAsync(100);
        // should still be blocking
        expect(session2Result).toBe(undefined);

        // third instance starts
        const { getSessionLock: getSessionLock3 } = buildNewContext();
        getSessionLock3(async () => {});
        await jest.advanceTimersByTimeAsync(0);

        // session 2 should have given up
        expect(session2Result).toBe(false);
        expect(onNewInstance2).toHaveBeenCalled();
    });

    it("If two new instances start concurrently, only one wins", async () => {
        // first instance starts. Once it gets the shutdown signal, it will wait two seconds and then release the lock.
        await getSessionLock(async () => {
            await new Promise<void>((resolve) => {
                setTimeout(resolve, 2000, 0);
            });
        });

        // first instance should ping the timer after 5 seconds
        jest.advanceTimersByTime(5000);

        // two new instances start at once
        const { getSessionLock: getSessionLock2 } = buildNewContext();
        let session2Result: boolean | undefined;
        getSessionLock2(async () => {}).then((res) => {
            session2Result = res;
        });

        const { getSessionLock: getSessionLock3 } = buildNewContext();
        let session3Result: boolean | undefined;
        getSessionLock3(async () => {}).then((res) => {
            session3Result = res;
        });

        await jest.advanceTimersByTimeAsync(100);
        // session 3 should still be blocking. Session 2 should have given up.
        expect(session2Result).toBe(false);
        expect(session3Result).toBe(undefined);

        await jest.advanceTimersByTimeAsync(2000);
        await jest.advanceTimersByTimeAsync(0);

        // session 3 now gets the lock
        expect(session2Result).toBe(false);
        expect(session3Result).toBe(true);
    });

    /** build a new Window in the same domain as the current one.
     *
     * We do this by constructing an iframe, which gets its own Window object.
     */
    function createWindow() {
        const iframe = window.document.createElement("iframe");
        window.document.body.appendChild(iframe);
        const window2: any = iframe.contentWindow;

        otherWindows.push(window2);

        // make the new Window use the same jest fake timers as us
        for (const m of ["setTimeout", "clearTimeout", "setInterval", "clearInterval", "Date"]) {
            // @ts-ignore
            window2[m] = global[m];
        }
        return window2;
    }

    /**
     * Instantiate `getSessionLock` in a new context (ie, using a different global `window`).
     *
     * The new window will share the same fake timer impl as the current context.
     *
     * @returns the new window and (a wrapper for) getSessionLock in the new context.
     */
    function buildNewContext(): {
        window: Window;
        getSessionLock: (onNewInstance: () => Promise<void>) => Promise<boolean>;
    } {
        const window2 = createWindow();

        // import the dependencies of getSessionLock into the new context
        window2._uuid = require("uuid");
        window2._logger = require("matrix-js-sdk/src/logger");
        window2.SESSION_LOCK_CONSTANTS = SESSION_LOCK_CONSTANTS;

        // now, define getSessionLock as a global
        window2.eval(String(getSessionLock));

        // return a function that will call it
        function callGetSessionLock(onNewInstance: () => Promise<void>): Promise<boolean> {
            // import the callback into the context
            window2._getSessionLockCallback = onNewInstance;

            // start the function
            try {
                return window2.eval(`getSessionLock(_getSessionLockCallback)`);
            } finally {
                // we can now clear the callback
                delete window2._getSessionLockCallback;
            }
        }

        return { window: window2, getSessionLock: callGetSessionLock };
    }
});
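Both the implementation and these tests hinge on a Web Storage detail worth spelling out: a `storage` event fires in every *other* same-origin window, never in the window that performed the write, which is why the tests build real iframe windows rather than simulating a second tab within one context. A standalone sketch of that behaviour (a fact of the Web Storage spec, not code from this commit):

    // Tab A: this listener fires for writes made by *other* tabs only,
    // never for Tab A's own localStorage.setItem calls.
    window.addEventListener("storage", (event: StorageEvent) => {
        if (event.key === "react_sdk_session_lock_claimant") {
            console.log(`another tab staked a claim: ${event.newValue}`);
        }
    });

    // Tab B (a different same-origin window): this write wakes up Tab A's
    // listener, but dispatches no event in Tab B itself.
    window.localStorage.setItem("react_sdk_session_lock_claimant", "some-session-id");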
@@ -9790,7 +9790,7 @@ uuid@8.3.2, uuid@^8.3.2:
   resolved "https://registry.yarnpkg.com/uuid/-/uuid-8.3.2.tgz#80d5b5ced271bb9af6c445f21a1a04c606cefbe2"
   integrity sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==

-uuid@9:
+uuid@9, uuid@^9.0.0:
   version "9.0.0"
   resolved "https://registry.yarnpkg.com/uuid/-/uuid-9.0.0.tgz#592f550650024a38ceb0c562f2f6aa435761efb5"
   integrity sha512-MXcSTerfPa4uqyzStbRoTgt5XIe3x5+42+q1sDuy3R5MDk66URdLMOZe5aPX/SQd+kuYAh0FdP/pO28IkQyTeg==