
EventEmitter memory leak #428

@marcus-pousette

Description


Hi!

I am using js-ipfsd-ctl in a testing suite, which depends on this project, which in turn depends on mortice for read/write lock control. During testing I got the warning `EventEmitter memory leak detected. 11 listeners added`. I found that this was because the `cluster.on('message', ...)` listeners were never cleaned up correctly when closing.
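
To illustrate the failure mode, here is a minimal sketch (not the mortice code itself, just the generic pattern) of how Node's warning is triggered when `'message'` listeners keep being added to `cluster` without ever being removed:

```js
// minimal sketch, illustrative only — not the mortice implementation
// Node warns as soon as more than 10 listeners are attached to one event
import cluster from 'node:cluster'

for (let i = 0; i < 6; i++) {
  // stand-ins for the read/write lock handlers registered on each setup
  cluster.on('message', () => { /* handle requestReadLock */ })
  cluster.on('message', () => { /* handle requestWriteLock */ })
}

// on the 11th listener Node prints something like:
// (node:1234) MaxListenersExceededWarning: Possible EventEmitter memory leak
// detected. 11 message listeners added to [EventEmitter]. Use
// emitter.setMaxListeners() to increase limit
```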

Here is the diff that solved my problem:

```diff
diff --git a/node_modules/ipfs-repo/src/index.js b/node_modules/ipfs-repo/src/index.js
index 3f3f54f..27b7904 100644
--- a/node_modules/ipfs-repo/src/index.js
+++ b/node_modules/ipfs-repo/src/index.js
@@ -229,7 +229,8 @@ class Repo {
    *
    * @private
    */
-  _closeLock () {
+  _closeLock() {
+    this.gcLock.disconnect && this.gcLock.disconnect();
     return this._lockfile && this._lockfile.close()
   }
 
```

```diff
diff --git a/node_modules/mortice/dist/src/index.js b/node_modules/mortice/dist/src/index.js
index 5babae6..120b2ac 100644
--- a/node_modules/mortice/dist/src/index.js
+++ b/node_modules/mortice/dist/src/index.js
@@ -64,6 +64,9 @@ const createMutex = (name, options) => {
             // released
             readQueue = null;
             return await createReleaseable(masterQueue, options);
+        },
+        disconnect: () => {
+
         }
     };
 };
@@ -75,28 +78,30 @@ const defaultOptions = {
 };
 export default function createMortice(options) {
     const opts = Object.assign({}, defaultOptions, options);
+    let disconnect = undefined;
     if (implementation == null) {
         implementation = impl(opts);
         if (implementation.isWorker !== true) {
             // we are master, set up worker requests
-            implementation.addEventListener('requestReadLock', (event) => {
+            implementation.emitter.addEventListener('requestReadLock', (event) => {
                 if (mutexes[event.data.name] == null) {
                     return;
                 }
                 void mutexes[event.data.name].readLock()
                     .then(async (release) => await event.data.handler().finally(() => release()));
             });
-            implementation.addEventListener('requestWriteLock', async (event) => {
+            implementation.emitter.addEventListener('requestWriteLock', async (event) => {
                 if (mutexes[event.data.name] == null) {
                     return;
                 }
                 void mutexes[event.data.name].writeLock()
                     .then(async (release) => await event.data.handler().finally(() => release()));
             });
+            disconnect = implementation.disconnect;
         }
     }
     if (mutexes[opts.name] == null) {
-        mutexes[opts.name] = createMutex(opts.name, opts);
+        mutexes[opts.name] = { ...createMutex(opts.name, opts), disconnect };
     }
     return mutexes[opts.name];
 }
diff --git a/node_modules/mortice/dist/src/node.js b/node_modules/mortice/dist/src/node.js
index b802bac..7573ad4 100644
--- a/node_modules/mortice/dist/src/node.js
+++ b/node_modules/mortice/dist/src/node.js
@@ -64,11 +64,19 @@ const makeWorkerLockRequest = (name, requestType, grantType, releaseType) => {
     };
 };
 export default (options) => {
+    const listeners = [];
     if (cluster.isPrimary || options.singleProcess) {
         const emitter = new EventTarget();
-        cluster.on('message', handleWorkerLockRequest(emitter, 'requestReadLock', WORKER_REQUEST_READ_LOCK, WORKER_RELEASE_READ_LOCK, MASTER_GRANT_READ_LOCK));
-        cluster.on('message', handleWorkerLockRequest(emitter, 'requestWriteLock', WORKER_REQUEST_WRITE_LOCK, WORKER_RELEASE_WRITE_LOCK, MASTER_GRANT_WRITE_LOCK));
-        return emitter;
+        listeners.push(handleWorkerLockRequest(emitter, 'requestReadLock', WORKER_REQUEST_READ_LOCK, WORKER_RELEASE_READ_LOCK, MASTER_GRANT_READ_LOCK));
+        listeners.push(handleWorkerLockRequest(emitter, 'requestWriteLock', WORKER_REQUEST_WRITE_LOCK, WORKER_RELEASE_WRITE_LOCK, MASTER_GRANT_WRITE_LOCK));
+        listeners.forEach(l => {
+            cluster.on('message', l);
+        })
+        return {
+            emitter, disconnect: () => {
+                listeners.forEach((l) => cluster.removeListener('message', l));
+            }
+        };
     }
     return {
         isWorker: true,
```

I am not sure if I am doing anything particularly wrong with my configs, but this seems to be an issue when spawning and closing many controllers in sequence.

How to reproduce:

Use "ipfsd-ctl": "^12.2.2" and spawn and close many proc-js (disposable = true, test = true) instances until the EventEmitter memory leak message appear
