diff --git a/packages/firestore/CHANGELOG.md b/packages/firestore/CHANGELOG.md
index abd9b5608de..2a268b804aa 100644
--- a/packages/firestore/CHANGELOG.md
+++ b/packages/firestore/CHANGELOG.md
@@ -1,10 +1,4 @@
-# 0.7.1 (Unreleased)
-- [fixed] Fixed an issue where the first `get()` call made after being offline
-  could incorrectly return cached data without attempting to reach the backend.
-- [changed] Changed `get()` to only make 1 attempt to reach the backend before
-  returning cached data, potentially reducing delays while offline.
-
-# 0.7.0
+# 0.7.0 (Unreleased)
 - [fixed] Fixed `get({source: 'cache'})` to be able to return nonexistent
   documents from cache.
 - [changed] Prepared the persistence layer to allow shared access from multiple
diff --git a/packages/firestore/src/remote/online_state_tracker.ts b/packages/firestore/src/remote/online_state_tracker.ts
index 41e1422a242..4938a0b1f76 100644
--- a/packages/firestore/src/remote/online_state_tracker.ts
+++ b/packages/firestore/src/remote/online_state_tracker.ts
@@ -25,10 +25,7 @@ const LOG_TAG = 'OnlineStateTracker';

 // To deal with transient failures, we allow multiple stream attempts before
 // giving up and transitioning from OnlineState.Unknown to Offline.
-// TODO(mikelehen): This used to be set to 2 as a mitigation for b/66228394.
-// @jdimond thinks that bug is sufficiently fixed so that we can set this back
-// to 1. If that works okay, we could potentially remove this logic entirely.
-const MAX_WATCH_STREAM_FAILURES = 1;
+const MAX_WATCH_STREAM_FAILURES = 2;

 // To deal with stream attempts that don't succeed or fail in a timely manner,
 // we have a timeout for OnlineState to reach Online or Offline.
diff --git a/packages/firestore/src/remote/remote_store.ts b/packages/firestore/src/remote/remote_store.ts
index 7167c762c9e..a16435e4932 100644
--- a/packages/firestore/src/remote/remote_store.ts
+++ b/packages/firestore/src/remote/remote_store.ts
@@ -243,16 +243,8 @@ export class RemoteStore implements TargetMetadataProvider {
     delete this.listenTargets[targetId];
     if (this.watchStream.isOpen()) {
       this.sendUnwatchRequest(targetId);
-    }
-
-    if (objUtils.isEmpty(this.listenTargets)) {
-      if (this.watchStream.isOpen()) {
+      if (objUtils.isEmpty(this.listenTargets)) {
         this.watchStream.markIdle();
-      } else {
-        // Revert to OnlineState.Unknown if the watch stream is not open and we
-        // have no listeners, since without any listens to send we cannot
-        // confirm if the stream is healthy and upgrade to OnlineState.Online.
-        this.onlineStateTracker.set(OnlineState.Unknown);
       }
     }
   }
@@ -338,6 +330,7 @@ export class RemoteStore implements TargetMetadataProvider {

     // If we still need the watch stream, retry the connection.
     if (this.shouldStartWatchStream()) {
       this.onlineStateTracker.handleWatchStreamFailure(error);
+      this.startWatchStream();
     } else {
       // No need to restart watch stream because there are no active targets.
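
Reviewer note: the `online_state_tracker.ts` change restores the two-strike behavior that the reverted commit had relaxed. The client stays in `OnlineState.Unknown` until `MAX_WATCH_STREAM_FAILURES` consecutive watch-stream failures are observed, and only then falls back to `Offline`, which is what lets listeners fire cached `fromCache: true` events. Below is a minimal sketch of that counting logic; the types, class name, and internals are illustrative guesses reconstructed from the call sites in this diff (`handleWatchStreamFailure`, `set`), not the SDK's actual implementation.

```ts
// Simplified sketch of the two-strike behavior this revert restores.
// Only the constant and the two method names (handleWatchStreamFailure, set)
// appear in the diff above; everything else is illustrative.
const MAX_WATCH_STREAM_FAILURES = 2;

enum OnlineState {
  Unknown,
  Online,
  Offline
}

class OnlineStateTrackerSketch {
  private watchStreamFailures = 0;
  private state = OnlineState.Unknown;

  // Called from RemoteStore.onWatchStreamClose each time the watch stream
  // closes with an error.
  handleWatchStreamFailure(_error: Error): void {
    if (this.state === OnlineState.Online) {
      // A previously healthy stream drops back to Unknown first; the failure
      // count restarts rather than jumping straight to Offline.
      this.set(OnlineState.Unknown);
      this.watchStreamFailures = 0;
      return;
    }
    this.watchStreamFailures++;
    if (this.watchStreamFailures >= MAX_WATCH_STREAM_FAILURES) {
      // Only after two consecutive failures do we report Offline, letting
      // queries resolve from cache with fromCache: true.
      this.set(OnlineState.Offline);
    }
  }

  set(newState: OnlineState): void {
    if (newState !== this.state) {
      this.state = newState;
      // The real tracker broadcasts this change to the sync engine so that
      // listeners can raise or clear fromCache events.
    }
  }
}
```

Under this model, the `this.startWatchStream()` call added to `onWatchStreamClose` appears to be what guarantees a retry actually happens after each counted failure, so the tracker can accumulate its two strikes instead of the stream staying closed indefinitely.
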
diff --git a/packages/firestore/test/unit/specs/offline_spec.test.ts b/packages/firestore/test/unit/specs/offline_spec.test.ts
index 34112db8b30..5c6be0d99d2 100644
--- a/packages/firestore/test/unit/specs/offline_spec.test.ts
+++ b/packages/firestore/test/unit/specs/offline_spec.test.ts
@@ -23,60 +23,56 @@ import { spec } from './spec_builder';
 import { TimerId } from '../../../src/util/async_queue';

 describeSpec('Offline:', [], () => {
-  specTest(
-    'Empty queries are resolved if client goes offline',
-    ['no-android', 'no-ios'],
-    () => {
-      const query = Query.atPath(path('collection'));
-      return (
-        spec()
-          .userListens(query)
-          .watchStreamCloses(Code.UNAVAILABLE)
-          .expectEvents(query, {
-            fromCache: true,
-            hasPendingWrites: false
-          })
-          // no further events
-          .watchStreamCloses(Code.UNAVAILABLE)
-          .watchStreamCloses(Code.UNAVAILABLE)
-      );
-    }
-  );
+  specTest('Empty queries are resolved if client goes offline', [], () => {
+    const query = Query.atPath(path('collection'));
+    return (
+      spec()
+        .userListens(query)
+        // second error triggers event
+        .watchStreamCloses(Code.UNAVAILABLE)
+        .watchStreamCloses(Code.UNAVAILABLE)
+        .expectEvents(query, {
+          fromCache: true,
+          hasPendingWrites: false
+        })
+        // no further events
+        .watchStreamCloses(Code.UNAVAILABLE)
+        .watchStreamCloses(Code.UNAVAILABLE)
+    );
+  });

-  specTest(
-    'A successful message delays offline status',
-    ['no-android', 'no-ios'],
-    () => {
-      const query = Query.atPath(path('collection'));
-      return (
-        spec()
-          .userListens(query)
-          .watchAcks(query)
-          // first error triggers unknown state
-          .watchStreamCloses(Code.UNAVAILABLE)
-          // second error triggers offline state
-          .watchStreamCloses(Code.UNAVAILABLE)
-          .expectEvents(query, {
-            fromCache: true,
-            hasPendingWrites: false
-          })
-          // no further events
-          .watchStreamCloses(Code.UNAVAILABLE)
-          .watchStreamCloses(Code.UNAVAILABLE)
-      );
-    }
-  );
+  specTest('A successful message delays offline status', [], () => {
+    const query = Query.atPath(path('collection'));
+    return (
+      spec()
+        .userListens(query)
+        .watchAcks(query)
+        // first error triggers unknown state
+        .watchStreamCloses(Code.UNAVAILABLE)
+        // getting two more errors triggers offline state
+        .watchStreamCloses(Code.UNAVAILABLE)
+        .watchStreamCloses(Code.UNAVAILABLE)
+        .expectEvents(query, {
+          fromCache: true,
+          hasPendingWrites: false
+        })
+        // no further events
+        .watchStreamCloses(Code.UNAVAILABLE)
+        .watchStreamCloses(Code.UNAVAILABLE)
+    );
+  });

   specTest(
     'Removing all listeners delays "Offline" status on next listen',
-    ['eager-gc', 'no-android', 'no-ios'],
+    ['eager-gc'],
     'Marked as no-lru because when a listen is re-added, it gets a new target id rather than reusing one',
     () => {
       const query = Query.atPath(path('collection'));
       return (
         spec()
           .userListens(query)
-          // error triggers offline state
+          // getting two errors triggers offline state
+          .watchStreamCloses(Code.UNAVAILABLE)
           .watchStreamCloses(Code.UNAVAILABLE)
           .expectEvents(query, {
             fromCache: true,
@@ -87,10 +83,11 @@ describeSpec('Offline:', [], () => {
           // If the next (already scheduled) connection attempt fails, we'll move
           // to unknown since there are no listeners, and stop trying to connect.
           .watchStreamCloses(Code.UNAVAILABLE)
-          // Suppose sometime later we listen again, it should take one failure
+          // Suppose sometime later we listen again, it should take two failures
           // before we get cached data.
           .userListens(query)
           .watchStreamCloses(Code.UNAVAILABLE)
+          .watchStreamCloses(Code.UNAVAILABLE)
           .expectEvents(query, {
             fromCache: true,
             hasPendingWrites: false
@@ -99,62 +96,56 @@ describeSpec('Offline:', [], () => {
     }
   );

-  specTest(
-    'Queries revert to fromCache=true when offline.',
-    ['no-android', 'no-ios'],
-    () => {
-      const query = Query.atPath(path('collection'));
-      const docA = doc('collection/a', 1000, { key: 'a' });
-      return (
-        spec()
-          .userListens(query)
-          .watchAcksFull(query, 1000, docA)
-          .expectEvents(query, { added: [docA] })
-          // first error triggers unknown state
-          .watchStreamCloses(Code.UNAVAILABLE)
-          .restoreListen(query, 'resume-token-1000')
-          // second error triggers offline state and fromCache: true
-          .watchStreamCloses(Code.UNAVAILABLE)
-          .expectEvents(query, { fromCache: true })
-          // Going online and getting a CURRENT message triggers fromCache: false
-          .watchAcksFull(query, 1000)
-          .expectEvents(query, { fromCache: false })
-      );
-    }
-  );
+  specTest('Queries revert to fromCache=true when offline.', [], () => {
+    const query = Query.atPath(path('collection'));
+    const docA = doc('collection/a', 1000, { key: 'a' });
+    return (
+      spec()
+        .userListens(query)
+        .watchAcksFull(query, 1000, docA)
+        .expectEvents(query, { added: [docA] })
+        // first error triggers unknown state
+        .watchStreamCloses(Code.UNAVAILABLE)
+        .restoreListen(query, 'resume-token-1000')
+        // getting two more errors triggers offline state and fromCache: true
+        .watchStreamCloses(Code.UNAVAILABLE)
+        .watchStreamCloses(Code.UNAVAILABLE)
+        .expectEvents(query, { fromCache: true })
+        // Going online and getting a CURRENT message triggers fromCache: false
+        .watchAcksFull(query, 1000)
+        .expectEvents(query, { fromCache: false })
+    );
+  });

-  specTest(
-    'Queries with limbo documents handle going offline.',
-    ['no-android', 'no-ios'],
-    () => {
-      const query = Query.atPath(path('collection'));
-      const docA = doc('collection/a', 1000, { key: 'a' });
-      const limboQuery = Query.atPath(docA.key.path);
-      return (
-        spec()
-          .userListens(query)
-          .watchAcksFull(query, 1000, docA)
-          .expectEvents(query, { added: [docA] })
-          .watchResets(query)
-          // No more documents
-          .watchCurrents(query, 'resume-token-1001')
-          .watchSnapshots(1001)
-          // docA will now be in limbo (triggering fromCache=true)
-          .expectLimboDocs(docA.key)
-          .expectEvents(query, { fromCache: true })
-          // first error triggers unknown state
-          .watchStreamCloses(Code.UNAVAILABLE)
-          .restoreListen(query, 'resume-token-1001')
-          // second error triggers offline state.
-          .watchStreamCloses(Code.UNAVAILABLE)
-          .watchAcksFull(query, 1001)
-          .watchAcksFull(limboQuery, 1001)
-          // Limbo document is resolved. No longer from cache.
-          .expectEvents(query, { removed: [docA], fromCache: false })
-          .expectLimboDocs()
-      );
-    }
-  );
+  specTest('Queries with limbo documents handle going offline.', [], () => {
+    const query = Query.atPath(path('collection'));
+    const docA = doc('collection/a', 1000, { key: 'a' });
+    const limboQuery = Query.atPath(docA.key.path);
+    return (
+      spec()
+        .userListens(query)
+        .watchAcksFull(query, 1000, docA)
+        .expectEvents(query, { added: [docA] })
+        .watchResets(query)
+        // No more documents
+        .watchCurrents(query, 'resume-token-1001')
+        .watchSnapshots(1001)
+        // docA will now be in limbo (triggering fromCache=true)
+        .expectLimboDocs(docA.key)
+        .expectEvents(query, { fromCache: true })
+        // first error triggers unknown state
+        .watchStreamCloses(Code.UNAVAILABLE)
+        .restoreListen(query, 'resume-token-1001')
+        // getting two more errors triggers offline state.
+        .watchStreamCloses(Code.UNAVAILABLE)
+        .watchStreamCloses(Code.UNAVAILABLE)
+        .watchAcksFull(query, 1001)
+        .watchAcksFull(limboQuery, 1001)
+        // Limbo document is resolved. No longer from cache.
+        .expectEvents(query, { removed: [docA], fromCache: false })
+        .expectLimboDocs()
+    );
+  });

   specTest('OnlineState timeout triggers offline behavior', [], () => {
     const query = Query.atPath(path('collection'));
@@ -193,16 +184,17 @@ describeSpec('Offline:', [], () => {
   specTest(
     'New queries return immediately with fromCache=true when offline due to ' +
       'stream failures.',
-    ['no-android', 'no-ios'],
+    [],
     () => {
       const query1 = Query.atPath(path('collection'));
       const query2 = Query.atPath(path('collection2'));
       return (
         spec()
           .userListens(query1)
-          // After failure, we mark the client offline and trigger an empty
+          // 2 Failures should mark the client offline and trigger an empty
           // fromCache event.
           .watchStreamCloses(Code.UNAVAILABLE)
+          .watchStreamCloses(Code.UNAVAILABLE)
           .expectEvents(query1, { fromCache: true })

           // A new query should immediately return from cache.
diff --git a/packages/firestore/test/unit/specs/remote_store_spec.test.ts b/packages/firestore/test/unit/specs/remote_store_spec.test.ts
index 71f8d884976..ef758d4d171 100644
--- a/packages/firestore/test/unit/specs/remote_store_spec.test.ts
+++ b/packages/firestore/test/unit/specs/remote_store_spec.test.ts
@@ -67,7 +67,7 @@ describeSpec('Remote store:', [], () => {
       .expectEvents(query, { added: [doc4] }); // This should work now.
   });

-  specTest('Cleans up watch state correctly', ['no-android', 'no-ios'], () => {
+  specTest('Cleans up watch state correctly', [], () => {
     const query = Query.atPath(path('collection'));
     const doc1 = doc('collection/a', 1000, { key: 'a' });
     return (
@@ -77,7 +77,6 @@ describeSpec('Remote store:', [], () => {
         // Close before we get an ack, this should reset our pending
         // target counts.
         .watchStreamCloses(Code.UNAVAILABLE)
-        .expectEvents(query, { fromCache: true })
         // This should work now.
         .watchAcksFull(query, 1001, doc1)
         .expectEvents(query, { added: [doc1] })
@@ -98,7 +97,6 @@ describeSpec('Remote store:', [], () => {
         // close the stream (this should trigger retry with backoff; but don't
         // run it in an attempt to reproduce b/74749605).
         .watchStreamCloses(Code.UNAVAILABLE, { runBackoffTimer: false })
-        .expectEvents(query, { fromCache: true })
        // Because we didn't let the backoff timer run and restart the watch
        // stream, there will be no active targets.
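
Every spec update above follows the same mechanical rule: wherever a single `watchStreamCloses(Code.UNAVAILABLE)` previously pushed the client from `Unknown` to `Offline` (and thereby flushed a cached `fromCache: true` event), the restored `MAX_WATCH_STREAM_FAILURES = 2` now requires a second close first. The distilled pattern is sketched below, reusing the spec-builder helpers the tests above already import (`spec`, `userListens`, `watchStreamCloses`, `expectEvents`); the test name and query path are illustrative only, not part of this diff.

```ts
// Illustrative only: the minimal shape shared by the updated specs above.
specTest('Two failures are required before cached events fire', [], () => {
  const query = Query.atPath(path('collection'));
  return (
    spec()
      .userListens(query)
      // First UNAVAILABLE close: OnlineState stays Unknown, so no event yet.
      .watchStreamCloses(Code.UNAVAILABLE)
      // Second close reaches MAX_WATCH_STREAM_FAILURES (2): the client is
      // marked Offline and the listener receives a cached, empty snapshot.
      .watchStreamCloses(Code.UNAVAILABLE)
      .expectEvents(query, { fromCache: true, hasPendingWrites: false })
  );
});
```

The dropped `['no-android', 'no-ios']` tags follow from the same rule: with the constant back at 2, the web specs once again match the Android and iOS ports, so they no longer need to be excluded there.
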