From bba8fe0d5fc32845041e70372c3c14c09ccfca2c Mon Sep 17 00:00:00 2001 From: Ahmed Hamouda Date: Wed, 1 Apr 2026 08:46:40 -0400 Subject: [PATCH 1/2] feat(docs): add DataStore migration guide with Apollo Client patterns - Add comprehensive migration guide index page for DataStore to Apollo Client - Add choose-strategy guide for migration approach selection - Add set-up-apollo guide for Apollo Client configuration - Add migrate-crud-operations guide for query and mutation patterns - Add migrate-relationships guide for handling data relationships - Add add-local-caching guide for persistent cache and optimistic updates - Add build-offline-support guide for offline-first functionality - Add advanced-patterns guide for complex migration scenarios - Update directory structure to include new migrate-from-datastore section with nested pages - Provides step-by-step guidance for developers transitioning from DataStore to Apollo Client --- src/directory/directory.mjs | 26 + .../add-local-caching/index.mdx | 531 ++++++++++++ .../advanced-patterns/index.mdx | 563 +++++++++++++ .../build-offline-support/index.mdx | 786 ++++++++++++++++++ .../choose-strategy/index.mdx | 190 +++++ .../migrate-from-datastore/index.mdx | 202 +++++ .../migrate-crud-operations/index.mdx | 734 ++++++++++++++++ .../migrate-relationships/index.mdx | 507 +++++++++++ .../set-up-apollo/index.mdx | 767 +++++++++++++++++ 9 files changed, 4306 insertions(+) create mode 100644 src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/add-local-caching/index.mdx create mode 100644 src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/advanced-patterns/index.mdx create mode 100644 src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/build-offline-support/index.mdx create mode 100644 src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/choose-strategy/index.mdx create mode 
100644 src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/index.mdx create mode 100644 src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/migrate-crud-operations/index.mdx create mode 100644 src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/migrate-relationships/index.mdx create mode 100644 src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/set-up-apollo/index.mdx diff --git a/src/directory/directory.mjs b/src/directory/directory.mjs index ffe67e4bb6d..8bfcb586a52 100644 --- a/src/directory/directory.mjs +++ b/src/directory/directory.mjs @@ -1861,6 +1861,32 @@ export const directory = { }, { path: 'src/pages/gen1/[platform]/build-a-backend/more-features/datastore/app-uninstall/index.mdx' + }, + { + path: 'src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/index.mdx', + children: [ + { + path: 'src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/choose-strategy/index.mdx' + }, + { + path: 'src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/set-up-apollo/index.mdx' + }, + { + path: 'src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/migrate-crud-operations/index.mdx' + }, + { + path: 'src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/migrate-relationships/index.mdx' + }, + { + path: 'src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/add-local-caching/index.mdx' + }, + { + path: 'src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/build-offline-support/index.mdx' + }, + { + path: 'src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/advanced-patterns/index.mdx' + } + ] } ] }, diff --git 
a/src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/add-local-caching/index.mdx b/src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/add-local-caching/index.mdx new file mode 100644 index 00000000000..0c9461ba012 --- /dev/null +++ b/src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/add-local-caching/index.mdx @@ -0,0 +1,531 @@ +import { getCustomStaticPath } from '@/utils/getCustomStaticPath'; + +export const meta = { + title: 'Add local caching', + description: 'Enhance your Apollo Client migration with persistent cache, optimistic updates, and intelligent fetch policies.', + platforms: [ + 'angular', + 'javascript', + 'nextjs', + 'react', + 'react-native', + 'vue' + ] +}; + +export const getStaticPaths = async () => { + return getCustomStaticPath(meta.platforms); +}; + +export function getStaticProps(context) { + return { + props: { + platform: context.params.platform, + meta + } + }; +} + +The [Set up Apollo Client](/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/set-up-apollo/) page gave you a working Apollo Client with auth, error handling, retry logic, and `new InMemoryCache()`. That cache lives in memory only -- every time the user refreshes the page or reopens the app, every query starts from scratch with a network request and a loading spinner. + +This page adds persistent caching and optimistic updates on top of that foundation. You will configure Apollo's cache to survive page refreshes by persisting it to IndexedDB, gate your app startup on cache restoration, choose the right fetch policy for each query, implement instant UI updates for mutations, and manage cache size with eviction and purge on sign-out. 
+ +## Install persistence libraries + +```bash +npm install apollo3-cache-persist localforage +``` + +- **apollo3-cache-persist** (v0.15.0) -- persists Apollo's `InMemoryCache` to a storage backend +- **localforage** (v1.10.0) -- provides an IndexedDB storage backend with automatic fallback + +## Set up CachePersistor with IndexedDB + +### Configure localforage + +```ts +import localforage from 'localforage'; + +localforage.config({ + driver: localforage.INDEXEDDB, + name: 'myapp-apollo-cache', + storeName: 'apollo_cache', +}); +``` + +### Create the CachePersistor + +```ts +import { CachePersistor, LocalForageWrapper } from 'apollo3-cache-persist'; + +export const persistor = new CachePersistor({ + cache, + storage: new LocalForageWrapper(localforage), + maxSize: 1048576 * 2, // 2MB -- increase if your app caches large datasets + debug: process.env.NODE_ENV === 'development', + trigger: 'write', + key: 'apollo-cache-v1', // Bump when your GraphQL schema changes +}); +``` + + + +**Use `CachePersistor` instead of `persistCache`.** The convenience function `persistCache` does not return the persistor instance, which means you cannot call `purge()` (needed for sign-out), `pause()`/`resume()`, or `getSize()`. For any production app, `CachePersistor` is the right choice. + + + +### Configuration options + +| Option | Default | Purpose | +|--------|---------|---------| +| `cache` | (required) | The `InMemoryCache` instance to persist | +| `storage` | (required) | Storage wrapper -- use `LocalForageWrapper` for IndexedDB | +| `maxSize` | `1048576` (1MB) | Max persisted size in bytes. Set `false` to disable the limit | +| `trigger` | `'write'` | When to persist: `'write'` (on every cache write), `'background'` (on tab visibility change) | +| `debounce` | `1000` | Milliseconds to wait between persist writes | +| `key` | `'apollo-cache-persist'` | Storage key identifier. 
Version this to invalidate stale caches | +| `debug` | `false` | Log persistence activity to the console | + + + +Here is the complete enhanced `src/apolloClient.ts` that builds on the setup from the previous page. The link chain (retry, error, auth, HTTP) is unchanged -- only the cache configuration, persistor, and default fetch policy are new. + +```ts title="src/apolloClient.ts" +import { + ApolloClient, + InMemoryCache, + createHttpLink, + from, +} from '@apollo/client'; +import { setContext } from '@apollo/client/link/context'; +import { onError } from '@apollo/client/link/error'; +import { RetryLink } from '@apollo/client/link/retry'; +import { CachePersistor, LocalForageWrapper } from 'apollo3-cache-persist'; +import localforage from 'localforage'; +import { fetchAuthSession } from 'aws-amplify/auth'; +import config from '../amplifyconfiguration.json'; + +// --- Configure IndexedDB via localforage --- +localforage.config({ + driver: localforage.INDEXEDDB, + name: 'myapp-apollo-cache', + storeName: 'apollo_cache', +}); + +// --- InMemoryCache --- +const cache = new InMemoryCache({ + typePolicies: { + // See typePolicies section below for full configuration + }, +}); + +// --- Cache Persistor --- +export const persistor = new CachePersistor({ + cache, + storage: new LocalForageWrapper(localforage), + maxSize: 1048576 * 2, + debug: process.env.NODE_ENV === 'development', + trigger: 'write', + key: 'apollo-cache-v1', +}); + +// --- Links (unchanged from Set up Apollo Client page) --- +const httpLink = createHttpLink({ uri: config.aws_appsync_graphqlEndpoint }); + +const authLink = setContext(async (_, { headers }) => { + try { + const session = await fetchAuthSession(); + const token = session.tokens?.idToken?.toString(); + return { headers: { ...headers, authorization: token || '' } }; + } catch (error) { + console.error('Auth session error:', error); + return { headers }; + } +}); + +const errorLink = onError(({ graphQLErrors, networkError }) => { + if 
(graphQLErrors) { + for (const { message, locations, path } of graphQLErrors) { + console.error(`[GraphQL error]: ${message}, ${locations}, ${path}`); + } + } + if (networkError) { + console.error(`[Network error]: ${networkError}`); + } +}); + +const retryLink = new RetryLink({ + delay: { initial: 300, max: 5000, jitter: true }, + attempts: { max: 3, retryIf: (error) => !!error }, +}); + +// --- Apollo Client --- +export const apolloClient = new ApolloClient({ + link: from([retryLink, errorLink, authLink, httpLink]), + cache, + defaultOptions: { + watchQuery: { fetchPolicy: 'cache-and-network' }, + query: { fetchPolicy: 'cache-and-network' }, + }, +}); +``` + + + +## Cache restoration on app startup + + + +Queries that fire before `persistor.restore()` completes see an empty `InMemoryCache`. Not gating renders on cache restoration is the most common persistence mistake. The symptom is loading spinners on every app launch despite having cached data in IndexedDB. + + + +Call `await persistor.restore()` before rendering any component that uses Apollo queries: + + + +```tsx title="src/App.tsx" +import { useState, useEffect } from 'react'; +import { ApolloProvider } from '@apollo/client'; +import { apolloClient, persistor } from './apolloClient'; + +function App() { + const [cacheReady, setCacheReady] = useState(false); + + useEffect(() => { + persistor.restore().then(() => setCacheReady(true)); + }, []); + + if (!cacheReady) { + return
<div>Loading...</div>
; + } + + return ( + + {/* Your app components */} + + ); +} +``` + +
+ +Once `cacheReady` flips to `true`, every `useQuery` hook inside `ApolloProvider` will find the restored cache data and render immediately -- no network request needed for data that was cached in a previous session. + +## Fetch policy patterns + +Fetch policies control where Apollo reads data from -- cache, network, or both -- on a per-query basis. + +| Policy | Cache Read | Network Fetch | Best For | +|--------|-----------|---------------|----------| +| `cache-first` | Yes (if data exists) | Only on cache miss | Data that rarely changes | +| `cache-and-network` | Yes (immediate) | Always (updates cache after) | **Recommended default.** Shows cached data instantly, then updates from server. | +| `network-only` | No | Always | Force fresh data after a conflict error | +| `cache-only` | Yes | Never | True offline reads | +| `no-cache` | No | Always | One-off sensitive reads | +| `standby` | Yes | Only on manual `refetch()` | Inactive queries | + +### DataStore migration mapping + +| DataStore Pattern | Recommended fetchPolicy | Why | +|-------------------|------------------------|-----| +| `DataStore.query(Model)` (online) | `cache-and-network` | Returns cached data immediately, then updates from server | +| `DataStore.query(Model)` (offline) | `cache-only` | Reads from persistent cache with no network attempt | +| `DataStore.observeQuery()` | `cache-and-network` with `useQuery` | Shows cache first, updates on server response | +| After conflict error | `network-only` | Forces fresh data from server to resolve stale state | + +### Why cache-and-network is the recommended default + +DataStore always showed locally cached data immediately and then synced with the server in the background. `cache-and-network` is the closest Apollo equivalent: + +1. The query reads from cache first (instant render, no loading spinner) +2. Apollo fires a network request in the background +3. 
When the response arrives, the cache updates and the component re-renders with fresh data + +## Enhanced sign-out with cache purge + + + +**Order matters for sign-out: pause, clearStore, purge, signOut.** If you skip the purge step, the next user who signs in will see the previous user's cached data restored from disk. + + + +```ts title="src/auth.ts" +import { signOut } from 'aws-amplify/auth'; +import { apolloClient, persistor } from './apolloClient'; + +export async function handleSignOut() { + // 1. Pause persistence so clearStore doesn't trigger a write + persistor.pause(); + + // 2. Clear in-memory cache and cancel active queries + await apolloClient.clearStore(); + + // 3. Purge persisted cache from IndexedDB + await persistor.purge(); + + // 4. Sign out from Amplify (clears Cognito tokens) + await signOut(); +} +``` + +**Why this order matters:** + +1. **Pause first** -- `clearStore()` modifies the cache, which would trigger the persistor to write an empty cache to IndexedDB. Pausing prevents that unnecessary write. +2. **Clear in-memory cache** -- removes all cached data from memory and cancels active queries. +3. **Purge IndexedDB** -- deletes the persisted cache from disk so the next user starts fresh. +4. **Sign out last** -- clears Cognito tokens. If you sign out first, `clearStore()` may trigger refetches that fail because the auth token is already invalidated. + +## Optimistic updates + +The [Migrate CRUD operations](/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/migrate-crud-operations/) page showed how to create, update, and delete records using Apollo mutations with `refetchQueries`. That approach waits for the server response before the UI updates. Optimistic updates replace `refetchQueries` with instant UI updates that show changes before the server confirms. + +DataStore updated its local store synchronously on `save()`. 
Apollo's optimistic layer achieves the same instant-UI behavior, but you write it explicitly. + +### How optimistic updates work + +When you provide an `optimisticResponse` to a mutation, Apollo: + +1. Caches the optimistic object in a separate layer (does not overwrite canonical cache data) +2. Active queries re-render immediately with the optimistic data +3. When the server responds, the optimistic layer is discarded and the canonical cache updates +4. On error, the optimistic layer is discarded and the UI reverts automatically -- **zero rollback code needed** + +### Optimistic create + +```ts +const [createPost] = useMutation(CREATE_POST, { + optimisticResponse: ({ input }) => ({ + createPost: { + __typename: 'Post', + id: `temp-${Date.now()}`, + title: input.title, + content: input.content, + status: input.status, + rating: input.rating ?? null, + _version: 1, + _deleted: false, + _lastChangedAt: Date.now(), + createdAt: new Date().toISOString(), + updatedAt: new Date().toISOString(), + }, + }), + update(cache, { data }) { + if (!data?.createPost) return; + cache.updateQuery({ query: LIST_POSTS }, (existing) => { + if (!existing?.listPosts) return existing; + return { + listPosts: { + ...existing.listPosts, + items: [data.createPost, ...existing.listPosts.items], + }, + }; + }); + }, +}); +``` + +The `update` function is needed for creates because Apollo's normalized cache cannot know that a brand-new object should appear in an existing list query. 
+ +### Optimistic update + +```ts +const [updatePost] = useMutation(UPDATE_POST, { + optimisticResponse: { + updatePost: { + __typename: 'Post', + id: post.id, + title: 'Updated Title', + content: post.content, + status: post.status, + rating: 4, + _version: post._version + 1, + _deleted: false, + _lastChangedAt: Date.now(), + createdAt: post.createdAt, + updatedAt: new Date().toISOString(), + }, + }, + // No update function needed -- Apollo auto-merges by __typename + id +}); +``` + +### Optimistic delete + +```ts +const [deletePost] = useMutation(DELETE_POST, { + optimisticResponse: { + deletePost: { + __typename: 'Post', + id: post.id, + _version: post._version + 1, + _deleted: true, + _lastChangedAt: Date.now(), + }, + }, + update(cache, { data }) { + if (!data?.deletePost) return; + cache.evict({ id: cache.identify(data.deletePost) }); + cache.gc(); + }, +}); +``` + +### _version in optimistic responses + +| Operation | Optimistic `_version` | Why | +|-----------|----------------------|-----| +| Create | `1` | New records start at version 1 | +| Update | `post._version + 1` | Predicts the server's version increment | +| Delete | `post._version + 1` | The delete mutation increments the version | + +The optimistic `_version` does not need to be exact. The server response always replaces the optimistic data in the canonical cache. + +## typePolicies for pagination and soft-delete filtering + +### Pagination merge + +Without `typePolicies`, Apollo treats each `(limit, nextToken)` combination as a separate cache entry. A "Load More" button would replace page 1 with page 2 instead of appending. 
+ +```ts +import { InMemoryCache } from '@apollo/client'; + +const cache = new InMemoryCache({ + typePolicies: { + Query: { + fields: { + listPosts: { + keyArgs: ['filter'], + merge(existing, incoming) { + if (!existing) return incoming; + return { + ...incoming, + items: [...(existing.items || []), ...(incoming.items || [])], + }; + }, + read(existing, { readField }) { + if (!existing) return existing; + return { + ...existing, + items: existing.items.filter( + (ref) => !readField('_deleted', ref) + ), + }; + }, + }, + }, + }, + }, +}); +``` + +`keyArgs: ['filter']` tells Apollo that queries with the same filter share a cache entry (pages merge), while different filters are separate entries. + +### Why readField instead of direct property access + +In Apollo's normalized cache, list items are stored as **references** (for example, `{ __ref: "Post:123" }`), not as full objects. You cannot access `ref._deleted` directly. The `readField` helper resolves the reference and reads the field from the normalized cache entry. 
+ +```ts +// WRONG -- ref is a cache reference, not the actual object +items.filter((ref) => !ref._deleted) + +// CORRECT -- readField resolves the reference +items.filter((ref) => !readField('_deleted', ref)) +``` + + + +```ts +const cache = new InMemoryCache({ + typePolicies: { + Post: { keyFields: ['id'] }, + Comment: { keyFields: ['id'] }, + Query: { + fields: { + listPosts: { + keyArgs: ['filter'], + merge(existing, incoming) { + if (!existing) return incoming; + return { + ...incoming, + items: [...(existing.items || []), ...(incoming.items || [])], + }; + }, + read(existing, { readField }) { + if (!existing) return existing; + return { + ...existing, + items: existing.items.filter( + (ref) => !readField('_deleted', ref) + ), + }; + }, + }, + listComments: { + keyArgs: ['filter'], + merge(existing, incoming) { + if (!existing) return incoming; + return { + ...incoming, + items: [...(existing.items || []), ...(incoming.items || [])], + }; + }, + read(existing, { readField }) { + if (!existing) return existing; + return { + ...existing, + items: existing.items.filter( + (ref) => !readField('_deleted', ref) + ), + }; + }, + }, + }, + }, + }, +}); +``` + +The pattern is the same for every list query: `keyArgs` for filter separation, `merge` for pagination, `read` for soft-delete filtering. Add a field policy for each list query in your schema. + + + +## Cache size management + +### Monitor cache size + +```ts +async function logCacheSize() { + const sizeInBytes = await persistor.getSize(); + if (sizeInBytes !== null) { + console.log(`Cache size: ${(sizeInBytes / 1024).toFixed(1)} KB`); + } +} +``` + +### maxSize behavior + +When the serialized cache exceeds `maxSize`, the persistor stops writing to IndexedDB silently. The in-memory cache continues to work normally. Enable `debug: true` during development to see console warnings. 
+ +### Schema version strategy + +When your GraphQL schema changes, bump the `key` option on your `CachePersistor` (for example, from `'apollo-cache-v1'` to `'apollo-cache-v2'`). This starts with an empty cache -- one cold start in exchange for zero cache migration code. + + + +**Cache not restored before queries run:** +Every page load shows loading spinners briefly. Gate your app rendering on `persistor.restore()` completion. + +**Cache exceeds maxSize silently:** +Recent data is not persisted across refreshes. Increase `maxSize` to 2-5MB and enable `debug: true`. + +**Stale cache after schema changes:** +App crashes with TypeErrors reading cached data. Bump the version in the `key` option. + +**Duplicate items after create:** +Apollo calls the `update` function twice for optimistic mutations (once for optimistic, once for server response). Rely on Apollo's optimistic layer lifecycle, or add an existence check in the `update` function. + +**_deleted records still showing:** +Use `readField('_deleted', ref)` in the `read` function, not direct property access. 
+ + diff --git a/src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/advanced-patterns/index.mdx b/src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/advanced-patterns/index.mdx new file mode 100644 index 00000000000..7fab5636150 --- /dev/null +++ b/src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/advanced-patterns/index.mdx @@ -0,0 +1,563 @@ +import { getCustomStaticPath } from '@/utils/getCustomStaticPath'; + +export const meta = { + title: 'Advanced patterns', + description: 'Handle composite keys, set up GraphQL codegen, migrate React components, and understand what DataStore features have no direct equivalent.', + platforms: [ + 'angular', + 'javascript', + 'nextjs', + 'react', + 'react-native', + 'vue' + ] +}; + +export const getStaticPaths = async () => { + return getCustomStaticPath(meta.platforms); +}; + +export function getStaticProps(context) { + return { + props: { + platform: context.params.platform, + meta + } + }; +} + +This page covers four advanced topics: migrating React components from imperative DataStore calls to declarative Apollo hooks, composite and custom primary keys, GraphQL codegen for type-safe operations, and an honest accounting of DataStore features that have no direct Apollo Client equivalent. + + + +## Migrate React components + +This section shows the core paradigm shift: from imperative state management with DataStore to declarative Apollo hooks. 
+ +### Before: DataStore component + +```tsx +import { useState, useEffect } from 'react'; +import { DataStore } from 'aws-amplify/datastore'; +import { Post } from './models'; + +function PostList() { + const [posts, setPosts] = useState([]); + const [loading, setLoading] = useState(true); + + useEffect(() => { + setLoading(true); + DataStore.query(Post).then(results => { + setPosts(results); + setLoading(false); + }); + }, []); + + const handleDelete = async (post: Post) => { + await DataStore.delete(post); + setPosts(prev => prev.filter(p => p.id !== post.id)); + }; + + if (loading) return
<div>Loading...</div>
; + return ( +
+    <ul>
+      {posts.map(post => (
+        <li key={post.id}>
+          {post.title}
+          <button onClick={() => handleDelete(post)}>Delete</button>
+        </li>
+      ))}
+    </ul>
+ ); +} +``` + +### After: Apollo Client component + +```tsx +import { useQuery, useMutation } from '@apollo/client'; +import { LIST_POSTS, DELETE_POST } from './graphql/operations'; + +function PostList() { + const { data, loading, error } = useQuery(LIST_POSTS); + const [deletePost] = useMutation(DELETE_POST, { + refetchQueries: [{ query: LIST_POSTS }], + }); + + const handleDelete = async (post: any) => { + await deletePost({ + variables: { input: { id: post.id, _version: post._version } }, + }); + }; + + if (loading) return
<div>Loading...</div>
; + if (error) return
<div>Error: {error.message}</div>
; + + const posts = data?.listPosts?.items?.filter((p: any) => !p._deleted) || []; + return ( +
+    <ul>
+      {posts.map((post: any) => (
+        <li key={post.id}>
+          {post.title}
+          <button onClick={() => handleDelete(post)}>Delete</button>
+        </li>
+      ))}
+    </ul>
+ ); +} +``` + +### Key differences + +| Aspect | DataStore | Apollo Client | +|--------|-----------|---------------| +| Data fetching | `useState` + `useEffect` + `DataStore.query()` | `useQuery()` handles everything | +| Loading state | Manual `useState(true)` / `setLoading(false)` | Built-in `loading` from `useQuery` | +| Error handling | Not exposed | Built-in `error` from `useQuery` | +| Mutation response | Manual state update | `refetchQueries` triggers automatic re-fetch | +| Delete input | Pass the model instance | Must include `id` AND `_version` | +| Soft-deleted records | Filtered automatically | Must filter `_deleted` records manually | + +### Migrate DataStore.observe() + +DataStore's `observe()` returned a single Observable for all change events. The migration replaces this with three separate Amplify subscriptions: + +```tsx +import { useEffect } from 'react'; +import { useQuery } from '@apollo/client'; +import { generateClient } from 'aws-amplify/api'; +import { LIST_POSTS } from './graphql/operations'; + +const amplifyClient = generateClient(); + +function PostList() { + const { data, loading, error, refetch } = useQuery(LIST_POSTS); + + useEffect(() => { + const subscriptions = [ + amplifyClient.graphql({ + query: `subscription OnCreatePost { onCreatePost { id } }`, + }).subscribe({ next: () => refetch() }), + amplifyClient.graphql({ + query: `subscription OnUpdatePost { onUpdatePost { id } }`, + }).subscribe({ next: () => refetch() }), + amplifyClient.graphql({ + query: `subscription OnDeletePost { onDeletePost { id } }`, + }).subscribe({ next: () => refetch() }), + ]; + + return () => subscriptions.forEach(sub => sub.unsubscribe()); + }, [refetch]); + + if (loading) return
<div>Loading...</div>
; + if (error) return
<div>Error: {error.message}</div>
; + + const posts = data?.listPosts?.items?.filter((p: any) => !p._deleted) || []; + return ( +
+    <ul>
+      {posts.map((post: any) => (
+        <li key={post.id}>{post.title}</li>
+      ))}
+    </ul>
+ ); +} +``` + +### Migrate DataStore.observeQuery() + +`observeQuery()` combined an initial query with live updates. The Apollo equivalent is `useQuery` with `fetchPolicy: 'cache-and-network'` plus subscription-triggered refetch: + +```tsx +function PublishedPosts() { + const { data, loading, refetch } = useQuery(LIST_POSTS, { + variables: { filter: { status: { eq: 'PUBLISHED' } } }, + fetchPolicy: 'cache-and-network', + }); + + useEffect(() => { + const subscriptions = [ + amplifyClient.graphql({ + query: `subscription OnCreatePost { onCreatePost { id } }`, + }).subscribe({ next: () => refetch() }), + amplifyClient.graphql({ + query: `subscription OnUpdatePost { onUpdatePost { id } }`, + }).subscribe({ next: () => refetch() }), + amplifyClient.graphql({ + query: `subscription OnDeletePost { onDeletePost { id } }`, + }).subscribe({ next: () => refetch() }), + ]; + + return () => subscriptions.forEach(sub => sub.unsubscribe()); + }, [refetch]); + + const posts = data?.listPosts?.items + ?.filter((p: any) => !p._deleted) + ?.sort((a: any, b: any) => + new Date(b.createdAt).getTime() - new Date(a.createdAt).getTime() + ) || []; + + if (loading && !data) return
<div>Loading...</div>
; + + return ( +
+    <div>
+      {loading && <span>Refreshing...</span>}
+      <ul>
+        {posts.map((post: any) => (
+          <li key={post.id}>{post.title}</li>
+        ))}
+      </ul>
+    </div>
+ ); +} +``` + +### Owner-based auth subscriptions + + + +When a model has `@auth(rules: [{ allow: owner }])`, you **must** manually pass the `owner` variable to subscriptions. DataStore injected this automatically. Without it, subscriptions connect successfully but never fire events. + + + +```ts +import { fetchAuthSession } from 'aws-amplify/auth'; + +async function getCurrentOwner(): Promise { + const session = await fetchAuthSession(); + // Default Amplify owner field uses the 'sub' claim. + // Check your Gen 1 schema.graphql @auth rules to confirm. + return session.tokens?.idToken?.payload?.sub as string; +} +``` + +Pass the owner to each subscription: + +```ts +amplifyClient.graphql({ + query: `subscription OnCreatePost($owner: String!) { + onCreatePost(owner: $owner) { id } + }`, + variables: { owner }, +}).subscribe({ next: () => refetch() }); +``` + +### React component migration checklist + +**Queries:** +- Replace `useState` + `useEffect` + `DataStore.query()` with `useQuery()` +- Filter `_deleted` records from ALL list query results +- Add `error` state handling +- Use `fetchPolicy: 'cache-and-network'` where you need cached + fresh data + +**Mutations:** +- Replace `DataStore.save(new Model({...}))` with `useMutation(CREATE_MODEL)` +- Replace `DataStore.save(Model.copyOf(...))` with `useMutation(UPDATE_MODEL)` -- include `_version` +- Replace `DataStore.delete(instance)` with `useMutation(DELETE_MODEL)` -- include `_version` +- Add `refetchQueries` to mutations that affect list queries + +**Real-time:** +- Replace `DataStore.observe()` with three Amplify subscriptions +- Replace `DataStore.observeQuery()` with `useQuery` + subscription-triggered `refetch()` +- Add `owner` argument if the model uses owner-based auth +- Clean up ALL subscriptions in the `useEffect` return function + +
+ + + + + +The React-specific hooks (`useQuery`, `useMutation`) shown in other sections of this guide are not available in Angular, vanilla JavaScript, or Vue. Use the imperative Apollo Client APIs (`apolloClient.query()`, `apolloClient.mutate()`) instead. These are the same patterns shown in the "imperative" examples on the [Migrate CRUD operations](/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/migrate-crud-operations/) page. + + + + + +## Composite and custom primary keys + +Amplify supports three identifier modes for models. Each mode changes how you query, update, and delete records -- and each requires different Apollo Client configuration. + +### The three identifier modes + +| Identifier Mode | Gen 1 Schema | GraphQL Get Input | Create Input | +|---|---|---|---| +| Default auto-generated ID | No `@primaryKey` directive | `getModel(id: ID!)` | `id` auto-generated by AppSync | +| Custom single-field PK | `@primaryKey(sortKeyFields: [])` on a custom field | `getModel(id: ID!)` | `id` required in create input | +| Composite PK | `@primaryKey(sortKeyFields: ["field2"])` | `getModel(field1: ..., field2: ...)` | All PK fields required | + +### Default (auto ID) + +This is the default mode when you do not use `@primaryKey` on your model. AppSync auto-generates a UUID `id` field. No special migration is needed -- the standard CRUD patterns from the [Migrate CRUD operations](/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/migrate-crud-operations/) page apply directly. + +**Gen 1 schema:** + +```graphql +# amplify/backend/api//schema.graphql +type Post @model @auth(rules: [{ allow: owner }]) { + id: ID! + title: String! + content: String + status: String +} +``` + +### Custom single-field PK + +When your model defines a custom primary key field, the `id` is no longer auto-generated. You must provide it explicitly in create mutations. 
+ +**Gen 1 schema:** + +```graphql +# amplify/backend/api//schema.graphql +type Product @model @auth(rules: [{ allow: owner }]) { + id: ID! @primaryKey + sku: String! + name: String! + price: Float +} +``` + +Apollo Client: + +```ts +const { data } = await apolloClient.mutate({ + mutation: CREATE_PRODUCT, + variables: { + input: { + id: 'PROD-001', // REQUIRED -- you must provide this + sku: 'SKU-12345', + name: 'Widget', + price: 29.99, + }, + }, +}); +``` + +### Composite PK + +This mode requires the most migration work. When a model uses `@primaryKey` with `sortKeyFields`, ALL primary key fields become required arguments. + +**Gen 1 schema:** + +```graphql title="amplify/backend/api//schema.graphql" +type StoreBranch @model @auth(rules: [{ allow: owner }]) { + tenantId: ID! @primaryKey(sortKeyFields: ["branchName"]) + branchName: String! + address: String + phone: String +} +``` + +**Apollo Client queries and mutations:** + +```ts +// Query by composite key -- both fields as separate variables +const { data } = await apolloClient.query({ + query: GET_STORE_BRANCH, + variables: { tenantId: 'tenant-123', branchName: 'Downtown' }, +}); + +// Update -- ALL PK fields + _version required in input +await apolloClient.mutate({ + mutation: UPDATE_STORE_BRANCH, + variables: { + input: { + tenantId: 'tenant-123', + branchName: 'Downtown', + address: '456 New St', + _version: data.getStoreBranch._version, + }, + }, +}); +``` + +### Cache configuration for composite keys (typePolicies) + + + +This is the critical configuration step that is easy to miss. Apollo's `InMemoryCache` uses `__typename:id` as the default cache key. Models with composite keys will NOT cache or normalize correctly without explicit `keyFields` configuration. 
+ + + +```ts +import { InMemoryCache } from '@apollo/client'; + +const cache = new InMemoryCache({ + typePolicies: { + // Default models work automatically + Post: { keyFields: ['id'] }, + // Composite key models NEED explicit keyFields + StoreBranch: { keyFields: ['tenantId', 'branchName'] }, + // Custom single-field PK + Product: { keyFields: ['sku'] }, + }, +}); +``` + +**Warning signs that `keyFields` is missing:** queries return stale data after mutations, Apollo DevTools shows duplicate entries, `cache.readQuery` returns `null` for records you know exist. + +## GraphQL codegen for type-safe operations + +The CRUD examples in earlier pages use `(post: any)` casts. This section shows how to eliminate those. + +### Step 1: Generate GraphQL operations + +```bash +amplify codegen +``` + +This generates TypeScript files in `src/graphql/` containing your operations as string constants. + +### Step 2: Wrap with gql() and TypeScript types + +Create a typed operations file that wraps the generated strings: + + + +```ts title="src/graphql/typed-operations.ts" +import { gql, TypedDocumentNode } from '@apollo/client'; +import { getPost as getPostString, listPosts as listPostsString } from './queries'; +import { createPost as createPostString, updatePost as updatePostString, deletePost as deletePostString } from './mutations'; + +export interface Post { + id: string; + title: string; + content: string; + status: string; + rating: number; + createdAt: string; + updatedAt: string; + _version: number; + _deleted: boolean | null; + _lastChangedAt: number; +} + +export interface GetPostData { getPost: Post | null; } +export interface GetPostVars { id: string; } + +export interface ListPostsData { + listPosts: { items: Post[]; nextToken: string | null; }; +} +export interface ListPostsVars { + filter?: Record; + limit?: number; + nextToken?: string; +} + +export interface CreatePostData { createPost: Post; } +export interface CreatePostVars { + input: { title: string; content: 
string; status?: string; rating?: number; }; +} + +export interface UpdatePostData { updatePost: Post; } +export interface UpdatePostVars { + input: { id: string; _version: number; title?: string; content?: string; }; +} + +export interface DeletePostData { deletePost: Post; } +export interface DeletePostVars { + input: { id: string; _version: number; }; +} + +export const GET_POST: TypedDocumentNode = gql(getPostString); +export const LIST_POSTS: TypedDocumentNode = gql(listPostsString); +export const CREATE_POST: TypedDocumentNode = gql(createPostString); +export const UPDATE_POST: TypedDocumentNode = gql(updatePostString); +export const DELETE_POST: TypedDocumentNode = gql(deletePostString); +``` + + + +### Step 3: Use type-safe hooks + +With `TypedDocumentNode`, Apollo hooks automatically infer data and variable types: + + + +```tsx +import { useQuery, useMutation } from '@apollo/client'; +import { GET_POST, UPDATE_POST } from './graphql/typed-operations'; + +function PostDetail({ postId }: { postId: string }) { + // data is automatically typed as GetPostData + const { data, loading, error } = useQuery(GET_POST, { + variables: { id: postId }, + }); + + const [updatePost] = useMutation(UPDATE_POST); + + async function handleUpdate(title: string) { + const post = data?.getPost; + if (!post) return; + // variables.input is type-checked + await updatePost({ + variables: { input: { id: post.id, title, _version: post._version } }, + }); + } + + if (loading) return
<div>Loading...</div>;
+ if (error) return <div>Error: {error.message}</div>;
+ if (!data?.getPost) return <div>Post not found</div>;
+
+ const post = data.getPost; // Typed as Post -- no (post: any) cast
+ return (
+ <div>
+ <h2>{post.title}</h2>
+ <p>{post.content}</p>
+ <p>Rating: {post.rating}</p>
+ </div>
+ );
+}
+```
+
+ +## What is lost -- features with no direct equivalent + + + +DataStore provided a managed sync lifecycle with rich event hooks. Apollo Client is a query/cache layer, not a sync engine. This section documents every DataStore feature that has no direct Apollo equivalent, with honest workaround ratings. + + + +### Hub events + +DataStore dispatched 9 distinct events via Hub. Of the 9: + +| Category | Count | Details | +|---|---|---| +| Fully replaced | 0 | None have a direct Apollo equivalent | +| Partially replaced | 2 | `networkStatus` (use browser APIs), `subscriptionsEstablished` (monitor subscription callbacks) | +| No equivalent | 7 | `syncQueriesStarted`, `syncQueriesReady`, `modelSynced`, `outboxMutationEnqueued`, `outboxMutationProcessed`, `outboxStatus`, `storageSubscribed` | + +The 7 with no equivalent describe sync engine behavior, and Apollo Client does not have a sync engine. For the Offline-First strategy, the custom sync engine and mutation queue provide extension points for emitting similar events. + +### Selective sync (syncExpressions) + +DataStore's `syncExpressions` let you filter which records synced from server to local store. Apollo has no equivalent for API Only or Local Caching. For Offline-First, the custom sync engine could implement sync filters, but this requires custom code. + +### Lifecycle methods + +| Method | Apollo Equivalent | Rating | +|---|---|---| +| `DataStore.start()` | None (Apollo queries on demand) | None | +| `DataStore.stop()` | Unsubscribe manually; `apolloClient.stop()` cancels in-flight | None | +| `DataStore.clear()` | `apolloClient.clearStore()` + `persistor.purge()` + Dexie `db.delete()` | Partial | + +### Conflict handler configuration + +This IS covered in the migration guide. API Only/Local Caching handle conflicts server-side. Offline-First uses custom client-side conflict resolution. **Rating: Full** (different location, same capability). 
+ +### Summary + +| Category | Fully Replaced | Partially Replaced | No Equivalent | +|---|---|---|---| +| Hub lifecycle events (9 total) | 0 | 2 | 7 | +| Selective sync | 0 | 1 | 0 | +| Lifecycle methods (3 total) | 0 | 1 | 2 | +| Conflict handlers | 1 | 0 | 0 | +| **Totals** | **1** | **4** | **9** | + +### Practical guidance + +If your app depends heavily on Hub events for UI state (showing sync progress indicators, outbox status badges), plan additional custom implementation work. For most apps migrating to API Only or Local Caching, these features are not needed because there is no local sync to monitor. The loss is real but the impact is low. diff --git a/src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/build-offline-support/index.mdx b/src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/build-offline-support/index.mdx new file mode 100644 index 00000000000..58a1ea9ecf0 --- /dev/null +++ b/src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/build-offline-support/index.mdx @@ -0,0 +1,786 @@ +import { getCustomStaticPath } from '@/utils/getCustomStaticPath'; + +export const meta = { + title: 'Build offline support', + description: 'Build a full offline-first architecture with Dexie.js local database, mutation queue, sync engine, and conflict resolution.', + platforms: [ + 'angular', + 'javascript', + 'nextjs', + 'react', + 'react-native', + 'vue' + ] +}; + +export const getStaticPaths = async () => { + return getCustomStaticPath(meta.platforms); +}; + +export function getStaticProps(context) { + return { + props: { + platform: context.params.platform, + meta + } + }; +} + + + +**This page replaces the Local Caching strategy entirely.** The Offline-First architecture uses Dexie.js (IndexedDB) as the source of truth instead of Apollo's InMemoryCache. 
If you have already set up Local Caching with `apollo3-cache-persist`, you will remove that when adopting this strategy. Do not combine the two strategies.
+
+
+
+This page covers the complete offline-first architecture: understanding the component model, setting up Dexie.js as your local database, building a mutation queue with deduplication, implementing connectivity monitoring, building a sync engine for base and delta synchronization, and handling conflict resolution with `_version`-based optimistic locking.
+
+## When you need offline-first
+
+This guide's migration strategies cover three levels of connectivity tolerance; the first two were covered on earlier pages:
+
+1. **API Only** -- every operation goes directly to AppSync. If the network is down, operations fail.
+2. **Local Caching** -- adds persistence to Apollo's cache. Reads work offline, but writes still require a network connection.
+3. **Offline-First (this page)** -- full offline capability. Both reads and writes work without a network connection. Mutations are queued locally and replayed when connectivity returns.
+
+The key difference: in Local Caching, Apollo's `InMemoryCache` is the source of truth. On this page, **Dexie.js (IndexedDB) is the source of truth**. Apollo Client becomes a transport layer only.
+
+
+
+**Reference architecture.** The code on this page is a reference architecture -- patterns you adapt to your application, not a drop-in library. DataStore hid all of this complexity behind a simple API. Moving to Apollo means you build the offline layer yourself, but you gain full control over every aspect of sync, conflict resolution, and data storage.
+ + + +## Architecture overview + +The offline-first architecture has four layers: + +``` ++-------------------------------------------------------+ +| Application / React UI | ++-------------------------------------------------------+ +| OfflineDataManager (Facade) | +| save() query() delete() observe() | ++-------------------------------------------------------+ +| | +| +-------------------+ +------------------------+ | +| | Mutation Queue | | Sync Engine | | +| | (Dexie table) | | (base + delta sync) | | +| | FIFO, dedup by | | paginated download | | +| | modelId | | via syncPosts query | | +| +-------------------+ +------------------------+ | +| | +| +-------------------+ +------------------------+ | +| | Conflict Resolver | | Connectivity Monitor | | +| | (_version based | | navigator.onLine + | | +| | optimistic lock) | | WebSocket state | | +| +-------------------+ +------------------------+ | +| | ++-------------------------------------------------------+ +| Dexie.js (IndexedDB) | +| Data tables + _mutationQueue table + | +| _syncMetadata table | ++-------------------------------------------------------+ +| | +| +-------------------+ +------------------------+ | +| | Apollo Client | | Amplify | | +| | (queries, muts) | | (subscriptions) | | +| +-------------------+ +------------------------+ | +| | ++-------------------------------------------------------+ +| AWS AppSync | ++-------------------------------------------------------+ +``` + +### Component descriptions + +**OfflineDataManager** is the facade that your application calls. It exposes `save()`, `query()`, `delete()`, and `observe()` methods. When you call `save()`, it writes to the local Dexie.js database immediately (so the UI updates instantly) and enqueues a mutation for later sync. + +**Dexie.js (IndexedDB)** is the source of truth. It stores your user data tables, a `_mutationQueue` table for pending mutations, and a `_syncMetadata` table for tracking sync timestamps. 
+ +**Mutation Queue** is a FIFO queue of pending mutations stored in a Dexie.js table. The queue deduplicates by `modelId` -- if a record is updated three times while offline, only the latest state needs to be sent. + +**Sync Engine** downloads data from AppSync using two strategies: base sync (full download) and delta sync (incremental download of records changed since `lastSync`). + +**Conflict Resolver** handles `ConflictUnhandled` errors using `_version`-based optimistic locking. + +**Connectivity Monitor** watches `navigator.onLine` and WebSocket subscription state, with a 5-second stabilization delay on transitions (matching DataStore). + +### Data flow paths + +**Write path (instant local, async remote):** + +``` +App --> OfflineDataManager.save(record) + --> Dexie.js: put(record) [immediate, UI updates] + --> _mutationQueue: add(mutation) [enqueued for sync] + --> [when online] Apollo Client.mutate() --> AppSync +``` + +**Read path (always local, always fast):** + +``` +App --> OfflineDataManager.query(filter) + --> Dexie.js: where(filter).toArray() [local read, instant] +``` + +**Sync path (background, on reconnect):** + +``` +Connectivity restored + --> Sync Engine: Apollo Client.query(syncPosts) --> AppSync + --> Merge response into Dexie.js (skip records with pending mutations) + --> Drain _mutationQueue: process each entry via Apollo Client.mutate() + --> On conflict: Conflict Resolver decides retry vs discard +``` + +## Set up Dexie.js local database + +Install Dexie.js: + +```bash +npm install dexie +``` + +### TypeScript interfaces + +Define interfaces that match your GraphQL models, including the three metadata fields: + +```ts title="src/offline/types.ts" +interface Post { + id: string; + title: string; + content: string; + status?: string; + rating?: number; + owner?: string; + _version: number; + _deleted: boolean; + _lastChangedAt: number; + createdAt: string; + updatedAt: string; +} + +interface Comment { + id: string; + postId: string; + 
content: string; + owner?: string; + _version: number; + _deleted: boolean; + _lastChangedAt: number; + createdAt: string; + updatedAt: string; +} + +interface MutationQueueEntry { + id: string; + modelName: string; + modelId: string; + operation: 'CREATE' | 'UPDATE' | 'DELETE'; + data: string; // JSON-serialized record data + condition: string; + createdAt: number; + inProgress?: boolean; +} + +interface SyncMetadata { + id: string; + modelName: string; + lastSync: number | null; + lastFullSync: number | null; + fullSyncInterval: number; +} +``` + +### The OfflineDatabase class + +```ts title="src/offline/database.ts" +import Dexie, { type Table } from 'dexie'; + +class OfflineDatabase extends Dexie { + posts!: Table; + comments!: Table; + mutationQueue!: Table; + syncMetadata!: Table; + + constructor() { + super('MyAppOfflineDB'); + + this.version(1).stores({ + // Primary key first, then indexed fields only + posts: 'id, _deleted, updatedAt', + comments: 'id, postId, _deleted, updatedAt', + mutationQueue: 'id, modelName, modelId, createdAt', + syncMetadata: 'id, modelName', + }); + } +} + +export const db = new OfflineDatabase(); +``` + + + +The `stores()` definition lists only the **primary key** and **indexed fields** (fields you query with `.where()`). Dexie stores all fields on every record regardless of whether they appear in `stores()`. Only add a field to `stores()` if you need to filter or sort by it. + +Note that Dexie stores booleans as `0`/`1` in IndexedDB indexes, so use `.equals(0)` rather than `.equals(false)` in `.where()` queries for the `_deleted` field. + + + + + +**Compound model names:** The `OfflineDataManager` uses `modelName.toLowerCase() + 's'` to derive table names (e.g., `'Post'` becomes `'posts'`). This produces unexpected results for compound names like `'PostTag'` becoming `'posttags'`. 
If your schema has compound model names, define an explicit lookup map instead: + +```ts +const MODEL_TABLE_MAP: Record = { + Post: 'posts', Comment: 'comments', Tag: 'tags', PostTag: 'posttags', +}; +const tableName = MODEL_TABLE_MAP[modelName]; +``` + +Use this map everywhere you convert a model name to a table name -- in the mutation queue processor, sync engine, and offline data manager. + + + +## Build the mutation queue + +### Enqueue with deduplication + +The deduplication rules match DataStore's `MutationEventOutbox`: + +| Existing Entry | Incoming Entry | Result | +|---------------|---------------|--------| +| None | Any | Add to queue | +| CREATE | UPDATE | Merge data fields into the CREATE | +| CREATE | DELETE | Remove the CREATE entirely (net no-op) | +| UPDATE | UPDATE | Merge into single UPDATE with latest data | +| UPDATE | DELETE | Replace with DELETE | +| DELETE | Any | Replace entirely | + + + +```ts title="src/offline/mutationQueue.ts" +import { db } from './database'; + +export async function enqueueMutation( + entry: Omit +): Promise { + await db.transaction('rw', db.mutationQueue, async () => { + const existing = await db.mutationQueue + .where('modelId') + .equals(entry.modelId) + .and(item => !item.inProgress) + .first(); + + if (!existing) { + await db.mutationQueue.add({ + ...entry, + id: crypto.randomUUID(), + createdAt: Date.now(), + }); + return; + } + + if (existing.operation === 'CREATE') { + if (entry.operation === 'DELETE') { + await db.mutationQueue.delete(existing.id); + } else { + const mergedData = { + ...JSON.parse(existing.data), + ...JSON.parse(entry.data), + }; + await db.mutationQueue.update(existing.id, { + data: JSON.stringify(mergedData), + }); + } + return; + } + + if (existing.operation === 'UPDATE') { + if (entry.operation === 'DELETE') { + await db.mutationQueue.update(existing.id, { + operation: 'DELETE', + data: entry.data, + }); + } else { + const mergedData = { + ...JSON.parse(existing.data), + 
...JSON.parse(entry.data), + }; + await db.mutationQueue.update(existing.id, { + data: JSON.stringify(mergedData), + }); + } + return; + } + + await db.mutationQueue.delete(existing.id); + await db.mutationQueue.add({ + ...entry, + id: crypto.randomUUID(), + createdAt: Date.now(), + }); + }); +} +``` + + + +### Version propagation on dequeue + +After a mutation succeeds, the server returns the record with an updated `_version`. Remaining queue entries for the same record must use this new `_version` or they will always conflict. + + + +```ts +async function dequeueAndSyncVersions( + completedEntry: MutationQueueEntry, + serverResponse: Record, +): Promise { + await db.transaction('rw', db.mutationQueue, async () => { + await db.mutationQueue.delete(completedEntry.id); + + const remaining = await db.mutationQueue + .where('modelId') + .equals(completedEntry.modelId) + .toArray(); + + for (const entry of remaining) { + const data = JSON.parse(entry.data); + data._version = serverResponse._version; + data._lastChangedAt = serverResponse._lastChangedAt; + await db.mutationQueue.update(entry.id, { + data: JSON.stringify(data), + }); + } + }); +} +``` + + + +### Process the queue + +The processor drains the queue in FIFO order when the device is online, handling network errors (stop and retry on reconnect) and conflict errors (delegate to conflict handler) separately. + + + +**Import path note:** The queue processor imports `apolloClient` from your Apollo Client setup file. Adjust the import path to match your project (e.g., `'./apolloClient'`, `'./apollo-setup'`, or similar). 
+ + + + + +```ts +export async function processMutationQueue( + connectivity: ConnectivityMonitor, + conflictHandler: ConflictHandler, +): Promise { + if (!connectivity.online) return; + + while (connectivity.online) { + const head = await db.mutationQueue + .orderBy('createdAt') + .first(); + + if (!head) break; + + await db.mutationQueue.update(head.id, { inProgress: true }); + + try { + const mutation = getMutationForOperation(head.modelName, head.operation); + + const result = await apolloClient.mutate({ + mutation, + variables: { input: JSON.parse(head.data) }, + }); + + const serverRecord = result.data[Object.keys(result.data)[0]]; + await dequeueAndSyncVersions(head, serverRecord); + + if (head.operation !== 'DELETE') { + const tableName = head.modelName.toLowerCase() + 's'; + await db.table(tableName).put(serverRecord); + } + + } catch (error) { + const gqlErrors = error?.graphQLErrors ?? []; + const conflictError = gqlErrors.find( + (e) => e.errorType === 'ConflictUnhandled' + ); + + if (conflictError) { + await handleConflict(head, conflictError, conflictHandler); + continue; + } + + if (isNetworkError(error)) { + await db.mutationQueue.update(head.id, { inProgress: false }); + break; + } + + // Permanent error -- dequeue to prevent infinite loop + console.error(`[MutationQueue] Permanent error:`, error); + await db.mutationQueue.delete(head.id); + } + } +} +``` + + + +## Build the connectivity monitor + +The monitor uses two signals: `navigator.onLine` events and WebSocket subscription disconnection. It includes a 5-second stabilization delay on transitions to prevent flapping (matching DataStore). 
+ + + +```ts title="src/offline/connectivity.ts" +type ConnectionHandler = (online: boolean) => void; + +export class ConnectivityMonitor { + private listeners: Set = new Set(); + private _online: boolean = navigator.onLine; + private stabilizationTimer: ReturnType | null = null; + + constructor() { + window.addEventListener('online', () => this.handleTransition(true)); + window.addEventListener('offline', () => this.handleTransition(false)); + } + + get online(): boolean { + return this._online; + } + + subscribe(handler: ConnectionHandler): () => void { + this.listeners.add(handler); + handler(this._online); + return () => this.listeners.delete(handler); + } + + notifySocketDisconnect(): void { + this.setOnline(false); + if (this.stabilizationTimer) clearTimeout(this.stabilizationTimer); + this.stabilizationTimer = setTimeout(() => { + this.stabilizationTimer = null; + this.setOnline(navigator.onLine); + }, 5000); + } + + destroy(): void { + if (this.stabilizationTimer) clearTimeout(this.stabilizationTimer); + this.listeners.clear(); + } + + private handleTransition(online: boolean): void { + if (online && !this._online) { + if (this.stabilizationTimer) clearTimeout(this.stabilizationTimer); + this.stabilizationTimer = setTimeout(() => { + this.stabilizationTimer = null; + this.setOnline(navigator.onLine); + }, 5000); + return; + } + if (!online) { + if (this.stabilizationTimer) { + clearTimeout(this.stabilizationTimer); + this.stabilizationTimer = null; + } + this.setOnline(false); + } + } + + private setOnline(online: boolean): void { + if (this._online === online) return; + this._online = online; + this.listeners.forEach(handler => handler(online)); + } +} +``` + + + +Wire connectivity to queue processing: + +```ts +const connectivity = new ConnectivityMonitor(); + +connectivity.subscribe((online) => { + if (online) { + processMutationQueue(connectivity, defaultConflictHandler); + syncAllModels(connectivity); + } +}); +``` + +## Build the sync engine + + + 
+**TypeScript strict mode note:** The paginated download loop in `syncModel()` can cause circular reference errors under strict TypeScript settings because `nextToken` is both read from and written to within the do-while loop. If you see TS7022 errors, use explicit type annotations on the response variable. + + + +The sync engine downloads data from AppSync using two modes: + +- **Base sync** -- full download when `lastSync` is `null` or when the full sync interval has expired (default: 24 hours) +- **Delta sync** -- incremental download of only records changed since `lastSync` + + + +Use `syncPosts` queries (not `listPosts`) for synchronization. The `syncPosts` queries accept `$lastSync: AWSTimestamp` and return `startedAt` -- this is what makes delta sync possible. Using `listPosts` would force a full download every time. + + + +### Sync query definition + +```ts +export const SYNC_POSTS = gql` + query SyncPosts( + $limit: Int + $nextToken: String + $lastSync: AWSTimestamp + $filter: ModelPostFilterInput + ) { + syncPosts( + limit: $limit + nextToken: $nextToken + lastSync: $lastSync + filter: $filter + ) { + items { + id title content status rating owner + _version _deleted _lastChangedAt + createdAt updatedAt + } + nextToken + startedAt + } + } +`; +``` + +### Merge logic + +When merging sync results into the local database, skip records that have pending mutations in the queue to avoid overwriting local changes: + + + +```ts +async function mergeItemsIntoLocal( + tableName: string, + items: any[], +): Promise { + const pendingModelIds = new Set( + (await db.mutationQueue + .where('modelName') + .equals(tableName === 'posts' ? 
'Post' : 'Comment')
+ .toArray()
+ ).map(e => e.modelId)
+ );
+
+ const toMerge = items.filter(item => !pendingModelIds.has(item.id));
+
+ await db.table(tableName).bulkPut(toMerge);
+}
+```
+
+
+
+### Conflict resolution
+
+The default conflict handler uses "last writer wins" -- retry with the server's `_version`:
+
+```ts
+type ConflictHandler = (conflict: {
+  localModel: Record<string, unknown>;
+  remoteModel: Record<string, unknown>;
+  operation: 'CREATE' | 'UPDATE' | 'DELETE';
+  attempts: number;
+}) => Record<string, unknown> | 'DISCARD' | Promise<Record<string, unknown> | 'DISCARD'>;
+
+const defaultConflictHandler: ConflictHandler = ({ localModel, remoteModel }) => {
+  return { ...localModel, _version: remoteModel._version };
+};
+```
+
+Returning `'DISCARD'` accepts the server version and drops the local change.
+
+## Integrating with Apollo Client
+
+Apollo Client's role in the offline-first architecture is **transport only**. It sends sync queries and mutations to AppSync but is NOT the source of truth -- Dexie.js is. This creates a question: how does the React UI read data?
+
+| Scenario | Recommended Approach |
+|----------|---------------------|
+| Building new offline-first components from scratch | **Approach A** -- read from Dexie directly using `liveQuery()` (single source of truth, simpler) |
+| Migrating an existing app that already uses `useQuery` everywhere | **Approach B** -- write data to Apollo cache after syncing into Dexie, keeping existing `useQuery` components working |
+| Incremental migration | **Approach B** for existing components, **Approach A** for new ones |
+
+In practice, most migrations start with Approach B (keeping `useQuery` for the UI layer) and use the Dexie offline infrastructure only for the sync/queue layer. Approach A is architecturally cleaner but requires replacing every `useQuery` call with a custom `useDexieQuery` hook.
+ +## The OfflineDataManager facade + +The `OfflineDataManager` is the facade that your application calls for all data operations: + + + +```ts title="src/offline/manager.ts" +export class OfflineDataManager { + private connectivity: ConnectivityMonitor; + private conflictHandler: ConflictHandler; + + constructor(config: { + connectivity: ConnectivityMonitor; + conflictHandler: ConflictHandler; + }) { + this.connectivity = config.connectivity; + this.conflictHandler = config.conflictHandler; + } + + async save( + modelName: string, + record: T, + isNew: boolean = false, + ): Promise { + const tableName = modelName.toLowerCase() + 's'; + + // Step 1: Write to local database immediately + await db.table(tableName).put(record); + + // Step 2: Enqueue mutation for remote sync + await enqueueMutation({ + modelName, + modelId: record.id, + operation: isNew ? 'CREATE' : 'UPDATE', + data: JSON.stringify(record), + condition: '{}', + }); + + // Step 3: If online, trigger queue processing + if (this.connectivity.online) { + processMutationQueue(this.connectivity, this.conflictHandler) + .catch(err => console.error('Queue processing error:', err)); + } + + return record; + } + + async delete(modelName: string, recordId: string, version: number): Promise { + const tableName = modelName.toLowerCase() + 's'; + + await db.table(tableName).update(recordId, { _deleted: true }); + + await enqueueMutation({ + modelName, + modelId: recordId, + operation: 'DELETE', + data: JSON.stringify({ id: recordId, _version: version }), + condition: '{}', + }); + + if (this.connectivity.online) { + processMutationQueue(this.connectivity, this.conflictHandler) + .catch(err => console.error('Queue processing error:', err)); + } + } + + async query(modelName: string, filter?: (item: T) => boolean): Promise { + const tableName = modelName.toLowerCase() + 's'; + let collection = db.table(tableName).where('_deleted').equals(0); + const results = await collection.toArray(); + if (filter) return 
results.filter(filter) as T[]; + return results as T[]; + } +} +``` + + + +### Usage example + +```ts +const connectivity = new ConnectivityMonitor(); +const manager = new OfflineDataManager({ + connectivity, + conflictHandler: defaultConflictHandler, +}); + +// Creating a new post (works offline) +const newPost = { + id: crypto.randomUUID(), + title: 'My Post', + content: 'Written while offline', + _version: 1, + _deleted: false, + _lastChangedAt: 0, + createdAt: new Date().toISOString(), + updatedAt: new Date().toISOString(), +}; + +await manager.save('Post', newPost, true); + +// Querying (always local, always fast) +const posts = await manager.query('Post'); +``` + +## Sign-out: clear offline state + +When a user signs out, clear the mutation queue and all local data: + +```ts +async function clearOfflineState(): Promise { + await db.mutationQueue.clear(); + await db.syncMetadata.clear(); + await db.posts.clear(); + await db.comments.clear(); + // Add db.tableName.clear() for each model table +} + +async function handleSignOut(): Promise { + await clearOfflineState(); + await apolloClient.clearStore(); + await signOut(); +} +``` + + + +Call `clearOfflineState()` **before** `signOut()`. The sign-out invalidates auth tokens, and some Dexie.js operations may fail if they trigger async side effects that attempt network calls with an expired token. 
+ + + +## Comparing with Local Caching + +| Aspect | Local Caching | Offline-First | +|--------|--------------|---------------| +| **Source of truth** | Apollo InMemoryCache | Dexie.js (IndexedDB) | +| **Offline reads** | Yes (from persisted cache) | Yes (from Dexie.js) | +| **Offline writes** | No | Yes (queued and replayed) | +| **Sync mechanism** | `refetchQueries` / `cache-and-network` | Base sync + delta sync via `syncPosts` queries | +| **Conflict resolution** | Last write wins | `_version`-based with custom handler | +| **Persistence library** | `apollo3-cache-persist` | `dexie` | +| **Complexity** | Low | High | + + + +**Manual testing steps:** + +1. Open IndexedDB inspector in DevTools (Application tab, Storage, IndexedDB) +2. Go offline (Network tab, check "Offline") +3. Perform mutations in your app +4. Verify queue entries in the `mutationQueue` table +5. Check dedup: create then update should show ONE entry, not two +6. Go back online and watch entries drain +7. Verify GraphQL mutations in the Network tab + +**Common issues:** + +| Symptom | Likely Cause | Fix | +|---------|-------------|-----| +| Entries not appearing | Forgot `enqueueMutation()` | Call both `db.table.put()` and `enqueueMutation()` | +| Entries not draining | Monitor not wired | Verify `connectivity.subscribe()` triggers `processMutationQueue()` | +| Multiple entries for same record | Dedup not working | Ensure `db.transaction('rw', ...)` wrapping | +| Stuck `inProgress` entries | App crashed mid-send | Clear `inProgress` flags on app startup | +| `ConflictUnhandled` errors | `_version` not propagated | Verify `dequeueAndSyncVersions()` | + + diff --git a/src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/choose-strategy/index.mdx b/src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/choose-strategy/index.mdx new file mode 100644 index 00000000000..cc5f05c2229 --- /dev/null +++ 
b/src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/choose-strategy/index.mdx @@ -0,0 +1,190 @@ +import { getCustomStaticPath } from '@/utils/getCustomStaticPath'; + +export const meta = { + title: 'Choose a migration strategy', + description: 'Use the decision framework and feature parity matrix to choose between API Only, Local Caching, and Offline-First migration strategies.', + platforms: [ + 'angular', + 'javascript', + 'nextjs', + 'react', + 'react-native', + 'vue' + ] +}; + +export const getStaticPaths = async () => { + return getCustomStaticPath(meta.platforms); +}; + +export function getStaticProps(context) { + return { + props: { + platform: context.params.platform, + meta + } + }; +} + +## The default recommendation + + + +**Start with API Only unless you have a specific need for caching or offline.** + +API Only covers roughly 70% of DataStore's features with roughly 10% of the complexity. Most apps that used DataStore never actually needed offline support -- DataStore provided it automatically. Before committing to a more complex strategy, honestly assess whether your users depend on offline functionality. + + + +## Decision flowchart + +Use this decision tree to determine which strategy fits your app: + +``` +Does your app need to work offline? + | + +-- Yes --> Offline-First Strategy + | (build-offline-support page) + | + +-- No + | + Do you need data to persist across + page refreshes or instant optimistic UI? 
+ | + +-- Yes --> Local Caching Strategy + | (add-local-caching page) + | + +-- No --> API Only Strategy + (migrate-crud-operations page) +``` + +## API Only strategy + +### What it provides + +- Direct GraphQL queries and mutations through Apollo Client +- Apollo's in-memory normalized cache for the duration of the session +- React hooks (`useQuery`, `useMutation`) for declarative data fetching +- Real-time updates via Amplify subscriptions with refetch-based cache updates +- Full access to GraphQL filtering, pagination, and sorting + +### What you give up (compared to DataStore) + +- No data persistence across page refreshes or app restarts +- No offline mutation queuing -- operations fail when the network is unavailable +- No optimistic UI updates (the UI waits for server confirmation) +- No automatic background sync or delta queries + +### Complexity + +**Effort estimate: 1-2 hours for basic setup**, plus time to convert existing DataStore calls to Apollo queries and mutations. This is primarily a find-and-replace exercise with some GraphQL query writing. + +### Best for + +Apps where users are always online, where a brief loading spinner is acceptable during data operations, and where the simplicity of direct API calls outweighs the benefits of local persistence. This includes dashboards, admin panels, content management tools, and apps that primarily display server-side data. 
+ +## Local Caching strategy + +### What it provides + +- Everything in API Only, plus: +- Persistent cache that survives page refreshes (via `apollo3-cache-persist`) +- Optimistic UI updates that show changes instantly before server confirmation +- `watchQuery` for reactive list updates similar to DataStore's `observeQuery` +- Faster perceived performance from cache-first data fetching + +### What you give up (compared to DataStore) + +- No offline mutation queuing -- writes still require network connectivity +- No automatic background sync or conflict resolution +- No delta/base query synchronization +- Cache is eventually consistent, not guaranteed to match server state + +### Complexity + +**Effort estimate: 2-4 hours including cache persistence setup.** Beyond the API Only foundation, you add cache persistence configuration, optimistic response functions for mutations, and cache update logic. The conceptual overhead is moderate -- you need to understand Apollo's normalized cache. + +### Best for + +Apps that benefit from instant UI feedback and cached data between sessions, but do not need true offline write support. Social feeds, collaborative editing with online-only users, e-commerce product browsing, and apps where users expect snappy interactions. 
+ +## Offline-First strategy + +### What it provides + +- Everything in Local Caching, plus: +- Full offline read and write support via a local Dexie.js (IndexedDB) database +- Mutation queue that persists offline writes and replays them when connectivity returns +- Sync engine for delta and base query synchronization with the AppSync backend +- Conflict resolution using `_version` tracking (manual implementation) +- Network state detection and online/offline mode switching + +### What you give up (compared to DataStore) + +- Significantly more code to write and maintain +- Conflict resolution is manual rather than declarative +- Selective sync is less declarative than DataStore's sync expressions +- No built-in Hub events for sync lifecycle (custom event emitter needed) + +### Complexity + +**Effort estimate: 1-2 weeks for a full implementation.** This strategy requires building a local database layer, a mutation queue with retry logic, a sync engine, and conflict resolution handling. It is a significant engineering investment and should only be chosen if your app genuinely requires offline functionality. + +### Best for + +Field service apps, data collection tools used in areas with unreliable connectivity, apps where users must be able to create and edit records without network access, and any app where losing unsaved work due to a network interruption is unacceptable. + +## Offline might not be required + + + +DataStore gave every app offline support automatically, whether the app needed it or not. Before choosing the Offline-First strategy, honestly assess whether your users actually depend on offline functionality. + + + +Ask yourself these questions: + +- **Do your users actually use the app without connectivity?** If your app is primarily used on desktop browsers or in offices with reliable internet, offline support may be unnecessary overhead. 
+- **Do you have error reports or support tickets about offline scenarios?** If users have never complained about connectivity issues, they may not need offline support. +- **Would a loading spinner during brief network issues be acceptable?** Many apps can tolerate a few seconds of loading state during network hiccups without degrading the user experience. +- **Is the data time-sensitive?** If users need the absolute latest data (stock prices, live dashboards), offline cached data may be stale and misleading anyway. + +If you answered "no" to most of these questions, **start with API Only**. You can always adopt Local Caching or Offline-First later if the need arises. The migration strategies are additive -- each builds on the previous one. + +## Feature parity matrix + +### How to read this matrix + +This matrix compares three Apollo Client migration strategies against DataStore's feature set. Each cell indicates whether the strategy supports that DataStore capability: + +- **Yes** -- The strategy fully supports this feature +- **No** -- The strategy does not support this feature +- **Partial** -- The strategy supports this feature with caveats or limitations (see Notes column) + +The strategies are cumulative: Local Caching includes everything in API Only, and Offline-First includes everything in Local Caching. + +### Feature comparison + +| Feature | API Only | Local Caching | Offline-First | Notes | +|---------|----------|---------------|---------------|-------| +| Basic CRUD (save, query, delete) | Yes | Yes | Yes | Apollo `mutate()` and `query()` replace DataStore methods | +| Filtered queries (predicates) | Yes | Yes | Yes | GraphQL `filter` input syntax differs from DataStore predicates | +| Pagination and sorting | Yes | Yes | Yes | Cursor-based pagination via `nextToken`; sort via GraphQL arguments | +| Relationships (hasMany, belongsTo, etc.) 
| Yes | Yes | Yes | Nested selections in GraphQL queries; no automatic lazy loading | +| Real-time (observe) | Yes | Yes | Yes | Via Amplify library subscriptions, not Apollo subscriptions | +| Real-time list (observeQuery) | Partial | Yes | Yes | API Only uses subscription-triggered `refetch()`; Local Caching uses `watchQuery` for reactive cache updates | +| Optimistic UI updates | No | Yes | Yes | Requires Apollo cache manipulation via `optimisticResponse` | +| Persistent cache (survives refresh) | No | Yes | Yes | `apollo3-cache-persist` for Local Caching; Dexie.js for Offline-First | +| Offline mutations (queue and replay) | No | No | Yes | Custom mutation queue with IndexedDB persistence required | +| Automatic sync (delta/base queries) | No | No | Yes | Custom sync engine required; no built-in equivalent | +| Conflict resolution | No | No | Yes | Manual `_version` tracking; must handle `ConditionalCheckFailedException` | +| Selective sync (sync expressions) | No | No | Partial | Custom sync filters possible but not as declarative as DataStore expressions | +| Hub events (sync lifecycle) | No | No | Partial | Custom event emitter needed; no built-in lifecycle hooks | + +### Key takeaways + +- **API Only covers approximately 70% of DataStore features with approximately 10% of the complexity.** It handles all CRUD operations, filtering, pagination, relationships, and real-time updates. The main gaps are offline support and optimistic UI. + +- **Local Caching adds optimistic updates and persistence for moderate additional effort.** If your app benefits from instant UI feedback and cached data between sessions, this strategy provides meaningful improvements over API Only without the full complexity of offline support. + +- **Offline-First approaches full DataStore parity but requires significant custom code.** The sync engine, mutation queue, and conflict resolution logic are substantial engineering work. 
Choose this only if your app genuinely requires offline read and write capability. diff --git a/src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/index.mdx b/src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/index.mdx new file mode 100644 index 00000000000..e711c63bfdb --- /dev/null +++ b/src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/index.mdx @@ -0,0 +1,202 @@ +import { getCustomStaticPath } from '@/utils/getCustomStaticPath'; +import { getChildPageNodes } from '@/utils/getChildPageNodes'; + +export const meta = { + title: 'Migrate from DataStore', + description: 'Learn how to migrate from Amplify DataStore to Apollo Client for queries, mutations, and caching with Amplify subscriptions for real-time updates while keeping your Gen 1 backend.', + route: '/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore', + platforms: [ + 'angular', + 'javascript', + 'nextjs', + 'react', + 'react-native', + 'vue' + ] +}; + +export const getStaticPaths = async () => { + return getCustomStaticPath(meta.platforms); +}; + +export function getStaticProps(context) { + const childPageNodes = getChildPageNodes(meta.route); + return { + props: { + platform: context.params.platform, + meta, + childPageNodes + } + }; +} + +## Understanding DataStore + +AWS Amplify DataStore provided a local-first data layer that automatically synchronized data between your app and the cloud. When you used DataStore, you got several powerful capabilities without writing any synchronization logic yourself: a local database (IndexedDB in the browser) that persisted data across sessions, automatic bidirectional sync with your AppSync backend, built-in conflict resolution using version tracking, full offline support with mutation queuing and replay, and real-time updates through `observe()` and `observeQuery()`. 
+ +DataStore abstracted away the complexity of GraphQL operations, network state management, and data consistency. You worked with simple `save`, `query`, and `delete` methods on local models, and DataStore handled everything else behind the scenes. + +This guide shows you how to get equivalent capabilities using Apollo Client for queries, mutations, and caching, combined with the Amplify library's built-in subscription support for real-time updates. Depending on how much of DataStore's feature set your app actually uses, you may find the migration simpler than expected. + +## What this guide covers + +This guide presents three migration strategies, each suited to different application needs: + +- **API Only** (simplest): Direct GraphQL queries and mutations via Apollo Client. No local persistence beyond Apollo's in-memory cache. Recommended starting point for most apps. + +- **Local Caching** (moderate): Apollo Client with a persistent cache (via `apollo3-cache-persist`) and optimistic updates. Cached data survives page refreshes without requiring a full sync engine. If you choose Offline-First instead, skip this -- Dexie.js replaces `apollo3-cache-persist`. + +- **Offline-First** (complex): A full offline architecture using Dexie.js as a local IndexedDB database, a custom mutation queue for offline writes, a sync engine for delta/base synchronization, and manual conflict resolution using `_version` tracking. + +Each strategy builds on the same Apollo Client foundation. Local Caching and Offline-First are mutually exclusive -- choose one or the other. 
+ +## Quick comparison: before and after + +Here is a quick look at how common DataStore operations translate to Apollo Client: + +| DataStore Operation | Apollo Client Equivalent | +|---------------------|--------------------------| +| `DataStore.save(new Post({...}))` | `apolloClient.mutate({ mutation: CREATE_POST, variables: { input: {...} } })` | +| `DataStore.query(Post)` | `apolloClient.query({ query: LIST_POSTS })` | +| `DataStore.query(Post, id)` | `apolloClient.query({ query: GET_POST, variables: { id } })` | +| `DataStore.delete(post)` | `apolloClient.mutate({ mutation: DELETE_POST, variables: { input: { id, _version } } })` | +| `DataStore.observe(Post)` | `amplifyClient.graphql({ query: onCreatePost }).subscribe(...)` | +| `DataStore.observeQuery(Post)` | `useQuery(LIST_POSTS)` with subscription-triggered `refetch()` | + + + +Subscriptions use the Amplify library's `client.graphql()` rather than Apollo, because AppSync uses a custom WebSocket protocol that Amplify handles natively. Apollo Client handles all queries, mutations, and caching. + + + +## Who should use this guide + +This guide is for developers who have an existing Amplify Gen 1 application that uses DataStore and want to replace DataStore with Apollo Client. You do not need to migrate your backend to Gen 2 -- this guide assumes you keep your Gen 1 backend and only change the frontend data layer. + + + +**Gen 1 field name casing:** Gen 1 backends use uppercase ID suffixes in foreign keys (e.g., `postID`, `tagID`), while Gen 2 uses lowercase (`postId`, `tagId`). Code examples in this guide use the Gen 2 convention. If you are keeping your Gen 1 backend, adjust all field names in your GraphQL operations, sync queries, and filters to match your schema. Mismatched casing returns `null` silently. Verify field names in your `schema.graphql` or the AppSync console. 
+ + + +It assumes you are familiar with: + +- React and React hooks +- Basic GraphQL concepts (queries, mutations, subscriptions) +- Amplify configuration and the `amplifyconfiguration.json` (or `aws-exports.js`) file +- Your app's data model and how it uses DataStore today + +You do not need prior experience with Apollo Client. The guide covers Apollo Client setup from scratch. + +## How to use this guide + +1. **Choose your strategy.** Start with the [Choose a migration strategy](/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/choose-strategy/) page to determine which migration strategy fits your app. Most apps should start with API Only. + +2. **Complete the prerequisites.** Follow the [Set up Apollo Client](/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/set-up-apollo/) page to install dependencies, define your GraphQL operations, and set up TypeScript helpers. + +3. **Set up Apollo Client.** The same page covers configuring Apollo Client with your AppSync endpoint and Cognito authentication. + +4. 
**Follow your strategy guide.** The sections you need depend on which strategy you chose: + + | Strategy | Pages | + |----------|-------| + | **API Only** | [Set up Apollo Client](/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/set-up-apollo/), [Migrate CRUD operations](/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/migrate-crud-operations/), [Migrate relationships](/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/migrate-relationships/) | + | **Local Caching** | API Only pages + [Add local caching](/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/add-local-caching/) | + | **Offline-First** | API Only pages + [Build offline support](/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/build-offline-support/) (skip Add local caching) | + + + +**Model coverage:** Code examples use Post and Comment models. For additional models (including join tables like PostTag), extend the same patterns -- see the "Adding a New Model" callouts in the Offline-First sections. + + + +## Migration checklists + +Use these checklists to plan and track your migration from DataStore to Apollo Client. Check items off as you complete them. 
+ +### Pre-migration checklist + +Complete these steps before writing any migration code: + +- Choose your strategy using the decision framework and confirm your approach (API Only, Local Caching, or Offline-First) +- Review the parity matrix for your chosen strategy and note any feature gaps you need to address +- Verify your backend is deployed and `amplifyconfiguration.json` (or `aws-exports.js`) is generated (`amplify push`) +- Confirm Amplify is configured with `Amplify.configure(config)` running at app startup before any API calls +- Inventory all DataStore usage in your codebase: `DataStore.save()`, `DataStore.query()`, `DataStore.delete()`, `DataStore.observe()`, `DataStore.observeQuery()` +- Identify all models and relationships (`hasMany`, `belongsTo`, `hasOne`, `manyToMany`), noting which models have custom or composite primary keys +- Write GraphQL operations for each model, including `_version`, `_deleted`, and `_lastChangedAt` in all fragments +- Install Apollo Client: `npm install @apollo/client@^3.14.0 graphql` +- Set up Apollo Client and verify the connection works by running a simple list query against your AppSync endpoint +- Set up the Amplify subscription client using `generateClient()` + +### During migration checklist + +Follow these steps while migrating each feature. Work through one model at a time to keep changes manageable and testable. 
+ +**For each DataStore model:** + +- Define a GraphQL fragment including all business fields plus `_version`, `_deleted`, and `_lastChangedAt` +- Define all GraphQL operations (list, get, create, update, delete) using the fragment +- Migrate list queries, filtering out soft-deleted records (`_deleted: true`) in the results +- Migrate single-item queries +- Migrate creates (no `_version` needed for creates) +- Migrate updates (include `_version` from the latest query result in the mutation input) +- Migrate deletes (include both `id` and `_version` in the mutation input) +- Migrate `observe` using Amplify subscription plus the refetch pattern +- Migrate `observeQuery` using `useQuery` combined with subscription-triggered `refetch()` +- Update error handling for Apollo's error link and component-level error states +- Test each migrated operation before moving to the next model + +**For predicates and filters:** + +- Convert DataStore predicates to GraphQL filter objects +- Migrate sorting to client-side `.sort()` or server-side `@index` queries +- Migrate pagination from page-based to cursor-based (`nextToken` and `limit`) + +### Post-migration checklist + +**Verification:** + +- Verify all CRUD operations work for every migrated model +- Verify real-time updates fire for all three event types (create, update, delete) on every model +- Verify authentication flow including sign-in, authenticated operations, and sign-out +- Verify sign-out cleanup clears the Apollo cache +- Verify `_version` handling succeeds without `ConditionalCheckFailedException` errors +- Verify soft-delete filtering so deleted records no longer appear in list views + +**Cleanup:** + +- Remove all DataStore imports from your codebase +- Remove generated DataStore model files +- Remove DataStore configuration calls (`DataStore.configure()`, `DataStore.start()`, `DataStore.stop()`) +- Remove `@aws-amplify/datastore` from your dependencies +- Run the app end-to-end with a full user workflow +- 
Monitor for errors post-deployment + +### Strategy-specific additions + + + +If you are following the Local Caching strategy, add these items to your migration plan: + +- Set up `apollo3-cache-persist` for persistent cache storage +- Configure `fetchPolicy` for each query (for example, `cache-and-network` for lists, `cache-first` for detail views) +- Implement optimistic updates for mutations using Apollo's `optimisticResponse` option +- Update the sign-out flow to purge the persistent cache in addition to clearing the in-memory cache + + + + + +If you are following the Offline-First strategy, add these items to your migration plan: + +- Set up Dexie.js as the local IndexedDB database +- Implement a mutation queue for offline writes +- Build a sync engine for delta and base synchronization +- Implement a conflict resolution handler using `_version` comparison +- Add network status detection to switch between online and offline modes +- Update the sign-out flow to clear both the local database and the mutation queue + + + + diff --git a/src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/migrate-crud-operations/index.mdx b/src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/migrate-crud-operations/index.mdx new file mode 100644 index 00000000000..503f2c40114 --- /dev/null +++ b/src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/migrate-crud-operations/index.mdx @@ -0,0 +1,734 @@ +import { getCustomStaticPath } from '@/utils/getCustomStaticPath'; + +export const meta = { + title: 'Migrate CRUD operations', + description: 'Migrate DataStore save, query, update, delete, predicates, pagination, and sorting to Apollo Client GraphQL operations.', + platforms: [ + 'angular', + 'javascript', + 'nextjs', + 'react', + 'react-native', + 'vue' + ] +}; + +export const getStaticPaths = async () => { + return getCustomStaticPath(meta.platforms); +}; + +export function 
getStaticProps(context) { + return { + props: { + platform: context.params.platform, + meta + } + }; +} + +This page covers how to migrate every DataStore CRUD operation and predicate/filter pattern to Apollo Client. DataStore conflates create and update into a single `save()` method and handles `_version` internally. With Apollo Client, you use distinct mutations for each operation and manage `_version` explicitly. + +**GraphQL operations used on this page** (`CREATE_POST`, `UPDATE_POST`, `DELETE_POST`, `GET_POST`, `LIST_POSTS`, and the `POST_DETAILS_FRAGMENT` fragment) are defined on the [Set up Apollo Client](/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/set-up-apollo/) page. Import them as needed: + +```ts +import { apolloClient } from './apolloClient'; +import { + CREATE_POST, UPDATE_POST, DELETE_POST, + GET_POST, LIST_POSTS, +} from './graphql/operations'; +``` + +## Create (save new record) + +DataStore uses `new Model()` plus `DataStore.save()` to create a record. Apollo Client uses the `CREATE_POST` mutation. 
+ +**DataStore (before):** + +```ts +const newPost = await DataStore.save( + new Post({ + title: 'My First Post', + content: 'Hello world', + status: 'PUBLISHED', + rating: 5, + }) +); +``` + +**Apollo Client (after) -- imperative:** + +```ts +const { data } = await apolloClient.mutate({ + mutation: CREATE_POST, + variables: { + input: { + title: 'My First Post', + content: 'Hello world', + status: 'PUBLISHED', + rating: 5, + }, + }, +}); +const newPost = data.createPost; +// newPost._version is 1 (set by AppSync automatically) +``` + + + +**Apollo Client (after) -- React hook:** + +```tsx +import { useMutation } from '@apollo/client'; + +function CreatePostForm() { + const [createPost, { loading, error }] = useMutation(CREATE_POST, { + refetchQueries: [{ query: LIST_POSTS }], + }); + + async function handleSubmit(title: string, content: string) { + const { data } = await createPost({ + variables: { + input: { title, content, status: 'PUBLISHED', rating: 5 }, + }, + }); + console.log('Created:', data.createPost.id); + } + + return ( +
<form onSubmit={(e) => { e.preventDefault(); handleSubmit('Title', 'Content'); }}> + {error && <p>Error: {error.message}</p>} + <button type="submit" disabled={loading}>Create</button> + </form> + ); +} +``` +
+ +**Key differences:** + +- **No `_version` needed for creates.** AppSync sets `_version` to 1 automatically on new records. +- **`refetchQueries`** ensures the list view updates after a create. DataStore handled this automatically through its local store; Apollo requires explicit cache management. + +## Update (modify existing record) + +DataStore uses `Model.copyOf()` with an immer-based draft for immutable updates. Apollo Client uses the `UPDATE_POST` mutation with a plain object. Only changed fields need to be in the input. + + + +**`_version` is REQUIRED for updates.** You must query the record first to get the current `_version`. If you see `ConditionalCheckFailedException`, you are missing or passing a stale `_version`. + + + +**DataStore (before):** + +```ts +const original = await DataStore.query(Post, '123'); +const updated = await DataStore.save( + Post.copyOf(original, (draft) => { + draft.title = 'Updated Title'; + draft.rating = 4; + }) +); +``` + +**Apollo Client (after) -- imperative:** + +```ts +// Step 1: Query the current record to get _version +const { data: queryData } = await apolloClient.query({ + query: GET_POST, + variables: { id: '123' }, +}); +const post = queryData.getPost; + +// Step 2: Mutate with _version from query result +const { data } = await apolloClient.mutate({ + mutation: UPDATE_POST, + variables: { + input: { + id: '123', + title: 'Updated Title', + rating: 4, + _version: post._version, // REQUIRED + }, + }, +}); +``` + + + +**Apollo Client (after) -- React hook:** + +```tsx +import { useQuery, useMutation } from '@apollo/client'; + +function EditPostForm({ postId }: { postId: string }) { + const { data, loading: queryLoading } = useQuery(GET_POST, { + variables: { id: postId }, + }); + const [updatePost, { loading: updating, error }] = useMutation(UPDATE_POST); + + async function handleSave(title: string) { + const post = data.getPost; + await updatePost({ + variables: { + input: { + id: post.id, + title, + _version: 
post._version, + }, + }, + }); + } + + if (queryLoading) return <p>Loading...</p>; + + return ( + <form onSubmit={(e) => { e.preventDefault(); handleSave('Updated Title'); }}> + {error && <p>Error: {error.message}</p>} + <button type="submit" disabled={updating}>Save</button> + </form> + ); +} +``` +
+ +**Key differences:** + +- **No `copyOf()` or immer pattern.** Apollo uses plain objects -- pass only the fields you want to change. +- **Only changed fields + `id` + `_version` are needed.** You do not need to send the entire record. +- **Two-step process:** Query first (to get `_version`), then mutate. DataStore handled this internally. + +## Delete (single record) + +**`_version` is REQUIRED for deletes.** You must query the record first to get the current `_version`, even if you already have the ID. + +**DataStore (before):** + +```ts +const post = await DataStore.query(Post, '123'); +await DataStore.delete(post); +``` + +**Apollo Client (after) -- imperative:** + +```ts +// Step 1: Query to get current _version +const { data: queryData } = await apolloClient.query({ + query: GET_POST, + variables: { id: '123' }, +}); + +// Step 2: Delete with _version +await apolloClient.mutate({ + mutation: DELETE_POST, + variables: { + input: { + id: '123', + _version: queryData.getPost._version, + }, + }, + refetchQueries: [{ query: LIST_POSTS }], +}); +``` + + + +**Apollo Client (after) -- React hook:** + +```tsx +import { useMutation } from '@apollo/client'; + +function DeletePostButton({ post }: { post: { id: string; _version: number } }) { + const [deletePost, { loading }] = useMutation(DELETE_POST, { + refetchQueries: [{ query: LIST_POSTS }], + }); + + async function handleDelete() { + await deletePost({ + variables: { + input: { id: post.id, _version: post._version }, + }, + }); + } + + return ( + <button onClick={handleDelete} disabled={loading}>Delete</button> + ); +} +``` + + + +**Key differences:** + +- **No delete-by-ID shorthand.** Apollo always needs the mutation input object with both `id` and `_version`. +- **Delete is a soft delete** when conflict resolution is enabled. The record's `_deleted` field is set to `true` in DynamoDB, but the record is not physically removed. 
+ +## Query by ID + +**DataStore (before):** + +```ts +const post = await DataStore.query(Post, '123'); +if (post) { + console.log(post.title); +} +``` + +**Apollo Client (after):** + +```ts +const { data } = await apolloClient.query({ + query: GET_POST, + variables: { id: '123' }, +}); +const post = data.getPost; +// Returns null instead of undefined when not found +if (post) { + console.log(post.title); +} +``` + +## List all records + + + +**You must filter out soft-deleted records.** DataStore did this automatically. Apollo Client returns all records including those with `_deleted: true`. Forgetting this is the most common migration bug. + + + +**DataStore (before):** + +```ts +const posts = await DataStore.query(Post); +``` + +**Apollo Client (after):** + +```ts +const { data } = await apolloClient.query({ query: LIST_POSTS }); +const posts = data.listPosts.items.filter((post) => !post._deleted); +``` + +## Batch delete (predicate-based) + +DataStore supported deleting multiple records with a predicate. Apollo Client has no equivalent -- you must query the matching records first, then delete each one individually. 
+ +**DataStore (before):** + +```ts +await DataStore.delete(Post, (p) => p.status.eq('DRAFT')); +``` + +**Apollo Client (after):** + +```ts +// Step 1: Query posts matching the filter +const { data } = await apolloClient.query({ + query: LIST_POSTS, + variables: { filter: { status: { eq: 'DRAFT' } } }, +}); +const drafts = data.listPosts.items.filter((post) => !post._deleted); + +// Step 2: Delete each record individually +const results = await Promise.allSettled( + drafts.map((post) => + apolloClient.mutate({ + mutation: DELETE_POST, + variables: { + input: { id: post.id, _version: post._version }, + }, + }) + ) +); + +// Step 3: Check for partial failures +const failures = results.filter((r) => r.status === 'rejected'); +if (failures.length > 0) { + console.error(`${failures.length} of ${drafts.length} deletes failed`); +} + +// Refresh the list +await apolloClient.refetchQueries({ include: [LIST_POSTS] }); +``` + + + +Use `Promise.allSettled` (not `Promise.all`) so that one failure does not abort the remaining deletes. For large datasets (100+ records), process in batches of 10-25 with a brief delay between batches to avoid AppSync throttling. 
+ + + +## CRUD quick reference + +| DataStore Method | Apollo Client Equivalent | Key Difference | +|---|---|---| +| `DataStore.save(new Model({...}))` | `apolloClient.mutate({ mutation: CREATE, variables: { input: {...} } })` | No `_version` needed for creates | +| `Model.copyOf(original, draft => {...})` + `DataStore.save()` | `apolloClient.mutate({ mutation: UPDATE, variables: { input: { id, _version, ...changes } } })` | Must pass `_version`; plain object instead of immer draft | +| `DataStore.delete(instance)` | `apolloClient.mutate({ mutation: DELETE, variables: { input: { id, _version } } })` | Must query first to get `_version` | +| `DataStore.query(Model, id)` | `apolloClient.query({ query: GET, variables: { id } })` | Returns `null` instead of `undefined` when not found | +| `DataStore.query(Model)` | `apolloClient.query({ query: LIST })` | Must filter `_deleted` records from results | +| `DataStore.delete(Model, predicate)` | Query with filter + delete each individually | No atomicity; use `Promise.allSettled` | + +## Filter operator mapping + +DataStore uses callback-based predicates. Apollo Client and AppSync use JSON filter objects passed as query variables. 
+ +| Operator | DataStore Syntax | GraphQL Syntax | Notes | +|---|---|---|---| +| `eq` | `p.field.eq(value)` | `{ field: { eq: value } }` | Exact match | +| `ne` | `p.field.ne(value)` | `{ field: { ne: value } }` | Not equal | +| `gt` | `p.field.gt(value)` | `{ field: { gt: value } }` | Greater than | +| `ge` | `p.field.ge(value)` | `{ field: { ge: value } }` | Greater than or equal | +| `lt` | `p.field.lt(value)` | `{ field: { lt: value } }` | Less than | +| `le` | `p.field.le(value)` | `{ field: { le: value } }` | Less than or equal | +| `contains` | `p.field.contains(value)` | `{ field: { contains: value } }` | Substring match | +| `notContains` | `p.field.notContains(value)` | `{ field: { notContains: value } }` | Substring not present | +| `beginsWith` | `p.field.beginsWith(value)` | `{ field: { beginsWith: value } }` | String prefix match | +| `between` | `p.field.between(lo, hi)` | `{ field: { between: [lo, hi] } }` | Inclusive range | +| `in` | `p.field.in([v1, v2])` | **NOT AVAILABLE** | Use `or` + `eq` workaround | +| `notIn` | `p.field.notIn([v1, v2])` | **NOT AVAILABLE** | Use `and` + `ne` workaround | + +### Filter examples + +**eq -- Exact match:** + +```ts +// DataStore +const published = await DataStore.query(Post, (p) => p.status.eq('PUBLISHED')); + +// Apollo Client +const { data } = await apolloClient.query({ + query: LIST_POSTS, + variables: { filter: { status: { eq: 'PUBLISHED' } } }, +}); +const published = data.listPosts.items.filter((p) => !p._deleted); +``` + +**contains -- Substring match:** + +```ts +// DataStore +const reactPosts = await DataStore.query(Post, (p) => p.title.contains('React')); + +// Apollo Client +const { data } = await apolloClient.query({ + query: LIST_POSTS, + variables: { filter: { title: { contains: 'React' } } }, +}); +const reactPosts = data.listPosts.items.filter((p) => !p._deleted); +``` + +**between -- Inclusive range:** + +```ts +// DataStore +const midRated = await DataStore.query(Post, (p) => 
p.rating.between(2, 4)); + +// Apollo Client +const { data } = await apolloClient.query({ + query: LIST_POSTS, + variables: { filter: { rating: { between: [2, 4] } } }, +}); +const midRated = data.listPosts.items.filter((p) => !p._deleted); +``` + + + +**Other operators** (`ne`, `gt`, `ge`, `lt`, `le`, `notContains`, `beginsWith`) follow the same pattern as `eq` above -- replace the operator name and value. See the filter operator mapping table for the complete syntax reference. + + + +**Combining conditions with `and`:** + +```ts +// DataStore +const posts = await DataStore.query(Post, (p) => + p.and((p) => [p.rating.gt(4), p.status.eq('PUBLISHED')]) +); + +// Apollo Client +const { data } = await apolloClient.query({ + query: LIST_POSTS, + variables: { + filter: { + and: [{ rating: { gt: 4 } }, { status: { eq: 'PUBLISHED' } }], + }, + }, +}); +``` + + + +Top-level filter fields are **implicitly AND-ed** in AppSync. This means `{ status: { eq: 'PUBLISHED' }, rating: { gt: 4 } }` is equivalent to using explicit `and`. Use explicit `and` when you need it nested inside an `or`. + + + +**Combining conditions with `or`:** + +```ts +const { data } = await apolloClient.query({ + query: LIST_POSTS, + variables: { + filter: { + or: [ + { title: { contains: 'React' } }, + { title: { contains: 'Apollo' } }, + ], + }, + }, +}); +``` + +**Negating with `not`:** + +```ts +const { data } = await apolloClient.query({ + query: LIST_POSTS, + variables: { + filter: { not: { status: { eq: 'DRAFT' } } }, + }, +}); +``` + +### The `in` and `notIn` workaround + + + +The `in` and `notIn` operators do **NOT** exist in AppSync's `ModelFilterInput` types. If you attempt to use `{ field: { in: [...] } }`, AppSync will reject the query with a validation error. 
+ + + +**Replacing `in` with `or` + `eq`:** + +```ts +// DataStore: p.status.in(['PUBLISHED', 'DRAFT']) +// Apollo: combine multiple eq conditions with or +const { data } = await apolloClient.query({ + query: LIST_POSTS, + variables: { + filter: { + or: [{ status: { eq: 'PUBLISHED' } }, { status: { eq: 'DRAFT' } }], + }, + }, +}); +``` + + + +```ts +function buildInFilter(field: string, values: string[]) { + return { + or: values.map((value) => ({ [field]: { eq: value } })), + }; +} + +function buildNotInFilter(field: string, values: string[]) { + return { + and: values.map((value) => ({ [field]: { ne: value } })), + }; +} + +// Usage: +const { data } = await apolloClient.query({ + query: LIST_POSTS, + variables: { filter: buildInFilter('status', ['PUBLISHED', 'DRAFT']) }, +}); +``` + + + +## Pagination migration + +DataStore uses **page-based** pagination (zero-indexed `page` number + `limit`). AppSync uses **cursor-based** pagination (`nextToken` + `limit`). This is not a rename -- it is a fundamental semantic change. + +| Aspect | DataStore (Page-Based) | Apollo/AppSync (Cursor-Based) | +|--------|----------------------|-------------------------------| +| Navigation | Random access -- jump to any page | Sequential only -- must traverse pages in order | +| Parameters | `{ page: 0, limit: 10 }` | `{ limit: 10, nextToken: '...' 
}` | +| First page | `page: 0` | Omit `nextToken` (or pass `null`) | +| Next page | `page: page + 1` | Use `nextToken` from previous response | +| End detection | `items.length < limit` | `nextToken === null` | + +**Apollo Client cursor-based pagination:** + +```ts +// Page 1 (first 10 items) -- no nextToken needed +const { data: page1Data } = await apolloClient.query({ + query: LIST_POSTS, + variables: { limit: 10 }, +}); +const page1Items = page1Data.listPosts.items.filter((p) => !p._deleted); +const nextToken = page1Data.listPosts.nextToken; + +// Page 2 -- use nextToken from previous response +if (nextToken) { + const { data: page2Data } = await apolloClient.query({ + query: LIST_POSTS, + variables: { limit: 10, nextToken }, + }); +} +``` + + + +### Load More pattern (React) + +The most common pagination pattern with cursor-based pagination is "Load More" (infinite scroll): + +```tsx +import { useQuery } from '@apollo/client'; + +function PostList() { + const { data, loading, error, fetchMore } = useQuery(LIST_POSTS, { + variables: { limit: 10 }, + }); + + if (loading && !data) return
<p>Loading...</p>;
  if (error) return <p>Error: {error.message}</p>;

  const posts = (data?.listPosts?.items ?? []).filter((p) => !p._deleted);
  const nextToken = data?.listPosts?.nextToken;

  const handleLoadMore = () => {
    fetchMore({
      variables: { limit: 10, nextToken },
      updateQuery: (prev, { fetchMoreResult }) => {
        if (!fetchMoreResult) return prev;
        return {
          listPosts: {
            ...fetchMoreResult.listPosts,
            items: [
              ...prev.listPosts.items,
              ...fetchMoreResult.listPosts.items,
            ],
          },
        };
      },
    });
  };

  return (
    <div>
      <ul>
        {posts.map((post) => (
          <li key={post.id}>{post.title}</li>
        ))}
      </ul>
      {nextToken && (
        <button onClick={handleLoadMore}>Load More</button>
      )}
    </div>
  );
}
```

+ + + +When using `nextToken` with filters, AppSync may return **fewer items than `limit`**. Always check `nextToken === null` to determine if more pages exist -- do **not** use `items.length < limit` as the end-of-results indicator. + + + +## Sorting migration + +DataStore supports `SortDirection.ASCENDING` and `SortDirection.DESCENDING`. AppSync's basic `listModels` query has **no `sortDirection` argument** by default. + +### Client-side sorting (recommended) + +For most use cases, fetch results and sort them in JavaScript: + +```ts +// DataStore +const posts = await DataStore.query(Post, Predicates.ALL, { + sort: (s) => s.createdAt(SortDirection.DESCENDING), +}); + +// Apollo Client +const { data } = await apolloClient.query({ query: LIST_POSTS }); +const posts = [...data.listPosts.items] + .filter((p) => !p._deleted) + .sort((a, b) => + new Date(b.createdAt).getTime() - new Date(a.createdAt).getTime() + ); +``` + + + +If your model has a Global Secondary Index (GSI) defined with the `@index` directive, AppSync generates a query with `sortDirection` support: + +```graphql +type Post @model { + id: ID! + title: String! + status: String! @index(name: "byStatus", sortKeyFields: ["createdAt"]) + createdAt: AWSDateTime! +} +``` + +This generates a `postsByStatus` query that accepts `sortDirection`: + +```ts +const LIST_POSTS_BY_STATUS = gql` + query PostsByStatus( + $status: String! + $sortDirection: ModelSortDirection + $limit: Int + $nextToken: String + ) { + postsByStatus( + status: $status + sortDirection: $sortDirection + limit: $limit + nextToken: $nextToken + ) { + items { ...PostDetails } + nextToken + } + } +`; + +const { data } = await apolloClient.query({ + query: LIST_POSTS_BY_STATUS, + variables: { status: 'PUBLISHED', sortDirection: 'DESC', limit: 10 }, +}); +``` + +Server-side sorting requires backend schema changes and only works when querying by the index's partition key. For general-purpose sorting, use client-side sorting. 
+ + + +## Common mistakes + + + +### 1. Forgetting _version in update or delete mutations + +The most frequent migration error. DataStore handled `_version` internally. With Apollo, you must include it yourself. + +### 2. Using CREATE mutation for updates + +DataStore's `save()` handled both creates and updates. With Apollo, you must call the correct mutation. + +### 3. Not filtering _deleted records from list results + +DataStore automatically hid soft-deleted records. Apollo returns all records, including deleted ones. Always use `.filter(item => !item._deleted)` on list query results. + +### 4. Not using refetchQueries after mutations + +DataStore's local store automatically updated queries after mutations. Apollo's cache may not update list queries automatically. Add `refetchQueries: [{ query: LIST_POSTS }]` to mutations that affect list views. + +### 5. Using stale _version values + +If you cache a record's `_version` and another user or process updates the record, your mutation will fail. Re-query with `fetchPolicy: 'network-only'` before mutating when freshness is critical. 
+ + diff --git a/src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/migrate-relationships/index.mdx b/src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/migrate-relationships/index.mdx new file mode 100644 index 00000000000..7f012aa1b05 --- /dev/null +++ b/src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/migrate-relationships/index.mdx @@ -0,0 +1,507 @@ +import { getCustomStaticPath } from '@/utils/getCustomStaticPath'; + +export const meta = { + title: 'Migrate relationships', + description: 'Migrate DataStore hasMany, belongsTo, hasOne, and manyToMany relationships to Apollo Client with GraphQL selection sets.', + platforms: [ + 'angular', + 'javascript', + 'nextjs', + 'react', + 'react-native', + 'vue' + ] +}; + +export const getStaticPaths = async () => { + return getCustomStaticPath(meta.platforms); +}; + +export function getStaticProps(context) { + return { + props: { + platform: context.params.platform, + meta + } + }; +} + +Relationship handling is where DataStore and Apollo Client differ most fundamentally. DataStore **lazy-loads** relationships: you access a field and it fetches on demand, returning a Promise (for `belongsTo`/`hasOne`) or an AsyncCollection (for `hasMany`). Apollo Client **eagerly loads** relationships based on what you include in your GraphQL selection set. This gives you explicit control over data fetching granularity but requires you to think about what data you need upfront. + +## Schema reference + +All examples on this page use the following illustrative schema definitions from your Gen 1 backend: + +```graphql title="amplify/backend/api//schema.graphql" +type Post @model @auth(rules: [{ allow: owner }]) { + id: ID! + title: String! 
+ content: String + status: String + rating: Int + comments: [Comment] @hasMany(indexName: "byPost", fields: ["id"]) + tags: [PostTag] @hasMany(indexName: "byPostTag", fields: ["id"]) + metadata: PostMetadata @hasOne(fields: ["id"]) +} + +type Comment @model @auth(rules: [{ allow: owner }]) { + id: ID! + content: String! + postID: ID! @index(name: "byPost") + post: Post @belongsTo(fields: ["postID"]) +} + +type Tag @model @auth(rules: [{ allow: owner }]) { + id: ID! + name: String! + posts: [PostTag] @hasMany(indexName: "byTag", fields: ["id"]) +} + +type PostTag @model @auth(rules: [{ allow: owner }]) { + id: ID! + postID: ID! @index(name: "byPostTag") + tagID: ID! @index(name: "byTag") + post: Post @belongsTo(fields: ["postID"]) + tag: Tag @belongsTo(fields: ["tagID"]) +} + +type PostMetadata @model @auth(rules: [{ allow: owner }]) { + id: ID! + postID: ID! @index(name: "byPost") + views: Int + likes: Int + post: Post @belongsTo(fields: ["postID"]) +} +``` + + + +All relationship examples include `_version`, `_deleted`, and `_lastChangedAt` fields in selections for conflict-resolution-enabled backends. See the [Set up Apollo Client](/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/set-up-apollo/) page for details. + + + + + +**Gen 1 field casing reminder:** Gen 1 backends use uppercase ID suffixes (`postID`, `tagID`) -- see [Set up Apollo Client](/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/set-up-apollo/) for details. Match your actual schema field names exactly; mismatches silently return `null`. + + + +## hasMany: Post to Comments + +A `hasMany` relationship means a parent record has zero or more child records. The key change: DataStore's `AsyncCollection` with `.toArray()` becomes a nested GraphQL selection with an `items` wrapper object. 
+ +### DataStore (before) + +```ts +const post = await DataStore.query(Post, '123'); +const comments = await post.comments.toArray(); +// comments is Comment[] -- fetched on demand when you called .toArray() +``` + +### Apollo Client (after) -- eager loading (nested selection) + +Define a GraphQL query that includes the comments in the selection set: + +```ts +const GET_POST_WITH_COMMENTS = gql` + ${POST_DETAILS_FRAGMENT} + query GetPostWithComments($id: ID!) { + getPost(id: $id) { + ...PostDetails + comments { + items { + id + content + createdAt + _version + _deleted + _lastChangedAt + } + nextToken + } + } + } +`; + +const { data } = await apolloClient.query({ + query: GET_POST_WITH_COMMENTS, + variables: { id: '123' }, +}); + +const post = data.getPost; +const comments = data.getPost.comments.items.filter(c => !c._deleted); +``` + +The comments come back in the same response as the post -- no second request needed. + + + +Always filter `_deleted` records from nested `items` arrays. Soft-deleted child records are still returned by AppSync. + + + +### Apollo Client (after) -- lazy loading (separate query) + +If you do not always need comments, omit them from the initial query and fetch them separately when needed: + +```ts +const LIST_COMMENTS_BY_POST = gql` + query ListCommentsByPost($filter: ModelCommentFilterInput) { + listComments(filter: $filter) { + items { + id + content + createdAt + _version + _deleted + _lastChangedAt + } + nextToken + } + } +`; + +// Fetch comments for a specific post on demand +const { data } = await apolloClient.query({ + query: LIST_COMMENTS_BY_POST, + variables: { filter: { postID: { eq: '123' } } }, +}); +const comments = data.listComments.items.filter(c => !c._deleted); +``` + + + +**Over-fetching warning:** Use the nested selection (eager) pattern for data you always display together. Use the separate query (lazy) pattern for data that is optional or loaded on user action (for example, expanding a comments section). 
+ + + + + +### React hook example + +```tsx +import { useQuery } from '@apollo/client'; + +function PostWithComments({ postId }: { postId: string }) { + const { data, loading, error } = useQuery(GET_POST_WITH_COMMENTS, { + variables: { id: postId }, + }); + + if (loading) return
<p>Loading...</p>;
  if (error) return <p>Error loading post.</p>;

  const post = data.getPost;
  const comments = post.comments.items.filter(c => !c._deleted);

  return (
    <div>
      <h1>{post.title}</h1>
      <p>{post.content}</p>
      <h2>Comments ({comments.length})</h2>
      {comments.map(comment => (
        <div key={comment.id}>
          <p>{comment.content}</p>
        </div>
      ))}
    </div>
  );
}
```

+ +## belongsTo: Comment to Post + +A `belongsTo` relationship means a child record references its parent. The key change: DataStore resolves the parent automatically via a Promise. Apollo uses a nested selection to include the parent in the response. + +### DataStore (before) + +```ts +const comment = await DataStore.query(Comment, 'abc'); +const post = await comment.post; // Promise resolves to the parent Post +``` + +### Apollo Client (after) + +```ts +const GET_COMMENT_WITH_POST = gql` + query GetCommentWithPost($id: ID!) { + getComment(id: $id) { + id + content + post { + id + title + status + _version + _deleted + _lastChangedAt + } + _version + _deleted + _lastChangedAt + } + } +`; + +const { data } = await apolloClient.query({ + query: GET_COMMENT_WITH_POST, + variables: { id: 'abc' }, +}); + +const comment = data.getComment; +const post = comment.post; // Parent Post is already loaded -- no extra request +``` + +The parent object is directly available as a nested field. No Promise, no `.then()` -- it is already resolved in the response. + + + +The foreign key field (`postID`) is also available on the Comment if you only need the parent's ID without fetching the full parent record. + + + +## hasOne: Post to PostMetadata + +A `hasOne` relationship represents 1:1 ownership. Similar to `belongsTo` -- DataStore returns a Promise, Apollo uses a nested selection. The result is `null` if no related record exists. + +### DataStore (before) + +```ts +const post = await DataStore.query(Post, '123'); +const metadata = await post.metadata; // Promise resolves to PostMetadata or undefined +``` + +### Apollo Client (after) + +```ts +const GET_POST_WITH_METADATA = gql` + ${POST_DETAILS_FRAGMENT} + query GetPostWithMetadata($id: ID!) 
{ + getPost(id: $id) { + ...PostDetails + metadata { + id + views + likes + _version + _deleted + _lastChangedAt + } + } + } +`; + +const { data } = await apolloClient.query({ + query: GET_POST_WITH_METADATA, + variables: { id: '123' }, +}); + +const post = data.getPost; +const metadata = post.metadata; // PostMetadata object or null +``` + +## manyToMany: Post and Tag + +Many-to-many relationships use an explicit join table model. Posts and Tags are connected through the `PostTag` join model. The key change: instead of getting tags directly, you query `PostTag` join records and then extract the `tag` from each one. + +### DataStore (before) + +```ts +const post = await DataStore.query(Post, '123'); +const postTags = await post.tags.toArray(); +const tags = await Promise.all(postTags.map(pt => pt.tag)); +``` + +### Apollo Client (after) -- querying tags for a post + +```ts +const GET_POST_WITH_TAGS = gql` + ${POST_DETAILS_FRAGMENT} + query GetPostWithTags($id: ID!) { + getPost(id: $id) { + ...PostDetails + tags { + items { + id + tag { + id + name + _version + _deleted + _lastChangedAt + } + _version + _deleted + } + } + } + } +`; + +const { data } = await apolloClient.query({ + query: GET_POST_WITH_TAGS, + variables: { id: '123' }, +}); + +// Extract tags from the join records, filtering out deleted join entries +const tags = data.getPost.tags.items + .filter(pt => !pt._deleted) + .map(pt => pt.tag); +``` + + + +Filter `_deleted` on the **join records** (`PostTag`), not just the tags themselves. A deleted join record means the association was removed even if the Tag still exists. + + + +### Create a many-to-many association + +To associate a Post with a Tag, create a `PostTag` join record: + +```ts +const CREATE_POST_TAG = gql` + mutation CreatePostTag($input: CreatePostTagInput!) 
{ + createPostTag(input: $input) { + id + postID + tagID + _version + _deleted + _lastChangedAt + } + } +`; + +await apolloClient.mutate({ + mutation: CREATE_POST_TAG, + variables: { input: { postID: '123', tagID: '456' } }, +}); +``` + +### Remove a many-to-many association + +To remove an association, delete the `PostTag` join record (you need its `id` and `_version`): + +```ts +const DELETE_POST_TAG = gql` + mutation DeletePostTag($input: DeletePostTagInput!) { + deletePostTag(input: $input) { + id + _version + } + } +`; + +await apolloClient.mutate({ + mutation: DELETE_POST_TAG, + variables: { + input: { + id: postTagRecord.id, + _version: postTagRecord._version, + }, + }, +}); +``` + +Deleting the `PostTag` join record removes the association between the Post and Tag. It does **not** delete the Post or the Tag themselves. + +## Create related records + +When creating a child record that belongs to a parent, the key difference is how you specify the relationship. + +**DataStore (before):** DataStore accepted the model instance for the relationship: + +```ts +const existingPost = await DataStore.query(Post, '123'); +await DataStore.save( + new Comment({ + content: 'Great post!', + post: existingPost, // Pass the model instance + }) +); +``` + +**Apollo Client (after):** Apollo requires the foreign key ID, not the model instance: + +```ts +const CREATE_COMMENT = gql` + mutation CreateComment($input: CreateCommentInput!) { + createComment(input: $input) { + id + content + postID + _version + _deleted + _lastChangedAt + } + } +`; + +await apolloClient.mutate({ + mutation: CREATE_COMMENT, + variables: { + input: { + content: 'Great post!', + postID: '123', // Pass the foreign key ID directly + }, + }, +}); +``` + +## Quick reference table + +| Relationship | DataStore Access Pattern | Apollo Client Access Pattern | Key Change | +|---|---|---|---| +| **hasMany** (Post to Comments) | `await post.comments.toArray()` | Nested `comments { items { ... 
} }` selection | AsyncCollection becomes `items` wrapper; eager-loaded in single request | +| **belongsTo** (Comment to Post) | `await comment.post` | Nested `post { ... }` selection | Promise becomes nested object; no await needed | +| **hasOne** (Post to Metadata) | `await post.metadata` | Nested `metadata { ... }` selection | Promise becomes nested object or `null` | +| **manyToMany** (Post to Tag) | `await post.tags.toArray()` then `await pt.tag` | Nested `tags { items { tag { ... } } }` selection | Must query through join table; filter `_deleted` on join records | +| **Creating children** | `new Comment({ post: existingPost })` | `{ input: { postID: '123' } }` | Model instance becomes foreign key ID | + +## Performance considerations + +### Eager vs. lazy loading + +DataStore always lazy-loaded relationships. Apollo gives you the choice: + +- **Eager loading** (nested selection): Fetches related data in the same GraphQL request. Use this for data you always display together. +- **Lazy loading** (separate query): Fetches related data only when needed. Use this for data that is optional or loaded on user action. + +### The N+1 query problem + +DataStore hid the N+1 problem because all data was local -- lazy-loading from IndexedDB was effectively free. With Apollo, each separate query is a network request: + +```ts +// BAD: N+1 -- separate query for each post's comments +const { data } = await apolloClient.query({ query: LIST_POSTS }); +for (const post of data.listPosts.items) { + await apolloClient.query({ + query: LIST_COMMENTS_BY_POST, + variables: { filter: { postID: { eq: post.id } } }, + }); +} + +// GOOD: include comments in the list query +const LIST_POSTS_WITH_COMMENTS = gql` + query ListPostsWithComments($filter: ModelPostFilterInput, $limit: Int) { + listPosts(filter: $filter, limit: $limit) { + items { + ...PostDetails + comments { + items { id content _version _deleted } + } + } + nextToken + } + } +`; +``` + +### Recommendations + +1. 
**Use nested selections** for data you always need together. One request is always faster than multiple. +2. **Use separate queries** for optional or on-demand data. +3. **Be mindful of depth.** Limit nesting to 2-3 levels to avoid large response sizes. +4. **Apollo's cache helps.** Once a related record is fetched, Apollo caches it by `__typename` and `id`. Subsequent queries for the same record may resolve from cache. diff --git a/src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/set-up-apollo/index.mdx b/src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/set-up-apollo/index.mdx new file mode 100644 index 00000000000..13f3c498f25 --- /dev/null +++ b/src/pages/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/set-up-apollo/index.mdx @@ -0,0 +1,767 @@ +import { getCustomStaticPath } from '@/utils/getCustomStaticPath'; + +export const meta = { + title: 'Set up Apollo Client', + description: 'Install Apollo Client, configure authentication with Cognito, set up error handling and retry logic, configure real-time subscriptions with Amplify, and write GraphQL operations for your AppSync backend.', + platforms: [ + 'angular', + 'javascript', + 'nextjs', + 'react', + 'react-native', + 'vue' + ] +}; + +export const getStaticPaths = async () => { + return getCustomStaticPath(meta.platforms); +}; + +export function getStaticProps(context) { + return { + props: { + platform: context.params.platform, + meta + } + }; +} + +This page covers everything you need to get Apollo Client working with your AppSync endpoint: prerequisites, installing Apollo Client, writing GraphQL operations, understanding `_version` metadata, configuring the link chain for authentication and error handling, and setting up real-time subscriptions with Amplify. 
+ +## Before you begin + +Before starting the migration, make sure you have: + +- An existing **Amplify Gen 1 backend** with your data models deployed and working +- Your Amplify configuration file (`amplifyconfiguration.json` or `aws-exports.js`) in your project +- The `aws-amplify` v6 package installed and configured (`Amplify.configure(config)` called at app startup) +- Familiarity with **GraphQL syntax** -- queries, mutations, and subscriptions + + + +**You do NOT need to migrate your backend to Gen 2.** This guide assumes you are keeping your existing Gen 1 backend and replacing the DataStore client library with Apollo Client. The `aws-amplify` v6 library works with Gen 1 backends -- you only need to update how you call the API, not your infrastructure. + + + +## Install Apollo Client + +Install Apollo Client: + +```bash +npm install @apollo/client@^3.14.0 +``` + +You do **not** need to install `graphql` separately -- it is already provided by `aws-amplify`. Installing `graphql` explicitly would cause npm to resolve a newer version (v16), which conflicts with `aws-amplify`'s pinned `graphql@15.8.0` and fails with an `ERESOLVE` error. + + + +**Why Apollo Client v3 (not v4)?** The `apollo3-cache-persist` library -- needed for the Local Caching strategy covered later in this guide -- only supports Apollo Client v3. Starting with v3 avoids a disruptive version migration mid-project. Using `@apollo/client@^3.14.0` ensures you get the latest v3 release with all stability fixes. + + + +## Find your GraphQL endpoint + +Your GraphQL endpoint and auth configuration are in `aws-exports.js` (or `amplifyconfiguration.json`): + +```json +{ + "aws_appsync_graphqlEndpoint": "https://xxxxx.appsync-api.us-east-1.amazonaws.com/graphql", + "aws_appsync_authenticationType": "AMAZON_COGNITO_USER_POOLS", + "aws_appsync_region": "us-east-1" +} +``` + +You will use the `aws_appsync_graphqlEndpoint` value when configuring Apollo Client. 
+ + + +**Gen 1 field name casing.** Gen 1 backends generate foreign key fields with **uppercase** ID suffixes (e.g., `postID`, `tagID`), while Gen 2 uses lowercase (`postId`, `tagId`). The code examples in this guide use the Gen 2 lowercase convention. **If you are keeping your Gen 1 backend, you must adjust all field names in GraphQL operations to match your actual schema.** A mismatched field name does not produce an error -- AppSync silently returns `null`, making this extremely difficult to debug. Check your `src/graphql/queries.js` or the AppSync console's Schema tab to verify the correct casing for every foreign key field before writing any operations. + + + +## Generate typed operations (optional) + +Your Gen 1 project already has auto-generated GraphQL operations in `src/graphql/` (queries, mutations, subscriptions). These operations continue to work with your Gen 1 backend -- you can reference them when writing the Apollo Client operations below. + +Alternatively, you can regenerate them or copy queries, mutations, and subscriptions directly from the **AWS AppSync console** by navigating to the Schema tab and the Queries tab. + +For full details on integrating generated types with Apollo Client, see the [Advanced patterns](/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/advanced-patterns/) page. + +## Write GraphQL operations + +Apollo Client uses `gql` tagged template literals to define GraphQL operations. This section shows the standard patterns using a `Post` model as the running example. + +### GraphQL fragment for reusable field selection + +Fragments let you define a reusable set of fields. 
Every operation references this fragment, ensuring consistent field selection across your app: + +```graphql +fragment PostDetails on Post { + id + title + content + status + rating + createdAt + updatedAt + _version + _deleted + _lastChangedAt + owner +} +``` + +If your model uses **owner-based authorization** (`@auth(rules: [{ allow: owner }])`), include the `owner` field in your fragments. This field is needed for owner-scoped subscriptions. + + + +The `_version`, `_deleted`, and `_lastChangedAt` fields are required for backends with conflict resolution enabled. If your app used DataStore, your backend has conflict resolution enabled. See the [_version metadata section](#understand-_version-metadata) below. + + + +### Complete operation definitions + +```ts title="src/graphql/operations.ts" +import { gql } from '@apollo/client'; + +// Fragment for consistent field selection +const POST_DETAILS_FRAGMENT = gql` + fragment PostDetails on Post { + id + title + content + status + rating + createdAt + updatedAt + _version + _deleted + _lastChangedAt + owner + } +`; + +// List all posts +export const LIST_POSTS = gql` + ${POST_DETAILS_FRAGMENT} + query ListPosts($filter: ModelPostFilterInput, $limit: Int, $nextToken: String) { + listPosts(filter: $filter, limit: $limit, nextToken: $nextToken) { + items { + ...PostDetails + } + nextToken + } + } +`; + +// Get a single post by ID +export const GET_POST = gql` + ${POST_DETAILS_FRAGMENT} + query GetPost($id: ID!) { + getPost(id: $id) { + ...PostDetails + } + } +`; + +// Create a new post +export const CREATE_POST = gql` + ${POST_DETAILS_FRAGMENT} + mutation CreatePost($input: CreatePostInput!) { + createPost(input: $input) { + ...PostDetails + } + } +`; + +// Update an existing post +export const UPDATE_POST = gql` + ${POST_DETAILS_FRAGMENT} + mutation UpdatePost($input: UpdatePostInput!) 
{ + updatePost(input: $input) { + ...PostDetails + } + } +`; + +// Delete a post +export const DELETE_POST = gql` + ${POST_DETAILS_FRAGMENT} + mutation DeletePost($input: DeletePostInput!) { + deletePost(input: $input) { + ...PostDetails + } + } +`; +``` + +Every operation -- including mutations -- returns the full `PostDetails` fragment. This ensures you always have the latest `_version` value for subsequent mutations. + + + +**Multi-model apps:** Define a result interface for **each model** in your app. Co-locate them with the operations in a single `src/graphql/operations.ts` file (or split into `operations/post.ts`, `operations/comment.ts`, etc. for larger schemas). Your auto-generated `src/graphql/queries.js` and `src/graphql/mutations.js` files contain the exact field names and operation signatures for every model -- use them as a reference when writing your typed operations. + + + + + +**Replacing DataStore enums:** DataStore model files export TypeScript `enum` types (e.g., `PostStatus`). After migration, you no longer import from `./models`, so you need to define these values yourself. If your TypeScript configuration has `erasableSyntaxOnly: true` (the default in TypeScript 5.9+ and Vite 8 scaffolds -- note that Vite 8 sets this in `tsconfig.app.json`, not `tsconfig.json`), `enum` declarations are not allowed because they emit runtime code. Use a `const` object with `as const` instead: + +```ts +// Instead of: enum PostStatus { DRAFT = 'DRAFT', PUBLISHED = 'PUBLISHED' } +const PostStatus = { DRAFT: 'DRAFT', PUBLISHED: 'PUBLISHED', ARCHIVED: 'ARCHIVED' } as const; +type PostStatus = (typeof PostStatus)[keyof typeof PostStatus]; +``` + +Also note: Vite 8 scaffolds set `verbatimModuleSyntax: true` in `tsconfig.app.json`. This requires using `import type` for type-only imports (e.g., `import type { TypedDocumentNode } from '@apollo/client'` instead of `import { TypedDocumentNode }`). 
+ + + +## Understand _version metadata + +This is one of the most important sections in this guide. If your app used DataStore, your backend **has conflict resolution enabled**, and you must handle three metadata fields correctly or your mutations will fail. + +### Why these fields exist + +DataStore enables **conflict resolution** on the AppSync backend via DynamoDB. This mechanism adds three metadata fields to every model: + +| Field | Type | Purpose | +|-------|------|---------| +| `_version` | `Int` | Optimistic locking counter. Incremented on every successful mutation. | +| `_deleted` | `Boolean` | Soft-delete flag. When `true`, the record is logically deleted but still exists in DynamoDB. | +| `_lastChangedAt` | `AWSTimestamp` | Millisecond timestamp of the last change. Set automatically by AppSync. | + +### When you need them + +- **All mutations require `_version`** in the input (except creates). Omitting it causes a `ConditionalCheckFailedException`. +- **All queries should select** `_version`, `_deleted`, and `_lastChangedAt` in the response fields. +- **List queries return soft-deleted records.** You must filter them out in your application code. + +### How to handle them + +Follow these three rules: + +**1. Always include metadata fields in response selections.** Every query and mutation response should include `_version`, `_deleted`, and `_lastChangedAt` (the `PostDetails` fragment above does this). + +**2. Always pass `_version` from the last query result into mutation inputs:** + +```ts +// First, query the current post (includes _version in response) +const { data } = await apolloClient.query({ + query: GET_POST, + variables: { id: postId }, +}); +const post = data.getPost; + +// Then, pass _version when updating +await apolloClient.mutate({ + mutation: UPDATE_POST, + variables: { + input: { + id: post.id, + title: 'Updated Title', + _version: post._version, // REQUIRED + }, + }, +}); +``` + +**3. 
Filter soft-deleted records from list query results:** + +```ts +const { data } = await apolloClient.query({ query: LIST_POSTS }); +const activePosts = data.listPosts.items.filter(post => !post._deleted); +``` + +### Helper: filter soft-deleted records + +A simple utility function to filter out soft-deleted records from any list query: + +```ts title="src/utils/filterDeleted.ts" +function filterDeleted<T extends { _deleted?: boolean | null }>(items: T[]): T[] { + return items.filter(item => !item._deleted); +} + +// Usage +const { data } = await apolloClient.query({ query: LIST_POSTS }); +const activePosts = filterDeleted(data.listPosts.items); +``` + +## Configure Apollo Client + +Apollo Client communicates with AppSync through a **link chain** -- a series of middleware functions that process each request. You will build four links: + +1. **HTTP Link** -- sends the actual GraphQL request to AppSync +2. **Auth Link** -- injects your Cognito ID token into each request +3. **Error Link** -- intercepts and logs GraphQL and network errors +4. **Retry Link** -- automatically retries failed network requests with backoff + +### The HTTP link + +```ts +import { createHttpLink } from '@apollo/client'; +import config from '../amplifyconfiguration.json'; + +const httpLink = createHttpLink({ + uri: config.aws_appsync_graphqlEndpoint, +}); +``` + + + +**Do NOT use `BatchHttpLink`.** AppSync does not support HTTP request batching. Batched requests will fail silently, returning errors for all operations in the batch.
+ + + +### The auth link + +The auth link injects your Cognito User Pools ID token into every request: + +```ts +import { setContext } from '@apollo/client/link/context'; +import { fetchAuthSession } from 'aws-amplify/auth'; + +const authLink = setContext(async (_, { headers }) => { + try { + const session = await fetchAuthSession(); + const token = session.tokens?.idToken?.toString(); + return { + headers: { + ...headers, + authorization: token || '', + }, + }; + } catch (error) { + console.error('Auth session error:', error); + return { headers }; + } +}); +``` + +`fetchAuthSession()` is called on every request, ensuring tokens are always fresh. Amplify automatically refreshes expired access tokens using the refresh token. + +### The error link + +The error link intercepts all GraphQL and network errors globally: + +```ts +import { onError } from '@apollo/client/link/error'; + +const errorLink = onError(({ graphQLErrors, networkError }) => { + if (graphQLErrors) { + for (const { message, locations, path } of graphQLErrors) { + console.error( + `[GraphQL error]: Message: ${message}, Location: ${locations}, Path: ${path}` + ); + + if (message.includes('Unauthorized') || message.includes('401')) { + // Token expired or invalid -- redirect to sign-in + } + } + } + + if (networkError) { + console.error(`[Network error]: ${networkError}`); + } +}); +``` + +**Common AppSync errors:** + +| Error Message | Cause | Action | +|---------------|-------|--------| +| `Unauthorized` or `401` | Expired or missing auth token | Redirect to sign-in | +| `ConditionalCheckFailedException` | Missing or stale `_version` in mutation input | Re-query to get latest `_version`, then retry | +| `ConflictUnhandled` | Conflict resolution rejected the mutation | Re-query and retry with fresh data | +| `Network error` | Connectivity issue | Retry link handles this automatically | + +### The retry link + +```ts +import { RetryLink } from '@apollo/client/link/retry'; + +const retryLink = new 
RetryLink({ + delay: { + initial: 300, + max: 5000, + jitter: true, + }, + attempts: { + max: 3, + retryIf: (error) => !!error, + }, +}); +``` + +Retries up to 3 times on any network error with exponential backoff. The `jitter: true` setting adds randomness to prevent thundering herd problems. + +### Put it all together + +Combine all four links into a single Apollo Client instance: + +```ts title="src/apolloClient.ts" +import { + ApolloClient, + InMemoryCache, + createHttpLink, + from, +} from '@apollo/client'; + +export const apolloClient = new ApolloClient({ + link: from([retryLink, errorLink, authLink, httpLink]), + cache: new InMemoryCache(), +}); +``` + +### Link chain order + +The `from()` function composes links **left to right** on outgoing requests and **right to left** on incoming responses: + +``` +Request --> RetryLink --> ErrorLink --> AuthLink --> HttpLink --> AppSync +Response <-- RetryLink <-- ErrorLink <-- AuthLink <-- HttpLink <-- AppSync +``` + +- **RetryLink is first** -- it wraps the entire chain, so if any downstream link or the network request fails, RetryLink can re-execute the full chain (including re-fetching the auth token) +- **ErrorLink is second** -- it sees all errors and can log or redirect +- **AuthLink is third** -- it injects the Cognito token right before the HTTP request +- **HttpLink is last** -- it sends the actual request to AppSync + + + +### Connect to React + +Wrap your application with `ApolloProvider` to make the client available to all components: + +```tsx title="src/App.tsx" +import { ApolloProvider } from '@apollo/client'; +import { apolloClient } from './apolloClient'; + +function App() { + return ( + + {/* Your app components can now use useQuery, useMutation, etc. */} + + ); +} +``` + +Any component inside `ApolloProvider` can use Apollo's React hooks (`useQuery`, `useMutation`) to interact with your AppSync API. 
+ + + +## Sign-out and cache cleanup + +When a user signs out, you must clear Apollo Client's in-memory cache to prevent the next user from seeing stale data: + +```ts title="src/auth.ts" +import { signOut } from 'aws-amplify/auth'; +import { apolloClient } from './apolloClient'; + +async function handleSignOut() { + // 1. Clear Apollo Client's in-memory cache + await apolloClient.clearStore(); + + // 2. Sign out from Amplify (clears Cognito tokens) + await signOut(); +} +``` + + + +**Name the function `handleSignOut` (not `signOut`)** to avoid shadowing the Amplify import. Naming it `signOut` creates a recursive call -- the function calls itself instead of Amplify's `signOut`, causing a stack overflow. + + + +**Key details:** + +- **`clearStore()`** clears the in-memory cache and cancels all active queries. Use `resetStore()` instead if you want to clear the cache **and** refetch all active queries. +- **Order matters:** Clear the cache first, then sign out. If you sign out first, `clearStore()` may trigger refetches that fail because the auth token is already invalidated. +- **For the Local Caching strategy** (covered on the [Add local caching](/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/add-local-caching/) page), the sign-out function will also need to purge the persistent cache. 
+ + + +Here is the full `src/apolloClient.ts` file combining everything above: + +```ts title="src/apolloClient.ts" +import { + ApolloClient, + InMemoryCache, + createHttpLink, + from, +} from '@apollo/client'; +import { setContext } from '@apollo/client/link/context'; +import { onError } from '@apollo/client/link/error'; +import { RetryLink } from '@apollo/client/link/retry'; +import { fetchAuthSession } from 'aws-amplify/auth'; +import config from '../amplifyconfiguration.json'; + +// --- HTTP Link --- +const httpLink = createHttpLink({ + uri: config.aws_appsync_graphqlEndpoint, +}); + +// --- Auth Link --- +const authLink = setContext(async (_, { headers }) => { + try { + const session = await fetchAuthSession(); + const token = session.tokens?.idToken?.toString(); + return { + headers: { + ...headers, + authorization: token || '', + }, + }; + } catch (error) { + console.error('Auth session error:', error); + return { headers }; + } +}); + +// --- Error Link --- +const errorLink = onError(({ graphQLErrors, networkError }) => { + if (graphQLErrors) { + for (const { message, locations, path } of graphQLErrors) { + console.error( + `[GraphQL error]: Message: ${message}, Location: ${locations}, Path: ${path}` + ); + + if (message.includes('Unauthorized') || message.includes('401')) { + // Token expired or invalid -- redirect to sign-in + } + } + } + + if (networkError) { + console.error(`[Network error]: ${networkError}`); + } +}); + +// --- Retry Link --- +const retryLink = new RetryLink({ + delay: { initial: 300, max: 5000, jitter: true }, + attempts: { max: 3, retryIf: (error) => !!error }, +}); + +// --- Apollo Client --- +// Link chain: RetryLink -> ErrorLink -> AuthLink -> HttpLink -> AppSync +export const apolloClient = new ApolloClient({ + link: from([retryLink, errorLink, authLink, httpLink]), + cache: new InMemoryCache(), +}); +``` + + + +## Set up real-time subscriptions + +Subscriptions use the Amplify library (not Apollo) because AppSync uses a custom 
WebSocket protocol that standard GraphQL subscription libraries cannot handle. + + + +**Do NOT use `graphql-ws`, `subscriptions-transport-ws`, or Apollo's `WebSocketLink` with AppSync.** These libraries do not speak AppSync's custom WebSocket protocol and will fail silently -- the WebSocket connection establishes successfully but subscription callbacks never fire. + + + +### Create the Amplify subscription client + +Create the Amplify client alongside your Apollo Client. You should already have Amplify configured at app startup: + +```ts +import { generateClient } from 'aws-amplify/api'; + +const amplifyClient = generateClient(); +``` + +You now have two clients: +- **`apolloClient`** -- for queries, mutations, and caching +- **`amplifyClient`** -- for subscriptions only + + + +**Tip:** Your auto-generated `src/graphql/subscriptions.js` file contains the exact subscription signatures for your schema, including any `$owner` and `$filter` parameters. Reference this file to verify the correct field names and available arguments for each subscription type before writing your own. + + + +### Subscription pattern: refetch on event (recommended) + +The simplest and most reliable approach: when a subscription event fires, refetch the list query from the server. 
+ + + +```tsx +import { useQuery } from '@apollo/client'; +import { generateClient } from 'aws-amplify/api'; +import { useEffect } from 'react'; +import { LIST_POSTS } from './graphql/operations'; + +const amplifyClient = generateClient(); + +function PostList() { + const { data, loading, error, refetch } = useQuery(LIST_POSTS); + + useEffect(() => { + const subscriptions = [ + amplifyClient.graphql({ + query: `subscription OnCreatePost { + onCreatePost { id } + }` + }).subscribe({ + next: () => refetch(), + error: (err) => console.error('Create subscription error:', err), + }), + amplifyClient.graphql({ + query: `subscription OnUpdatePost { + onUpdatePost { id } + }` + }).subscribe({ + next: () => refetch(), + error: (err) => console.error('Update subscription error:', err), + }), + amplifyClient.graphql({ + query: `subscription OnDeletePost { + onDeletePost { id } + }` + }).subscribe({ + next: () => refetch(), + error: (err) => console.error('Delete subscription error:', err), + }), + ]; + + return () => subscriptions.forEach(sub => sub.unsubscribe()); + }, [refetch]); + + if (loading) return
<div>Loading...</div>;
+  if (error) return <div>Error: {error.message}</div>;
+
+  const activePosts = data?.listPosts?.items?.filter(
+    (post) => !post._deleted
+  ) || [];
+
+  return (
+    <ul>
+      {activePosts.map((post) => (
+        <li key={post.id}>{post.title}</li>
+      ))}
+    </ul>
+  );
+}
+```
+
+ +**Why this pattern works well:** + +- The subscription payload only needs `id` since you are refetching the full list anyway, keeping the subscription lightweight +- No cache manipulation logic to get wrong -- the refetch guarantees consistency with the server +- One extra network round-trip per event, which is typically under 100ms and imperceptible for most applications + + + +For applications that need lower latency or handle high-frequency updates, you can update Apollo's cache directly from subscription data instead of refetching. This avoids the extra network round-trip but requires more code and careful cache management. + +```ts +import { useQuery } from '@apollo/client'; +import { generateClient } from 'aws-amplify/api'; +import { useEffect } from 'react'; +import { LIST_POSTS, POST_DETAILS_FRAGMENT } from './graphql/operations'; +import { apolloClient } from './apolloClient'; + +const amplifyClient = generateClient(); + +function PostListAdvanced() { + const { data, loading, error } = useQuery(LIST_POSTS); + + useEffect(() => { + const sub = amplifyClient.graphql({ + query: `subscription OnCreatePost { + onCreatePost { + id title content status rating + _version _deleted _lastChangedAt + createdAt updatedAt + } + }` + }).subscribe({ + next: ({ data }) => { + const newPost = data.onCreatePost; + apolloClient.cache.modify({ + fields: { + listPosts(existingData = { items: [] }) { + const newRef = apolloClient.cache.writeFragment({ + data: newPost, + fragment: POST_DETAILS_FRAGMENT, + }); + return { + ...existingData, + items: [...existingData.items, newRef], + }; + }, + }, + }); + }, + error: (err) => console.error('Create subscription error:', err), + }); + + return () => sub.unsubscribe(); + }, []); + + // ... render logic +} +``` + +**Recommendation:** Start with the refetch pattern. Only move to direct cache updates if you have measured a performance problem.
+ + + +### DataStore comparison + +| DataStore | Amplify + Apollo (Hybrid) | +|-----------|--------------------------| +| `DataStore.observe(Post).subscribe(...)` | `amplifyClient.graphql({ query: onCreatePost }).subscribe(...)` | +| `DataStore.observeQuery(Post)` | `useQuery(LIST_POSTS)` + subscription refetch | +| Automatic per-model subscriptions | Manual setup per subscription type (create, update, delete) | +| Single observe call for all event types | Separate subscription per event type | + +### Troubleshooting subscriptions + + + +**Subscription connects but never fires:** + +The subscription name must match your schema exactly. AppSync subscriptions are generated as `onCreateModelName`, `onUpdateModelName`, and `onDeleteModelName` (camelCase). Check your AppSync schema in the AWS console. + +**Auth error on subscription:** + +Amplify must be configured **before** creating the subscription client. Make sure `Amplify.configure(config)` runs at app startup before any call to `generateClient()`. + +**Subscription disconnects after ~5 minutes of inactivity:** + +This is normal behavior. Amplify's `AWSAppSyncRealTimeProvider` handles automatic reconnection without any action on your part. + +**Subscription works in development but not in production:** + +Check that your `amplifyconfiguration.json` (or `aws-exports.js`) configuration is correct for the production environment and that CORS is configured on your AppSync API to allow WebSocket connections from your production domain. + +**Subscription connects but receives no events (owner-based auth):** + +If your model uses **owner-based authorization** (`@auth(rules: [{ allow: owner }])`), you must pass the `$owner` variable in your subscriptions. Without it, the subscription connects successfully but AppSync silently filters out all events. This is the **most common cause** of "subscriptions work but nothing happens." 
+ +Get the owner value from the current auth session and pass it as a variable: + +```ts +import { fetchAuthSession } from 'aws-amplify/auth'; + +const session = await fetchAuthSession(); +const owner = session.tokens?.idToken?.payload?.sub as string; + +(amplifyClient.graphql({ + query: `subscription OnCreatePost($owner: String!) { + onCreatePost(owner: $owner) { id } + }`, + variables: { owner }, +}) as any).subscribe({ next: () => refetch() }); +``` + +All three subscription types (`onCreate`, `onUpdate`, `onDelete`) need the `$owner` variable for owner-based auth models. See the [Advanced patterns](/gen1/[platform]/build-a-backend/more-features/datastore/migrate-from-datastore/advanced-patterns/) page for the complete React component pattern. + + From 3253b89e742f2978df80c7c19f98acb470f06699 Mon Sep 17 00:00:00 2001 From: Ahmed Hamouda Date: Wed, 1 Apr 2026 08:58:11 -0400 Subject: [PATCH 2/2] chore: add migration guide terms to spellcheck dictionary --- cspell.json | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/cspell.json b/cspell.json index eea51669780..84e37e30409 100644 --- a/cspell.json +++ b/cspell.json @@ -382,6 +382,7 @@ "callout--info", "callout", "callout", + "callouts", "camelCase", "canCancel", "cancelAllWithType", @@ -506,7 +507,9 @@ "DataStore", "dd3f5b", "deddd", + "dedup", "deduped", + "Dexie", "deeplink", "Deeplink", "deepskyblue", @@ -557,6 +560,7 @@ "echofunction", "ecommerce", "Elasticsearch", + "ERESOLVE", "ElasticSearch", "ElasticsearchEBSVolumeGB", "ElasticsearchInstanceCount", @@ -1027,6 +1031,7 @@ "PITR", "pkey", "placeindex", + "persistor", "pluggable", "png", "Podfile", @@ -1044,6 +1049,7 @@ "postname", "posts.graphql", "PostsTable", + "posttags", "posttitle", "powertools", "pre-annotated", @@ -1498,6 +1504,7 @@ "amplifyjsapp", "lightgray", "inappbrowser", + "immer", "importauth", "withoauth", "myawesomekey",