Resume After Refresh
What happens when a user accidentally closes the tab at 87% progress on a 2 GB video upload? With a naive uploader, they start over. With testChunks and a bit of localStorage bookkeeping, Resumable.js picks up where it left off. This example shows the full client-and-server flow for surviving page refreshes, browser crashes, and flaky connections. More patterns in the Examples hub.
How testChunks Works
When testChunks is set to true, Resumable.js sends a GET request for each chunk before uploading it. If the server responds with 200, that chunk is considered already received — skip it. If the server returns 404 (or any non-200), the chunk gets uploaded normally.
Think of it like a checklist. Before stuffing each envelope, you call the post office: "Did envelope 14 arrive?" If yes, move to 15. If no, resend 14. The overhead of those GET checks is tiny compared to re-uploading megabytes of data.
Client-Side: Persisting File Metadata
The browser's File object can't be serialized into localStorage. But you can store enough metadata to let the user re-select the same file and have Resumable.js reconcile it:
// Uploader configured for resumable transfers: with testChunks enabled,
// the library probes the server (GET) for each chunk before sending it.
var resumableOptions = {
  target: '/api/upload',
  testTarget: '/api/upload', // GET requests go here for chunk checks
  chunkSize: 2 * 1024 * 1024, // 2 MB per chunk
  testChunks: true,
  simultaneousUploads: 3,
  // Deterministic ID derived from file properties, so the same file maps
  // to the same identifier before and after a page refresh.
  generateUniqueIdentifier: function (file) {
    return file.size + '-' + file.name.replace(/[^a-zA-Z0-9_-]/g, '');
  }
};
var r = new Resumable(resumableOptions);
r.assignBrowse(document.getElementById('browse-btn'));
// Persist in-progress uploads so they can be offered for resume after a
// refresh. Resuming requires the user to re-select the same file, which
// fires 'fileAdded' again — so dedupe by identifier, otherwise every
// resume attempt stacks a duplicate entry in localStorage.
r.on('fileAdded', function (file) {
  var uploads = JSON.parse(localStorage.getItem('pending_uploads') || '[]');
  var alreadyTracked = uploads.some(function (u) {
    return u.uniqueIdentifier === file.uniqueIdentifier;
  });
  if (!alreadyTracked) {
    uploads.push({
      uniqueIdentifier: file.uniqueIdentifier,
      fileName: file.fileName,
      size: file.size
    });
    localStorage.setItem('pending_uploads', JSON.stringify(uploads));
  }
  r.upload();
});
// On completion, drop the file from the pending list so it is no longer
// offered for resume on the next page load.
r.on('fileSuccess', function (file) {
  var pending = JSON.parse(localStorage.getItem('pending_uploads') || '[]');
  var remaining = pending.filter(function (entry) {
    return entry.uniqueIdentifier !== file.uniqueIdentifier;
  });
  localStorage.setItem('pending_uploads', JSON.stringify(remaining));
  showStatus(file.fileName + ' complete.');
});
// Live progress readout as chunks are acknowledged by the server.
r.on('fileProgress', function (file) {
  var percent = Math.round(file.progress() * 100);
  showStatus(file.fileName + ': ' + percent + '%');
});
// On page load, tell the user about any uploads that never finished so
// they can re-select the file(s) and resume.
window.addEventListener('DOMContentLoaded', function () {
  var pending = JSON.parse(localStorage.getItem('pending_uploads') || '[]');
  if (pending.length === 0) {
    return; // nothing outstanding — stay quiet
  }
  showStatus(
    'You have ' + pending.length + ' unfinished upload(s). ' +
    'Re-select the same file(s) to resume.'
  );
});
/**
 * Render a status message into the #status element.
 * @param {string} message - text shown to the user
 */
function showStatus(message) {
  var statusEl = document.getElementById('status');
  statusEl.textContent = message;
}
Why generateUniqueIdentifier Matters
The default identifier uses file.size + '-' + file.name. That's usually fine. But if your app allows renaming before upload, or if multiple users might upload identically named files, you'll want something more deterministic — or hash-based. The key requirement: the same file must produce the same identifier before and after a refresh so the server's chunk inventory lines up.
Server-Side: GET Endpoint for Chunk Checks
Your server needs to handle both GET (does this chunk exist?) and POST (receive this chunk). Here's a minimal Express implementation:
const express = require('express');
const multer = require('multer'); // multipart parser; stores each incoming chunk as a temp file
const fs = require('fs');
const path = require('path');
const app = express();
// All chunk files and reassembled uploads live here. With a string
// `dest`, multer creates the directory itself if it is missing.
const uploadDir = path.join(__dirname, 'uploads');
const upload = multer({ dest: uploadDir });
// GET — check if a chunk already exists
app.get('/api/upload', (req, res) => {
const chunkId = [
req.query.resumableIdentifier,
'chunk',
req.query.resumableChunkNumber
].join('-');
const chunkPath = path.join(uploadDir, chunkId);
if (fs.existsSync(chunkPath)) {
res.status(200).send('Found');
} else {
res.status(404).send('Not found');
}
});
// POST — receive and store a chunk
app.post('/api/upload', upload.single('file'), (req, res) => {
const chunkId = [
req.body.resumableIdentifier,
'chunk',
req.body.resumableChunkNumber
].join('-');
const chunkPath = path.join(uploadDir, chunkId);
fs.renameSync(req.file.path, chunkPath);
// Check if all chunks are present
const total = parseInt(req.body.resumableTotalChunks, 10);
let allPresent = true;
for (let i = 1; i <= total; i++) {
const p = path.join(uploadDir, req.body.resumableIdentifier + '-chunk-' + i);
if (!fs.existsSync(p)) { allPresent = false; break; }
}
if (allPresent) {
reassembleFile(req.body.resumableIdentifier, req.body.resumableFilename, total);
}
res.status(200).send('OK');
});
/**
 * Concatenate chunks 1..totalChunks into the final file and delete each
 * chunk as it is consumed. Fully synchronous: when this returns, the
 * reassembled file is completely on disk.
 *
 * The original stream-based version ignored write() backpressure (buffering
 * the whole file in memory) and logged success before end() had flushed;
 * synchronous appends avoid both problems. path.basename() guards against
 * directory traversal in the client-supplied file name.
 *
 * @param {string} identifier  - sanitized upload identifier
 * @param {string} filename    - client-supplied name for the final file
 * @param {number} totalChunks - number of chunks to join, 1-based
 */
function reassembleFile(identifier, filename, totalChunks) {
  const finalPath = path.join(uploadDir, path.basename(filename));
  // Truncate any previous partial result before appending.
  fs.writeFileSync(finalPath, '');
  for (let i = 1; i <= totalChunks; i++) {
    const chunkPath = path.join(uploadDir, identifier + '-chunk-' + i);
    fs.appendFileSync(finalPath, fs.readFileSync(chunkPath));
    fs.unlinkSync(chunkPath); // chunk no longer needed once appended
  }
  console.log('Reassembled:', filename);
}
app.listen(3000);
The Full Flow
- First visit — user selects a 500 MB file. Resumable.js generates an identifier, starts uploading chunks.
- Tab closes at chunk 120 of 250 — server has 120 chunk files on disk. localStorage has the file's metadata.
- User returns — the page shows "1 unfinished upload." They re-select the same file.
- Resumable.js sends a GET check for chunk 1 — the server responds 200, so it is skipped. Same for chunks 2–120.
- The check for chunk 121 returns 404 — uploading resumes from here.
- Chunks 121–250 upload normally. The file reassembles on the server.
The user uploaded 120 chunks worth of data once, not twice. On a slow connection, that's the difference between a frustrated user and a delighted one.
Edge Cases to Handle
- Stale localStorage entries: set a TTL. If the pending upload is older than 24 hours, remove it and let the server clean up orphaned chunks via a cron job.
- Different file, same name and size: rare, but possible. If you need absolute certainty, compute a partial hash (first + last 2 MB) in generateUniqueIdentifier using the FileReader API.
- Storage quota: localStorage caps at ~5 MB. For tracking many concurrent uploads, consider IndexedDB instead.
Resumable uploads aren't magic — they're just good bookkeeping on both sides of the wire.
