diff --git a/benchmark/fs/readfile-promises.js b/benchmark/fs/readfile-promises.js
new file mode 100644
index 00000000000000..28633c3f06427b
--- /dev/null
+++ b/benchmark/fs/readfile-promises.js
@@ -0,0 +1,63 @@
+// Call fs.promises.readFile over and over again really fast.
+// Then see how many times it got called.
+// Yes, this is a silly benchmark. Most benchmarks are silly.
+'use strict';
+
+const path = require('path');
+const common = require('../common.js');
+const fs = require('fs');
+const assert = require('assert');
+
+const tmpdir = require('../../test/common/tmpdir');
+tmpdir.refresh();
+const filename = path.resolve(tmpdir.path,
+                              `.removeme-benchmark-garbage-${process.pid}`);
+
+const bench = common.createBenchmark(main, {
+  duration: [5],
+  len: [1024, 16 * 1024 * 1024],
+  concurrent: [1, 10]
+});
+
+function main({ len, duration, concurrent }) {
+  try { fs.unlinkSync(filename); } catch { }
+  let data = Buffer.alloc(len, 'x');
+  fs.writeFileSync(filename, data);
+  data = null;
+
+  let writes = 0;
+  let benchEnded = false;
+  bench.start();
+  setTimeout(() => {
+    benchEnded = true;
+    bench.end(writes);
+    try { fs.unlinkSync(filename); } catch { }
+    process.exit(0);
+  }, duration * 1000);
+
+  function read() {
+    fs.promises.readFile(filename)
+      .then((res) => afterRead(undefined, res))
+      .catch((err) => afterRead(err));
+  }
+
+  function afterRead(er, data) {
+    if (er) {
+      if (er.code === 'ENOENT') {
+        // Only OK if unlinked by the timer from main.
+        assert.ok(benchEnded);
+        return;
+      }
+      throw er;
+    }
+
+    if (data.length !== len)
+      throw new Error('wrong number of bytes returned');
+
+    writes++;
+    if (!benchEnded)
+      read();
+  }
+
+  while (concurrent--) read();
+}
diff --git a/lib/internal/fs/promises.js b/lib/internal/fs/promises.js
index 32ad2123d47db0..aa927a3dff575d 100644
--- a/lib/internal/fs/promises.js
+++ b/lib/internal/fs/promises.js
@@ -4,9 +4,8 @@
 // See https://github.com/libuv/libuv/pull/1501.
 const kIoMaxLength = 2 ** 31 - 1;
 
-// Note: This is different from kReadFileBufferLength used for non-promisified
-// fs.readFile.
-const kReadFileMaxChunkSize = 2 ** 14;
+const kReadFileBufferLength = 512 * 1024;
+const kReadFileUnknownBufferLength = 64 * 1024;
 const kWriteFileMaxChunkSize = 2 ** 14;
 
 const {
@@ -316,25 +315,46 @@ async function readFileHandle(filehandle, options) {
   if (size > kIoMaxLength)
     throw new ERR_FS_FILE_TOO_LARGE(size);
 
-  const chunks = [];
-  let isFirstChunk = true;
-  const firstChunkSize = size === 0 ? kReadFileMaxChunkSize : size;
-  const chunkSize = MathMin(firstChunkSize, kReadFileMaxChunkSize);
   let endOfFile = false;
+  let totalRead = 0;
+  const noSize = size === 0;
+  const buffers = [];
+  const fullBuffer = noSize ? undefined : Buffer.allocUnsafeSlow(size);
   do {
     if (signal?.aborted) {
       throw lazyDOMException('The operation was aborted', 'AbortError');
     }
-    const buf = Buffer.alloc(isFirstChunk ? firstChunkSize : chunkSize);
-    const { bytesRead, buffer } =
-      await read(filehandle, buf, 0, buf.length, -1);
-    endOfFile = bytesRead === 0;
-    if (bytesRead > 0)
-      ArrayPrototypePush(chunks, buffer.slice(0, bytesRead));
-    isFirstChunk = false;
+    let buffer;
+    let offset;
+    let length;
+    if (noSize) {
+      buffer = Buffer.allocUnsafeSlow(kReadFileUnknownBufferLength);
+      offset = 0;
+      length = kReadFileUnknownBufferLength;
+    } else {
+      buffer = fullBuffer;
+      offset = totalRead;
+      length = MathMin(size - totalRead, kReadFileBufferLength);
+    }
+
+    const bytesRead = (await binding.read(filehandle.fd, buffer, offset,
+                                          length, -1, kUsePromises)) || 0;
+    totalRead += bytesRead;
+    endOfFile = bytesRead === 0 || totalRead === size;
+    if (noSize && bytesRead > 0) {
+      const isBufferFull = bytesRead === kReadFileUnknownBufferLength;
+      const chunkBuffer = isBufferFull ? buffer : buffer.slice(0, bytesRead);
+      ArrayPrototypePush(buffers, chunkBuffer);
+    }
   } while (!endOfFile);
 
-  const result = chunks.length === 1 ? chunks[0] : Buffer.concat(chunks);
+  let result;
+  if (size > 0) {
+    result = totalRead === size ? fullBuffer : fullBuffer.slice(0, totalRead);
+  } else {
+    result = buffers.length === 1 ? buffers[0] : Buffer.concat(buffers,
+                                                               totalRead);
+  }
 
   return options.encoding ? result.toString(options.encoding) : result;
 }
diff --git a/test/parallel/test-fs-promises-file-handle-readFile.js b/test/parallel/test-fs-promises-file-handle-readFile.js
index d1e724d201ee71..66dc8f0fb0e0a1 100644
--- a/test/parallel/test-fs-promises-file-handle-readFile.js
+++ b/test/parallel/test-fs-promises-file-handle-readFile.js
@@ -10,7 +10,7 @@ const {
   open,
   readFile,
   writeFile,
-  truncate
+  truncate,
 } = fs.promises;
 const path = require('path');
 const tmpdir = require('../common/tmpdir');
@@ -64,6 +64,7 @@ async function doReadAndCancel() {
     await assert.rejects(readFile(fileHandle, { signal }), {
       name: 'AbortError'
     });
+    await fileHandle.close();
   }
 
   // Signal aborted on first tick
@@ -74,10 +75,11 @@ async function doReadAndCancel() {
     fs.writeFileSync(filePathForHandle, buffer);
     const controller = new AbortController();
     const { signal } = controller;
-    tick(1, () => controller.abort());
+    process.nextTick(() => controller.abort());
     await assert.rejects(readFile(fileHandle, { signal }), {
       name: 'AbortError'
-    });
+    }, 'tick-0');
+    await fileHandle.close();
   }
 
   // Signal aborted right before buffer read
@@ -90,10 +92,12 @@ async function doReadAndCancel() {
     const controller = new AbortController();
     const { signal } = controller;
 
-    tick(2, () => controller.abort());
+    tick(1, () => controller.abort());
     await assert.rejects(fileHandle.readFile({ signal, encoding: 'utf8' }), {
       name: 'AbortError'
-    });
+    }, 'tick-1');
+
+    await fileHandle.close();
   }
 
   // Validate file size is within range for reading
@@ -111,6 +115,7 @@ async function doReadAndCancel() {
       name: 'RangeError',
      code: 'ERR_FS_FILE_TOO_LARGE'
    });
+    await fileHandle.close();
  }
 }
 
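For readers outside the Node.js internals: the heart of this patch is the new read loop in readFileHandle. When the file size is known up front, it allocates the final buffer once with Buffer.allocUnsafeSlow and fills it in place with reads of up to 512 KiB (kReadFileBufferLength), instead of allocating, collecting, and concatenating 16 KiB chunks. When the size is unknown (stat reports 0, as for many procfs files), it falls back to collecting 64 KiB chunks (kReadFileUnknownBufferLength) and concatenating them once at the end. The sketch below reproduces that strategy in userland with public fs.promises APIs only; it uses filehandle.read() where the patch calls the internal binding.read with kUsePromises, and the function name readFileInChunks is hypothetical, not part of the patch.

'use strict';

// Userland sketch of the patch's buffering strategy (illustrative only).
const fsPromises = require('fs').promises;

const kReadFileBufferLength = 512 * 1024;        // chunk size when size is known
const kReadFileUnknownBufferLength = 64 * 1024;  // chunk size when size is unknown

async function readFileInChunks(path) {
  const filehandle = await fsPromises.open(path, 'r');
  try {
    const size = (await filehandle.stat()).size;
    const noSize = size === 0;  // e.g. procfs files report size 0
    // Size known: one allocation up front, filled in place chunk by chunk.
    const fullBuffer = noSize ? undefined : Buffer.allocUnsafeSlow(size);
    const buffers = [];
    let totalRead = 0;
    let endOfFile = false;

    do {
      let buffer;
      let offset;
      let length;
      if (noSize) {
        // Size unknown: read into fresh 64 KiB buffers and collect them.
        buffer = Buffer.allocUnsafeSlow(kReadFileUnknownBufferLength);
        offset = 0;
        length = kReadFileUnknownBufferLength;
      } else {
        buffer = fullBuffer;
        offset = totalRead;
        length = Math.min(size - totalRead, kReadFileBufferLength);
      }

      // position -1 reads from the current file position.
      const { bytesRead } = await filehandle.read(buffer, offset, length, -1);
      totalRead += bytesRead;
      endOfFile = bytesRead === 0 || totalRead === size;
      if (noSize && bytesRead > 0)
        buffers.push(buffer.subarray(0, bytesRead));
    } while (!endOfFile);

    if (!noSize)
      return totalRead === size ? fullBuffer
                                : fullBuffer.subarray(0, totalRead);
    return buffers.length === 1 ? buffers[0]
                                : Buffer.concat(buffers, totalRead);
  } finally {
    await filehandle.close();
  }
}

From a Node.js source checkout the new benchmark should also be runnable directly, e.g. `node benchmark/fs/readfile-promises.js` (it needs the checkout for ../common.js and test/common/tmpdir), to compare throughput against the old 16 KiB chunking scheme.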
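The test changes also exercise the public cancellation path that the new loop's signal?.aborted check serves. A minimal standalone usage sketch, assuming a Node.js version where AbortController is global and fs.promises.readFile accepts a signal (v15.x+); reading __filename just gives the read something real to cancel, and in practice the nextTick abort wins the race against the asynchronous open:

'use strict';

const { readFile } = require('fs').promises;
const assert = require('assert');

async function main() {
  const controller = new AbortController();
  // Start the read, then abort on the next tick, before the first
  // chunk read can complete -- the same pattern as the updated test.
  const promise = readFile(__filename, { signal: controller.signal });
  process.nextTick(() => controller.abort());
  await assert.rejects(promise, { name: 'AbortError' });
  console.log('read was cancelled as expected');
}

main();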