@@ -40,6 +40,8 @@ Section -> Buffer
 
 // "wpc" + 1 in little-endian
 const VERSION = 0x01637077;
+const WRITE_LIMIT_TOTAL = 0x7fff0000;
+const WRITE_LIMIT_CHUNK = 511 * 1024 * 1024;
 
 /**
  * @param {Buffer[]} buffers buffers
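Not part of the diff: a minimal sketch of what the two new limits work out to, assuming plain Node.js; the derivation comments are mine, not from the PR.

	// WRITE_LIMIT_TOTAL caps how many bytes are queued on the stream per batch;
	// 0x7fff0000 stays just under the 2^31 boundary that very large single
	// writes can hit. WRITE_LIMIT_CHUNK caps an individual Buffer slice.
	const WRITE_LIMIT_TOTAL = 0x7fff0000; // 2147418112 bytes (2 GiB - 64 KiB)
	const WRITE_LIMIT_CHUNK = 511 * 1024 * 1024; // 535822336 bytes (511 MiB)

	// up to four full-size chunks fit into one batch:
	console.log(Math.floor(WRITE_LIMIT_TOTAL / WRITE_LIMIT_CHUNK)); // 4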
@@ -87,7 +89,7 @@ const readUInt64LE = Buffer.prototype.readBigUInt64LE
  * @param {FileMiddleware} middleware this
  * @param {BufferSerializableType[] | Promise<BufferSerializableType[]>} data data to be serialized
  * @param {string | boolean} name file base name
- * @param {function(string | false, Buffer[]): Promise<void>} writeFile writes a file
+ * @param {function(string | false, Buffer[], number): Promise<void>} writeFile writes a file
  * @param {string | Hash} hashFunction hash function to use
  * @returns {Promise<SerializeResult>} resulting file pointer and promise
  */
@@ -212,9 +214,9 @@ const serialize = async (
 		if (name === true) {
 			name = hashForName(buf, hashFunction);
 		}
-		backgroundJobs.push(writeFile(name, buf));
 		let size = 0;
 		for (const b of buf) size += b.length;
+		backgroundJobs.push(writeFile(name, buf, size));
 		return {
 			size,
 			name,
@@ -422,7 +424,7 @@ class FileMiddleware extends SerializerMiddleware {
 		// It's important that we don't touch existing files during serialization
 		// because serialize may read existing files (when deserializing)
 		const allWrittenFiles = new Set();
-		const writeFile = async (name, content) => {
+		const writeFile = async (name, content, size) => {
 			const file = name
 				? join(this.fs, filename, `../${name}${extension}`)
 				: filename;
@@ -441,10 +443,7 @@ class FileMiddleware extends SerializerMiddleware {
 							[zConstants.BROTLI_PARAM_MODE]: zConstants.BROTLI_MODE_TEXT,
 							[zConstants.BROTLI_PARAM_QUALITY]: 2,
 							[zConstants.BROTLI_PARAM_DISABLE_LITERAL_CONTEXT_MODELING]: true,
-							[zConstants.BROTLI_PARAM_SIZE_HINT]: content.reduce(
-								(size, b) => size + b.length,
-								0
-							)
+							[zConstants.BROTLI_PARAM_SIZE_HINT]: size
 						}
 					});
 				}
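A minimal standalone sketch of the size-hint usage, assuming only Node's built-in zlib; the 64 MiB figure is an arbitrary example, not from the PR.

	const { createBrotliCompress, constants: zConstants } = require("zlib");

	const totalBytes = 64 * 1024 * 1024; // example payload size, known up front
	const compression = createBrotliCompress({
		params: {
			[zConstants.BROTLI_PARAM_QUALITY]: 2,
			// an estimate of the total input size; with the precomputed `size`
			// there is no need to reduce over `content` a second time
			[zConstants.BROTLI_PARAM_SIZE_HINT]: totalBytes
		}
	});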
@@ -456,8 +455,44 @@ class FileMiddleware extends SerializerMiddleware {
 					stream.on("error", err => reject(err));
 					stream.on("finish", () => resolve());
 				}
-				for (const b of content) stream.write(b);
-				stream.end();
+				// split into chunks for WRITE_LIMIT_CHUNK size
+				const chunks = [];
+				for (const b of content) {
+					if (b.length < WRITE_LIMIT_CHUNK) {
+						chunks.push(b);
+					} else {
+						for (let i = 0; i < b.length; i += WRITE_LIMIT_CHUNK) {
+							chunks.push(b.slice(i, i + WRITE_LIMIT_CHUNK));
+						}
+					}
+				}
+
+				const len = chunks.length;
+				let i = 0;
+				const batchWrite = err => {
+					// will be handled in "on" error handler
+					if (err) return;
+
+					if (i === len) {
+						stream.end();
+						return;
+					}
+
+					// queue up a batch of chunks up to the write limit
+					// end is exclusive
+					let end = i;
+					let sum = chunks[end++].length;
+					while (end < len) {
+						sum += chunks[end].length;
+						if (sum > WRITE_LIMIT_TOTAL) break;
+						end++;
+					}
+					while (i < end - 1) {
+						stream.write(chunks[i++]);
+					}
+					stream.write(chunks[i++], batchWrite);
+				};
+				batchWrite();
 			});
 			if (name) allWrittenFiles.add(file);
 		};
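For reference, a self-contained sketch of the same batching technique outside the middleware, assuming plain Node.js; `demoWrite` and the file name are illustrative only, not part of the PR.

	const { createWriteStream } = require("fs");

	const WRITE_LIMIT_TOTAL = 0x7fff0000;

	// write `chunks` (an array of Buffers) to `file`, queueing at most
	// WRITE_LIMIT_TOTAL bytes before waiting for the stream to drain them
	const demoWrite = (file, chunks) =>
		new Promise((resolve, reject) => {
			const stream = createWriteStream(file);
			stream.on("error", reject);
			stream.on("finish", resolve);
			let i = 0;
			const batchWrite = err => {
				if (err) return; // surfaced through the "error" listener
				if (i === chunks.length) return stream.end();
				let end = i;
				let sum = chunks[end++].length;
				while (end < chunks.length) {
					sum += chunks[end].length;
					if (sum > WRITE_LIMIT_TOTAL) break;
					end++;
				}
				while (i < end - 1) stream.write(chunks[i++]);
				// the callback on the batch's last write schedules the next batch
				stream.write(chunks[i++], batchWrite);
			};
			batchWrite();
		});

	demoWrite("demo.bin", [Buffer.alloc(1024), Buffer.alloc(2048)]).then(() =>
		console.log("done")
	);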