@@ -4497,31 +4497,42 @@ Or do you want to use the entrypoints '${name}' and '${runtime}' independently o
44974497
44984498 this . logger . time ( "hashing: sort chunks" ) ;
44994499 /*
4500- * all non-runtime chunks need to be hashed first,
4501- * since runtime chunks might use their hashes.
4502- * runtime chunks need to be hashed in the correct order
4503- * since they may depend on each other (for async entrypoints).
4504- * So we put all non-runtime chunks first and hash them in any order.
4505- * And order runtime chunks according to references between each other.
4506- * Chunks need to be in deterministic order since we add hashes to full chunk
4507- * during this hashing.
4500+ * Chunks are hashed in 4 categories, in this order:
4501+ * 1. Async chunks - no hash dependencies on other chunks
4502+ * 2. Non-entry initial chunks (e.g. shared split chunks) - no hash
4503+ * dependencies on other chunks, but runtime chunks may read their
4504+ * hashes via GetChunkFilenameRuntimeModule (dependentHash)
4505+ * 3. Runtime chunks - may use hashes of async and non-entry initial
4506+ * chunks (via GetChunkFilenameRuntimeModule). Ordered by references
4507+ * between each other (for async entrypoints)
4508+ * 4. Entry chunks - may depend on runtimeChunk.hash (via
4509+ * createChunkHashHandler for ESM/CJS entry importing runtime)
4510+ *
4511+ * This ordering ensures all hash dependencies flow in one direction:
4512+ * async/initial → runtime → entry, with no circular dependencies.
4513+ * Chunks within each category are sorted by id for determinism.
45084514 */
45094515 /** @type {Chunk[] } */
45104516 const unorderedRuntimeChunks = [ ] ;
45114517 /** @type {Chunk[] } */
45124518 const initialChunks = [ ] ;
45134519 /** @type {Chunk[] } */
4520+ const entryChunks = [ ] ;
4521+ /** @type {Chunk[] } */
45144522 const asyncChunks = [ ] ;
45154523 for ( const c of this . chunks ) {
45164524 if ( c . hasRuntime ( ) ) {
45174525 unorderedRuntimeChunks . push ( c ) ;
4526+ } else if ( chunkGraph . getNumberOfEntryModules ( c ) > 0 ) {
4527+ entryChunks . push ( c ) ;
45184528 } else if ( c . canBeInitial ( ) ) {
45194529 initialChunks . push ( c ) ;
45204530 } else {
45214531 asyncChunks . push ( c ) ;
45224532 }
45234533 }
45244534 unorderedRuntimeChunks . sort ( byId ) ;
4535+ entryChunks . sort ( byId ) ;
45254536 initialChunks . sort ( byId ) ;
45264537 asyncChunks . sort ( byId ) ;
45274538
@@ -4693,8 +4704,9 @@ This prevents using hashes of each other and should be avoided.`);
46934704 this . logger . timeAggregate ( "hashing: hash chunks" ) ;
46944705 } ;
46954706 for ( const chunk of asyncChunks ) processChunk ( chunk ) ;
4696- for ( const chunk of runtimeChunks ) processChunk ( chunk ) ;
46974707 for ( const chunk of initialChunks ) processChunk ( chunk ) ;
4708+ for ( const chunk of runtimeChunks ) processChunk ( chunk ) ;
4709+ for ( const chunk of entryChunks ) processChunk ( chunk ) ;
46984710 if ( errors . length > 0 ) {
46994711 errors . sort (
47004712 compareSelect ( ( err ) => err . module , compareModulesByIdentifier )
0 commit comments