🐛 End file read stream if EOF is reached
parent db3b303e6c
commit f6eb334602
2 changed files with 18 additions and 9 deletions
@@ -12,7 +12,7 @@ interface IGetFileFromDisksOptions {
 }
 
 function getStream(disks: string[], index: number, offset: number, length: number = Infinity) {
-    return fs.createReadStream(disks[index], { start: offset });
+    return fs.createReadStream(disks[index], { start: offset, end: offset + length - 1 });
 }
 
 /** Takes a list of ReadableStreams (the disks), as well as the offset and length, and returns a stream for just one file. */
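
The `end` option of fs.createReadStream is inclusive, so { start: offset, end: offset + length - 1 } covers exactly `length` bytes starting at `offset`. A minimal sketch of that behaviour, with a made-up file name and made-up numbers:

    import * as fs from 'fs';

    // Because `end` is inclusive, { start: 100, end: 109 } covers exactly 10 bytes.
    const offset = 100;
    const length = 10;
    const partial = fs.createReadStream('disk0.bin', { start: offset, end: offset + length - 1 });

    let bytesRead = 0;
    partial.on('data', (chunk: Buffer | string) => { bytesRead += chunk.length; });
    partial.on('end', () => console.log(`read ${bytesRead} bytes`)); // "read 10 bytes" if the file is long enough
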
@@ -26,12 +26,23 @@ export default async function getFileFromDisks(disks: string[], { diskStart, off
     const outputStream = new stream.PassThrough();
 
     const onData = (chunk: Buffer) => {
-        outputStream.write(chunk);
         totalRead += chunk.length;
-        //need to end if we have read beyond the file
-        //TODO: need to also shorten chunk if file ended inside it, before writing it to PassThrough.
-        if (localFileHeaderLength !== 0 && totalRead >= localFileHeaderLength + storedSize) {
-            outputStream.end();
+        //If we've reached the end, we can stop reading after this chunk
+        const readTooManyBytes = totalRead - (localFileHeaderLength + storedSize);
+        if (localFileHeaderLength !== 0 && readTooManyBytes >= 0) {
+            //We can still write the whole chunk before ending the stream
+            if (readTooManyBytes === 0) {
+                outputStream.end(chunk);
+            } else {
+                //We must shorten the chunk, write the shortened chunk and then end the stream
+                const shortenedLength = chunk.length - readTooManyBytes;
+                const shortenedChunk = new Buffer(shortenedLength);
+                shortenedChunk.fill(chunk, 0, shortenedLength);
+                outputStream.end(shortenedChunk);
+            }
+        } else {
+            //Nowhere near the end, so just write normally
+            outputStream.write(chunk);
         }
     };
     const onError = (error: any) => {
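
Two asides on the trimming branch (editorial notes, not part of the commit): readTooManyBytes is how far totalRead has run past localFileHeaderLength + storedSize, so chunk.length - readTooManyBytes is the portion of the current chunk that still belongs to the file; and new Buffer(size) is deprecated in current Node.js. A sketch of the same trim using Buffer.prototype.subarray, assuming the variables from the hunk above; this is an illustration, not what the commit does:

    import * as stream from 'stream';

    // Assumed to exist in the surrounding function, mirroring the hunk above.
    declare let totalRead: number;
    declare const localFileHeaderLength: number;
    declare const storedSize: number;
    declare const outputStream: stream.PassThrough;

    const onData = (chunk: Buffer) => {
        totalRead += chunk.length;
        const readTooManyBytes = totalRead - (localFileHeaderLength + storedSize);
        if (localFileHeaderLength !== 0 && readTooManyBytes >= 0) {
            // subarray(0, chunk.length) is the whole chunk when readTooManyBytes === 0,
            // so the "ends exactly here" and "ends inside this chunk" cases collapse into one.
            outputStream.end(chunk.subarray(0, chunk.length - readTooManyBytes));
        } else {
            outputStream.write(chunk);
        }
    };
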
@@ -57,8 +68,7 @@ export default async function getFileFromDisks(disks: string[], { diskStart, off
 
     //Read local file header
     localFileHeaderLength = await readLocalFileHeader(outputStream);
-    //now that local file header has been read, we will restrict length of stream to storedSize
-
+    //TODO: now that local file header has been read, restrict length of stream to storedSize
 
     return outputStream;
 }
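
The TODO above refers to the `length` parameter that getStream now accepts: once the local file header has been read, the bytes still needed are localFileHeaderLength + storedSize - totalRead, so the remaining data could be requested with an explicit length instead of trimming chunks in onData. A hypothetical sketch of that idea; diskIndex and currentOffset are assumed bookkeeping variables, not names from this commit:

    // Hypothetical; the declared names mirror the hunks above except where noted.
    declare const disks: string[];
    declare const diskIndex: number;      // assumed: index of the disk being read
    declare const currentOffset: number;  // assumed: offset reached on that disk
    declare const localFileHeaderLength: number;
    declare const storedSize: number;
    declare const totalRead: number;
    declare function getStream(disks: string[], index: number, offset: number, length?: number): NodeJS.ReadableStream;
    declare const onData: (chunk: Buffer) => void;
    declare const onError: (error: any) => void;

    const remaining = localFileHeaderLength + storedSize - totalRead;
    const rest = getStream(disks, diskIndex, currentOffset, remaining);
    rest.on('data', onData);
    rest.on('error', onError);
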
@@ -25,7 +25,6 @@ export default async function readLocalFileHeader(inputStream: stream.Readable):
     }
 
     //Local file header signature
-    console.log(localFileHeader);
     const magic = localFileHeader.readUInt32LE(0);
     if (magic !== 0x04034B50) {
         throw new Error(`Local file header had wrong magic; expected 0x04034B50 but got 0x${magic.toString(16).padStart(8, '0')}.`);
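
For reference, 0x04034B50 is the little-endian reading of the ZIP local file header signature, the four bytes 0x50 0x4B 0x03 0x04 ("PK", 3, 4). A tiny self-contained version of the same check; the trailing bytes in the buffer are made up:

    // 'P', 'K', 0x03, 0x04 — the signature that starts every ZIP local file header.
    const header = Buffer.from([0x50, 0x4b, 0x03, 0x04, 0x14, 0x00]);
    const magic = header.readUInt32LE(0); // least-significant byte first
    console.log(magic.toString(16)); // "4034b50"
    console.log(magic === 0x04034B50); // true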