🚧 Ignore length when reading from disk

This commit is contained in:
C-3PO 2018-07-06 00:02:03 +02:00
parent 06809ad8ed
commit e206280de1
Signed by: c3po
GPG key ID: 62993C4BB4D86F24

View file

@@ -12,7 +12,7 @@ interface IGetFileFromDisksOptions {
} }
function getStream(disks: string[], index: number, offset: number, length: number = Infinity) { function getStream(disks: string[], index: number, offset: number, length: number = Infinity) {
return fs.createReadStream(disks[index], { start: offset, end: offset + length - 1 }); return fs.createReadStream(disks[index], { start: offset }); //, end: offset + length - 1 });
} }
/** Takes a list of ReadableStreams (the disks), as well as the offset and length, and returns a stream for just one file. */ /** Takes a list of ReadableStreams (the disks), as well as the offset and length, and returns a stream for just one file. */
@@ -23,37 +23,15 @@ export default async function getFileFromDisks(disks: string[], { diskStart, off
let totalRead = 0; let totalRead = 0;
//Create new stream that concatenates disks until storedSize is reached, then ends the stream. //Create new stream that concatenates disks until storedSize is reached, then ends the stream.
const outputStream = new stream.PassThrough({ const outputStream = new stream.PassThrough();
/*read(num) {
if (num === undefined) {
throw new Error('Expected to receive number of bytes when reading from stream.');
}
totalRead += num;
//end of file reached
if (localFileHeaderLength !== 0 && totalRead >= localFileHeaderLength + storedSize) {
return null;
}
const chunk = curDisk.read(num);
//transparently switch to next disk as soon as we finished reading current disk
if (chunk === null) {
curDiskIndex += 1;
curDisk = getStream(disks, curDiskIndex, 0, (localFileHeaderLength === 0) ? Infinity : localFileHeaderLength + storedSize - totalRead);
//TODO: await new Promise((resolve) => { curDisk.on('readable', () => { resolve(); }); });
return curDisk.read(num);
} else {
return chunk;
}
},*/
});
const onData = (chunk: Buffer) => { const onData = (chunk: Buffer) => {
outputStream.write(chunk); outputStream.write(chunk);
totalRead += chunk.length; totalRead += chunk.length;
//TODO: need to end if we have read beyond the file //need to end if we have read beyond the file
//TODO: need to also shorten chunk if file ended inside it, before writing it to PassThrough.
if (localFileHeaderLength !== 0 && totalRead >= localFileHeaderLength + storedSize) { if (localFileHeaderLength !== 0 && totalRead >= localFileHeaderLength + storedSize) {
//TODO outputStream.end();
} }
}; };
const onError = (error: any) => { const onError = (error: any) => {