Finish getFileFromDisks

C-3PO 2018-07-05 20:57:42 +02:00
parent f849744ac8
commit 6d6c673556
Signed by: c3po
GPG key ID: 62993C4BB4D86F24


@@ -1,5 +1,6 @@
 import * as fs from 'fs';
 import * as stream from 'stream';
+import readLocalFileHeader from './readLocalFileHeader';
 
 interface IGetFileFromDisksOptions {
     /** Number of the disk where the local file header starts */
@@ -10,15 +11,34 @@ interface IGetFileFromDisksOptions {
     storedSize: number;
 }
 
+function getStream(disks: string[], index: number, offset: number) {
+    return fs.createReadStream(disks[index], { start: offset });
+}
+
 /** Takes a list of ReadableStreams (the disks), as well as the offset and length, and returns a stream for just one file. */
 export default function getFileFromDisks(disks: string[], { diskStart, offset, storedSize }: IGetFileFromDisksOptions): stream.Readable {
-    const diskStreams = disks.map((fileName) => fs.createReadStream(fileName));
-    //TODO: Can local file header also be spread across multiple disks, or only the payload?
-    //Read local file header
-    //...
+    let curDiskIndex = diskStart;
+    let curDisk = getStream(disks, diskStart, offset);
     //Create new stream that concatenates disks until storedSize is reached, then ends the stream.
-    const outputStream = new stream.Readable();
-    //...
+    const outputStream = new stream.Readable({
+        read(num) {
+            const chunk = curDisk.read(num);
+            //transparently switch to next disk as soon as we finished reading current disk
+            if (chunk === null) {
+                curDiskIndex += 1;
+                curDisk = getStream(disks, curDiskIndex, 0);
+                return curDisk.read(num);
+            } else {
+                return chunk;
+            }
+        },
+    });
+    //Read local file header
+    readLocalFileHeader(outputStream);
+    //TODO: now that local file header has been read, restrict length of stream to storedSize
+    return outputStream;
 }
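
For context, here is a minimal self-contained sketch of the same disk-concatenation idea, written against Node's stream.Readable.from and an async generator instead of the constructor-based read() used in the commit. The function name getPayloadStream is made up for the example, it assumes offset already points at the file payload, and it leaves out the readLocalFileHeader step; it is not part of this commit.

import * as fs from 'fs';
import * as stream from 'stream';

/**
 * Sketch: stream `storedSize` bytes starting at `offset` on disk `diskStart`,
 * transparently continuing onto the following disks of a split archive.
 */
function getPayloadStream(disks: string[], diskStart: number, offset: number, storedSize: number): stream.Readable {
    async function* concatDisks(): AsyncGenerator<Buffer> {
        let remaining = storedSize;
        for (let diskIndex = diskStart; diskIndex < disks.length && remaining > 0; diskIndex += 1) {
            // Only the first disk is read from `offset`; every following disk starts at byte 0.
            const start = diskIndex === diskStart ? offset : 0;
            for await (const chunk of fs.createReadStream(disks[diskIndex], { start }) as AsyncIterable<Buffer>) {
                if (chunk.length >= remaining) {
                    // Last chunk: emit only the bytes that belong to this file, then stop.
                    yield chunk.subarray(0, remaining);
                    return;
                }
                remaining -= chunk.length;
                yield chunk;
            }
        }
    }
    return stream.Readable.from(concatDisks());
}

Readable.from handles backpressure automatically, and the byte counter ends the stream exactly after storedSize bytes even when the payload stops in the middle of a disk. A call might look like getPayloadStream(['archive.z01', 'archive.z02', 'archive.zip'], 0, 120, 5000000).pipe(someDestination), with the file names and numbers purely illustrative.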