Commit 891b7197 authored by Medicean

add thirdparty tar

parent dd7e6a95
The ISC License
Copyright (c) Isaac Z. Schlueter and Contributors
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
Like `chown -R`.
Takes the same arguments as `fs.chown()`
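A minimal usage sketch (the path, uid, and gid below are placeholder values):

```js
const chownr = require('chownr')

// recursively change ownership of everything under /some/path
// (1000/1000 are placeholder uid/gid values)
chownr('/some/path', 1000, 1000, er => {
  if (er)
    console.error('chownr failed', er)
  else
    console.log('done')
})

// or, synchronously:
// chownr.sync('/some/path', 1000, 1000)
```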
module.exports = chownr
chownr.sync = chownrSync
var fs = require("fs")
, path = require("path")
function chownr (p, uid, gid, cb) {
fs.readdir(p, function (er, children) {
// any error other than ENOTDIR means it's not readable, or
// doesn't exist. give up.
if (er && er.code !== "ENOTDIR") return cb(er)
if (er || !children.length) return fs.chown(p, uid, gid, cb)
var len = children.length
, errState = null
children.forEach(function (child) {
var pathChild = path.resolve(p, child);
fs.lstat(pathChild, function(er, stats) {
if (er)
return cb(er)
if (!stats.isSymbolicLink())
chownr(pathChild, uid, gid, then)
else
then()
})
})
function then (er) {
if (errState) return
if (er) return cb(errState = er)
if (-- len === 0) return fs.chown(p, uid, gid, cb)
}
})
}
function chownrSync (p, uid, gid) {
var children
try {
children = fs.readdirSync(p)
} catch (er) {
if (er && er.code === "ENOTDIR") return fs.chownSync(p, uid, gid)
throw er
}
if (!children.length) return fs.chownSync(p, uid, gid)
children.forEach(function (child) {
var pathChild = path.resolve(p, child)
var stats = fs.lstatSync(pathChild)
if (!stats.isSymbolicLink())
chownrSync(pathChild, uid, gid)
})
return fs.chownSync(p, uid, gid)
}
{
"_from": "chownr@^1.0.1",
"_id": "chownr@1.0.1",
"_inBundle": false,
"_integrity": "sha1-4qdQQqlVGQi+vSW4Uj1fl2nXkYE=",
"_location": "/chownr",
"_phantomChildren": {},
"_requested": {
"type": "range",
"registry": true,
"raw": "chownr@^1.0.1",
"name": "chownr",
"escapedName": "chownr",
"rawSpec": "^1.0.1",
"saveSpec": null,
"fetchSpec": "^1.0.1"
},
"_requiredBy": [
"/tar"
],
"_resolved": "http://registry.npm.taobao.org/chownr/download/chownr-1.0.1.tgz",
"_shasum": "e2a75042a9551908bebd25b8523d5f9769d79181",
"_spec": "chownr@^1.0.1",
"_where": "/Users/medicean/workspace/antSword/node_modules/tar",
"author": {
"name": "Isaac Z. Schlueter",
"email": "i@izs.me",
"url": "http://blog.izs.me/"
},
"bugs": {
"url": "https://github.com/isaacs/chownr/issues"
},
"bundleDependencies": false,
"deprecated": false,
"description": "like `chown -R`",
"devDependencies": {
"mkdirp": "0.3",
"rimraf": "",
"tap": "^1.2.0"
},
"files": [
"chownr.js"
],
"homepage": "https://github.com/isaacs/chownr#readme",
"license": "ISC",
"main": "chownr.js",
"name": "chownr",
"repository": {
"type": "git",
"url": "git://github.com/isaacs/chownr.git"
},
"scripts": {
"test": "tap test/*.js"
},
"version": "1.0.1"
}
The ISC License
Copyright (c) Isaac Z. Schlueter and Contributors
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# fs-minipass
Filesystem streams based on [minipass](http://npm.im/minipass).
4 classes are exported:
- ReadStream
- ReadStreamSync
- WriteStream
- WriteStreamSync
When using `ReadStreamSync`, all of the data is made available
immediately upon consuming the stream. Nothing is buffered in memory
when the stream is constructed. If the stream is piped to a writer,
then it will synchronously `read()` and emit data into the writer as
fast as the writer can consume it. (That is, it will respect
backpressure.) If you call `stream.read()` then it will read the
entire file and return the contents.
When using `WriteStreamSync`, every write is flushed to the file
synchronously. If your writes all come in a single tick, then it'll
write it all out in a single tick. It's as synchronous as you are.
The async versions work much like their node builtin counterparts,
with the exception of introducing significantly less Stream machinery
overhead.
## USAGE
It's just streams, you pipe them or read() them or write() to them.
```js
const fsm = require('fs-minipass')
const readStream = new fsm.ReadStream('file.txt')
const writeStream = new fsm.WriteStream('output.txt')
writeStream.write('some file header or whatever\n')
readStream.pipe(writeStream)
```
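The synchronous variants are used the same way; a minimal sketch (file names
are placeholders):

```js
const fsm = require('fs-minipass')

// copy file.txt to copy.txt entirely within the current tick:
// the reader reads and the writer flushes synchronously, respecting backpressure
const rs = new fsm.ReadStreamSync('file.txt')
const ws = new fsm.WriteStreamSync('copy.txt')
rs.pipe(ws)
```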
## ReadStream(path, options)
Path string is required, but somewhat irrelevant if an open file
descriptor is passed in as an option.
Options:
- `fd` Pass in a numeric file descriptor, if the file is already open.
- `readSize` The size of reads to do, defaults to 16MB
- `size` The size of the file, if known. Prevents zero-byte read()
call at the end.
- `autoClose` Set to `false` to prevent the file descriptor from being
closed when the file is done being read.
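A sketch of these options in use (the descriptor, sizes, and file name are
illustrative):

```js
const fs = require('fs')
const fsm = require('fs-minipass')

// read from an already-open descriptor, 1MB at a time, with a known size
const fd = fs.openSync('file.txt', 'r')
const rs = new fsm.ReadStream('file.txt', {
  fd,                            // reuse the open descriptor instead of opening the path
  readSize: 1024 * 1024,         // 1MB reads instead of the 16MB default
  size: fs.fstatSync(fd).size,   // known size avoids the trailing zero-byte read
  autoClose: false               // keep the descriptor open after the stream ends
})
```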
## WriteStream(path, options)
Path string is required, but somewhat irrelevant if an open file
descriptor is passed in as an option.
Options:
- `fd` Pass in a numeric file descriptor, if the file is already open.
- `mode` The mode to create the file with. Defaults to `0o666`.
- `start` The position in the file to start writing. If not
specified, then the file will start writing at position zero, and be
truncated by default.
- `autoClose` Set to `false` to prevent the file descriptor from being
closed when the stream is ended.
- `flags` Flags to use when opening the file. Irrelevant if `fd` is
passed in, since the file won't be opened in that case. Defaults to
`'r+'` if a `start` position is specified, or `'w'` otherwise.
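For example, appending to a log file instead of truncating it (names and mode
are illustrative):

```js
const fsm = require('fs-minipass')

const ws = new fsm.WriteStream('app.log', {
  flags: 'a',      // explicit flag overrides the default 'w' / 'r+' choice
  mode: 0o644,     // permissions used if the file has to be created
  autoClose: true  // close the descriptor when the stream is ended (the default)
})
ws.write('one more line\n')
ws.end()
```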
'use strict'
const MiniPass = require('minipass')
const EE = require('events').EventEmitter
const fs = require('fs')
// for writev
const binding = process.binding('fs')
const writeBuffers = binding.writeBuffers
const FSReqWrap = binding.FSReqWrap
const _autoClose = Symbol('_autoClose')
const _close = Symbol('_close')
const _ended = Symbol('_ended')
const _fd = Symbol('_fd')
const _finished = Symbol('_finished')
const _flags = Symbol('_flags')
const _flush = Symbol('_flush')
const _handleChunk = Symbol('_handleChunk')
const _makeBuf = Symbol('_makeBuf')
const _mode = Symbol('_mode')
const _needDrain = Symbol('_needDrain')
const _onerror = Symbol('_onerror')
const _onopen = Symbol('_onopen')
const _onread = Symbol('_onread')
const _onwrite = Symbol('_onwrite')
const _open = Symbol('_open')
const _path = Symbol('_path')
const _pos = Symbol('_pos')
const _queue = Symbol('_queue')
const _read = Symbol('_read')
const _readSize = Symbol('_readSize')
const _reading = Symbol('_reading')
const _remain = Symbol('_remain')
const _size = Symbol('_size')
const _write = Symbol('_write')
const _writing = Symbol('_writing')
const _defaultFlag = Symbol('_defaultFlag')
class ReadStream extends MiniPass {
constructor (path, opt) {
opt = opt || {}
super(opt)
this.writable = false
if (typeof path !== 'string')
throw new TypeError('path must be a string')
this[_fd] = typeof opt.fd === 'number' ? opt.fd : null
this[_path] = path
this[_readSize] = opt.readSize || 16*1024*1024
this[_reading] = false
this[_size] = typeof opt.size === 'number' ? opt.size : Infinity
this[_remain] = this[_size]
this[_autoClose] = typeof opt.autoClose === 'boolean' ?
opt.autoClose : true
if (typeof this[_fd] === 'number')
this[_read]()
else
this[_open]()
}
get fd () { return this[_fd] }
get path () { return this[_path] }
write () {
throw new TypeError('this is a readable stream')
}
end () {
throw new TypeError('this is a readable stream')
}
[_open] () {
fs.open(this[_path], 'r', (er, fd) => this[_onopen](er, fd))
}
[_onopen] (er, fd) {
if (er)
this[_onerror](er)
else {
this[_fd] = fd
this.emit('open', fd)
this[_read]()
}
}
[_makeBuf] () {
return Buffer.allocUnsafe(Math.min(this[_readSize], this[_remain]))
}
[_read] () {
if (!this[_reading]) {
this[_reading] = true
const buf = this[_makeBuf]()
/* istanbul ignore if */
if (buf.length === 0) return process.nextTick(() => this[_onread](null, 0, buf))
fs.read(this[_fd], buf, 0, buf.length, null, (er, br, buf) =>
this[_onread](er, br, buf))
}
}
[_onread] (er, br, buf) {
this[_reading] = false
if (er)
this[_onerror](er)
else if (this[_handleChunk](br, buf))
this[_read]()
}
[_close] () {
if (this[_autoClose] && typeof this[_fd] === 'number') {
fs.close(this[_fd], _ => this.emit('close'))
this[_fd] = null
}
}
[_onerror] (er) {
this[_reading] = true
this[_close]()
this.emit('error', er)
}
[_handleChunk] (br, buf) {
let ret = false
// no effect if infinite
this[_remain] -= br
if (br > 0)
ret = super.write(br < buf.length ? buf.slice(0, br) : buf)
if (br === 0 || this[_remain] <= 0) {
ret = false
this[_close]()
super.end()
}
return ret
}
emit (ev, data) {
switch (ev) {
case 'prefinish':
case 'finish':
break
case 'drain':
if (typeof this[_fd] === 'number')
this[_read]()
break
default:
return super.emit(ev, data)
}
}
}
class ReadStreamSync extends ReadStream {
[_open] () {
let threw = true
try {
this[_onopen](null, fs.openSync(this[_path], 'r'))
threw = false
} finally {
if (threw)
this[_close]()
}
}
[_read] () {
let threw = true
try {
if (!this[_reading]) {
this[_reading] = true
do {
const buf = this[_makeBuf]()
/* istanbul ignore next */
const br = buf.length === 0 ? 0 : fs.readSync(this[_fd], buf, 0, buf.length, null)
if (!this[_handleChunk](br, buf))
break
} while (true)
this[_reading] = false
}
threw = false
} finally {
if (threw)
this[_close]()
}
}
[_close] () {
if (this[_autoClose] && typeof this[_fd] === 'number') {
try {
fs.closeSync(this[_fd])
} catch (er) {}
this[_fd] = null
this.emit('close')
}
}
}
class WriteStream extends EE {
constructor (path, opt) {
opt = opt || {}
super(opt)
this.readable = false
this[_writing] = false
this[_ended] = false
this[_needDrain] = false
this[_queue] = []
this[_path] = path
this[_fd] = typeof opt.fd === 'number' ? opt.fd : null
this[_mode] = opt.mode === undefined ? 0o666 : opt.mode
this[_pos] = typeof opt.start === 'number' ? opt.start : null
this[_autoClose] = typeof opt.autoClose === 'boolean' ?
opt.autoClose : true
// truncating makes no sense when writing into the middle
const defaultFlag = this[_pos] !== null ? 'r+' : 'w'
this[_defaultFlag] = opt.flags === undefined
this[_flags] = this[_defaultFlag] ? defaultFlag : opt.flags
if (this[_fd] === null)
this[_open]()
}
get fd () { return this[_fd] }
get path () { return this[_path] }
[_onerror] (er) {
this[_close]()
this[_writing] = true
this.emit('error', er)
}
[_open] () {
fs.open(this[_path], this[_flags], this[_mode],
(er, fd) => this[_onopen](er, fd))
}
[_onopen] (er, fd) {
if (this[_defaultFlag] &&
this[_flags] === 'r+' &&
er && er.code === 'ENOENT') {
this[_flags] = 'w'
this[_open]()
} else if (er)
this[_onerror](er)
else {
this[_fd] = fd
this.emit('open', fd)
this[_flush]()
}
}
end (buf, enc) {
if (buf)
this.write(buf, enc)
this[_ended] = true
// synthetic after-write logic, where drain/finish live
if (!this[_writing] && !this[_queue].length &&
typeof this[_fd] === 'number')
this[_onwrite](null, 0)
}
write (buf, enc) {
if (typeof buf === 'string')
buf = new Buffer(buf, enc)
if (this[_ended]) {
this.emit('error', new Error('write() after end()'))
return false
}
if (this[_fd] === null || this[_writing] || this[_queue].length) {
this[_queue].push(buf)
this[_needDrain] = true
return false
}
this[_writing] = true
this[_write](buf)
return true
}
[_write] (buf) {
fs.write(this[_fd], buf, 0, buf.length, this[_pos], (er, bw) =>
this[_onwrite](er, bw))
}
[_onwrite] (er, bw) {
if (er)
this[_onerror](er)
else {
if (this[_pos] !== null)
this[_pos] += bw
if (this[_queue].length)
this[_flush]()
else {
this[_writing] = false
if (this[_ended] && !this[_finished]) {
this[_finished] = true
this[_close]()
this.emit('finish')
} else if (this[_needDrain]) {
this[_needDrain] = false
this.emit('drain')
}
}
}
}
[_flush] () {
if (this[_queue].length === 0) {
if (this[_ended])
this[_onwrite](null, 0)
} else if (this[_queue].length === 1)
this[_write](this[_queue].pop())
else {
const iovec = this[_queue]
this[_queue] = []
writev(this[_fd], iovec, this[_pos],
(er, bw) => this[_onwrite](er, bw))
}
}
[_close] () {
if (this[_autoClose] && typeof this[_fd] === 'number') {
fs.close(this[_fd], _ => this.emit('close'))
this[_fd] = null
}
}
}
class WriteStreamSync extends WriteStream {
[_open] () {
let fd
try {
fd = fs.openSync(this[_path], this[_flags], this[_mode])
} catch (er) {
if (this[_defaultFlag] &&
this[_flags] === 'r+' &&
er && er.code === 'ENOENT') {
this[_flags] = 'w'
return this[_open]()
} else
throw er
}
this[_onopen](null, fd)
}
[_close] () {
if (this[_autoClose] && typeof this[_fd] === 'number') {
try {
fs.closeSync(this[_fd])
} catch (er) {}
this[_fd] = null
this.emit('close')
}
}
[_write] (buf) {
try {
this[_onwrite](null,
fs.writeSync(this[_fd], buf, 0, buf.length, this[_pos]))
} catch (er) {
this[_onwrite](er, 0)
}
}
}
const writev = (fd, iovec, pos, cb) => {
const done = (er, bw) => cb(er, bw, iovec)
const req = new FSReqWrap()
req.oncomplete = done
binding.writeBuffers(fd, iovec, pos, req)
}
exports.ReadStream = ReadStream
exports.ReadStreamSync = ReadStreamSync
exports.WriteStream = WriteStream
exports.WriteStreamSync = WriteStreamSync
{
"_from": "fs-minipass@^1.2.5",
"_id": "fs-minipass@1.2.5",
"_inBundle": false,
"_integrity": "sha1-BsJ3IYRU7CiN93raVKA7hwKqy50=",
"_location": "/fs-minipass",
"_phantomChildren": {},
"_requested": {
"type": "range",
"registry": true,
"raw": "fs-minipass@^1.2.5",
"name": "fs-minipass",
"escapedName": "fs-minipass",
"rawSpec": "^1.2.5",
"saveSpec": null,
"fetchSpec": "^1.2.5"
},
"_requiredBy": [
"/tar"
],
"_resolved": "http://registry.npm.taobao.org/fs-minipass/download/fs-minipass-1.2.5.tgz",
"_shasum": "06c277218454ec288df77ada54a03b8702aacb9d",
"_spec": "fs-minipass@^1.2.5",
"_where": "/Users/medicean/workspace/antSword/node_modules/tar",
"author": {
"name": "Isaac Z. Schlueter",
"email": "i@izs.me",
"url": "http://blog.izs.me/"
},
"bugs": {
"url": "https://github.com/npm/fs-minipass/issues"
},
"bundleDependencies": false,
"dependencies": {
"minipass": "^2.2.1"
},
"deprecated": false,
"description": "fs read and write streams based on minipass",
"devDependencies": {
"mutate-fs": "^2.0.1",
"tap": "^10.7.2"
},
"files": [
"index.js"
],
"homepage": "https://github.com/npm/fs-minipass#readme",
"keywords": [],
"license": "ISC",
"main": "index.js",
"name": "fs-minipass",
"repository": {
"type": "git",
"url": "git+https://github.com/npm/fs-minipass.git"
},
"scripts": {
"postpublish": "git push origin --all; git push origin --tags",
"postversion": "npm publish",
"preversion": "npm test",
"test": "tap test/*.js --100 -J"
},
"version": "1.2.5"
}
The ISC License
Copyright (c) npm, Inc. and Contributors
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# minipass
A _very_ minimal implementation of a [PassThrough
stream](https://nodejs.org/api/stream.html#stream_class_stream_passthrough)
[It's very
fast](https://docs.google.com/spreadsheets/d/1oObKSrVwLX_7Ut4Z6g3fZW-AX1j1-k6w-cDsrkaSbHM/edit#gid=0)
for objects, strings, and buffers.
Supports pipe()ing (including multi-pipe() and backpressure
transmission), buffering data until either a `data` event handler or
`pipe()` is added (so you don't lose the first chunk), and most other
cases where PassThrough is a good idea.
There is a `read()` method, but it's much more efficient to consume
data from this stream via `'data'` events or by calling `pipe()` into
some other stream. Calling `read()` requires the buffer to be
flattened in some cases, which requires copying memory.
There is also no `unpipe()` method. Once you start piping, there is
no stopping it!
If you set `objectMode: true` in the options, then whatever is written
will be emitted. Otherwise, it'll do a minimal amount of Buffer
copying to ensure proper Streams semantics when `read(n)` is called.
This is not a `through` or `through2` stream. It doesn't transform
the data, it just passes it right through. If you want to transform
the data, extend the class, and override the `write()` method. Once
you're done transforming the data however you want, call
`super.write()` with the transform output.
For an example of a stream that extends MiniPass to provide transform
capabilities, check out [minizlib](http://npm.im/minizlib).
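A rough sketch of that extension pattern (the class and its behavior are
invented for illustration):

```js
const MiniPass = require('minipass')

// a trivial "transform": uppercase every chunk before passing it along
class Upper extends MiniPass {
  write (chunk, encoding, cb) {
    return super.write(chunk.toString().toUpperCase(), 'utf8', cb)
  }
}

const up = new Upper({ encoding: 'utf8' })
up.on('data', c => console.log(c)) // 'HELLO'
up.write('hello')
up.end()
```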
## USAGE
```js
const MiniPass = require('minipass')
const mp = new MiniPass(options) // optional: { encoding }
mp.write('foo')
mp.pipe(someOtherStream)
mp.end('bar')
```
### collecting
```js
mp.collect().then(all => {
// all is an array of all the data emitted
// encoding is supported in this case, so
// the result will be a collection of strings if
// an encoding is specified, or buffers/objects if not.
//
// In an async function, you may do
// const data = await stream.collect()
})
```
### iteration
You can iterate over streams synchronously or asynchronously in
platforms that support it.
Synchronous iteration will end when the currently available data is
consumed, even if the `end` event has not been reached. In string and
buffer mode, the data is concatenated, so unless multiple writes are
occurring in the same tick as the `read()`, sync iteration loops will
generally only have a single iteration.
To consume chunks in this way exactly as they have been written, with
no flattening, create the stream with the `{ objectMode: true }`
option.
```js
const mp = new MiniPass({ objectMode: true })
mp.write('a')
mp.write('b')
for (let letter of mp) {
console.log(letter) // a, b
}
mp.write('c')
mp.write('d')
for (let letter of mp) {
console.log(letter) // c, d
}
mp.write('e')
mp.end()
for (let letter of mp) {
console.log(letter) // e
}
for (let letter of mp) {
console.log(letter) // nothing
}
```
Asynchronous iteration will continue until the end event is reached,
consuming all of the data.
```js
const mp = new MiniPass({ encoding: 'utf8' })
// some source of some data
let i = 5
const inter = setInterval(() => {
if (i --> 0)
mp.write(Buffer.from('foo\n', 'utf8'))
else {
mp.end()
clearInterval(inter)
}
}, 100)
// consume the data with asynchronous iteration
async function consume () {
for await (let chunk of mp) {
console.log(chunk)
}
return 'ok'
}
consume().then(res => console.log(res))
// logs `foo\n` 5 times, and then `ok`
```
'use strict'
const EE = require('events')
const Yallist = require('yallist')
const EOF = Symbol('EOF')
const MAYBE_EMIT_END = Symbol('maybeEmitEnd')
const EMITTED_END = Symbol('emittedEnd')
const CLOSED = Symbol('closed')
const READ = Symbol('read')
const FLUSH = Symbol('flush')
const doIter = process.env._MP_NO_ITERATOR_SYMBOLS_ !== '1'
const ASYNCITERATOR = doIter && Symbol.asyncIterator || Symbol('asyncIterator not implemented')
const ITERATOR = doIter && Symbol.iterator || Symbol('iterator not implemented')
const FLUSHCHUNK = Symbol('flushChunk')
const SD = require('string_decoder').StringDecoder
const ENCODING = Symbol('encoding')
const DECODER = Symbol('decoder')
const FLOWING = Symbol('flowing')
const RESUME = Symbol('resume')
const BUFFERLENGTH = Symbol('bufferLength')
const BUFFERPUSH = Symbol('bufferPush')
const BUFFERSHIFT = Symbol('bufferShift')
const OBJECTMODE = Symbol('objectMode')
// Buffer in node 4.x < 4.5.0 doesn't have working Buffer.from
// or Buffer.alloc, and Buffer in node 10 deprecated the ctor.
// .M, this is fine .\^/M..
let B = Buffer
/* istanbul ignore next */
if (!B.alloc) {
B = require('safe-buffer').Buffer
}
module.exports = class MiniPass extends EE {
constructor (options) {
super()
this[FLOWING] = false
this.pipes = new Yallist()
this.buffer = new Yallist()
this[OBJECTMODE] = options && options.objectMode || false
if (this[OBJECTMODE])
this[ENCODING] = null
else
this[ENCODING] = options && options.encoding || null
if (this[ENCODING] === 'buffer')
this[ENCODING] = null
this[DECODER] = this[ENCODING] ? new SD(this[ENCODING]) : null
this[EOF] = false
this[EMITTED_END] = false
this[CLOSED] = false
this.writable = true
this.readable = true
this[BUFFERLENGTH] = 0
}
get bufferLength () { return this[BUFFERLENGTH] }
get encoding () { return this[ENCODING] }
set encoding (enc) {
if (this[OBJECTMODE])
throw new Error('cannot set encoding in objectMode')
if (this[ENCODING] && enc !== this[ENCODING] &&
(this[DECODER] && this[DECODER].lastNeed || this[BUFFERLENGTH]))
throw new Error('cannot change encoding')
if (this[ENCODING] !== enc) {
this[DECODER] = enc ? new SD(enc) : null
if (this.buffer.length)
this.buffer = this.buffer.map(chunk => this[DECODER].write(chunk))
}
this[ENCODING] = enc
}
setEncoding (enc) {
this.encoding = enc
}
write (chunk, encoding, cb) {
if (this[EOF])
throw new Error('write after end')
if (typeof encoding === 'function')
cb = encoding, encoding = 'utf8'
if (!encoding)
encoding = 'utf8'
// fast-path writing strings of same encoding to a stream with
// an empty buffer, skipping the buffer/decoder dance
if (typeof chunk === 'string' && !this[OBJECTMODE] &&
// unless it is a string already ready for us to use
!(encoding === this[ENCODING] && !this[DECODER].lastNeed)) {
chunk = B.from(chunk, encoding)
}
if (B.isBuffer(chunk) && this[ENCODING])
chunk = this[DECODER].write(chunk)
try {
return this.flowing
? (this.emit('data', chunk), this.flowing)
: (this[BUFFERPUSH](chunk), false)
} finally {
this.emit('readable')
if (cb)
cb()
}
}
read (n) {
try {
if (this[BUFFERLENGTH] === 0 || n === 0 || n > this[BUFFERLENGTH])
return null
if (this[OBJECTMODE])
n = null
if (this.buffer.length > 1 && !this[OBJECTMODE]) {
if (this.encoding)
this.buffer = new Yallist([
Array.from(this.buffer).join('')
])
else
this.buffer = new Yallist([
B.concat(Array.from(this.buffer), this[BUFFERLENGTH])
])
}
return this[READ](n || null, this.buffer.head.value)
} finally {
this[MAYBE_EMIT_END]()
}
}
[READ] (n, chunk) {
if (n === chunk.length || n === null)
this[BUFFERSHIFT]()
else {
this.buffer.head.value = chunk.slice(n)
chunk = chunk.slice(0, n)
this[BUFFERLENGTH] -= n
}
this.emit('data', chunk)
if (!this.buffer.length && !this[EOF])
this.emit('drain')
return chunk
}
end (chunk, encoding, cb) {
if (typeof chunk === 'function')
cb = chunk, chunk = null
if (typeof encoding === 'function')
cb = encoding, encoding = 'utf8'
if (chunk)
this.write(chunk, encoding)
if (cb)
this.once('end', cb)
this[EOF] = true
this.writable = false
if (this.flowing)
this[MAYBE_EMIT_END]()
}
// don't let the internal resume be overwritten
[RESUME] () {
this[FLOWING] = true
this.emit('resume')
if (this.buffer.length)
this[FLUSH]()
else if (this[EOF])
this[MAYBE_EMIT_END]()
else
this.emit('drain')
}
resume () {
return this[RESUME]()
}
pause () {
this[FLOWING] = false
}
get flowing () {
return this[FLOWING]
}
[BUFFERPUSH] (chunk) {
if (this[OBJECTMODE])
this[BUFFERLENGTH] += 1
else
this[BUFFERLENGTH] += chunk.length
return this.buffer.push(chunk)
}
[BUFFERSHIFT] () {
if (this.buffer.length) {
if (this[OBJECTMODE])
this[BUFFERLENGTH] -= 1
else
this[BUFFERLENGTH] -= this.buffer.head.value.length
}
return this.buffer.shift()
}
[FLUSH] () {
do {} while (this[FLUSHCHUNK](this[BUFFERSHIFT]()))
if (!this.buffer.length && !this[EOF])
this.emit('drain')
}
[FLUSHCHUNK] (chunk) {
return chunk ? (this.emit('data', chunk), this.flowing) : false
}
pipe (dest, opts) {
if (dest === process.stdout || dest === process.stderr)
(opts = opts || {}).end = false
const p = { dest: dest, opts: opts, ondrain: _ => this[RESUME]() }
this.pipes.push(p)
dest.on('drain', p.ondrain)
this[RESUME]()
return dest
}
addListener (ev, fn) {
return this.on(ev, fn)
}
on (ev, fn) {
try {
return super.on(ev, fn)
} finally {
if (ev === 'data' && !this.pipes.length && !this.flowing)
this[RESUME]()
else if (ev === 'end' && this[EMITTED_END]) {
super.emit('end')
this.removeAllListeners('end')
}
}
}
get emittedEnd () {
return this[EMITTED_END]
}
[MAYBE_EMIT_END] () {
if (!this[EMITTED_END] && this.buffer.length === 0 && this[EOF]) {
this.emit('end')
this.emit('prefinish')
this.emit('finish')
if (this[CLOSED])
this.emit('close')
}
}
emit (ev, data) {
if (ev === 'data') {
if (!data)
return
if (this.pipes.length)
this.pipes.forEach(p => p.dest.write(data) || this.pause())
} else if (ev === 'end') {
if (this[EMITTED_END] === true)
return
this[EMITTED_END] = true
this.readable = false
if (this[DECODER]) {
data = this[DECODER].end()
if (data) {
this.pipes.forEach(p => p.dest.write(data))
super.emit('data', data)
}
}
this.pipes.forEach(p => {
p.dest.removeListener('drain', p.ondrain)
if (!p.opts || p.opts.end !== false)
p.dest.end()
})
} else if (ev === 'close') {
this[CLOSED] = true
// don't emit close before 'end' and 'finish'
if (!this[EMITTED_END])
return
}
const args = new Array(arguments.length)
args[0] = ev
args[1] = data
if (arguments.length > 2) {
for (let i = 2; i < arguments.length; i++) {
args[i] = arguments[i]
}
}
try {
return super.emit.apply(this, args)
} finally {
if (ev !== 'end')
this[MAYBE_EMIT_END]()
else
this.removeAllListeners('end')
}
}
// const all = await stream.collect()
collect () {
return new Promise((resolve, reject) => {
const buf = []
this.on('data', c => buf.push(c))
this.on('end', () => resolve(buf))
this.on('error', reject)
})
}
// for await (let chunk of stream)
[ASYNCITERATOR] () {
const next = () => {
const res = this.read()
if (res !== null)
return Promise.resolve({ done: false, value: res })
if (this[EOF])
return Promise.resolve({ done: true })
let resolve = null
let reject = null
const onerr = er => {
this.removeListener('data', ondata)
this.removeListener('end', onend)
reject(er)
}
const ondata = value => {
this.removeListener('error', onerr)
this.removeListener('end', onend)
this.pause()
resolve({ value: value, done: !!this[EOF] })
}
const onend = () => {
this.removeListener('error', onerr)
this.removeListener('data', ondata)
resolve({ done: true })
}
return new Promise((res, rej) => {
reject = rej
resolve = res
this.once('error', onerr)
this.once('end', onend)
this.once('data', ondata)
this.resume()
})
}
return { next }
}
// for (let chunk of stream)
[ITERATOR] () {
const next = () => {
const value = this.read()
const done = value === null
return { value, done }
}
return { next }
}
}
The ISC License
Copyright (c) Isaac Z. Schlueter and Contributors
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# yallist
Yet Another Linked List
There are many doubly-linked list implementations like it, but this
one is mine.
For when an array would be too big, and a Map can't be iterated in
reverse order.
[![Build Status](https://travis-ci.org/isaacs/yallist.svg?branch=master)](https://travis-ci.org/isaacs/yallist) [![Coverage Status](https://coveralls.io/repos/isaacs/yallist/badge.svg?service=github)](https://coveralls.io/github/isaacs/yallist)
## basic usage
```javascript
var yallist = require('yallist')
var myList = yallist.create([1, 2, 3])
myList.push('foo')
myList.unshift('bar')
// of course pop() and shift() are there, too
console.log(myList.toArray()) // ['bar', 1, 2, 3, 'foo']
myList.forEach(function (k) {
// walk the list head to tail
})
myList.forEachReverse(function (k, index, list) {
// walk the list tail to head
})
var myDoubledList = myList.map(function (k) {
return k + k
})
// now myDoubledList contains ['barbar', 2, 4, 6, 'foofoo']
// mapReverse is also a thing
var myDoubledListReverse = myList.mapReverse(function (k) {
return k + k
}) // ['foofoo', 6, 4, 2, 'barbar']
var reduced = myList.reduce(function (set, entry) {
set += entry
return set
}, 'start')
console.log(reduced) // 'startfoo123bar'
```
## api
The whole API is considered "public".
Functions with the same name as an Array method work more or less the
same way.
There are reverse versions of most things because that's the point.
### Yallist
Default export, the class that holds and manages a list.
Call it with either a forEach-able (like an array) or a set of
arguments, to initialize the list.
The Array-ish methods all act like you'd expect. No magic length,
though, so if you change that it won't automatically prune or add
empty spots.
### Yallist.create(..)
Alias for Yallist function. Some people like factories.
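For example:

```javascript
var Yallist = require('yallist')

// from a forEach-able (here an array), or from an argument list
var a = new Yallist([1, 2, 3])
var b = Yallist.create(4, 5, 6)
console.log(a.toArray()) // [ 1, 2, 3 ]
console.log(b.toArray()) // [ 4, 5, 6 ]
```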
#### yallist.head
The first node in the list
#### yallist.tail
The last node in the list
#### yallist.length
The number of nodes in the list. (Change this at your peril. It is
not magic like Array length.)
#### yallist.toArray()
Convert the list to an array.
#### yallist.forEach(fn, [thisp])
Call a function on each item in the list.
#### yallist.forEachReverse(fn, [thisp])
Call a function on each item in the list, in reverse order.
#### yallist.get(n)
Get the data at position `n` in the list. If you use this a lot,
probably better off just using an Array.
#### yallist.getReverse(n)
Get the data at position `n`, counting from the tail.
#### yallist.map(fn, thisp)
Create a new Yallist with the result of calling the function on each
item.
#### yallist.mapReverse(fn, thisp)
Same as `map`, but in reverse.
#### yallist.pop()
Get the data from the list tail, and remove the tail from the list.
#### yallist.push(item, ...)
Insert one or more items to the tail of the list.
#### yallist.reduce(fn, initialValue)
Like Array.reduce.
#### yallist.reduceReverse
Like Array.reduce, but in reverse.
#### yallist.reverse
Reverse the list in place.
#### yallist.shift()
Get the data from the list head, and remove the head from the list.
#### yallist.slice([from], [to])
Just like Array.slice, but returns a new Yallist.
#### yallist.sliceReverse([from], [to])
Just like yallist.slice, but the result is returned in reverse.
#### yallist.toArray()
Create an array representation of the list.
#### yallist.toArrayReverse()
Create a reversed array representation of the list.
#### yallist.unshift(item, ...)
Insert one or more items to the head of the list.
#### yallist.unshiftNode(node)
Move a Node object to the front of the list. (That is, pull it out of
wherever it lives, and make it the new head.)
If the node belongs to a different list, then that list will remove it
first.
#### yallist.pushNode(node)
Move a Node object to the end of the list. (That is, pull it out of
wherever it lives, and make it the new tail.)
If the node belongs to a list already, then that list will remove it
first.
#### yallist.removeNode(node)
Remove a node from the list, preserving referential integrity of head
and tail and other nodes.
Will throw an error if you try to have a list remove a node that
doesn't belong to it.
### Yallist.Node
The class that holds the data and is actually the list.
Call with `var n = new Node(value, previousNode, nextNode)`
Note that if you do direct operations on Nodes themselves, it's very
easy to get into weird states where the list is broken. Be careful :)
#### node.next
The next node in the list.
#### node.prev
The previous node in the list.
#### node.value
The data the node contains.
#### node.list
The list to which this node belongs. (Null if it does not belong to
any list.)
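A short sketch of moving and removing nodes directly (values are illustrative):

```javascript
var Yallist = require('yallist')

var list = Yallist.create('a', 'b', 'c')
var node = list.head.next     // the node holding 'b'

list.unshiftNode(node)        // move it to the head
console.log(list.toArray())   // [ 'b', 'a', 'c' ]

list.removeNode(node)         // detach it from the list entirely
console.log(node.list)        // null
console.log(list.toArray())   // [ 'a', 'c' ]
```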
'use strict'
var Yallist = require('./yallist.js')
Yallist.prototype[Symbol.iterator] = function* () {
for (let walker = this.head; walker; walker = walker.next) {
yield walker.value
}
}
{
"_from": "yallist@^3.0.0",
"_id": "yallist@3.0.2",
"_inBundle": false,
"_integrity": "sha1-hFK0u36Dx8GI2AQcGoN8dz1ti7k=",
"_location": "/minipass/yallist",
"_phantomChildren": {},
"_requested": {
"type": "range",
"registry": true,
"raw": "yallist@^3.0.0",
"name": "yallist",
"escapedName": "yallist",
"rawSpec": "^3.0.0",
"saveSpec": null,
"fetchSpec": "^3.0.0"
},
"_requiredBy": [
"/minipass"
],
"_resolved": "http://registry.npm.taobao.org/yallist/download/yallist-3.0.2.tgz",
"_shasum": "8452b4bb7e83c7c188d8041c1a837c773d6d8bb9",
"_spec": "yallist@^3.0.0",
"_where": "/Users/medicean/workspace/antSword/node_modules/minipass",
"author": {
"name": "Isaac Z. Schlueter",
"email": "i@izs.me",
"url": "http://blog.izs.me/"
},
"bugs": {
"url": "https://github.com/isaacs/yallist/issues"
},
"bundleDependencies": false,
"dependencies": {},
"deprecated": false,
"description": "Yet Another Linked List",
"devDependencies": {
"tap": "^10.3.0"
},
"directories": {
"test": "test"
},
"files": [
"yallist.js",
"iterator.js"
],
"homepage": "https://github.com/isaacs/yallist#readme",
"license": "ISC",
"main": "yallist.js",
"name": "yallist",
"repository": {
"type": "git",
"url": "git+https://github.com/isaacs/yallist.git"
},
"scripts": {
"postpublish": "git push origin --all; git push origin --tags",
"postversion": "npm publish",
"preversion": "npm test",
"test": "tap test/*.js --100"
},
"version": "3.0.2"
}
'use strict'
module.exports = Yallist
Yallist.Node = Node
Yallist.create = Yallist
function Yallist (list) {
var self = this
if (!(self instanceof Yallist)) {
self = new Yallist()
}
self.tail = null
self.head = null
self.length = 0
if (list && typeof list.forEach === 'function') {
list.forEach(function (item) {
self.push(item)
})
} else if (arguments.length > 0) {
for (var i = 0, l = arguments.length; i < l; i++) {
self.push(arguments[i])
}
}
return self
}
Yallist.prototype.removeNode = function (node) {
if (node.list !== this) {
throw new Error('removing node which does not belong to this list')
}
var next = node.next
var prev = node.prev
if (next) {
next.prev = prev
}
if (prev) {
prev.next = next
}
if (node === this.head) {
this.head = next
}
if (node === this.tail) {
this.tail = prev
}
node.list.length--
node.next = null
node.prev = null
node.list = null
}
Yallist.prototype.unshiftNode = function (node) {
if (node === this.head) {
return
}
if (node.list) {
node.list.removeNode(node)
}
var head = this.head
node.list = this
node.next = head
if (head) {
head.prev = node
}
this.head = node
if (!this.tail) {
this.tail = node
}
this.length++
}
Yallist.prototype.pushNode = function (node) {
if (node === this.tail) {
return
}
if (node.list) {
node.list.removeNode(node)
}
var tail = this.tail
node.list = this
node.prev = tail
if (tail) {
tail.next = node
}
this.tail = node
if (!this.head) {
this.head = node
}
this.length++
}
Yallist.prototype.push = function () {
for (var i = 0, l = arguments.length; i < l; i++) {
push(this, arguments[i])
}
return this.length
}
Yallist.prototype.unshift = function () {
for (var i = 0, l = arguments.length; i < l; i++) {
unshift(this, arguments[i])
}
return this.length
}
Yallist.prototype.pop = function () {
if (!this.tail) {
return undefined
}
var res = this.tail.value
this.tail = this.tail.prev
if (this.tail) {
this.tail.next = null
} else {
this.head = null
}
this.length--
return res
}
Yallist.prototype.shift = function () {
if (!this.head) {
return undefined
}
var res = this.head.value
this.head = this.head.next
if (this.head) {
this.head.prev = null
} else {
this.tail = null
}
this.length--
return res
}
Yallist.prototype.forEach = function (fn, thisp) {
thisp = thisp || this
for (var walker = this.head, i = 0; walker !== null; i++) {
fn.call(thisp, walker.value, i, this)
walker = walker.next
}
}
Yallist.prototype.forEachReverse = function (fn, thisp) {
thisp = thisp || this
for (var walker = this.tail, i = this.length - 1; walker !== null; i--) {
fn.call(thisp, walker.value, i, this)
walker = walker.prev
}
}
Yallist.prototype.get = function (n) {
for (var i = 0, walker = this.head; walker !== null && i < n; i++) {
// abort out of the list early if we hit a cycle
walker = walker.next
}
if (i === n && walker !== null) {
return walker.value
}
}
Yallist.prototype.getReverse = function (n) {
for (var i = 0, walker = this.tail; walker !== null && i < n; i++) {
// abort out of the list early if we hit a cycle
walker = walker.prev
}
if (i === n && walker !== null) {
return walker.value
}
}
Yallist.prototype.map = function (fn, thisp) {
thisp = thisp || this
var res = new Yallist()
for (var walker = this.head; walker !== null;) {
res.push(fn.call(thisp, walker.value, this))
walker = walker.next
}
return res
}
Yallist.prototype.mapReverse = function (fn, thisp) {
thisp = thisp || this
var res = new Yallist()
for (var walker = this.tail; walker !== null;) {
res.push(fn.call(thisp, walker.value, this))
walker = walker.prev
}
return res
}
Yallist.prototype.reduce = function (fn, initial) {
var acc
var walker = this.head
if (arguments.length > 1) {
acc = initial
} else if (this.head) {
walker = this.head.next
acc = this.head.value
} else {
throw new TypeError('Reduce of empty list with no initial value')
}
for (var i = 0; walker !== null; i++) {
acc = fn(acc, walker.value, i)
walker = walker.next
}
return acc
}
Yallist.prototype.reduceReverse = function (fn, initial) {
var acc
var walker = this.tail
if (arguments.length > 1) {
acc = initial
} else if (this.tail) {
walker = this.tail.prev
acc = this.tail.value
} else {
throw new TypeError('Reduce of empty list with no initial value')
}
for (var i = this.length - 1; walker !== null; i--) {
acc = fn(acc, walker.value, i)
walker = walker.prev
}
return acc
}
Yallist.prototype.toArray = function () {
var arr = new Array(this.length)
for (var i = 0, walker = this.head; walker !== null; i++) {
arr[i] = walker.value
walker = walker.next
}
return arr
}
Yallist.prototype.toArrayReverse = function () {
var arr = new Array(this.length)
for (var i = 0, walker = this.tail; walker !== null; i++) {
arr[i] = walker.value
walker = walker.prev
}
return arr
}
Yallist.prototype.slice = function (from, to) {
to = to || this.length
if (to < 0) {
to += this.length
}
from = from || 0
if (from < 0) {
from += this.length
}
var ret = new Yallist()
if (to < from || to < 0) {
return ret
}
if (from < 0) {
from = 0
}
if (to > this.length) {
to = this.length
}
for (var i = 0, walker = this.head; walker !== null && i < from; i++) {
walker = walker.next
}
for (; walker !== null && i < to; i++, walker = walker.next) {
ret.push(walker.value)
}
return ret
}
Yallist.prototype.sliceReverse = function (from, to) {
to = to || this.length
if (to < 0) {
to += this.length
}
from = from || 0
if (from < 0) {
from += this.length
}
var ret = new Yallist()
if (to < from || to < 0) {
return ret
}
if (from < 0) {
from = 0
}
if (to > this.length) {
to = this.length
}
for (var i = this.length, walker = this.tail; walker !== null && i > to; i--) {
walker = walker.prev
}
for (; walker !== null && i > from; i--, walker = walker.prev) {
ret.push(walker.value)
}
return ret
}
Yallist.prototype.reverse = function () {
var head = this.head
var tail = this.tail
for (var walker = head; walker !== null; walker = walker.prev) {
var p = walker.prev
walker.prev = walker.next
walker.next = p
}
this.head = tail
this.tail = head
return this
}
function push (self, item) {
self.tail = new Node(item, self.tail, null, self)
if (!self.head) {
self.head = self.tail
}
self.length++
}
function unshift (self, item) {
self.head = new Node(item, null, self.head, self)
if (!self.tail) {
self.tail = self.head
}
self.length++
}
function Node (value, prev, next, list) {
if (!(this instanceof Node)) {
return new Node(value, prev, next, list)
}
this.list = list
this.value = value
if (prev) {
prev.next = this
this.prev = prev
} else {
this.prev = null
}
if (next) {
next.prev = this
this.next = next
} else {
this.next = null
}
}
try {
// add if support for Symbol.iterator is present
require('./iterator.js')
} catch (er) {}
{
"_from": "minipass@^2.3.3",
"_id": "minipass@2.3.4",
"_inBundle": false,
"_integrity": "sha1-R2jXYF7WGU1tV2FpueEu9x6dmVc=",
"_location": "/minipass",
"_phantomChildren": {},
"_requested": {
"type": "range",
"registry": true,
"raw": "minipass@^2.3.3",
"name": "minipass",
"escapedName": "minipass",
"rawSpec": "^2.3.3",
"saveSpec": null,
"fetchSpec": "^2.3.3"
},
"_requiredBy": [
"/fs-minipass",
"/minizlib",
"/tar"
],
"_resolved": "http://registry.npm.taobao.org/minipass/download/minipass-2.3.4.tgz",
"_shasum": "4768d7605ed6194d6d576169b9e12ef71e9d9957",
"_spec": "minipass@^2.3.3",
"_where": "/Users/medicean/workspace/antSword/node_modules/tar",
"author": {
"name": "Isaac Z. Schlueter",
"email": "i@izs.me",
"url": "http://blog.izs.me/"
},
"bugs": {
"url": "https://github.com/isaacs/minipass/issues"
},
"bundleDependencies": false,
"dependencies": {
"safe-buffer": "^5.1.2",
"yallist": "^3.0.0"
},
"deprecated": false,
"description": "minimal implementation of a PassThrough stream",
"devDependencies": {
"end-of-stream": "^1.4.0",
"tap": "^12.0.1",
"through2": "^2.0.3"
},
"files": [
"index.js"
],
"homepage": "https://github.com/isaacs/minipass#readme",
"keywords": [
"passthrough",
"stream"
],
"license": "ISC",
"main": "index.js",
"name": "minipass",
"repository": {
"type": "git",
"url": "git+https://github.com/isaacs/minipass.git"
},
"scripts": {
"postpublish": "git push origin --all; git push origin --tags",
"postversion": "npm publish",
"preversion": "npm test",
"test": "tap test/*.js --100"
},
"version": "2.3.4"
}
Minizlib was created by Isaac Z. Schlueter.
It is a derivative work of the Node.js project.
"""
Copyright Isaac Z. Schlueter and Contributors
Copyright Node.js contributors. All rights reserved.
Copyright Joyent, Inc. and other Node contributors. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# minizlib
A tiny fast zlib stream built on [minipass](http://npm.im/minipass)
and Node.js's zlib binding.
This module was created to serve the needs of
[node-tar](http://npm.im/tar) v2. If your needs are different, then
it may not be for you.
## How does this differ from the streams in `require('zlib')`?
First, there are no convenience methods to compress or decompress a
buffer. If you want those, use the built-in `zlib` module. This is
only streams.
This module compresses and decompresses the data as fast as you feed
it in. It is synchronous, and runs on the main process thread. Zlib
operations can be high CPU, but they're very fast, and doing it this
way means much less bookkeeping and artificial deferral.
Node's built in zlib streams are built on top of `stream.Transform`.
They do the maximally safe thing with respect to consistent
asynchrony, buffering, and backpressure.
This module _does_ support backpressure, and will buffer output chunks
that are not consumed, but is less of a mediator between the input and
output. There are no high or low watermarks, no state objects, and no
artificial async deferrals. It will not protect you from Zalgo.
If you write, data will be emitted right away. If you write
everything synchronously in one tick, and you are listening to the
`data` event to consume it, then it'll all be emitted right away in
that same tick. If you want data to be emitted in the next tick, then
write it in the next tick.
It is thus the responsibility of the reader and writer to manage their
own consumption and process execution flow.
The goal is to compress and decompress as fast as possible, even for
files that are too large to store all in one buffer.
The API is very similar to the built-in zlib module. There are
classes that you instantiate with `new` and they are streams that can
be piped together.
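A minimal round-trip sketch using the classes exported below (the payload
string is arbitrary):

```js
const zlib = require('minizlib')

const gzip = new zlib.Gzip()
const gunzip = new zlib.Gunzip()

const out = []
gunzip.on('data', c => out.push(c))
gunzip.on('end', () => console.log(Buffer.concat(out).toString())) // 'hello minizlib'

// everything below runs synchronously on the main thread
gzip.pipe(gunzip)
gzip.end('hello minizlib')
```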
module.exports = Object.freeze({
Z_NO_FLUSH: 0,
Z_PARTIAL_FLUSH: 1,
Z_SYNC_FLUSH: 2,
Z_FULL_FLUSH: 3,
Z_FINISH: 4,
Z_BLOCK: 5,
Z_OK: 0,
Z_STREAM_END: 1,
Z_NEED_DICT: 2,
Z_ERRNO: -1,
Z_STREAM_ERROR: -2,
Z_DATA_ERROR: -3,
Z_MEM_ERROR: -4,
Z_BUF_ERROR: -5,
Z_VERSION_ERROR: -6,
Z_NO_COMPRESSION: 0,
Z_BEST_SPEED: 1,
Z_BEST_COMPRESSION: 9,
Z_DEFAULT_COMPRESSION: -1,
Z_FILTERED: 1,
Z_HUFFMAN_ONLY: 2,
Z_RLE: 3,
Z_FIXED: 4,
Z_DEFAULT_STRATEGY: 0,
ZLIB_VERNUM: 4736,
DEFLATE: 1,
INFLATE: 2,
GZIP: 3,
GUNZIP: 4,
DEFLATERAW: 5,
INFLATERAW: 6,
UNZIP: 7,
Z_MIN_WINDOWBITS: 8,
Z_MAX_WINDOWBITS: 15,
Z_DEFAULT_WINDOWBITS: 15,
Z_MIN_CHUNK: 64,
Z_MAX_CHUNK: Infinity,
Z_DEFAULT_CHUNK: 16384,
Z_MIN_MEMLEVEL: 1,
Z_MAX_MEMLEVEL: 9,
Z_DEFAULT_MEMLEVEL: 8,
Z_MIN_LEVEL: -1,
Z_MAX_LEVEL: 9,
Z_DEFAULT_LEVEL: -1
})
'use strict'
const assert = require('assert')
const Buffer = require('buffer').Buffer
const binding = process.binding('zlib')
const constants = exports.constants = require('./constants.js')
const MiniPass = require('minipass')
class ZlibError extends Error {
constructor (msg, errno) {
super('zlib: ' + msg)
this.errno = errno
this.code = codes.get(errno)
}
get name () {
return 'ZlibError'
}
}
// translation table for return codes.
const codes = new Map([
[constants.Z_OK, 'Z_OK'],
[constants.Z_STREAM_END, 'Z_STREAM_END'],
[constants.Z_NEED_DICT, 'Z_NEED_DICT'],
[constants.Z_ERRNO, 'Z_ERRNO'],
[constants.Z_STREAM_ERROR, 'Z_STREAM_ERROR'],
[constants.Z_DATA_ERROR, 'Z_DATA_ERROR'],
[constants.Z_MEM_ERROR, 'Z_MEM_ERROR'],
[constants.Z_BUF_ERROR, 'Z_BUF_ERROR'],
[constants.Z_VERSION_ERROR, 'Z_VERSION_ERROR']
])
const validFlushFlags = new Set([
constants.Z_NO_FLUSH,
constants.Z_PARTIAL_FLUSH,
constants.Z_SYNC_FLUSH,
constants.Z_FULL_FLUSH,
constants.Z_FINISH,
constants.Z_BLOCK
])
const strategies = new Set([
constants.Z_FILTERED,
constants.Z_HUFFMAN_ONLY,
constants.Z_RLE,
constants.Z_FIXED,
constants.Z_DEFAULT_STRATEGY
])
// the Zlib class they all inherit from
// This thing manages the queue of requests, and returns
// true or false if there is anything in the queue when
// you call the .write() method.
const _opts = Symbol('opts')
const _chunkSize = Symbol('chunkSize')
const _flushFlag = Symbol('flushFlag')
const _finishFlush = Symbol('finishFlush')
const _handle = Symbol('handle')
const _hadError = Symbol('hadError')
const _buffer = Symbol('buffer')
const _offset = Symbol('offset')
const _level = Symbol('level')
const _strategy = Symbol('strategy')
const _ended = Symbol('ended')
const _writeState = Symbol('writeState')
class Zlib extends MiniPass {
constructor (opts, mode) {
super(opts)
this[_ended] = false
this[_opts] = opts = opts || {}
this[_chunkSize] = opts.chunkSize || constants.Z_DEFAULT_CHUNK
if (opts.flush && !validFlushFlags.has(opts.flush)) {
throw new TypeError('Invalid flush flag: ' + opts.flush)
}
if (opts.finishFlush && !validFlushFlags.has(opts.finishFlush)) {
throw new TypeError('Invalid flush flag: ' + opts.finishFlush)
}
this[_flushFlag] = opts.flush || constants.Z_NO_FLUSH
this[_finishFlush] = typeof opts.finishFlush !== 'undefined' ?
opts.finishFlush : constants.Z_FINISH
if (opts.chunkSize) {
if (opts.chunkSize < constants.Z_MIN_CHUNK) {
throw new RangeError('Invalid chunk size: ' + opts.chunkSize)
}
}
if (opts.windowBits) {
if (opts.windowBits < constants.Z_MIN_WINDOWBITS ||
opts.windowBits > constants.Z_MAX_WINDOWBITS) {
throw new RangeError('Invalid windowBits: ' + opts.windowBits)
}
}
if (opts.level) {
if (opts.level < constants.Z_MIN_LEVEL ||
opts.level > constants.Z_MAX_LEVEL) {
throw new RangeError('Invalid compression level: ' + opts.level)
}
}
if (opts.memLevel) {
if (opts.memLevel < constants.Z_MIN_MEMLEVEL ||
opts.memLevel > constants.Z_MAX_MEMLEVEL) {
throw new RangeError('Invalid memLevel: ' + opts.memLevel)
}
}
if (opts.strategy && !(strategies.has(opts.strategy)))
throw new TypeError('Invalid strategy: ' + opts.strategy)
if (opts.dictionary) {
if (!(opts.dictionary instanceof Buffer)) {
throw new TypeError('Invalid dictionary: it should be a Buffer instance')
}
}
this[_handle] = new binding.Zlib(mode)
this[_hadError] = false
this[_handle].onerror = (message, errno) => {
// there is no way to cleanly recover.
// continuing only obscures problems.
this.close()
this[_hadError] = true
const error = new ZlibError(message, errno)
this.emit('error', error)
}
const level = typeof opts.level === 'number' ? opts.level
: constants.Z_DEFAULT_COMPRESSION
var strategy = typeof opts.strategy === 'number' ? opts.strategy
: constants.Z_DEFAULT_STRATEGY
this[_writeState] = new Uint32Array(2);
const window = opts.windowBits || constants.Z_DEFAULT_WINDOWBITS
const memLevel = opts.memLevel || constants.Z_DEFAULT_MEMLEVEL
// API changed in node v9
/* istanbul ignore next */
if (/^v[0-8]\./.test(process.version)) {
this[_handle].init(window,
level,
memLevel,
strategy,
opts.dictionary)
} else {
this[_handle].init(window,
level,
memLevel,
strategy,
this[_writeState],
() => {},
opts.dictionary)
}
this[_buffer] = Buffer.allocUnsafe(this[_chunkSize])
this[_offset] = 0
this[_level] = level
this[_strategy] = strategy
this.once('end', this.close)
}
close () {
if (this[_handle]) {
this[_handle].close()
this[_handle] = null
this.emit('close')
}
}
params (level, strategy) {
if (!this[_handle])
throw new Error('cannot switch params when binding is closed')
// no way to test this without also not supporting params at all
/* istanbul ignore if */
if (!this[_handle].params)
throw new Error('not supported in this implementation')
if (level < constants.Z_MIN_LEVEL ||
level > constants.Z_MAX_LEVEL) {
throw new RangeError('Invalid compression level: ' + level)
}
if (!(strategies.has(strategy)))
throw new TypeError('Invalid strategy: ' + strategy)
if (this[_level] !== level || this[_strategy] !== strategy) {
this.flush(constants.Z_SYNC_FLUSH)
assert(this[_handle], 'zlib binding closed')
this[_handle].params(level, strategy)
/* istanbul ignore else */
if (!this[_hadError]) {
this[_level] = level
this[_strategy] = strategy
}
}
}
reset () {
assert(this[_handle], 'zlib binding closed')
return this[_handle].reset()
}
flush (kind) {
if (kind === undefined)
kind = constants.Z_FULL_FLUSH
if (this.ended)
return
const flushFlag = this[_flushFlag]
this[_flushFlag] = kind
this.write(Buffer.alloc(0))
this[_flushFlag] = flushFlag
}
end (chunk, encoding, cb) {
if (chunk)
this.write(chunk, encoding)
this.flush(this[_finishFlush])
this[_ended] = true
return super.end(null, null, cb)
}
get ended () {
return this[_ended]
}
write (chunk, encoding, cb) {
// process the chunk using the sync process
// then super.write() all the outputted chunks
if (typeof encoding === 'function')
cb = encoding, encoding = 'utf8'
if (typeof chunk === 'string')
chunk = new Buffer(chunk, encoding)
let availInBefore = chunk && chunk.length
let availOutBefore = this[_chunkSize] - this[_offset]
let inOff = 0 // the offset of the input buffer
const flushFlag = this[_flushFlag]
let writeReturn = true
assert(this[_handle], 'zlib binding closed')
do {
let res = this[_handle].writeSync(
flushFlag,
chunk, // in
inOff, // in_off
availInBefore, // in_len
this[_buffer], // out
this[_offset], //out_off
availOutBefore // out_len
)
if (this[_hadError])
break
// API changed in v9
/* istanbul ignore next */
let availInAfter = res ? res[0] : this[_writeState][1]
/* istanbul ignore next */
let availOutAfter = res ? res[1] : this[_writeState][0]
const have = availOutBefore - availOutAfter
assert(have >= 0, 'have should not go down')
if (have > 0) {
const out = this[_buffer].slice(
this[_offset], this[_offset] + have
)
this[_offset] += have
// serve some output to the consumer.
writeReturn = super.write(out) && writeReturn
}
// exhausted the output buffer, or used all the input: create a new one.
if (availOutAfter === 0 || this[_offset] >= this[_chunkSize]) {
availOutBefore = this[_chunkSize]
this[_offset] = 0
this[_buffer] = Buffer.allocUnsafe(this[_chunkSize])
}
if (availOutAfter === 0) {
// Not actually done. Need to reprocess.
// Also, update the availInBefore to the availInAfter value,
// so that if we have to hit it a third (fourth, etc.) time,
// it'll have the correct byte counts.
inOff += (availInBefore - availInAfter)
availInBefore = availInAfter
continue
}
break
} while (!this[_hadError])
if (cb)
cb()
return writeReturn
}
}
// minimal 2-byte header
class Deflate extends Zlib {
constructor (opts) {
super(opts, constants.DEFLATE)
}
}
class Inflate extends Zlib {
constructor (opts) {
super(opts, constants.INFLATE)
}
}
// gzip - bigger header, same deflate compression
class Gzip extends Zlib {
constructor (opts) {
super(opts, constants.GZIP)
}
}
class Gunzip extends Zlib {
constructor (opts) {
super(opts, constants.GUNZIP)
}
}
// raw - no header
class DeflateRaw extends Zlib {
constructor (opts) {
super(opts, constants.DEFLATERAW)
}
}
class InflateRaw extends Zlib {
constructor (opts) {
super(opts, constants.INFLATERAW)
}
}
// auto-detect header.
class Unzip extends Zlib {
constructor (opts) {
super(opts, constants.UNZIP)
}
}
exports.Deflate = Deflate
exports.Inflate = Inflate
exports.Gzip = Gzip
exports.Gunzip = Gunzip
exports.DeflateRaw = DeflateRaw
exports.InflateRaw = InflateRaw
exports.Unzip = Unzip
{
"_from": "minizlib@^1.1.0",
"_id": "minizlib@1.1.0",
"_inBundle": false,
"_integrity": "sha1-EeE2WM5GvDpwomeqxYNZ0eDCnOs=",
"_location": "/minizlib",
"_phantomChildren": {},
"_requested": {
"type": "range",
"registry": true,
"raw": "minizlib@^1.1.0",
"name": "minizlib",
"escapedName": "minizlib",
"rawSpec": "^1.1.0",
"saveSpec": null,
"fetchSpec": "^1.1.0"
},
"_requiredBy": [
"/tar"
],
"_resolved": "http://registry.npm.taobao.org/minizlib/download/minizlib-1.1.0.tgz",
"_shasum": "11e13658ce46bc3a70a267aac58359d1e0c29ceb",
"_spec": "minizlib@^1.1.0",
"_where": "/Users/medicean/workspace/antSword/node_modules/tar",
"author": {
"name": "Isaac Z. Schlueter",
"email": "i@izs.me",
"url": "http://blog.izs.me/"
},
"bugs": {
"url": "https://github.com/isaacs/minizlib/issues"
},
"bundleDependencies": false,
"dependencies": {
"minipass": "^2.2.1"
},
"deprecated": false,
"description": "A small fast zlib stream built on [minipass](http://npm.im/minipass) and Node.js's zlib binding.",
"devDependencies": {
"tap": "^10.7.2"
},
"files": [
"index.js",
"constants.js"
],
"homepage": "https://github.com/isaacs/minizlib#readme",
"keywords": [
"zlib",
"gzip",
"gunzip",
"deflate",
"inflate",
"compression",
"zip",
"unzip"
],
"license": "MIT",
"main": "index.js",
"name": "minizlib",
"repository": {
"type": "git",
"url": "git+https://github.com/isaacs/minizlib.git"
},
"scripts": {
"postpublish": "git push origin --all; git push origin --tags",
"postversion": "npm publish",
"preversion": "npm test",
"test": "tap test/*.js --100 -J"
},
"version": "1.1.0"
}
The ISC License
Copyright (c) Isaac Z. Schlueter and Contributors
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# node-tar
[![Build Status](https://travis-ci.org/npm/node-tar.svg?branch=master)](https://travis-ci.org/npm/node-tar)
[Fast](./benchmarks) and full-featured Tar for Node.js
The API is designed to mimic the behavior of `tar(1)` on unix systems.
If you are familiar with how tar works, most of this will hopefully be
straightforward for you. If not, then hopefully this module can teach
you useful unix skills that may come in handy someday :)
## Background
A "tar file" or "tarball" is an archive of file system entries
(directories, files, links, etc.) The name comes from "tape archive".
If you run `man tar` on almost any Unix command line, you'll learn
quite a bit about what it can do, and its history.
Tar has 5 main top-level commands:
* `c` Create an archive
* `r` Replace entries within an archive
* `u` Update entries within an archive (ie, replace if they're newer)
* `t` List out the contents of an archive
* `x` Extract an archive to disk
The other flags and options modify how this top level function works.
## High-Level API
These 5 functions are the high-level API. All of them have a
single-character name (for unix nerds familiar with `tar(1)`) as well
as a long name (for everyone else).
All the high-level functions take the following arguments, all three
of which are optional and may be omitted.
1. `options` - An optional object specifying various options
2. `paths` - An array of paths to add or extract
3. `callback` - Called when the command is completed, if async. (If
sync or no file specified, providing a callback throws a
`TypeError`.)
If the command is sync (ie, if `options.sync=true`), then the
callback is not allowed, since the action will be completed immediately.
If a `file` argument is specified, and the command is async, then a
`Promise` is returned. In this case, a callback may also be
provided; it is called when the command is completed.
If a `file` option is not specified, then a stream is returned. For
`create`, this is a readable stream of the generated archive. For
`list` and `extract` this is a writable stream that an archive should
be written into. If a file is not specified, then a callback is not
allowed, because you're already getting a stream to work with.
`replace` and `update` only work on existing archives, and so require
a `file` argument.
Sync commands without a file argument return a stream that acts on its
input immediately in the same tick. For readable streams, this means
that all of the data is immediately available by calling
`stream.read()`. For writable streams, it will be acted upon as soon
as it is provided, but this can be at any time.
### Warnings
Some things cause tar to emit a warning, but should usually not cause
the entire operation to fail. There are three ways to handle
warnings:
1. **Ignore them** (default) Invalid entries won't be put in the
archive, and invalid entries won't be unpacked. This is usually
fine, but can hide failures that you might care about.
2. **Notice them** Add an `onwarn` function to the options, or listen
to the `'warn'` event on any tar stream. The function will get
called as `onwarn(message, data)`. Handle as appropriate.
3. **Explode them.** Set `strict: true` in the options object, and
`warn` messages will be emitted as `'error'` events instead. If
there's no `error` handler, this causes the program to crash. If
used with a promise-returning/callback-taking method, then it'll
send the error to the promise/callback.
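For instance, here is a minimal sketch of the second and third approaches. The archive name `out.tgz` and the path `some-folder` are made up for illustration:
```js
const tar = require('tar')

// 2. Notice warnings: pass an onwarn handler in the options.
tar.c(
  {
    file: 'out.tgz',   // hypothetical output path
    onwarn: (message, data) => console.error('tar warning:', message, data)
  },
  ['some-folder']
)

// 3. Explode warnings: strict mode turns them into errors, which reject
//    the returned promise for file-based commands.
tar.x({ file: 'out.tgz', strict: true })
  .catch(er => console.error('extraction failed:', er))
```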
### Examples
The API mimics the `tar(1)` command line functionality, with aliases
for more human-readable option and function names. The goal is that
if you know how to use `tar(1)` in Unix, then you know how to use
`require('tar')` in JavaScript.
To replicate `tar czf my-tarball.tgz files and folders`, you'd do:
```js
tar.c(
{
gzip: <true|gzip options>,
file: 'my-tarball.tgz'
},
['some', 'files', 'and', 'folders']
).then(_ => { .. tarball has been created .. })
```
To replicate `tar cz files and folders > my-tarball.tgz`, you'd do:
```js
tar.c( // or tar.create
{
gzip: <true|gzip options>
},
['some', 'files', 'and', 'folders']
).pipe(fs.createWriteStream('my-tarball.tgz'))
```
To replicate `tar xf my-tarball.tgz` you'd do:
```js
tar.x( // or tar.extract
{
file: 'my-tarball.tgz'
}
).then(_=> { .. tarball has been dumped in cwd .. })
```
To replicate `cat my-tarball.tgz | tar x -C some-dir --strip=1`:
```js
fs.createReadStream('my-tarball.tgz').pipe(
tar.x({
strip: 1,
C: 'some-dir' // alias for cwd:'some-dir', also ok
})
)
```
To replicate `tar tf my-tarball.tgz`, do this:
```js
tar.t({
file: 'my-tarball.tgz',
onentry: entry => { .. do whatever with it .. }
})
```
To replicate `cat my-tarball.tgz | tar t` do:
```js
fs.createReadStream('my-tarball.tgz')
.pipe(tar.t())
.on('entry', entry => { .. do whatever with it .. })
```
To do anything synchronous, add `sync: true` to the options. Note
that sync functions don't take a callback and don't return a promise.
When the function returns, it's already done. Sync methods without a
file argument return a sync stream, which flushes immediately. But,
of course, it still won't be done until you `.end()` it.
To filter entries, add `filter: <function>` to the options.
Tar-creating methods call the filter with `filter(path, stat)`.
Tar-reading methods (including extraction) call the filter with
`filter(path, entry)`. The filter is called in the `this`-context of
the `Pack` or `Unpack` stream object.
The arguments list to `tar t` and `tar x` specify a list of filenames
to extract or list, so they're equivalent to a filter that tests if
the file is in the list.
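As a rough sketch of both filter shapes (the archive names and paths are hypothetical): creation filters receive an `fs.Stats`-like object, while reading filters receive the entry.
```js
const tar = require('tar')

// Only pack directories and .js files when creating.
tar.c(
  {
    file: 'scripts.tgz',   // hypothetical archive name
    filter: (path, stat) => stat.isDirectory() || /\.js$/.test(path)
  },
  ['lib']
)

// Skip anything under node_modules when extracting.
tar.x({
  file: 'scripts.tgz',
  filter: (path, entry) => !/(^|\/)node_modules\//.test(path)
})
```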
For those who _aren't_ fans of tar's single-character command names:
```
tar.c === tar.create
tar.r === tar.replace (appends to archive, file is required)
tar.u === tar.update (appends if newer, file is required)
tar.x === tar.extract
tar.t === tar.list
```
Keep reading for all the command descriptions and options, as well as
the low-level API that they are built on.
### tar.c(options, fileList, callback) [alias: tar.create]
Create a tarball archive.
The `fileList` is an array of paths to add to the tarball. Adding a
directory also adds its children recursively.
An entry in `fileList` that starts with an `@` symbol is a tar archive
whose entries will be added. To add a file that starts with `@`,
prepend it with `./`.
The following options are supported:
- `file` Write the tarball archive to the specified filename. If this
is specified, then the callback will be fired when the file has been
written, and a promise will be returned that resolves when the file
is written. If a filename is not specified, then a Readable Stream
will be returned which will emit the file data. [Alias: `f`]
- `sync` Act synchronously. If this is set, then any provided file
will be fully written after the call to `tar.c`. If this is set,
and a file is not provided, then the resulting stream will already
have the data ready to `read` or `emit('data')` as soon as you
request it.
- `onwarn` A function that will get called with `(message, data)` for
any warnings encountered.
- `strict` Treat warnings as crash-worthy errors. Default false.
- `cwd` The current working directory for creating the archive.
Defaults to `process.cwd()`. [Alias: `C`]
- `prefix` A path portion to prefix onto the entries in the archive.
- `gzip` Set to any truthy value to create a gzipped archive, or an
object with settings for `zlib.Gzip()` [Alias: `z`]
- `filter` A function that gets called with `(path, stat)` for each
entry being added. Return `true` to add the entry to the archive,
or `false` to omit it.
- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
`uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
  that `mtime` is still included, because this is necessary for other
time-based operations.
- `preservePaths` Allow absolute paths. By default, `/` is stripped
from absolute paths. [Alias: `P`]
- `mode` The mode to set on the created file archive
- `noDirRecurse` Do not recursively archive the contents of
directories. [Alias: `n`]
- `follow` Set to true to pack the targets of symbolic links. Without
this option, symbolic links are archived as such. [Alias: `L`, `h`]
- `noPax` Suppress pax extended headers. Note that this means that
long paths and linkpaths will be truncated, and large or negative
numeric values may be interpreted incorrectly.
- `noMtime` Set to true to omit writing `mtime` values for entries.
Note that this prevents using other mtime-based features like
`tar.update` or the `keepNewer` option with the resulting tar archive.
[Alias: `m`, `no-mtime`]
- `mtime` Set to a `Date` object to force a specific `mtime` for
everything added to the archive. Overridden by `noMtime`.
The following options are mostly internal, but can be modified in some
advanced use cases, such as re-using caches between runs.
- `linkCache` A Map object containing the device and inode value for
any file whose nlink is > 1, to identify hard links.
- `statCache` A Map object that caches calls `lstat`.
- `readdirCache` A Map object that caches calls to `readdir`.
- `jobs` A number specifying how many concurrent jobs to run.
Defaults to 4.
- `maxReadSize` The maximum buffer size for `fs.read()` operations.
Defaults to 16 MB.
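As a rough illustration of several of the options above (the file name, `cwd`, and `prefix` values are hypothetical):
```js
const tar = require('tar')

tar.c(
  {
    gzip: { level: 9 },      // an options object is passed through to zlib
    file: 'release.tgz',     // hypothetical output file
    cwd: 'build',            // entries are gathered relative to ./build
    prefix: 'myapp-1.0.0',   // prefixed onto every entry path
    portable: true,          // drop system-specific metadata
    mtime: new Date(0)       // force a fixed mtime for reproducible output
  },
  ['.']
).then(() => console.log('release.tgz written'))
```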
### tar.x(options, fileList, callback) [alias: tar.extract]
Extract a tarball archive.
The `fileList` is an array of paths to extract from the tarball. If
no paths are provided, then all the entries are extracted.
If the archive is gzipped, then tar will detect this and unzip it.
Note that all directories that are created will be forced to be
writable, readable, and listable by their owner, to avoid cases where
a directory prevents extraction of child entries by virtue of its
mode.
Most extraction errors will cause a `warn` event to be emitted. If
the `cwd` is missing, or not a directory, then the extraction will
fail completely.
The following options are supported:
- `cwd` Extract files relative to the specified directory. Defaults
to `process.cwd()`. If provided, this must exist and must be a
directory. [Alias: `C`]
- `file` The archive file to extract. If not specified, then a
Writable stream is returned where the archive data should be
written. [Alias: `f`]
- `sync` Create files and directories synchronously.
- `strict` Treat warnings as crash-worthy errors. Default false.
- `filter` A function that gets called with `(path, entry)` for each
entry being unpacked. Return `true` to unpack the entry from the
archive, or `false` to skip it.
- `newer` Set to true to keep the existing file on disk if it's newer
than the file in the archive. [Alias: `keep-newer`,
`keep-newer-files`]
- `keep` Do not overwrite existing files. In particular, if a file
appears more than once in an archive, later copies will not
overwrite earlier copies. [Alias: `k`, `keep-existing`]
- `preservePaths` Allow absolute paths, paths containing `..`, and
extracting through symbolic links. By default, `/` is stripped from
absolute paths, `..` paths are not extracted, and any file whose
location would be modified by a symbolic link is not extracted.
[Alias: `P`]
- `unlink` Unlink files before creating them. Without this option,
tar overwrites existing files, which preserves existing hardlinks.
With this option, existing hardlinks will be broken, as will any
symlink that would affect the location of an extracted file. [Alias:
`U`]
- `strip` Remove the specified number of leading path elements.
Pathnames with fewer elements will be silently skipped. Note that
the pathname is edited after applying the filter, but before
security checks. [Alias: `strip-components`, `stripComponents`]
- `onwarn` A function that will get called with `(message, data)` for
any warnings encountered.
- `preserveOwner` If true, tar will set the `uid` and `gid` of
extracted entries to the `uid` and `gid` fields in the archive.
This defaults to true when run as root, and false otherwise. If
false, then files and directories will be set with the owner and
group of the user running the process. This is similar to `-p` in
`tar(1)`, but ACLs and other system-specific data is never unpacked
in this implementation, and modes are set by default already.
[Alias: `p`]
- `uid` Set to a number to force ownership of all extracted files and
folders, and all implicitly created directories, to be owned by the
specified user id, regardless of the `uid` field in the archive.
Cannot be used along with `preserveOwner`. Requires also setting a
`gid` option.
- `gid` Set to a number to force ownership of all extracted files and
folders, and all implicitly created directories, to be owned by the
specified group id, regardless of the `gid` field in the archive.
Cannot be used along with `preserveOwner`. Requires also setting a
`uid` option.
- `noMtime` Set to true to omit writing `mtime` value for extracted
entries. [Alias: `m`, `no-mtime`]
- `transform` Provide a function that takes an `entry` object, and
returns a stream, or any falsey value. If a stream is provided,
then that stream's data will be written instead of the contents of
the archive entry. If a falsey value is provided, then the entry is
written to disk as normal. (To exclude items from extraction, use
the `filter` option described above.)
- `onentry` A function that gets called with `(entry)` for each entry
that passes the filter.
The following options are mostly internal, but can be modified in some
advanced use cases, such as re-using caches between runs.
- `maxReadSize` The maximum buffer size for `fs.read()` operations.
Defaults to 16 MB.
- `umask` Filter the modes of entries like `process.umask()`.
- `dmode` Default mode for directories
- `fmode` Default mode for files
- `dirCache` A Map object of which directories exist.
- `maxMetaEntrySize` The maximum size of meta entries that is
supported. Defaults to 1 MB.
Note that using an asynchronous stream type with the `transform`
option will cause undefined behavior in sync extractions.
[MiniPass](http://npm.im/minipass)-based streams are designed for this
use case.
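For example, here is a hedged sketch combining `strip` with a `transform` (the archive name and the upper-casing transform are invented; since this is an async extraction, a standard `stream.Transform` is fine):
```js
const tar = require('tar')
const { Transform } = require('stream')

tar.x({
  file: 'site.tgz',   // hypothetical archive
  cwd: 'public',      // must already exist and be a directory
  strip: 1,           // drop the leading path element from each entry
  // Upper-case the contents of .txt entries; write everything else as-is.
  transform: entry =>
    /\.txt$/.test(entry.path)
      ? new Transform({
          transform (chunk, encoding, callback) {
            callback(null, chunk.toString('utf8').toUpperCase())
          }
        })
      : null
})
```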
### tar.t(options, fileList, callback) [alias: tar.list]
List the contents of a tarball archive.
The `fileList` is an array of paths to list from the tarball. If
no paths are provided, then all the entries are listed.
If the archive is gzipped, then tar will detect this and unzip it.
Returns an event emitter that emits `entry` events with
`tar.ReadEntry` objects. However, they don't emit `'data'` or `'end'`
events. (If you want to get actual readable entries, use the
`tar.Parse` class instead.)
The following options are supported:
- `cwd` Extract files relative to the specified directory. Defaults
to `process.cwd()`. [Alias: `C`]
- `file` The archive file to list. If not specified, then a
Writable stream is returned where the archive data should be
written. [Alias: `f`]
- `sync` Read the specified file synchronously. (This has no effect
when a file option isn't specified, because entries are emitted as
fast as they are parsed from the stream anyway.)
- `strict` Treat warnings as crash-worthy errors. Default false.
- `filter` A function that gets called with `(path, entry)` for each
entry being listed. Return `true` to emit the entry from the
archive, or `false` to skip it.
- `onentry` A function that gets called with `(entry)` for each entry
that passes the filter. This is important for when both `file` and
`sync` are set, because it will be called synchronously.
- `maxReadSize` The maximum buffer size for `fs.read()` operations.
Defaults to 16 MB.
- `noResume` By default, `entry` streams are resumed immediately after
the call to `onentry`. Set `noResume: true` to suppress this
behavior. Note that by opting into this, the stream will never
complete until the entry data is consumed.
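A short sketch that collects entry paths from a hypothetical archive; because both `file` and `sync` are set, `onentry` is called synchronously before `tar.t` returns:
```js
const tar = require('tar')

const paths = []
tar.t({
  file: 'backup.tgz',   // hypothetical archive
  sync: true,
  onentry: entry => paths.push(entry.path)
})
console.log(paths)
```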
### tar.u(options, fileList, callback) [alias: tar.update]
Add files to an archive if they are newer than the entry already in
the tarball archive.
The `fileList` is an array of paths to add to the tarball. Adding a
directory also adds its children recursively.
An entry in `fileList` that starts with an `@` symbol is a tar archive
whose entries will be added. To add a file that starts with `@`,
prepend it with `./`.
The following options are supported:
- `file` Required. Write the tarball archive to the specified
filename. [Alias: `f`]
- `sync` Act synchronously. If this is set, then any provided file
  will be fully written after the call to `tar.u`.
- `onwarn` A function that will get called with `(message, data)` for
any warnings encountered.
- `strict` Treat warnings as crash-worthy errors. Default false.
- `cwd` The current working directory for adding entries to the
archive. Defaults to `process.cwd()`. [Alias: `C`]
- `prefix` A path portion to prefix onto the entries in the archive.
- `gzip` Set to any truthy value to create a gzipped archive, or an
object with settings for `zlib.Gzip()` [Alias: `z`]
- `filter` A function that gets called with `(path, stat)` for each
entry being added. Return `true` to add the entry to the archive,
or `false` to omit it.
- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
`uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
  that `mtime` is still included, because this is necessary for other
time-based operations.
- `preservePaths` Allow absolute paths. By default, `/` is stripped
from absolute paths. [Alias: `P`]
- `maxReadSize` The maximum buffer size for `fs.read()` operations.
Defaults to 16 MB.
- `noDirRecurse` Do not recursively archive the contents of
directories. [Alias: `n`]
- `follow` Set to true to pack the targets of symbolic links. Without
this option, symbolic links are archived as such. [Alias: `L`, `h`]
- `noPax` Suppress pax extended headers. Note that this means that
long paths and linkpaths will be truncated, and large or negative
numeric values may be interpreted incorrectly.
- `noMtime` Set to true to omit writing `mtime` values for entries.
Note that this prevents using other mtime-based features like
`tar.update` or the `keepNewer` option with the resulting tar archive.
[Alias: `m`, `no-mtime`]
- `mtime` Set to a `Date` object to force a specific `mtime` for
everything added to the archive. Overridden by `noMtime`.
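A minimal sketch, assuming an archive named `site.tgz` already exists; only files that are newer than their archived copies are appended:
```js
const tar = require('tar')

tar.u(
  { file: 'site.tgz' },   // hypothetical existing archive (required)
  ['index.html', 'assets']
).then(() => console.log('archive updated'))
```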
### tar.r(options, fileList, callback) [alias: tar.replace]
Add files to an existing archive. Because later entries override
earlier entries, this effectively replaces any existing entries.
The `fileList` is an array of paths to add to the tarball. Adding a
directory also adds its children recursively.
An entry in `fileList` that starts with an `@` symbol is a tar archive
whose entries will be added. To add a file that starts with `@`,
prepend it with `./`.
The following options are supported:
- `file` Required. Write the tarball archive to the specified
filename. [Alias: `f`]
- `sync` Act synchronously. If this is set, then any provided file
  will be fully written after the call to `tar.r`.
- `onwarn` A function that will get called with `(message, data)` for
any warnings encountered.
- `strict` Treat warnings as crash-worthy errors. Default false.
- `cwd` The current working directory for adding entries to the
archive. Defaults to `process.cwd()`. [Alias: `C`]
- `prefix` A path portion to prefix onto the entries in the archive.
- `gzip` Set to any truthy value to create a gzipped archive, or an
object with settings for `zlib.Gzip()` [Alias: `z`]
- `filter` A function that gets called with `(path, stat)` for each
entry being added. Return `true` to add the entry to the archive,
or `false` to omit it.
- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
`uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
  that `mtime` is still included, because this is necessary for other
time-based operations.
- `preservePaths` Allow absolute paths. By default, `/` is stripped
from absolute paths. [Alias: `P`]
- `maxReadSize` The maximum buffer size for `fs.read()` operations.
Defaults to 16 MB.
- `noDirRecurse` Do not recursively archive the contents of
directories. [Alias: `n`]
- `follow` Set to true to pack the targets of symbolic links. Without
this option, symbolic links are archived as such. [Alias: `L`, `h`]
- `noPax` Suppress pax extended headers. Note that this means that
long paths and linkpaths will be truncated, and large or negative
numeric values may be interpreted incorrectly.
- `noMtime` Set to true to omit writing `mtime` values for entries.
Note that this prevents using other mtime-based features like
`tar.update` or the `keepNewer` option with the resulting tar archive.
[Alias: `m`, `no-mtime`]
- `mtime` Set to a `Date` object to force a specific `mtime` for
everything added to the archive. Overridden by `noMtime`.
## Low-Level API
### class tar.Pack
A readable tar stream.
Has all the standard readable stream interface stuff. `'data'` and
`'end'` events, `read()` method, `pause()` and `resume()`, etc.
#### constructor(options)
The following options are supported:
- `onwarn` A function that will get called with `(message, data)` for
any warnings encountered.
- `strict` Treat warnings as crash-worthy errors. Default false.
- `cwd` The current working directory for creating the archive.
Defaults to `process.cwd()`.
- `prefix` A path portion to prefix onto the entries in the archive.
- `gzip` Set to any truthy value to create a gzipped archive, or an
object with settings for `zlib.Gzip()`
- `filter` A function that gets called with `(path, stat)` for each
entry being added. Return `true` to add the entry to the archive,
or `false` to omit it.
- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
`uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
  that `mtime` is still included, because this is necessary for other
time-based operations.
- `preservePaths` Allow absolute paths. By default, `/` is stripped
from absolute paths.
- `linkCache` A Map object containing the device and inode value for
any file whose nlink is > 1, to identify hard links.
- `statCache` A Map object that caches calls `lstat`.
- `readdirCache` A Map object that caches calls to `readdir`.
- `jobs` A number specifying how many concurrent jobs to run.
Defaults to 4.
- `maxReadSize` The maximum buffer size for `fs.read()` operations.
Defaults to 16 MB.
- `noDirRecurse` Do not recursively archive the contents of
directories.
- `follow` Set to true to pack the targets of symbolic links. Without
this option, symbolic links are archived as such.
- `noPax` Suppress pax extended headers. Note that this means that
long paths and linkpaths will be truncated, and large or negative
numeric values may be interpreted incorrectly.
- `noMtime` Set to true to omit writing `mtime` values for entries.
Note that this prevents using other mtime-based features like
`tar.update` or the `keepNewer` option with the resulting tar archive.
- `mtime` Set to a `Date` object to force a specific `mtime` for
everything added to the archive. Overridden by `noMtime`.
#### add(path)
Adds an entry to the archive. Returns the Pack stream.
#### write(path)
Adds an entry to the archive. Returns true if flushed.
#### end()
Finishes the archive.
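A rough sketch of driving a `Pack` stream by hand and piping it to a file (the `cwd` and paths are hypothetical):
```js
const tar = require('tar')
const fs = require('fs')

const pack = new tar.Pack({ cwd: 'src', gzip: true })   // hypothetical cwd
pack.pipe(fs.createWriteStream('src.tgz'))

pack.add('index.js')   // returns the Pack stream, so calls can be chained
pack.add('lib')
pack.end()             // finish the archive
```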
### class tar.Pack.Sync
Synchronous version of `tar.Pack`.
### class tar.Unpack
A writable stream that unpacks a tar archive onto the file system.
All the normal writable stream stuff is supported. `write()` and
`end()` methods, `'drain'` events, etc.
Note that all directories that are created will be forced to be
writable, readable, and listable by their owner, to avoid cases where
a directory prevents extraction of child entries by virtue of its
mode.
`'close'` is emitted when it's done writing stuff to the file system.
Most unpack errors will cause a `warn` event to be emitted. If the
`cwd` is missing, or not a directory, then an error will be emitted.
#### constructor(options)
- `cwd` Extract files relative to the specified directory. Defaults
to `process.cwd()`. If provided, this must exist and must be a
directory.
- `filter` A function that gets called with `(path, entry)` for each
entry being unpacked. Return `true` to unpack the entry from the
archive, or `false` to skip it.
- `newer` Set to true to keep the existing file on disk if it's newer
than the file in the archive.
- `keep` Do not overwrite existing files. In particular, if a file
appears more than once in an archive, later copies will not
overwrite earlier copies.
- `preservePaths` Allow absolute paths, paths containing `..`, and
extracting through symbolic links. By default, `/` is stripped from
absolute paths, `..` paths are not extracted, and any file whose
location would be modified by a symbolic link is not extracted.
- `unlink` Unlink files before creating them. Without this option,
tar overwrites existing files, which preserves existing hardlinks.
With this option, existing hardlinks will be broken, as will any
symlink that would affect the location of an extracted file.
- `strip` Remove the specified number of leading path elements.
Pathnames with fewer elements will be silently skipped. Note that
the pathname is edited after applying the filter, but before
security checks.
- `onwarn` A function that will get called with `(message, data)` for
any warnings encountered.
- `umask` Filter the modes of entries like `process.umask()`.
- `dmode` Default mode for directories
- `fmode` Default mode for files
- `dirCache` A Map object of which directories exist.
- `maxMetaEntrySize` The maximum size of meta entries that is
supported. Defaults to 1 MB.
- `preserveOwner` If true, tar will set the `uid` and `gid` of
extracted entries to the `uid` and `gid` fields in the archive.
This defaults to true when run as root, and false otherwise. If
false, then files and directories will be set with the owner and
group of the user running the process. This is similar to `-p` in
`tar(1)`, but ACLs and other system-specific data is never unpacked
in this implementation, and modes are set by default already.
- `win32` True if on a windows platform. Causes behavior where
filenames containing `<|>?` chars are converted to
windows-compatible values while being unpacked.
- `uid` Set to a number to force ownership of all extracted files and
folders, and all implicitly created directories, to be owned by the
specified user id, regardless of the `uid` field in the archive.
Cannot be used along with `preserveOwner`. Requires also setting a
`gid` option.
- `gid` Set to a number to force ownership of all extracted files and
folders, and all implicitly created directories, to be owned by the
specified group id, regardless of the `gid` field in the archive.
Cannot be used along with `preserveOwner`. Requires also setting a
`uid` option.
- `noMtime` Set to true to omit writing `mtime` value for extracted
entries.
- `transform` Provide a function that takes an `entry` object, and
returns a stream, or any falsey value. If a stream is provided,
then that stream's data will be written instead of the contents of
the archive entry. If a falsey value is provided, then the entry is
written to disk as normal. (To exclude items from extraction, use
the `filter` option described above.)
- `strict` Treat warnings as crash-worthy errors. Default false.
- `onentry` A function that gets called with `(entry)` for each entry
that passes the filter.
- `onwarn` A function that will get called with `(message, data)` for
any warnings encountered.
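A minimal sketch piping raw archive data into an `Unpack` stream (the archive name and target directory are made up; the directory must already exist):
```js
const tar = require('tar')
const fs = require('fs')

const unpack = new tar.Unpack({
  cwd: 'extracted',   // must exist and be a directory
  onwarn: (message, data) => console.error('unpack warning:', message)
})
unpack.on('close', () => console.log('done writing to disk'))

fs.createReadStream('archive.tgz').pipe(unpack)
```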
### class tar.Unpack.Sync
Synchronous version of `tar.Unpack`.
Note that using an asynchronous stream type with the `transform`
option will cause undefined behavior in sync unpack streams.
[MiniPass](http://npm.im/minipass)-based streams are designed for this
use case.
### class tar.Parse
A writable stream that parses a tar archive stream. All the standard
writable stream stuff is supported.
If the archive is gzipped, then tar will detect this and unzip it.
Emits `'entry'` events with `tar.ReadEntry` objects, which are
themselves readable streams that you can pipe wherever.
Each `entry` will not emit until the one before it is flushed through,
so make sure to either consume the data (with `on('data', ...)` or
`.pipe(...)`) or throw it away with `.resume()` to keep the stream
flowing.
#### constructor(options)
Returns an event emitter that emits `entry` events with
`tar.ReadEntry` objects.
The following options are supported:
- `strict` Treat warnings as crash-worthy errors. Default false.
- `filter` A function that gets called with `(path, entry)` for each
entry being listed. Return `true` to emit the entry from the
archive, or `false` to skip it.
- `onentry` A function that gets called with `(entry)` for each entry
that passes the filter.
- `onwarn` A function that will get called with `(message, data)` for
any warnings encountered.
#### abort(message, error)
Stop all parsing activities. This is called when there are zlib
errors. It also emits a warning with the message and error provided.
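For example, a sketch that streams a hypothetical archive through `tar.Parse` and measures each file entry, consuming the data so the stream keeps flowing:
```js
const tar = require('tar')
const fs = require('fs')

const parser = new tar.Parse({
  filter: (path, entry) => entry.type === 'File'
})

parser.on('entry', entry => {
  let size = 0
  entry.on('data', chunk => size += chunk.length)
  entry.on('end', () => console.log(entry.path, size, 'bytes'))
})

fs.createReadStream('bundle.tgz').pipe(parser)
```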
### class tar.ReadEntry extends [MiniPass](http://npm.im/minipass)
A representation of an entry that is being read out of a tar archive.
It has the following fields:
- `extended` The extended metadata object provided to the constructor.
- `globalExtended` The global extended metadata object provided to the
constructor.
- `remain` The number of bytes remaining to be written into the
stream.
- `blockRemain` The number of 512-byte blocks remaining to be written
into the stream.
- `ignore` Whether this entry should be ignored.
- `meta` True if this represents metadata about the next entry, false
if it represents a filesystem object.
- All the fields from the header, extended header, and global extended
header are added to the ReadEntry object. So it has `path`, `type`,
  `size`, `mode`, and so on.
#### constructor(header, extended, globalExtended)
Create a new ReadEntry object with the specified header, extended
header, and global extended header values.
### class tar.WriteEntry extends [MiniPass](http://npm.im/minipass)
A representation of an entry that is being written from the file
system into a tar archive.
Emits data for the Header, and for the Pax Extended Header if one is
required, as well as any body data.
Creating a WriteEntry for a directory does not also create
WriteEntry objects for all of the directory contents.
It has the following fields:
- `path` The path field that will be written to the archive. By
default, this is also the path from the cwd to the file system
object.
- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
`uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
  that `mtime` is still included, because this is necessary for other
time-based operations.
- `myuid` If supported, the uid of the user running the current
process.
- `myuser` The `env.USER` string if set, or `''`. Set as the entry
`uname` field if the file's `uid` matches `this.myuid`.
- `maxReadSize` The maximum buffer size for `fs.read()` operations.
Defaults to 1 MB.
- `linkCache` A Map object containing the device and inode value for
any file whose nlink is > 1, to identify hard links.
- `statCache` A Map object that caches calls `lstat`.
- `preservePaths` Allow absolute paths. By default, `/` is stripped
from absolute paths.
- `cwd` The current working directory for creating the archive.
Defaults to `process.cwd()`.
- `absolute` The absolute path to the entry on the filesystem. By
default, this is `path.resolve(this.cwd, this.path)`, but it can be
overridden explicitly.
- `strict` Treat warnings as crash-worthy errors. Default false.
- `win32` True if on a windows platform. Causes behavior where paths
replace `\` with `/` and filenames containing the windows-compatible
forms of `<|>?:` characters are converted to actual `<|>?:` characters
in the archive.
- `noPax` Suppress pax extended headers. Note that this means that
long paths and linkpaths will be truncated, and large or negative
numeric values may be interpreted incorrectly.
- `noMtime` Set to true to omit writing `mtime` values for entries.
Note that this prevents using other mtime-based features like
`tar.update` or the `keepNewer` option with the resulting tar archive.
#### constructor(path, options)
`path` is the path of the entry as it is written in the archive.
The following options are supported:
- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
`uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
  that `mtime` is still included, because this is necessary for other
time-based operations.
- `maxReadSize` The maximum buffer size for `fs.read()` operations.
Defaults to 1 MB.
- `linkCache` A Map object containing the device and inode value for
any file whose nlink is > 1, to identify hard links.
- `statCache` A Map object that caches calls `lstat`.
- `preservePaths` Allow absolute paths. By default, `/` is stripped
from absolute paths.
- `cwd` The current working directory for creating the archive.
Defaults to `process.cwd()`.
- `absolute` The absolute path to the entry on the filesystem. By
default, this is `path.resolve(this.cwd, this.path)`, but it can be
overridden explicitly.
- `strict` Treat warnings as crash-worthy errors. Default false.
- `win32` True if on a windows platform. Causes behavior where paths
replace `\` with `/`.
- `onwarn` A function that will get called with `(message, data)` for
any warnings encountered.
- `noMtime` Set to true to omit writing `mtime` values for entries.
Note that this prevents using other mtime-based features like
`tar.update` or the `keepNewer` option with the resulting tar archive.
- `umask` Set to restrict the modes on the entries in the archive,
somewhat like how umask works on file creation. Defaults to
`process.umask()` on unix systems, or `0o22` on Windows.
#### warn(message, data)
If strict, emit an error with the provided message.
Otherwise, emit a `'warn'` event with the provided message and data.
### class tar.WriteEntry.Sync
Synchronous version of tar.WriteEntry
### class tar.WriteEntry.Tar
A version of tar.WriteEntry that gets its data from a tar.ReadEntry
instead of from the filesystem.
#### constructor(readEntry, options)
`readEntry` is the entry being read out of another archive.
The following options are supported:
- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
`uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
  that `mtime` is still included, because this is necessary for other
time-based operations.
- `preservePaths` Allow absolute paths. By default, `/` is stripped
from absolute paths.
- `strict` Treat warnings as crash-worthy errors. Default false.
- `onwarn` A function that will get called with `(message, data)` for
any warnings encountered.
- `noMtime` Set to true to omit writing `mtime` values for entries.
Note that this prevents using other mtime-based features like
`tar.update` or the `keepNewer` option with the resulting tar archive.
### class tar.Header
A class for reading and writing header blocks.
It has the following fields:
- `nullBlock` True if decoding a block which is entirely composed of
`0x00` null bytes. (Useful because tar files are terminated by
at least 2 null blocks.)
- `cksumValid` True if the checksum in the header is valid, false
otherwise.
- `needPax` True if the values, as encoded, will require a Pax
extended header.
- `path` The path of the entry.
- `mode` The 4 lowest-order octal digits of the file mode. That is,
read/write/execute permissions for world, group, and owner, and the
setuid, setgid, and sticky bits.
- `uid` Numeric user id of the file owner
- `gid` Numeric group id of the file owner
- `size` Size of the file in bytes
- `mtime` Modified time of the file
- `cksum` The checksum of the header. This is generated by adding all
the bytes of the header block, treating the checksum field itself as
all ascii space characters (that is, `0x20`).
- `type` The human-readable name of the type of entry this represents,
or the alphanumeric key if unknown.
- `typeKey` The alphanumeric key for the type of entry this header
represents.
- `linkpath` The target of Link and SymbolicLink entries.
- `uname` Human-readable user name of the file owner
- `gname` Human-readable group name of the file owner
- `devmaj` The major portion of the device number. Always `0` for
files, directories, and links.
- `devmin` The minor portion of the device number. Always `0` for
files, directories, and links.
- `atime` File access time.
- `ctime` File change time.
#### constructor(data, [offset=0])
`data` is optional. It is either a Buffer that should be interpreted
as a tar Header starting at the specified offset and continuing for
512 bytes, or a data object of keys and values to set on the header
object, and eventually encode as a tar Header.
#### decode(block, offset)
Decode the provided buffer starting at the specified offset.
The buffer must contain at least 512 bytes starting at the offset.
#### set(data)
Set the fields in the data object.
#### encode(buffer, offset)
Encode the header fields into the buffer at the specified offset.
Returns `this.needPax` to indicate whether a Pax Extended Header is
required to properly encode the specified data.
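A small sketch of encoding a header into a 512-byte block and decoding it again (the field values are arbitrary):
```js
const tar = require('tar')

const header = new tar.Header({
  path: 'hello.txt',
  mode: 0o644,
  size: 11,
  type: 'File',
  mtime: new Date()
})

const block = Buffer.alloc(512)
const needsPax = header.encode(block, 0)
console.log('needs pax extended header?', needsPax)

const decoded = new tar.Header(block)
console.log(decoded.path, decoded.size, decoded.cksumValid)
```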
### class tar.Pax
An object representing a set of key-value pairs in a Pax extended
header entry.
It has the following fields. Where the same name is used, they have
the same semantics as the tar.Header field of the same name.
- `global` True if this represents a global extended header, or false
if it is for a single entry.
- `atime`
- `charset`
- `comment`
- `ctime`
- `gid`
- `gname`
- `linkpath`
- `mtime`
- `path`
- `size`
- `uid`
- `uname`
- `dev`
- `ino`
- `nlink`
#### constructor(object, global)
Set the fields from the provided object. `global` is a boolean that defaults
to false.
#### encode()
Return a Buffer containing the header and body for the Pax extended
header entry, or `null` if there is nothing to encode.
#### encodeBody()
Return a string representing the body of the pax extended header
entry.
#### encodeField(fieldName)
Return a string representing the key/value encoding for the specified
fieldName, or `''` if the field is unset.
### tar.Pax.parse(string, extended, global)
Return a new Pax object created by parsing the contents of the string
provided.
If the `extended` object is set, then also add the fields from that
object. (This is necessary because multiple metadata entries can
occur in sequence.)
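A small sketch of building a pax extended header record by hand (the values are arbitrary):
```js
const tar = require('tar')

const pax = new tar.Pax({
  path: 'a/very/long/path/that/does/not/fit/in/a/plain/ustar/header.txt',
  uid: 1000,
  mtime: new Date()
})

const buf = pax.encode()        // header block plus body, or null if empty
console.log(buf === null ? 'nothing to encode' : buf.length + ' bytes')
console.log(pax.encodeBody())   // the raw "key=value" body as a string
```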
### tar.types
A translation table for the `type` field in tar headers.
#### tar.types.name.get(code)
Get the human-readable name for a given alphanumeric code.
#### tar.types.code.get(name)
Get the alphanumeric code for a given human-readable name.
'use strict'
// high-level commands
exports.c = exports.create = require('./lib/create.js')
exports.r = exports.replace = require('./lib/replace.js')
exports.t = exports.list = require('./lib/list.js')
exports.u = exports.update = require('./lib/update.js')
exports.x = exports.extract = require('./lib/extract.js')
// classes
exports.Pack = require('./lib/pack.js')
exports.Unpack = require('./lib/unpack.js')
exports.Parse = require('./lib/parse.js')
exports.ReadEntry = require('./lib/read-entry.js')
exports.WriteEntry = require('./lib/write-entry.js')
exports.Header = require('./lib/header.js')
exports.Pax = require('./lib/pax.js')
exports.types = require('./lib/types.js')
'use strict'
// Buffer in node 4.x < 4.5.0 doesn't have working Buffer.from
// or Buffer.alloc, and Buffer in node 10 deprecated the ctor.
// .M, this is fine .\^/M..
let B = Buffer
/* istanbul ignore next */
if (!B.alloc) {
B = require('safe-buffer').Buffer
}
module.exports = B
'use strict'
// tar -c
const hlo = require('./high-level-opt.js')
const Pack = require('./pack.js')
const fs = require('fs')
const fsm = require('fs-minipass')
const t = require('./list.js')
const path = require('path')
const c = module.exports = (opt_, files, cb) => {
if (typeof files === 'function')
cb = files
if (Array.isArray(opt_))
files = opt_, opt_ = {}
if (!files || !Array.isArray(files) || !files.length)
throw new TypeError('no files or directories specified')
files = Array.from(files)
const opt = hlo(opt_)
if (opt.sync && typeof cb === 'function')
throw new TypeError('callback not supported for sync tar functions')
if (!opt.file && typeof cb === 'function')
throw new TypeError('callback only supported with file option')
return opt.file && opt.sync ? createFileSync(opt, files)
: opt.file ? createFile(opt, files, cb)
: opt.sync ? createSync(opt, files)
: create(opt, files)
}
const createFileSync = (opt, files) => {
const p = new Pack.Sync(opt)
const stream = new fsm.WriteStreamSync(opt.file, {
mode: opt.mode || 0o666
})
p.pipe(stream)
addFilesSync(p, files)
}
const createFile = (opt, files, cb) => {
const p = new Pack(opt)
const stream = new fsm.WriteStream(opt.file, {
mode: opt.mode || 0o666
})
p.pipe(stream)
const promise = new Promise((res, rej) => {
stream.on('error', rej)
stream.on('close', res)
p.on('error', rej)
})
addFilesAsync(p, files)
return cb ? promise.then(cb, cb) : promise
}
const addFilesSync = (p, files) => {
files.forEach(file => {
if (file.charAt(0) === '@')
t({
file: path.resolve(p.cwd, file.substr(1)),
sync: true,
noResume: true,
onentry: entry => p.add(entry)
})
else
p.add(file)
})
p.end()
}
const addFilesAsync = (p, files) => {
while (files.length) {
const file = files.shift()
if (file.charAt(0) === '@')
return t({
file: path.resolve(p.cwd, file.substr(1)),
noResume: true,
onentry: entry => p.add(entry)
}).then(_ => addFilesAsync(p, files))
else
p.add(file)
}
p.end()
}
const createSync = (opt, files) => {
const p = new Pack.Sync(opt)
addFilesSync(p, files)
return p
}
const create = (opt, files) => {
const p = new Pack(opt)
addFilesAsync(p, files)
return p
}
'use strict'
// tar -x
const hlo = require('./high-level-opt.js')
const Unpack = require('./unpack.js')
const fs = require('fs')
const fsm = require('fs-minipass')
const path = require('path')
const x = module.exports = (opt_, files, cb) => {
if (typeof opt_ === 'function')
cb = opt_, files = null, opt_ = {}
else if (Array.isArray(opt_))
files = opt_, opt_ = {}
if (typeof files === 'function')
cb = files, files = null
if (!files)
files = []
else
files = Array.from(files)
const opt = hlo(opt_)
if (opt.sync && typeof cb === 'function')
throw new TypeError('callback not supported for sync tar functions')
if (!opt.file && typeof cb === 'function')
throw new TypeError('callback only supported with file option')
if (files.length)
filesFilter(opt, files)
return opt.file && opt.sync ? extractFileSync(opt)
: opt.file ? extractFile(opt, cb)
: opt.sync ? extractSync(opt)
: extract(opt)
}
// construct a filter that limits the file entries listed
// include child entries if a dir is included
const filesFilter = (opt, files) => {
const map = new Map(files.map(f => [f.replace(/\/+$/, ''), true]))
const filter = opt.filter
const mapHas = (file, r) => {
const root = r || path.parse(file).root || '.'
const ret = file === root ? false
: map.has(file) ? map.get(file)
: mapHas(path.dirname(file), root)
map.set(file, ret)
return ret
}
opt.filter = filter
? (file, entry) => filter(file, entry) && mapHas(file.replace(/\/+$/, ''))
: file => mapHas(file.replace(/\/+$/, ''))
}
const extractFileSync = opt => {
const u = new Unpack.Sync(opt)
const file = opt.file
const stat = fs.statSync(file)
// This trades a zero-byte read() syscall for a stat
// However, it will usually result in less memory allocation
const readSize = opt.maxReadSize || 16*1024*1024
const stream = new fsm.ReadStreamSync(file, {
readSize: readSize,
size: stat.size
})
stream.pipe(u)
}
const extractFile = (opt, cb) => {
const u = new Unpack(opt)
const readSize = opt.maxReadSize || 16*1024*1024
const file = opt.file
const p = new Promise((resolve, reject) => {
u.on('error', reject)
u.on('close', resolve)
// This trades a zero-byte read() syscall for a stat
// However, it will usually result in less memory allocation
fs.stat(file, (er, stat) => {
if (er)
reject(er)
else {
const stream = new fsm.ReadStream(file, {
readSize: readSize,
size: stat.size
})
stream.on('error', reject)
stream.pipe(u)
}
})
})
return cb ? p.then(cb, cb) : p
}
const extractSync = opt => {
return new Unpack.Sync(opt)
}
const extract = opt => {
return new Unpack(opt)
}
'use strict'
// parse a 512-byte header block to a data object, or vice-versa
// encode returns `true` if a pax extended header is needed, because
// the data could not be faithfully encoded in a simple header.
// (Also, check header.needPax to see if it needs a pax header.)
const Buffer = require('./buffer.js')
const types = require('./types.js')
const pathModule = require('path').posix
const large = require('./large-numbers.js')
const SLURP = Symbol('slurp')
const TYPE = Symbol('type')
class Header {
constructor (data, off, ex, gex) {
this.cksumValid = false
this.needPax = false
this.nullBlock = false
this.block = null
this.path = null
this.mode = null
this.uid = null
this.gid = null
this.size = null
this.mtime = null
this.cksum = null
this[TYPE] = '0'
this.linkpath = null
this.uname = null
this.gname = null
this.devmaj = 0
this.devmin = 0
this.atime = null
this.ctime = null
if (Buffer.isBuffer(data))
this.decode(data, off || 0, ex, gex)
else if (data)
this.set(data)
}
decode (buf, off, ex, gex) {
if (!off)
off = 0
if (!buf || !(buf.length >= off + 512))
throw new Error('need 512 bytes for header')
this.path = decString(buf, off, 100)
this.mode = decNumber(buf, off + 100, 8)
this.uid = decNumber(buf, off + 108, 8)
this.gid = decNumber(buf, off + 116, 8)
this.size = decNumber(buf, off + 124, 12)
this.mtime = decDate(buf, off + 136, 12)
this.cksum = decNumber(buf, off + 148, 12)
// if we have extended or global extended headers, apply them now
// See https://github.com/npm/node-tar/pull/187
this[SLURP](ex)
this[SLURP](gex, true)
// old tar versions marked dirs as a file with a trailing /
this[TYPE] = decString(buf, off + 156, 1)
if (this[TYPE] === '')
this[TYPE] = '0'
if (this[TYPE] === '0' && this.path.substr(-1) === '/')
this[TYPE] = '5'
// tar implementations sometimes incorrectly put the stat(dir).size
// as the size in the tarball, even though Directory entries are
// not able to have any body at all. In the very rare chance that
// it actually DOES have a body, we weren't going to do anything with
// it anyway, and it'll just be a warning about an invalid header.
if (this[TYPE] === '5')
this.size = 0
this.linkpath = decString(buf, off + 157, 100)
if (buf.slice(off + 257, off + 265).toString() === 'ustar\u000000') {
this.uname = decString(buf, off + 265, 32)
this.gname = decString(buf, off + 297, 32)
this.devmaj = decNumber(buf, off + 329, 8)
this.devmin = decNumber(buf, off + 337, 8)
if (buf[off + 475] !== 0) {
// definitely a prefix, definitely >130 chars.
const prefix = decString(buf, off + 345, 155)
this.path = prefix + '/' + this.path
} else {
const prefix = decString(buf, off + 345, 130)
if (prefix)
this.path = prefix + '/' + this.path
this.atime = decDate(buf, off + 476, 12)
this.ctime = decDate(buf, off + 488, 12)
}
}
let sum = 8 * 0x20
for (let i = off; i < off + 148; i++) {
sum += buf[i]
}
for (let i = off + 156; i < off + 512; i++) {
sum += buf[i]
}
this.cksumValid = sum === this.cksum
if (this.cksum === null && sum === 8 * 0x20)
this.nullBlock = true
}
[SLURP] (ex, global) {
for (let k in ex) {
// we slurp in everything except for the path attribute in
// a global extended header, because that's weird.
if (ex[k] !== null && ex[k] !== undefined &&
!(global && k === 'path'))
this[k] = ex[k]
}
}
encode (buf, off) {
if (!buf) {
buf = this.block = Buffer.alloc(512)
off = 0
}
if (!off)
off = 0
if (!(buf.length >= off + 512))
throw new Error('need 512 bytes for header')
const prefixSize = this.ctime || this.atime ? 130 : 155
const split = splitPrefix(this.path || '', prefixSize)
const path = split[0]
const prefix = split[1]
this.needPax = split[2]
this.needPax = encString(buf, off, 100, path) || this.needPax
this.needPax = encNumber(buf, off + 100, 8, this.mode) || this.needPax
this.needPax = encNumber(buf, off + 108, 8, this.uid) || this.needPax
this.needPax = encNumber(buf, off + 116, 8, this.gid) || this.needPax
this.needPax = encNumber(buf, off + 124, 12, this.size) || this.needPax
this.needPax = encDate(buf, off + 136, 12, this.mtime) || this.needPax
buf[off + 156] = this[TYPE].charCodeAt(0)
this.needPax = encString(buf, off + 157, 100, this.linkpath) || this.needPax
buf.write('ustar\u000000', off + 257, 8)
this.needPax = encString(buf, off + 265, 32, this.uname) || this.needPax
this.needPax = encString(buf, off + 297, 32, this.gname) || this.needPax
this.needPax = encNumber(buf, off + 329, 8, this.devmaj) || this.needPax
this.needPax = encNumber(buf, off + 337, 8, this.devmin) || this.needPax
this.needPax = encString(buf, off + 345, prefixSize, prefix) || this.needPax
if (buf[off + 475] !== 0)
this.needPax = encString(buf, off + 345, 155, prefix) || this.needPax
else {
this.needPax = encString(buf, off + 345, 130, prefix) || this.needPax
this.needPax = encDate(buf, off + 476, 12, this.atime) || this.needPax
this.needPax = encDate(buf, off + 488, 12, this.ctime) || this.needPax
}
let sum = 8 * 0x20
for (let i = off; i < off + 148; i++) {
sum += buf[i]
}
for (let i = off + 156; i < off + 512; i++) {
sum += buf[i]
}
this.cksum = sum
encNumber(buf, off + 148, 8, this.cksum)
this.cksumValid = true
return this.needPax
}
set (data) {
for (let i in data) {
if (data[i] !== null && data[i] !== undefined)
this[i] = data[i]
}
}
get type () {
return types.name.get(this[TYPE]) || this[TYPE]
}
get typeKey () {
return this[TYPE]
}
set type (type) {
if (types.code.has(type))
this[TYPE] = types.code.get(type)
else
this[TYPE] = type
}
}
const splitPrefix = (p, prefixSize) => {
const pathSize = 100
let pp = p
let prefix = ''
let ret
const root = pathModule.parse(p).root || '.'
if (Buffer.byteLength(pp) < pathSize)
ret = [pp, prefix, false]
else {
// first set prefix to the dir, and path to the base
prefix = pathModule.dirname(pp)
pp = pathModule.basename(pp)
do {
// both fit!
if (Buffer.byteLength(pp) <= pathSize &&
Buffer.byteLength(prefix) <= prefixSize)
ret = [pp, prefix, false]
// prefix fits in prefix, but path doesn't fit in path
else if (Buffer.byteLength(pp) > pathSize &&
Buffer.byteLength(prefix) <= prefixSize)
ret = [pp.substr(0, pathSize - 1), prefix, true]
else {
// make path take a bit from prefix
pp = pathModule.join(pathModule.basename(prefix), pp)
prefix = pathModule.dirname(prefix)
}
} while (prefix !== root && !ret)
// at this point, found no resolution, just truncate
if (!ret)
ret = [p.substr(0, pathSize - 1), '', true]
}
return ret
}
const decString = (buf, off, size) =>
buf.slice(off, off + size).toString('utf8').replace(/\0.*/, '')
const decDate = (buf, off, size) =>
numToDate(decNumber(buf, off, size))
const numToDate = num => num === null ? null : new Date(num * 1000)
const decNumber = (buf, off, size) =>
buf[off] & 0x80 ? large.parse(buf.slice(off, off + size))
: decSmallNumber(buf, off, size)
const nanNull = value => isNaN(value) ? null : value
const decSmallNumber = (buf, off, size) =>
nanNull(parseInt(
buf.slice(off, off + size)
.toString('utf8').replace(/\0.*$/, '').trim(), 8))
// the maximum encodable as a null-terminated octal, by field size
const MAXNUM = {
12: 0o77777777777,
8 : 0o7777777
}
const encNumber = (buf, off, size, number) =>
number === null ? false :
number > MAXNUM[size] || number < 0
? (large.encode(number, buf.slice(off, off + size)), true)
: (encSmallNumber(buf, off, size, number), false)
const encSmallNumber = (buf, off, size, number) =>
buf.write(octalString(number, size), off, size, 'ascii')
const octalString = (number, size) =>
padOctal(Math.floor(number).toString(8), size)
const padOctal = (string, size) =>
(string.length === size - 1 ? string
: new Array(size - string.length - 1).join('0') + string + ' ') + '\0'
const encDate = (buf, off, size, date) =>
date === null ? false :
encNumber(buf, off, size, date.getTime() / 1000)
// enough to fill the longest string we've got
const NULLS = new Array(156).join('\0')
// pad with nulls, return true if it's longer or non-ascii
const encString = (buf, off, size, string) =>
string === null ? false :
(buf.write(string + NULLS, off, size, 'utf8'),
string.length !== Buffer.byteLength(string) || string.length > size)
module.exports = Header
'use strict'
// turn tar(1) style args like `C` into the more verbose things like `cwd`
const argmap = new Map([
['C', 'cwd'],
['f', 'file'],
['z', 'gzip'],
['P', 'preservePaths'],
['U', 'unlink'],
['strip-components', 'strip'],
['stripComponents', 'strip'],
['keep-newer', 'newer'],
['keepNewer', 'newer'],
['keep-newer-files', 'newer'],
['keepNewerFiles', 'newer'],
['k', 'keep'],
['keep-existing', 'keep'],
['keepExisting', 'keep'],
['m', 'noMtime'],
['no-mtime', 'noMtime'],
['p', 'preserveOwner'],
['L', 'follow'],
['h', 'follow']
])
const parse = module.exports = opt => opt ? Object.keys(opt).map(k => [
argmap.has(k) ? argmap.get(k) : k, opt[k]
]).reduce((set, kv) => (set[kv[0]] = kv[1], set), Object.create(null)) : {}
'use strict'
// Tar can encode large and negative numbers using a leading byte of
// 0xff for negative, and 0x80 for positive. The trailing byte in the
// section will always be 0x20, or in some implementations 0x00.
// this module encodes and decodes these things.
const encode = exports.encode = (num, buf) => {
buf[buf.length - 1] = 0x20
if (num < 0)
encodeNegative(num, buf)
else
encodePositive(num, buf)
return buf
}
const encodePositive = (num, buf) => {
buf[0] = 0x80
for (var i = buf.length - 2; i > 0; i--) {
if (num === 0)
buf[i] = 0
else {
buf[i] = num % 0x100
num = Math.floor(num / 0x100)
}
}
}
const encodeNegative = (num, buf) => {
buf[0] = 0xff
var flipped = false
num = num * -1
for (var i = buf.length - 2; i > 0; i--) {
var byte
if (num === 0)
byte = 0
else {
byte = num % 0x100
num = Math.floor(num / 0x100)
}
if (flipped)
buf[i] = onesComp(byte)
else if (byte === 0)
buf[i] = 0
else {
flipped = true
buf[i] = twosComp(byte)
}
}
}
const parse = exports.parse = (buf) => {
var post = buf[buf.length - 1]
var pre = buf[0]
return pre === 0x80 ? pos(buf.slice(1, buf.length - 1))
: twos(buf.slice(1, buf.length - 1))
}
const twos = (buf) => {
var len = buf.length
var sum = 0
var flipped = false
for (var i = len - 1; i > -1; i--) {
var byte = buf[i]
var f
if (flipped)
f = onesComp(byte)
else if (byte === 0)
f = byte
else {
flipped = true
f = twosComp(byte)
}
if (f !== 0)
sum += f * Math.pow(256, len - i - 1)
}
return sum * -1
}
const pos = (buf) => {
var len = buf.length
var sum = 0
for (var i = len - 1; i > -1; i--) {
var byte = buf[i]
if (byte !== 0)
sum += byte * Math.pow(256, len - i - 1)
}
return sum
}
const onesComp = byte => (0xff ^ byte) & 0xff
const twosComp = byte => ((0xff ^ byte) + 1) & 0xff
'use strict'
const Buffer = require('./buffer.js')
// XXX: This shares a lot in common with extract.js
// maybe some DRY opportunity here?
// tar -t
const hlo = require('./high-level-opt.js')
const Parser = require('./parse.js')
const fs = require('fs')
const fsm = require('fs-minipass')
const path = require('path')
const t = module.exports = (opt_, files, cb) => {
if (typeof opt_ === 'function')
cb = opt_, files = null, opt_ = {}
else if (Array.isArray(opt_))
files = opt_, opt_ = {}
if (typeof files === 'function')
cb = files, files = null
if (!files)
files = []
else
files = Array.from(files)
const opt = hlo(opt_)
if (opt.sync && typeof cb === 'function')
throw new TypeError('callback not supported for sync tar functions')
if (!opt.file && typeof cb === 'function')
throw new TypeError('callback only supported with file option')
if (files.length)
filesFilter(opt, files)
if (!opt.noResume)
onentryFunction(opt)
return opt.file && opt.sync ? listFileSync(opt)
: opt.file ? listFile(opt, cb)
: list(opt)
}
const onentryFunction = opt => {
const onentry = opt.onentry
opt.onentry = onentry ? e => {
onentry(e)
e.resume()
} : e => e.resume()
}
// construct a filter that limits the file entries listed
// include child entries if a dir is included
const filesFilter = (opt, files) => {
const map = new Map(files.map(f => [f.replace(/\/+$/, ''), true]))
const filter = opt.filter
const mapHas = (file, r) => {
const root = r || path.parse(file).root || '.'
const ret = file === root ? false
: map.has(file) ? map.get(file)
: mapHas(path.dirname(file), root)
map.set(file, ret)
return ret
}
opt.filter = filter
? (file, entry) => filter(file, entry) && mapHas(file.replace(/\/+$/, ''))
: file => mapHas(file.replace(/\/+$/, ''))
}
const listFileSync = opt => {
const p = list(opt)
const file = opt.file
let threw = true
let fd
try {
const stat = fs.statSync(file)
const readSize = opt.maxReadSize || 16*1024*1024
if (stat.size < readSize) {
p.end(fs.readFileSync(file))
} else {
let pos = 0
const buf = Buffer.allocUnsafe(readSize)
fd = fs.openSync(file, 'r')
while (pos < stat.size) {
let bytesRead = fs.readSync(fd, buf, 0, readSize, pos)
pos += bytesRead
p.write(buf.slice(0, bytesRead))
}
p.end()
}
threw = false
} finally {
if (threw && fd)
try { fs.closeSync(fd) } catch (er) {}
}
}
const listFile = (opt, cb) => {
const parse = new Parser(opt)
const readSize = opt.maxReadSize || 16*1024*1024
const file = opt.file
const p = new Promise((resolve, reject) => {
parse.on('error', reject)
parse.on('end', resolve)
fs.stat(file, (er, stat) => {
if (er)
reject(er)
else {
const stream = new fsm.ReadStream(file, {
readSize: readSize,
size: stat.size
})
stream.on('error', reject)
stream.pipe(parse)
}
})
})
return cb ? p.then(cb, cb) : p
}
const list = opt => new Parser(opt)
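// Example (illustrative, not part of the original source): listing entries
// with the function exported above. The archive name is hypothetical.
//
//   t({ file: 'archive.tar', onentry: entry => console.log(entry.path) })
//
//   // synchronously, restricted to particular paths:
//   t({ file: 'archive.tar', sync: true,
//       onentry: entry => console.log(entry.path) },
//     ['some/dir', 'README.md'])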
'use strict'
// wrapper around mkdirp for tar's needs.
// TODO: This should probably be a class, not functionally
// passing around state in a gazillion args.
const mkdirp = require('mkdirp')
const fs = require('fs')
const path = require('path')
const chownr = require('chownr')
class SymlinkError extends Error {
constructor (symlink, path) {
super('Cannot extract through symbolic link')
this.path = path
this.symlink = symlink
}
get name () {
    return 'SymlinkError'
}
}
class CwdError extends Error {
constructor (path, code) {
super(code + ': Cannot cd into \'' + path + '\'')
this.path = path
this.code = code
}
get name () {
return 'CwdError'
}
}
const mkdir = module.exports = (dir, opt, cb) => {
// if there's any overlap between mask and mode,
// then we'll need an explicit chmod
const umask = opt.umask
const mode = opt.mode | 0o0700
const needChmod = (mode & umask) !== 0
const uid = opt.uid
const gid = opt.gid
const doChown = typeof uid === 'number' &&
typeof gid === 'number' &&
( uid !== opt.processUid || gid !== opt.processGid )
const preserve = opt.preserve
const unlink = opt.unlink
const cache = opt.cache
const cwd = opt.cwd
const done = (er, created) => {
if (er)
cb(er)
else {
cache.set(dir, true)
if (created && doChown)
chownr(created, uid, gid, er => done(er))
else if (needChmod)
fs.chmod(dir, mode, cb)
else
cb()
}
}
if (cache && cache.get(dir) === true)
return done()
if (dir === cwd)
return fs.lstat(dir, (er, st) => {
if (er || !st.isDirectory())
er = new CwdError(dir, er && er.code || 'ENOTDIR')
done(er)
})
if (preserve)
return mkdirp(dir, mode, done)
const sub = path.relative(cwd, dir)
const parts = sub.split(/\/|\\/)
mkdir_(cwd, parts, mode, cache, unlink, cwd, null, done)
}
const mkdir_ = (base, parts, mode, cache, unlink, cwd, created, cb) => {
if (!parts.length)
return cb(null, created)
const p = parts.shift()
const part = base + '/' + p
if (cache.get(part))
return mkdir_(part, parts, mode, cache, unlink, cwd, created, cb)
fs.mkdir(part, mode, onmkdir(part, parts, mode, cache, unlink, cwd, created, cb))
}
const onmkdir = (part, parts, mode, cache, unlink, cwd, created, cb) => er => {
if (er) {
if (er.path && path.dirname(er.path) === cwd &&
(er.code === 'ENOTDIR' || er.code === 'ENOENT'))
return cb(new CwdError(cwd, er.code))
fs.lstat(part, (statEr, st) => {
if (statEr)
cb(statEr)
else if (st.isDirectory())
mkdir_(part, parts, mode, cache, unlink, cwd, created, cb)
else if (unlink)
fs.unlink(part, er => {
if (er)
return cb(er)
fs.mkdir(part, mode, onmkdir(part, parts, mode, cache, unlink, cwd, created, cb))
})
else if (st.isSymbolicLink())
return cb(new SymlinkError(part, part + '/' + parts.join('/')))
else
cb(er)
})
} else {
created = created || part
mkdir_(part, parts, mode, cache, unlink, cwd, created, cb)
}
}
const mkdirSync = module.exports.sync = (dir, opt) => {
// if there's any overlap between mask and mode,
// then we'll need an explicit chmod
const umask = opt.umask
const mode = opt.mode | 0o0700
const needChmod = (mode & umask) !== 0
const uid = opt.uid
const gid = opt.gid
const doChown = typeof uid === 'number' &&
typeof gid === 'number' &&
( uid !== opt.processUid || gid !== opt.processGid )
const preserve = opt.preserve
const unlink = opt.unlink
const cache = opt.cache
const cwd = opt.cwd
const done = (created) => {
cache.set(dir, true)
if (created && doChown)
chownr.sync(created, uid, gid)
if (needChmod)
fs.chmodSync(dir, mode)
}
if (cache && cache.get(dir) === true)
return done()
if (dir === cwd) {
let ok = false
let code = 'ENOTDIR'
try {
ok = fs.lstatSync(dir).isDirectory()
} catch (er) {
code = er.code
} finally {
if (!ok)
throw new CwdError(dir, code)
}
done()
return
}
if (preserve)
return done(mkdirp.sync(dir, mode))
const sub = path.relative(cwd, dir)
const parts = sub.split(/\/|\\/)
let created = null
for (let p = parts.shift(), part = cwd;
p && (part += '/' + p);
p = parts.shift()) {
if (cache.get(part))
continue
try {
fs.mkdirSync(part, mode)
created = created || part
cache.set(part, true)
} catch (er) {
if (er.path && path.dirname(er.path) === cwd &&
(er.code === 'ENOTDIR' || er.code === 'ENOENT'))
return new CwdError(cwd, er.code)
const st = fs.lstatSync(part)
if (st.isDirectory()) {
cache.set(part, true)
continue
} else if (unlink) {
fs.unlinkSync(part)
fs.mkdirSync(part, mode)
created = created || part
cache.set(part, true)
continue
} else if (st.isSymbolicLink())
return new SymlinkError(part, part + '/' + parts.join('/'))
}
}
return done(created)
}
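// Example (illustrative, not part of the original source): creating a
// directory tree roughly the way unpack.js drives this wrapper. All of the
// option values shown are hypothetical.
//
//   mkdir(path.resolve('a/b/c'), {
//     cwd: process.cwd(),
//     mode: 0o755,
//     umask: process.umask(),
//     uid: null, gid: null,                // no chown wanted
//     processUid: null, processGid: null,
//     preserve: false, unlink: false,
//     cache: new Map()
//   }, er => { if (er) throw er })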
'use strict'
module.exports = (mode, isDir) => {
mode &= 0o7777
// if dirs are readable, then they should be listable
if (isDir) {
if (mode & 0o400)
mode |= 0o100
if (mode & 0o40)
mode |= 0o10
if (mode & 0o4)
mode |= 0o1
}
return mode
}
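// Example (illustrative, not part of the original source), using the name
// this module is required under elsewhere (modeFix): a readable directory
// gains the matching search bits, while a plain file is left alone.
//
//   modeFix(0o640, true)    // => 0o750
//   modeFix(0o640, false)   // => 0o640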
'use strict'
const Buffer = require('./buffer.js')
// A readable tar stream creator
// Technically, this is a transform stream that you write paths into,
// and tar format comes out of.
// The `add()` method is like `write()` but returns this,
// and end() returns `this` as well, so you can
// do `new Pack(opt).add('files').add('dir').end().pipe(output)`
// You could also do something like:
// streamOfPaths().pipe(new Pack()).pipe(new fs.WriteStream('out.tar'))
class PackJob {
constructor (path, absolute) {
this.path = path || './'
this.absolute = absolute
this.entry = null
this.stat = null
this.readdir = null
this.pending = false
this.ignore = false
this.piped = false
}
}
const MiniPass = require('minipass')
const zlib = require('minizlib')
const ReadEntry = require('./read-entry.js')
const WriteEntry = require('./write-entry.js')
const WriteEntrySync = WriteEntry.Sync
const WriteEntryTar = WriteEntry.Tar
const Yallist = require('yallist')
const EOF = Buffer.alloc(1024)
const ONSTAT = Symbol('onStat')
const ENDED = Symbol('ended')
const QUEUE = Symbol('queue')
const CURRENT = Symbol('current')
const PROCESS = Symbol('process')
const PROCESSING = Symbol('processing')
const PROCESSJOB = Symbol('processJob')
const JOBS = Symbol('jobs')
const JOBDONE = Symbol('jobDone')
const ADDFSENTRY = Symbol('addFSEntry')
const ADDTARENTRY = Symbol('addTarEntry')
const STAT = Symbol('stat')
const READDIR = Symbol('readdir')
const ONREADDIR = Symbol('onreaddir')
const PIPE = Symbol('pipe')
const ENTRY = Symbol('entry')
const ENTRYOPT = Symbol('entryOpt')
const WRITEENTRYCLASS = Symbol('writeEntryClass')
const WRITE = Symbol('write')
const ONDRAIN = Symbol('ondrain')
const fs = require('fs')
const path = require('path')
const warner = require('./warn-mixin.js')
const Pack = warner(class Pack extends MiniPass {
constructor (opt) {
super(opt)
opt = opt || Object.create(null)
this.opt = opt
this.cwd = opt.cwd || process.cwd()
this.maxReadSize = opt.maxReadSize
this.preservePaths = !!opt.preservePaths
this.strict = !!opt.strict
this.noPax = !!opt.noPax
this.prefix = (opt.prefix || '').replace(/(\\|\/)+$/, '')
this.linkCache = opt.linkCache || new Map()
this.statCache = opt.statCache || new Map()
this.readdirCache = opt.readdirCache || new Map()
this[WRITEENTRYCLASS] = WriteEntry
if (typeof opt.onwarn === 'function')
this.on('warn', opt.onwarn)
this.zip = null
if (opt.gzip) {
if (typeof opt.gzip !== 'object')
opt.gzip = {}
this.zip = new zlib.Gzip(opt.gzip)
this.zip.on('data', chunk => super.write(chunk))
this.zip.on('end', _ => super.end())
this.zip.on('drain', _ => this[ONDRAIN]())
this.on('resume', _ => this.zip.resume())
} else
this.on('drain', this[ONDRAIN])
this.portable = !!opt.portable
this.noDirRecurse = !!opt.noDirRecurse
this.follow = !!opt.follow
this.noMtime = !!opt.noMtime
this.mtime = opt.mtime || null
this.filter = typeof opt.filter === 'function' ? opt.filter : _ => true
this[QUEUE] = new Yallist
this[JOBS] = 0
this.jobs = +opt.jobs || 4
this[PROCESSING] = false
this[ENDED] = false
}
[WRITE] (chunk) {
return super.write(chunk)
}
add (path) {
this.write(path)
return this
}
end (path) {
if (path)
this.write(path)
this[ENDED] = true
this[PROCESS]()
return this
}
write (path) {
if (this[ENDED])
throw new Error('write after end')
if (path instanceof ReadEntry)
this[ADDTARENTRY](path)
else
this[ADDFSENTRY](path)
return this.flowing
}
[ADDTARENTRY] (p) {
const absolute = path.resolve(this.cwd, p.path)
if (this.prefix)
p.path = this.prefix + '/' + p.path.replace(/^\.(\/+|$)/, '')
// in this case, we don't have to wait for the stat
if (!this.filter(p.path, p))
p.resume()
else {
const job = new PackJob(p.path, absolute, false)
job.entry = new WriteEntryTar(p, this[ENTRYOPT](job))
job.entry.on('end', _ => this[JOBDONE](job))
this[JOBS] += 1
this[QUEUE].push(job)
}
this[PROCESS]()
}
[ADDFSENTRY] (p) {
const absolute = path.resolve(this.cwd, p)
if (this.prefix)
p = this.prefix + '/' + p.replace(/^\.(\/+|$)/, '')
this[QUEUE].push(new PackJob(p, absolute))
this[PROCESS]()
}
[STAT] (job) {
job.pending = true
this[JOBS] += 1
const stat = this.follow ? 'stat' : 'lstat'
fs[stat](job.absolute, (er, stat) => {
job.pending = false
this[JOBS] -= 1
if (er)
this.emit('error', er)
else
this[ONSTAT](job, stat)
})
}
[ONSTAT] (job, stat) {
this.statCache.set(job.absolute, stat)
job.stat = stat
// now we have the stat, we can filter it.
if (!this.filter(job.path, stat))
job.ignore = true
this[PROCESS]()
}
[READDIR] (job) {
job.pending = true
this[JOBS] += 1
fs.readdir(job.absolute, (er, entries) => {
job.pending = false
this[JOBS] -= 1
if (er)
return this.emit('error', er)
this[ONREADDIR](job, entries)
})
}
[ONREADDIR] (job, entries) {
this.readdirCache.set(job.absolute, entries)
job.readdir = entries
this[PROCESS]()
}
[PROCESS] () {
if (this[PROCESSING])
return
this[PROCESSING] = true
for (let w = this[QUEUE].head;
w !== null && this[JOBS] < this.jobs;
w = w.next) {
this[PROCESSJOB](w.value)
if (w.value.ignore) {
const p = w.next
this[QUEUE].removeNode(w)
w.next = p
}
}
this[PROCESSING] = false
if (this[ENDED] && !this[QUEUE].length && this[JOBS] === 0) {
if (this.zip)
this.zip.end(EOF)
else {
super.write(EOF)
super.end()
}
}
}
get [CURRENT] () {
return this[QUEUE] && this[QUEUE].head && this[QUEUE].head.value
}
[JOBDONE] (job) {
this[QUEUE].shift()
this[JOBS] -= 1
this[PROCESS]()
}
[PROCESSJOB] (job) {
if (job.pending)
return
if (job.entry) {
if (job === this[CURRENT] && !job.piped)
this[PIPE](job)
return
}
if (!job.stat) {
if (this.statCache.has(job.absolute))
this[ONSTAT](job, this.statCache.get(job.absolute))
else
this[STAT](job)
}
if (!job.stat)
return
// filtered out!
if (job.ignore)
return
if (!this.noDirRecurse && job.stat.isDirectory() && !job.readdir) {
if (this.readdirCache.has(job.absolute))
this[ONREADDIR](job, this.readdirCache.get(job.absolute))
else
this[READDIR](job)
if (!job.readdir)
return
}
// we know it doesn't have an entry, because that got checked above
job.entry = this[ENTRY](job)
if (!job.entry) {
job.ignore = true
return
}
if (job === this[CURRENT] && !job.piped)
this[PIPE](job)
}
[ENTRYOPT] (job) {
return {
onwarn: (msg, data) => {
this.warn(msg, data)
},
noPax: this.noPax,
cwd: this.cwd,
absolute: job.absolute,
preservePaths: this.preservePaths,
maxReadSize: this.maxReadSize,
strict: this.strict,
portable: this.portable,
linkCache: this.linkCache,
statCache: this.statCache,
noMtime: this.noMtime,
mtime: this.mtime
}
}
[ENTRY] (job) {
this[JOBS] += 1
try {
return new this[WRITEENTRYCLASS](job.path, this[ENTRYOPT](job))
.on('end', () => this[JOBDONE](job))
.on('error', er => this.emit('error', er))
} catch (er) {
this.emit('error', er)
}
}
[ONDRAIN] () {
if (this[CURRENT] && this[CURRENT].entry)
this[CURRENT].entry.resume()
}
// like .pipe() but using super, because our write() is special
[PIPE] (job) {
job.piped = true
if (job.readdir)
job.readdir.forEach(entry => {
const p = this.prefix ?
job.path.slice(this.prefix.length + 1) || './'
: job.path
const base = p === './' ? '' : p.replace(/\/*$/, '/')
this[ADDFSENTRY](base + entry)
})
const source = job.entry
const zip = this.zip
if (zip)
source.on('data', chunk => {
if (!zip.write(chunk))
source.pause()
})
else
source.on('data', chunk => {
if (!super.write(chunk))
source.pause()
})
}
pause () {
if (this.zip)
this.zip.pause()
return super.pause()
}
})
class PackSync extends Pack {
constructor (opt) {
super(opt)
this[WRITEENTRYCLASS] = WriteEntrySync
}
// pause/resume are no-ops in sync streams.
pause () {}
resume () {}
[STAT] (job) {
const stat = this.follow ? 'statSync' : 'lstatSync'
this[ONSTAT](job, fs[stat](job.absolute))
}
[READDIR] (job, stat) {
this[ONREADDIR](job, fs.readdirSync(job.absolute))
}
// gotta get it all in this tick
[PIPE] (job) {
const source = job.entry
const zip = this.zip
if (job.readdir)
job.readdir.forEach(entry => {
const p = this.prefix ?
job.path.slice(this.prefix.length + 1) || './'
: job.path
const base = p === './' ? '' : p.replace(/\/*$/, '/')
this[ADDFSENTRY](base + entry)
})
if (zip)
source.on('data', chunk => {
zip.write(chunk)
})
else
source.on('data', chunk => {
super[WRITE](chunk)
})
}
}
Pack.Sync = PackSync
module.exports = Pack
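// Example (illustrative, not part of the original source): packing a couple
// of paths into a gzipped tarball with the Pack stream above. The paths are
// hypothetical.
//
//   const pack = new Pack({ cwd: '/some/project', gzip: true })
//   pack.add('src').add('package.json').end()
//     .pipe(fs.createWriteStream('/tmp/project.tgz'))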
'use strict'
// this[BUFFER] is the remainder of a chunk if we're waiting for
// the full 512 bytes of a header to come in. We will Buffer.concat()
// it to the next write(), which is a mem copy, but a small one.
//
// this[QUEUE] is a Yallist of entries that haven't been emitted
// yet. This can only get filled up if the user keeps write()ing after
// a write() returns false, or does a write() with more than one entry
//
// We don't buffer chunks, we always parse them and either create an
// entry, or push it into the active entry. The ReadEntry class knows
// to throw data away if .ignore=true
//
// Shift entry off the buffer when it emits 'end', and emit 'entry' for
// the next one in the list.
//
// At any time, we're pushing body chunks into the entry at WRITEENTRY,
// and waiting for 'end' on the entry at READENTRY
//
// ignored entries get .resume() called on them straight away
const warner = require('./warn-mixin.js')
const path = require('path')
const Header = require('./header.js')
const EE = require('events')
const Yallist = require('yallist')
const maxMetaEntrySize = 1024 * 1024
const Entry = require('./read-entry.js')
const Pax = require('./pax.js')
const zlib = require('minizlib')
const gzipHeader = Buffer.from([0x1f, 0x8b])
const STATE = Symbol('state')
const WRITEENTRY = Symbol('writeEntry')
const READENTRY = Symbol('readEntry')
const NEXTENTRY = Symbol('nextEntry')
const PROCESSENTRY = Symbol('processEntry')
const EX = Symbol('extendedHeader')
const GEX = Symbol('globalExtendedHeader')
const META = Symbol('meta')
const EMITMETA = Symbol('emitMeta')
const BUFFER = Symbol('buffer')
const QUEUE = Symbol('queue')
const ENDED = Symbol('ended')
const EMITTEDEND = Symbol('emittedEnd')
const EMIT = Symbol('emit')
const UNZIP = Symbol('unzip')
const CONSUMECHUNK = Symbol('consumeChunk')
const CONSUMECHUNKSUB = Symbol('consumeChunkSub')
const CONSUMEBODY = Symbol('consumeBody')
const CONSUMEMETA = Symbol('consumeMeta')
const CONSUMEHEADER = Symbol('consumeHeader')
const CONSUMING = Symbol('consuming')
const BUFFERCONCAT = Symbol('bufferConcat')
const MAYBEEND = Symbol('maybeEnd')
const WRITING = Symbol('writing')
const ABORTED = Symbol('aborted')
const DONE = Symbol('onDone')
const noop = _ => true
module.exports = warner(class Parser extends EE {
constructor (opt) {
opt = opt || {}
super(opt)
if (opt.ondone)
this.on(DONE, opt.ondone)
else
this.on(DONE, _ => {
this.emit('prefinish')
this.emit('finish')
this.emit('end')
this.emit('close')
})
this.strict = !!opt.strict
this.maxMetaEntrySize = opt.maxMetaEntrySize || maxMetaEntrySize
this.filter = typeof opt.filter === 'function' ? opt.filter : noop
// have to set this so that streams are ok piping into it
this.writable = true
this.readable = false
this[QUEUE] = new Yallist()
this[BUFFER] = null
this[READENTRY] = null
this[WRITEENTRY] = null
this[STATE] = 'begin'
this[META] = ''
this[EX] = null
this[GEX] = null
this[ENDED] = false
this[UNZIP] = null
this[ABORTED] = false
if (typeof opt.onwarn === 'function')
this.on('warn', opt.onwarn)
if (typeof opt.onentry === 'function')
this.on('entry', opt.onentry)
}
[CONSUMEHEADER] (chunk, position) {
const header = new Header(chunk, position, this[EX], this[GEX])
if (header.nullBlock)
this[EMIT]('nullBlock')
else if (!header.cksumValid)
this.warn('invalid entry', header)
else if (!header.path)
this.warn('invalid: path is required', header)
else {
const type = header.type
if (/^(Symbolic)?Link$/.test(type) && !header.linkpath)
this.warn('invalid: linkpath required', header)
else if (!/^(Symbolic)?Link$/.test(type) && header.linkpath)
this.warn('invalid: linkpath forbidden', header)
else {
const entry = this[WRITEENTRY] = new Entry(header, this[EX], this[GEX])
if (entry.meta) {
if (entry.size > this.maxMetaEntrySize) {
entry.ignore = true
this[EMIT]('ignoredEntry', entry)
this[STATE] = 'ignore'
} else if (entry.size > 0) {
this[META] = ''
entry.on('data', c => this[META] += c)
this[STATE] = 'meta'
}
} else {
this[EX] = null
entry.ignore = entry.ignore || !this.filter(entry.path, entry)
if (entry.ignore) {
this[EMIT]('ignoredEntry', entry)
this[STATE] = entry.remain ? 'ignore' : 'begin'
} else {
if (entry.remain)
this[STATE] = 'body'
else {
this[STATE] = 'begin'
entry.end()
}
if (!this[READENTRY]) {
this[QUEUE].push(entry)
this[NEXTENTRY]()
} else
this[QUEUE].push(entry)
}
}
}
}
}
[PROCESSENTRY] (entry) {
let go = true
if (!entry) {
this[READENTRY] = null
go = false
} else if (Array.isArray(entry))
this.emit.apply(this, entry)
else {
this[READENTRY] = entry
this.emit('entry', entry)
if (!entry.emittedEnd) {
entry.on('end', _ => this[NEXTENTRY]())
go = false
}
}
return go
}
[NEXTENTRY] () {
do {} while (this[PROCESSENTRY](this[QUEUE].shift()))
if (!this[QUEUE].length) {
// At this point, there's nothing in the queue, but we may have an
// entry which is being consumed (readEntry).
// If we don't, then we definitely can handle more data.
// If we do, and either it's flowing, or it has never had any data
// written to it, then it needs more.
// The only other possibility is that it has returned false from a
// write() call, so we wait for the next drain to continue.
const re = this[READENTRY]
const drainNow = !re || re.flowing || re.size === re.remain
if (drainNow) {
if (!this[WRITING])
this.emit('drain')
} else
re.once('drain', _ => this.emit('drain'))
}
}
[CONSUMEBODY] (chunk, position) {
// write up to but no more than writeEntry.blockRemain
const entry = this[WRITEENTRY]
const br = entry.blockRemain
const c = (br >= chunk.length && position === 0) ? chunk
: chunk.slice(position, position + br)
entry.write(c)
if (!entry.blockRemain) {
this[STATE] = 'begin'
this[WRITEENTRY] = null
entry.end()
}
return c.length
}
[CONSUMEMETA] (chunk, position) {
const entry = this[WRITEENTRY]
const ret = this[CONSUMEBODY](chunk, position)
// if we finished, then the entry is reset
if (!this[WRITEENTRY])
this[EMITMETA](entry)
return ret
}
[EMIT] (ev, data, extra) {
if (!this[QUEUE].length && !this[READENTRY])
this.emit(ev, data, extra)
else
this[QUEUE].push([ev, data, extra])
}
[EMITMETA] (entry) {
this[EMIT]('meta', this[META])
switch (entry.type) {
case 'ExtendedHeader':
case 'OldExtendedHeader':
this[EX] = Pax.parse(this[META], this[EX], false)
break
case 'GlobalExtendedHeader':
this[GEX] = Pax.parse(this[META], this[GEX], true)
break
case 'NextFileHasLongPath':
case 'OldGnuLongPath':
this[EX] = this[EX] || Object.create(null)
this[EX].path = this[META].replace(/\0.*/, '')
break
case 'NextFileHasLongLinkpath':
this[EX] = this[EX] || Object.create(null)
this[EX].linkpath = this[META].replace(/\0.*/, '')
break
/* istanbul ignore next */
default: throw new Error('unknown meta: ' + entry.type)
}
}
abort (msg, error) {
this[ABORTED] = true
this.warn(msg, error)
this.emit('abort', error)
this.emit('error', error)
}
write (chunk) {
if (this[ABORTED])
return
// first write, might be gzipped
if (this[UNZIP] === null && chunk) {
if (this[BUFFER]) {
chunk = Buffer.concat([this[BUFFER], chunk])
this[BUFFER] = null
}
if (chunk.length < gzipHeader.length) {
this[BUFFER] = chunk
return true
}
for (let i = 0; this[UNZIP] === null && i < gzipHeader.length; i++) {
if (chunk[i] !== gzipHeader[i])
this[UNZIP] = false
}
if (this[UNZIP] === null) {
const ended = this[ENDED]
this[ENDED] = false
this[UNZIP] = new zlib.Unzip()
this[UNZIP].on('data', chunk => this[CONSUMECHUNK](chunk))
this[UNZIP].on('error', er =>
this.abort(er.message, er))
this[UNZIP].on('end', _ => {
this[ENDED] = true
this[CONSUMECHUNK]()
})
this[WRITING] = true
const ret = this[UNZIP][ended ? 'end' : 'write' ](chunk)
this[WRITING] = false
return ret
}
}
this[WRITING] = true
if (this[UNZIP])
this[UNZIP].write(chunk)
else
this[CONSUMECHUNK](chunk)
this[WRITING] = false
// return false if there's a queue, or if the current entry isn't flowing
const ret =
this[QUEUE].length ? false :
this[READENTRY] ? this[READENTRY].flowing :
true
    // ret false with an empty queue means the READENTRY itself is clogged
if (!ret && !this[QUEUE].length)
this[READENTRY].once('drain', _ => this.emit('drain'))
return ret
}
[BUFFERCONCAT] (c) {
if (c && !this[ABORTED])
this[BUFFER] = this[BUFFER] ? Buffer.concat([this[BUFFER], c]) : c
}
[MAYBEEND] () {
if (this[ENDED] &&
!this[EMITTEDEND] &&
!this[ABORTED] &&
!this[CONSUMING]) {
this[EMITTEDEND] = true
const entry = this[WRITEENTRY]
if (entry && entry.blockRemain) {
const have = this[BUFFER] ? this[BUFFER].length : 0
this.warn('Truncated input (needed ' + entry.blockRemain +
' more bytes, only ' + have + ' available)', entry)
if (this[BUFFER])
entry.write(this[BUFFER])
entry.end()
}
this[EMIT](DONE)
}
}
[CONSUMECHUNK] (chunk) {
if (this[CONSUMING]) {
this[BUFFERCONCAT](chunk)
} else if (!chunk && !this[BUFFER]) {
this[MAYBEEND]()
} else {
this[CONSUMING] = true
if (this[BUFFER]) {
this[BUFFERCONCAT](chunk)
const c = this[BUFFER]
this[BUFFER] = null
this[CONSUMECHUNKSUB](c)
} else {
this[CONSUMECHUNKSUB](chunk)
}
while (this[BUFFER] && this[BUFFER].length >= 512 && !this[ABORTED]) {
const c = this[BUFFER]
this[BUFFER] = null
this[CONSUMECHUNKSUB](c)
}
this[CONSUMING] = false
}
if (!this[BUFFER] || this[ENDED])
this[MAYBEEND]()
}
[CONSUMECHUNKSUB] (chunk) {
// we know that we are in CONSUMING mode, so anything written goes into
// the buffer. Advance the position and put any remainder in the buffer.
let position = 0
let length = chunk.length
while (position + 512 <= length && !this[ABORTED]) {
switch (this[STATE]) {
case 'begin':
this[CONSUMEHEADER](chunk, position)
position += 512
break
case 'ignore':
case 'body':
position += this[CONSUMEBODY](chunk, position)
break
case 'meta':
position += this[CONSUMEMETA](chunk, position)
break
/* istanbul ignore next */
default:
throw new Error('invalid state: ' + this[STATE])
}
}
if (position < length) {
if (this[BUFFER])
this[BUFFER] = Buffer.concat([chunk.slice(position), this[BUFFER]])
else
this[BUFFER] = chunk.slice(position)
}
}
end (chunk) {
if (!this[ABORTED]) {
if (this[UNZIP])
this[UNZIP].end(chunk)
else {
this[ENDED] = true
this.write(chunk)
}
}
}
})
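// Example (illustrative, not part of the original source): piping a raw or
// gzipped tar stream into the Parser above and reacting to each entry.
// fs is not required by this module, and the filename is hypothetical.
//
//   const fs = require('fs')
//   const Parser = module.exports
//   const p = new Parser({ onentry: entry => {
//     console.log(entry.path, entry.size)
//     entry.resume()   // drain entries whose bodies we don't care about
//   } })
//   fs.createReadStream('archive.tar.gz').pipe(p)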
'use strict'
const Buffer = require('./buffer.js')
const Header = require('./header.js')
const path = require('path')
class Pax {
constructor (obj, global) {
this.atime = obj.atime || null
this.charset = obj.charset || null
this.comment = obj.comment || null
this.ctime = obj.ctime || null
this.gid = obj.gid || null
this.gname = obj.gname || null
this.linkpath = obj.linkpath || null
this.mtime = obj.mtime || null
this.path = obj.path || null
this.size = obj.size || null
this.uid = obj.uid || null
this.uname = obj.uname || null
this.dev = obj.dev || null
this.ino = obj.ino || null
this.nlink = obj.nlink || null
this.global = global || false
}
encode () {
const body = this.encodeBody()
if (body === '')
return null
const bodyLen = Buffer.byteLength(body)
// round up to 512 bytes
// add 512 for header
const bufLen = 512 * Math.ceil(1 + bodyLen / 512)
const buf = Buffer.allocUnsafe(bufLen)
// 0-fill the header section, it might not hit every field
for (let i = 0; i < 512; i++) {
buf[i] = 0
}
new Header({
// XXX split the path
// then the path should be PaxHeader + basename, but less than 99,
// prepend with the dirname
path: ('PaxHeader/' + path.basename(this.path)).slice(0, 99),
mode: this.mode || 0o644,
uid: this.uid || null,
gid: this.gid || null,
size: bodyLen,
mtime: this.mtime || null,
type: this.global ? 'GlobalExtendedHeader' : 'ExtendedHeader',
linkpath: '',
uname: this.uname || '',
gname: this.gname || '',
devmaj: 0,
devmin: 0,
atime: this.atime || null,
ctime: this.ctime || null
}).encode(buf)
buf.write(body, 512, bodyLen, 'utf8')
// null pad after the body
for (let i = bodyLen + 512; i < buf.length; i++) {
buf[i] = 0
}
return buf
}
encodeBody () {
return (
this.encodeField('path') +
this.encodeField('ctime') +
this.encodeField('atime') +
this.encodeField('dev') +
this.encodeField('ino') +
this.encodeField('nlink') +
this.encodeField('charset') +
this.encodeField('comment') +
this.encodeField('gid') +
this.encodeField('gname') +
this.encodeField('linkpath') +
this.encodeField('mtime') +
this.encodeField('size') +
this.encodeField('uid') +
this.encodeField('uname')
)
}
encodeField (field) {
if (this[field] === null || this[field] === undefined)
return ''
const v = this[field] instanceof Date ? this[field].getTime() / 1000
: this[field]
const s = ' ' +
(field === 'dev' || field === 'ino' || field === 'nlink'
? 'SCHILY.' : '') +
field + '=' + v + '\n'
const byteLen = Buffer.byteLength(s)
    // The length prefix counts its own digits too: a 9-byte body plus a
    // 1-digit length would be 10 bytes, but 10 needs 2 digits, so the record
    // ends up 11 bytes long with a prefix of '11'.
let digits = Math.floor(Math.log(byteLen) / Math.log(10)) + 1
if (byteLen + digits >= Math.pow(10, digits))
digits += 1
const len = digits + byteLen
return len + s
}
}
Pax.parse = (string, ex, g) => new Pax(merge(parseKV(string), ex), g)
const merge = (a, b) =>
b ? Object.keys(a).reduce((s, k) => (s[k] = a[k], s), b) : a
const parseKV = string =>
string
.replace(/\n$/, '')
.split('\n')
.reduce(parseKVLine, Object.create(null))
const parseKVLine = (set, line) => {
const n = parseInt(line, 10)
// XXX Values with \n in them will fail this.
// Refactor to not be a naive line-by-line parse.
if (n !== Buffer.byteLength(line) + 1)
return set
line = line.substr((n + ' ').length)
const kv = line.split('=')
const k = kv.shift().replace(/^SCHILY\.(dev|ino|nlink)/, '$1')
if (!k)
return set
const v = kv.join('=')
set[k] = /^([A-Z]+\.)?([mac]|birth|creation)time$/.test(k)
? new Date(v * 1000)
: /^[0-9]+$/.test(v) ? +v
: v
return set
}
module.exports = Pax
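// Example (illustrative, not part of the original source): the length prefix
// of a pax record counts its own digits. The body ' path=foo\n' is 10 bytes,
// its length takes 2 digits, so the record is 12 bytes: '12 path=foo\n'.
//
//   new Pax({ path: 'foo' }).encodeBody()          // => '12 path=foo\n'
//   Pax.parse('12 path=foo\n', null, false).path   // => 'foo'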
'use strict'
const types = require('./types.js')
const MiniPass = require('minipass')
const SLURP = Symbol('slurp')
module.exports = class ReadEntry extends MiniPass {
constructor (header, ex, gex) {
super()
this.extended = ex
this.globalExtended = gex
this.header = header
this.startBlockSize = 512 * Math.ceil(header.size / 512)
this.blockRemain = this.startBlockSize
this.remain = header.size
this.type = header.type
this.meta = false
this.ignore = false
switch (this.type) {
case 'File':
case 'OldFile':
case 'Link':
case 'SymbolicLink':
case 'CharacterDevice':
case 'BlockDevice':
case 'Directory':
case 'FIFO':
case 'ContiguousFile':
case 'GNUDumpDir':
break
case 'NextFileHasLongLinkpath':
case 'NextFileHasLongPath':
case 'OldGnuLongPath':
case 'GlobalExtendedHeader':
case 'ExtendedHeader':
case 'OldExtendedHeader':
this.meta = true
break
// NOTE: gnutar and bsdtar treat unrecognized types as 'File'
// it may be worth doing the same, but with a warning.
default:
this.ignore = true
}
this.path = header.path
this.mode = header.mode
if (this.mode)
this.mode = this.mode & 0o7777
this.uid = header.uid
this.gid = header.gid
this.uname = header.uname
this.gname = header.gname
this.size = header.size
this.mtime = header.mtime
this.atime = header.atime
this.ctime = header.ctime
this.linkpath = header.linkpath
this.uname = header.uname
this.gname = header.gname
if (ex) this[SLURP](ex)
if (gex) this[SLURP](gex, true)
}
write (data) {
const writeLen = data.length
if (writeLen > this.blockRemain)
throw new Error('writing more to entry than is appropriate')
const r = this.remain
const br = this.blockRemain
this.remain = Math.max(0, r - writeLen)
this.blockRemain = Math.max(0, br - writeLen)
if (this.ignore)
return true
if (r >= writeLen)
return super.write(data)
// r < writeLen
return super.write(data.slice(0, r))
}
[SLURP] (ex, global) {
for (let k in ex) {
// we slurp in everything except for the path attribute in
// a global extended header, because that's weird.
if (ex[k] !== null && ex[k] !== undefined &&
!(global && k === 'path'))
this[k] = ex[k]
}
}
}
'use strict'
const Buffer = require('./buffer.js')
// tar -r
const hlo = require('./high-level-opt.js')
const Pack = require('./pack.js')
const Parse = require('./parse.js')
const fs = require('fs')
const fsm = require('fs-minipass')
const t = require('./list.js')
const path = require('path')
// starting at the head of the file, read a Header
// If the checksum is invalid, that's our position to start writing.
// If it is valid, jump forward by the entry's size (rounded up to 512)
// and try again.
// Write the new Pack stream starting there.
const Header = require('./header.js')
const r = module.exports = (opt_, files, cb) => {
const opt = hlo(opt_)
if (!opt.file)
throw new TypeError('file is required')
if (opt.gzip)
throw new TypeError('cannot append to compressed archives')
if (!files || !Array.isArray(files) || !files.length)
throw new TypeError('no files or directories specified')
files = Array.from(files)
return opt.sync ? replaceSync(opt, files)
: replace(opt, files, cb)
}
const replaceSync = (opt, files) => {
const p = new Pack.Sync(opt)
let threw = true
let fd
let position
try {
try {
fd = fs.openSync(opt.file, 'r+')
} catch (er) {
if (er.code === 'ENOENT')
fd = fs.openSync(opt.file, 'w+')
else
throw er
}
const st = fs.fstatSync(fd)
const headBuf = Buffer.alloc(512)
POSITION: for (position = 0; position < st.size; position += 512) {
for (let bufPos = 0, bytes = 0; bufPos < 512; bufPos += bytes) {
bytes = fs.readSync(
fd, headBuf, bufPos, headBuf.length - bufPos, position + bufPos
)
if (position === 0 && headBuf[0] === 0x1f && headBuf[1] === 0x8b)
throw new Error('cannot append to compressed archives')
if (!bytes)
break POSITION
}
let h = new Header(headBuf)
if (!h.cksumValid)
break
let entryBlockSize = 512 * Math.ceil(h.size / 512)
if (position + entryBlockSize + 512 > st.size)
break
// the 512 for the header we just parsed will be added as well
// also jump ahead all the blocks for the body
position += entryBlockSize
if (opt.mtimeCache)
opt.mtimeCache.set(h.path, h.mtime)
}
threw = false
streamSync(opt, p, position, fd, files)
} finally {
if (threw)
try { fs.closeSync(fd) } catch (er) {}
}
}
const streamSync = (opt, p, position, fd, files) => {
const stream = new fsm.WriteStreamSync(opt.file, {
fd: fd,
start: position
})
p.pipe(stream)
addFilesSync(p, files)
}
const replace = (opt, files, cb) => {
files = Array.from(files)
const p = new Pack(opt)
const getPos = (fd, size, cb_) => {
const cb = (er, pos) => {
if (er)
fs.close(fd, _ => cb_(er))
else
cb_(null, pos)
}
let position = 0
if (size === 0)
return cb(null, 0)
let bufPos = 0
const headBuf = Buffer.alloc(512)
const onread = (er, bytes) => {
if (er)
return cb(er)
bufPos += bytes
if (bufPos < 512 && bytes)
return fs.read(
fd, headBuf, bufPos, headBuf.length - bufPos,
position + bufPos, onread
)
if (position === 0 && headBuf[0] === 0x1f && headBuf[1] === 0x8b)
return cb(new Error('cannot append to compressed archives'))
// truncated header
if (bufPos < 512)
return cb(null, position)
const h = new Header(headBuf)
if (!h.cksumValid)
return cb(null, position)
const entryBlockSize = 512 * Math.ceil(h.size / 512)
if (position + entryBlockSize + 512 > size)
return cb(null, position)
position += entryBlockSize + 512
if (position >= size)
return cb(null, position)
if (opt.mtimeCache)
opt.mtimeCache.set(h.path, h.mtime)
bufPos = 0
fs.read(fd, headBuf, 0, 512, position, onread)
}
fs.read(fd, headBuf, 0, 512, position, onread)
}
const promise = new Promise((resolve, reject) => {
p.on('error', reject)
let flag = 'r+'
const onopen = (er, fd) => {
if (er && er.code === 'ENOENT' && flag === 'r+') {
flag = 'w+'
return fs.open(opt.file, flag, onopen)
}
if (er)
return reject(er)
fs.fstat(fd, (er, st) => {
if (er)
return reject(er)
getPos(fd, st.size, (er, position) => {
if (er)
return reject(er)
const stream = new fsm.WriteStream(opt.file, {
fd: fd,
start: position
})
p.pipe(stream)
stream.on('error', reject)
stream.on('close', resolve)
addFilesAsync(p, files)
})
})
}
fs.open(opt.file, flag, onopen)
})
return cb ? promise.then(cb, cb) : promise
}
const addFilesSync = (p, files) => {
files.forEach(file => {
if (file.charAt(0) === '@')
t({
file: path.resolve(p.cwd, file.substr(1)),
sync: true,
noResume: true,
onentry: entry => p.add(entry)
})
else
p.add(file)
})
p.end()
}
const addFilesAsync = (p, files) => {
while (files.length) {
const file = files.shift()
if (file.charAt(0) === '@')
return t({
file: path.resolve(p.cwd, file.substr(1)),
noResume: true,
onentry: entry => p.add(entry)
}).then(_ => addFilesAsync(p, files))
else
p.add(file)
}
p.end()
}
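// Example (illustrative, not part of the original source): appending files
// to an existing uncompressed archive, as `tar -r` would. The paths are
// hypothetical.
//
//   r({ file: 'backup.tar' }, ['notes.txt', 'logs/'], er => {
//     if (er) throw er
//   })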
'use strict'
// map types from key to human-friendly name
exports.name = new Map([
['0', 'File'],
// same as File
['', 'OldFile'],
['1', 'Link'],
['2', 'SymbolicLink'],
// Devices and FIFOs aren't fully supported
// they are parsed, but skipped when unpacking
['3', 'CharacterDevice'],
['4', 'BlockDevice'],
['5', 'Directory'],
['6', 'FIFO'],
// same as File
['7', 'ContiguousFile'],
// pax headers
['g', 'GlobalExtendedHeader'],
['x', 'ExtendedHeader'],
// vendor-specific stuff
// skip
['A', 'SolarisACL'],
// like 5, but with data, which should be skipped
['D', 'GNUDumpDir'],
// metadata only, skip
['I', 'Inode'],
// data = link path of next file
['K', 'NextFileHasLongLinkpath'],
// data = path of next file
['L', 'NextFileHasLongPath'],
// skip
['M', 'ContinuationFile'],
// like L
['N', 'OldGnuLongPath'],
// skip
['S', 'SparseFile'],
// skip
['V', 'TapeVolumeHeader'],
// like x
['X', 'OldExtendedHeader']
])
// map the other direction
exports.code = new Map(Array.from(exports.name).map(kv => [kv[1], kv[0]]))
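// Example (illustrative, not part of the original source): the two maps give
// the flag <-> name mapping in both directions.
//
//   exports.name.get('5')              // => 'Directory'
//   exports.code.get('SymbolicLink')   // => '2'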
'use strict'
const assert = require('assert')
const EE = require('events').EventEmitter
const Parser = require('./parse.js')
const fs = require('fs')
const fsm = require('fs-minipass')
const path = require('path')
const mkdir = require('./mkdir.js')
const mkdirSync = mkdir.sync
const wc = require('./winchars.js')
const ONENTRY = Symbol('onEntry')
const CHECKFS = Symbol('checkFs')
const ISREUSABLE = Symbol('isReusable')
const MAKEFS = Symbol('makeFs')
const FILE = Symbol('file')
const DIRECTORY = Symbol('directory')
const LINK = Symbol('link')
const SYMLINK = Symbol('symlink')
const HARDLINK = Symbol('hardlink')
const UNSUPPORTED = Symbol('unsupported')
const UNKNOWN = Symbol('unknown')
const CHECKPATH = Symbol('checkPath')
const MKDIR = Symbol('mkdir')
const ONERROR = Symbol('onError')
const PENDING = Symbol('pending')
const PEND = Symbol('pend')
const UNPEND = Symbol('unpend')
const ENDED = Symbol('ended')
const MAYBECLOSE = Symbol('maybeClose')
const SKIP = Symbol('skip')
const DOCHOWN = Symbol('doChown')
const UID = Symbol('uid')
const GID = Symbol('gid')
const crypto = require('crypto')
// Unlinks on Windows are not atomic.
//
// This means that if you have a file entry, followed by another
// file entry with an identical name, and you cannot re-use the file
// (because it's a hardlink, or because unlink:true is set, or it's
// Windows, which does not have useful nlink values), then the unlink
// will be committed to the disk AFTER the new file has been written
// over the old one, deleting the new file.
//
// To work around this, on Windows systems, we rename the file and then
// delete the renamed file. It's a sloppy kludge, but frankly, I do not
// know of a better way to do this, given windows' non-atomic unlink
// semantics.
//
// See: https://github.com/npm/node-tar/issues/183
/* istanbul ignore next */
const unlinkFile = (path, cb) => {
if (process.platform !== 'win32')
return fs.unlink(path, cb)
const name = path + '.DELETE.' + crypto.randomBytes(16).toString('hex')
fs.rename(path, name, er => {
if (er)
return cb(er)
fs.unlink(name, cb)
})
}
/* istanbul ignore next */
const unlinkFileSync = path => {
if (process.platform !== 'win32')
return fs.unlinkSync(path)
const name = path + '.DELETE.' + crypto.randomBytes(16).toString('hex')
fs.renameSync(path, name)
fs.unlinkSync(name)
}
// first valid uint32 wins, e.g. uint32(this.gid, entry.gid, this.processGid)
const uint32 = (a, b, c) =>
a === a >>> 0 ? a
: b === b >>> 0 ? b
: c
class Unpack extends Parser {
constructor (opt) {
if (!opt)
opt = {}
opt.ondone = _ => {
this[ENDED] = true
this[MAYBECLOSE]()
}
super(opt)
this.transform = typeof opt.transform === 'function' ? opt.transform : null
this.writable = true
this.readable = false
this[PENDING] = 0
this[ENDED] = false
this.dirCache = opt.dirCache || new Map()
if (typeof opt.uid === 'number' || typeof opt.gid === 'number') {
// need both or neither
if (typeof opt.uid !== 'number' || typeof opt.gid !== 'number')
throw new TypeError('cannot set owner without number uid and gid')
if (opt.preserveOwner)
throw new TypeError(
'cannot preserve owner in archive and also set owner explicitly')
this.uid = opt.uid
this.gid = opt.gid
this.setOwner = true
} else {
this.uid = null
this.gid = null
this.setOwner = false
}
// default true for root
if (opt.preserveOwner === undefined && typeof opt.uid !== 'number')
this.preserveOwner = process.getuid && process.getuid() === 0
else
this.preserveOwner = !!opt.preserveOwner
this.processUid = (this.preserveOwner || this.setOwner) && process.getuid ?
process.getuid() : null
this.processGid = (this.preserveOwner || this.setOwner) && process.getgid ?
process.getgid() : null
// mostly just for testing, but useful in some cases.
// Forcibly trigger a chown on every entry, no matter what
this.forceChown = opt.forceChown === true
    // turn |<>?: chars in filenames into 0xf000-higher encoded forms
this.win32 = !!opt.win32 || process.platform === 'win32'
// do not unpack over files that are newer than what's in the archive
this.newer = !!opt.newer
// do not unpack over ANY files
this.keep = !!opt.keep
// do not set mtime/atime of extracted entries
this.noMtime = !!opt.noMtime
// allow .., absolute path entries, and unpacking through symlinks
// without this, warn and skip .., relativize absolutes, and error
// on symlinks in extraction path
this.preservePaths = !!opt.preservePaths
// unlink files and links before writing. This breaks existing hard
// links, and removes symlink directories rather than erroring
this.unlink = !!opt.unlink
this.cwd = path.resolve(opt.cwd || process.cwd())
this.strip = +opt.strip || 0
this.processUmask = process.umask()
this.umask = typeof opt.umask === 'number' ? opt.umask : this.processUmask
// default mode for dirs created as parents
this.dmode = opt.dmode || (0o0777 & (~this.umask))
this.fmode = opt.fmode || (0o0666 & (~this.umask))
this.on('entry', entry => this[ONENTRY](entry))
}
[MAYBECLOSE] () {
if (this[ENDED] && this[PENDING] === 0) {
this.emit('prefinish')
this.emit('finish')
this.emit('end')
this.emit('close')
}
}
[CHECKPATH] (entry) {
if (this.strip) {
const parts = entry.path.split(/\/|\\/)
if (parts.length < this.strip)
return false
entry.path = parts.slice(this.strip).join('/')
}
if (!this.preservePaths) {
const p = entry.path
if (p.match(/(^|\/|\\)\.\.(\\|\/|$)/)) {
this.warn('path contains \'..\'', p)
return false
}
// absolutes on posix are also absolutes on win32
// so we only need to test this one to get both
if (path.win32.isAbsolute(p)) {
const parsed = path.win32.parse(p)
this.warn('stripping ' + parsed.root + ' from absolute path', p)
entry.path = p.substr(parsed.root.length)
}
}
// only encode : chars that aren't drive letter indicators
if (this.win32) {
const parsed = path.win32.parse(entry.path)
entry.path = parsed.root === '' ? wc.encode(entry.path)
: parsed.root + wc.encode(entry.path.substr(parsed.root.length))
}
if (path.isAbsolute(entry.path))
entry.absolute = entry.path
else
entry.absolute = path.resolve(this.cwd, entry.path)
return true
}
[ONENTRY] (entry) {
if (!this[CHECKPATH](entry))
return entry.resume()
assert.equal(typeof entry.absolute, 'string')
switch (entry.type) {
case 'Directory':
case 'GNUDumpDir':
if (entry.mode)
entry.mode = entry.mode | 0o700
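      // fall through: directory entries also go through [CHECKFS]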
case 'File':
case 'OldFile':
case 'ContiguousFile':
case 'Link':
case 'SymbolicLink':
return this[CHECKFS](entry)
case 'CharacterDevice':
case 'BlockDevice':
case 'FIFO':
return this[UNSUPPORTED](entry)
}
}
[ONERROR] (er, entry) {
// Cwd has to exist, or else nothing works. That's serious.
// Other errors are warnings, which raise the error in strict
// mode, but otherwise continue on.
if (er.name === 'CwdError')
this.emit('error', er)
else {
this.warn(er.message, er)
this[UNPEND]()
entry.resume()
}
}
[MKDIR] (dir, mode, cb) {
mkdir(dir, {
uid: this.uid,
gid: this.gid,
processUid: this.processUid,
processGid: this.processGid,
umask: this.processUmask,
preserve: this.preservePaths,
unlink: this.unlink,
cache: this.dirCache,
cwd: this.cwd,
mode: mode
}, cb)
}
[DOCHOWN] (entry) {
// in preserve owner mode, chown if the entry doesn't match process
// in set owner mode, chown if setting doesn't match process
return this.forceChown ||
this.preserveOwner &&
( typeof entry.uid === 'number' && entry.uid !== this.processUid ||
typeof entry.gid === 'number' && entry.gid !== this.processGid )
||
( typeof this.uid === 'number' && this.uid !== this.processUid ||
typeof this.gid === 'number' && this.gid !== this.processGid )
}
[UID] (entry) {
return uint32(this.uid, entry.uid, this.processUid)
}
[GID] (entry) {
return uint32(this.gid, entry.gid, this.processGid)
}
[FILE] (entry) {
const mode = entry.mode & 0o7777 || this.fmode
const stream = new fsm.WriteStream(entry.absolute, {
mode: mode,
autoClose: false
})
stream.on('error', er => this[ONERROR](er, entry))
let actions = 1
const done = er => {
if (er)
return this[ONERROR](er, entry)
if (--actions === 0)
fs.close(stream.fd, _ => this[UNPEND]())
}
stream.on('finish', _ => {
// if futimes fails, try utimes
// if utimes fails, fail with the original error
// same for fchown/chown
const abs = entry.absolute
const fd = stream.fd
if (entry.mtime && !this.noMtime) {
actions++
const atime = entry.atime || new Date()
const mtime = entry.mtime
fs.futimes(fd, atime, mtime, er =>
er ? fs.utimes(abs, atime, mtime, er2 => done(er2 && er))
: done())
}
if (this[DOCHOWN](entry)) {
actions++
const uid = this[UID](entry)
const gid = this[GID](entry)
fs.fchown(fd, uid, gid, er =>
er ? fs.chown(abs, uid, gid, er2 => done(er2 && er))
: done())
}
done()
})
const tx = this.transform ? this.transform(entry) || entry : entry
if (tx !== entry) {
tx.on('error', er => this[ONERROR](er, entry))
entry.pipe(tx)
}
tx.pipe(stream)
}
[DIRECTORY] (entry) {
const mode = entry.mode & 0o7777 || this.dmode
this[MKDIR](entry.absolute, mode, er => {
if (er)
return this[ONERROR](er, entry)
let actions = 1
const done = _ => {
if (--actions === 0) {
this[UNPEND]()
entry.resume()
}
}
if (entry.mtime && !this.noMtime) {
actions++
fs.utimes(entry.absolute, entry.atime || new Date(), entry.mtime, done)
}
if (this[DOCHOWN](entry)) {
actions++
fs.chown(entry.absolute, this[UID](entry), this[GID](entry), done)
}
done()
})
}
[UNSUPPORTED] (entry) {
this.warn('unsupported entry type: ' + entry.type, entry)
entry.resume()
}
[SYMLINK] (entry) {
this[LINK](entry, entry.linkpath, 'symlink')
}
[HARDLINK] (entry) {
this[LINK](entry, path.resolve(this.cwd, entry.linkpath), 'link')
}
[PEND] () {
this[PENDING]++
}
[UNPEND] () {
this[PENDING]--
this[MAYBECLOSE]()
}
[SKIP] (entry) {
this[UNPEND]()
entry.resume()
}
// Check if we can reuse an existing filesystem entry safely and
// overwrite it, rather than unlinking and recreating
// Windows doesn't report a useful nlink, so we just never reuse entries
[ISREUSABLE] (entry, st) {
return entry.type === 'File' &&
!this.unlink &&
st.isFile() &&
st.nlink <= 1 &&
process.platform !== 'win32'
}
// check if a thing is there, and if so, try to clobber it
[CHECKFS] (entry) {
this[PEND]()
this[MKDIR](path.dirname(entry.absolute), this.dmode, er => {
if (er)
return this[ONERROR](er, entry)
fs.lstat(entry.absolute, (er, st) => {
if (st && (this.keep || this.newer && st.mtime > entry.mtime))
this[SKIP](entry)
else if (er || this[ISREUSABLE](entry, st))
this[MAKEFS](null, entry)
else if (st.isDirectory()) {
if (entry.type === 'Directory') {
if (!entry.mode || (st.mode & 0o7777) === entry.mode)
this[MAKEFS](null, entry)
else
fs.chmod(entry.absolute, entry.mode, er => this[MAKEFS](er, entry))
} else
fs.rmdir(entry.absolute, er => this[MAKEFS](er, entry))
} else
unlinkFile(entry.absolute, er => this[MAKEFS](er, entry))
})
})
}
[MAKEFS] (er, entry) {
if (er)
return this[ONERROR](er, entry)
switch (entry.type) {
case 'File':
case 'OldFile':
case 'ContiguousFile':
return this[FILE](entry)
case 'Link':
return this[HARDLINK](entry)
case 'SymbolicLink':
return this[SYMLINK](entry)
case 'Directory':
case 'GNUDumpDir':
return this[DIRECTORY](entry)
}
}
[LINK] (entry, linkpath, link) {
// XXX: get the type ('file' or 'dir') for windows
fs[link](linkpath, entry.absolute, er => {
if (er)
return this[ONERROR](er, entry)
this[UNPEND]()
entry.resume()
})
}
}
class UnpackSync extends Unpack {
constructor (opt) {
super(opt)
}
[CHECKFS] (entry) {
const er = this[MKDIR](path.dirname(entry.absolute), this.dmode)
if (er)
return this[ONERROR](er, entry)
try {
const st = fs.lstatSync(entry.absolute)
if (this.keep || this.newer && st.mtime > entry.mtime)
return this[SKIP](entry)
else if (this[ISREUSABLE](entry, st))
return this[MAKEFS](null, entry)
else {
try {
if (st.isDirectory()) {
if (entry.type === 'Directory') {
if (entry.mode && (st.mode & 0o7777) !== entry.mode)
fs.chmodSync(entry.absolute, entry.mode)
} else
fs.rmdirSync(entry.absolute)
} else
unlinkFileSync(entry.absolute)
return this[MAKEFS](null, entry)
} catch (er) {
return this[ONERROR](er, entry)
}
}
} catch (er) {
return this[MAKEFS](null, entry)
}
}
[FILE] (entry) {
const mode = entry.mode & 0o7777 || this.fmode
const oner = er => {
try { fs.closeSync(fd) } catch (_) {}
if (er)
this[ONERROR](er, entry)
}
let stream
let fd
try {
fd = fs.openSync(entry.absolute, 'w', mode)
} catch (er) {
return oner(er)
}
const tx = this.transform ? this.transform(entry) || entry : entry
if (tx !== entry) {
tx.on('error', er => this[ONERROR](er, entry))
entry.pipe(tx)
}
tx.on('data', chunk => {
try {
fs.writeSync(fd, chunk, 0, chunk.length)
} catch (er) {
oner(er)
}
})
tx.on('end', _ => {
let er = null
// try both, falling futimes back to utimes
// if either fails, handle the first error
if (entry.mtime && !this.noMtime) {
const atime = entry.atime || new Date()
const mtime = entry.mtime
try {
fs.futimesSync(fd, atime, mtime)
} catch (futimeser) {
try {
fs.utimesSync(entry.absolute, atime, mtime)
} catch (utimeser) {
er = futimeser
}
}
}
if (this[DOCHOWN](entry)) {
const uid = this[UID](entry)
const gid = this[GID](entry)
try {
fs.fchownSync(fd, uid, gid)
} catch (fchowner) {
try {
fs.chownSync(entry.absolute, uid, gid)
} catch (chowner) {
er = er || fchowner
}
}
}
oner(er)
})
}
[DIRECTORY] (entry) {
const mode = entry.mode & 0o7777 || this.dmode
const er = this[MKDIR](entry.absolute, mode)
if (er)
return this[ONERROR](er, entry)
if (entry.mtime && !this.noMtime) {
try {
fs.utimesSync(entry.absolute, entry.atime || new Date(), entry.mtime)
} catch (er) {}
}
if (this[DOCHOWN](entry)) {
try {
fs.chownSync(entry.absolute, this[UID](entry), this[GID](entry))
} catch (er) {}
}
entry.resume()
}
[MKDIR] (dir, mode) {
try {
return mkdir.sync(dir, {
uid: this.uid,
gid: this.gid,
processUid: this.processUid,
processGid: this.processGid,
umask: this.processUmask,
preserve: this.preservePaths,
unlink: this.unlink,
cache: this.dirCache,
cwd: this.cwd,
mode: mode
})
} catch (er) {
return er
}
}
[LINK] (entry, linkpath, link) {
try {
fs[link + 'Sync'](linkpath, entry.absolute)
entry.resume()
} catch (er) {
return this[ONERROR](er, entry)
}
}
}
Unpack.Sync = UnpackSync
module.exports = Unpack
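// Example (illustrative, not part of the original source): extracting a
// tarball stream into an existing directory with the Unpack parser above.
// The paths are hypothetical; note that cwd must already exist.
//
//   const unpack = new Unpack({ cwd: '/tmp/extract-here', strip: 1 })
//   fs.createReadStream('release.tar.gz')
//     .pipe(unpack)
//     .on('close', () => console.log('extraction finished'))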
'use strict'
// tar -u
const hlo = require('./high-level-opt.js')
const r = require('./replace.js')
// just call tar.r with the filter and mtimeCache
const u = module.exports = (opt_, files, cb) => {
const opt = hlo(opt_)
if (!opt.file)
throw new TypeError('file is required')
if (opt.gzip)
throw new TypeError('cannot append to compressed archives')
if (!files || !Array.isArray(files) || !files.length)
throw new TypeError('no files or directories specified')
files = Array.from(files)
mtimeFilter(opt)
return r(opt, files, cb)
}
const mtimeFilter = opt => {
const filter = opt.filter
if (!opt.mtimeCache)
opt.mtimeCache = new Map()
opt.filter = filter ? (path, stat) =>
filter(path, stat) && !(opt.mtimeCache.get(path) > stat.mtime)
: (path, stat) => !(opt.mtimeCache.get(path) > stat.mtime)
}
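// Example (illustrative, not part of the original source): a `tar -u` style
// update, which only appends files whose mtime is newer than the copy
// already present in the archive. The archive name is hypothetical.
//
//   u({ file: 'backup.tar' }, ['notes.txt'])
//     .then(() => console.log('archive updated'))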
'use strict'
module.exports = Base => class extends Base {
warn (msg, data) {
if (!this.strict)
this.emit('warn', msg, data)
else if (data instanceof Error)
this.emit('error', data)
else {
const er = new Error(msg)
er.data = data
this.emit('error', er)
}
}
}
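// Example (illustrative, not part of the original source): mixing warn()
// into an EventEmitter subclass. With strict falsy, warnings are emitted as
// 'warn' events instead of errors.
//
//   const EE = require('events')
//   const Noisy = module.exports(class extends EE {})
//   const n = new Noisy()
//   n.on('warn', (msg, data) => console.log('warn:', msg))
//   n.warn('something looks off', { detail: 42 })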
'use strict'
// When writing files on Windows, translate the characters to their
// 0xf000 higher-encoded versions.
const raw = [
'|',
'<',
'>',
'?',
':'
]
const win = raw.map(char =>
String.fromCharCode(0xf000 + char.charCodeAt(0)))
const toWin = new Map(raw.map((char, i) => [char, win[i]]))
const toRaw = new Map(win.map((char, i) => [char, raw[i]]))
module.exports = {
encode: s => raw.reduce((s, c) => s.split(c).join(toWin.get(c)), s),
decode: s => win.reduce((s, c) => s.split(c).join(toRaw.get(c)), s)
}
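// Example (illustrative, not part of the original source): round-tripping
// Windows-reserved characters through the 0xf000-offset encoding.
//
//   module.exports.encode('logs<old>.txt')             // => 'logs\uf03cold\uf03e.txt'
//   module.exports.decode('logs\uf03cold\uf03e.txt')   // => 'logs<old>.txt'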
'use strict'
const Buffer = require('./buffer.js')
const MiniPass = require('minipass')
const Pax = require('./pax.js')
const Header = require('./header.js')
const ReadEntry = require('./read-entry.js')
const fs = require('fs')
const path = require('path')
const types = require('./types.js')
const maxReadSize = 16 * 1024 * 1024
const PROCESS = Symbol('process')
const FILE = Symbol('file')
const DIRECTORY = Symbol('directory')
const SYMLINK = Symbol('symlink')
const HARDLINK = Symbol('hardlink')
const HEADER = Symbol('header')
const READ = Symbol('read')
const LSTAT = Symbol('lstat')
const ONLSTAT = Symbol('onlstat')
const ONREAD = Symbol('onread')
const ONREADLINK = Symbol('onreadlink')
const OPENFILE = Symbol('openfile')
const ONOPENFILE = Symbol('onopenfile')
const CLOSE = Symbol('close')
const MODE = Symbol('mode')
const warner = require('./warn-mixin.js')
const winchars = require('./winchars.js')
const modeFix = require('./mode-fix.js')
const WriteEntry = warner(class WriteEntry extends MiniPass {
constructor (p, opt) {
opt = opt || {}
super(opt)
if (typeof p !== 'string')
throw new TypeError('path is required')
this.path = p
// suppress atime, ctime, uid, gid, uname, gname
this.portable = !!opt.portable
// until node has builtin pwnam functions, this'll have to do
this.myuid = process.getuid && process.getuid()
this.myuser = process.env.USER || ''
this.maxReadSize = opt.maxReadSize || maxReadSize
this.linkCache = opt.linkCache || new Map()
this.statCache = opt.statCache || new Map()
this.preservePaths = !!opt.preservePaths
this.cwd = opt.cwd || process.cwd()
this.strict = !!opt.strict
this.noPax = !!opt.noPax
this.noMtime = !!opt.noMtime
this.mtime = opt.mtime || null
if (typeof opt.onwarn === 'function')
this.on('warn', opt.onwarn)
if (!this.preservePaths && path.win32.isAbsolute(p)) {
// absolutes on posix are also absolutes on win32
// so we only need to test this one to get both
const parsed = path.win32.parse(p)
this.warn('stripping ' + parsed.root + ' from absolute path', p)
this.path = p.substr(parsed.root.length)
}
this.win32 = !!opt.win32 || process.platform === 'win32'
if (this.win32) {
this.path = winchars.decode(this.path.replace(/\\/g, '/'))
p = p.replace(/\\/g, '/')
}
this.absolute = opt.absolute || path.resolve(this.cwd, p)
if (this.path === '')
this.path = './'
if (this.statCache.has(this.absolute))
this[ONLSTAT](this.statCache.get(this.absolute))
else
this[LSTAT]()
}
[LSTAT] () {
fs.lstat(this.absolute, (er, stat) => {
if (er)
return this.emit('error', er)
this[ONLSTAT](stat)
})
}
[ONLSTAT] (stat) {
this.statCache.set(this.absolute, stat)
this.stat = stat
if (!stat.isFile())
stat.size = 0
this.type = getType(stat)
this.emit('stat', stat)
this[PROCESS]()
}
[PROCESS] () {
switch (this.type) {
case 'File': return this[FILE]()
case 'Directory': return this[DIRECTORY]()
case 'SymbolicLink': return this[SYMLINK]()
// unsupported types are ignored.
default: return this.end()
}
}
[MODE] (mode) {
return modeFix(mode, this.type === 'Directory')
}
[HEADER] () {
if (this.type === 'Directory' && this.portable)
this.noMtime = true
this.header = new Header({
path: this.path,
linkpath: this.linkpath,
// only the permissions and setuid/setgid/sticky bitflags
// not the higher-order bits that specify file type
mode: this[MODE](this.stat.mode),
uid: this.portable ? null : this.stat.uid,
gid: this.portable ? null : this.stat.gid,
size: this.stat.size,
mtime: this.noMtime ? null : this.mtime || this.stat.mtime,
type: this.type,
uname: this.portable ? null :
this.stat.uid === this.myuid ? this.myuser : '',
atime: this.portable ? null : this.stat.atime,
ctime: this.portable ? null : this.stat.ctime
})
if (this.header.encode() && !this.noPax)
this.write(new Pax({
atime: this.portable ? null : this.header.atime,
ctime: this.portable ? null : this.header.ctime,
gid: this.portable ? null : this.header.gid,
mtime: this.noMtime ? null : this.mtime || this.header.mtime,
path: this.path,
linkpath: this.linkpath,
size: this.header.size,
uid: this.portable ? null : this.header.uid,
uname: this.portable ? null : this.header.uname,
dev: this.portable ? null : this.stat.dev,
ino: this.portable ? null : this.stat.ino,
nlink: this.portable ? null : this.stat.nlink
}).encode())
this.write(this.header.block)
}
[DIRECTORY] () {
if (this.path.substr(-1) !== '/')
this.path += '/'
this.stat.size = 0
this[HEADER]()
this.end()
}
[SYMLINK] () {
fs.readlink(this.absolute, (er, linkpath) => {
if (er)
return this.emit('error', er)
this[ONREADLINK](linkpath)
})
}
[ONREADLINK] (linkpath) {
this.linkpath = linkpath
this[HEADER]()
this.end()
}
[HARDLINK] (linkpath) {
this.type = 'Link'
this.linkpath = path.relative(this.cwd, linkpath)
this.stat.size = 0
this[HEADER]()
this.end()
}
[FILE] () {
if (this.stat.nlink > 1) {
const linkKey = this.stat.dev + ':' + this.stat.ino
if (this.linkCache.has(linkKey)) {
const linkpath = this.linkCache.get(linkKey)
if (linkpath.indexOf(this.cwd) === 0)
return this[HARDLINK](linkpath)
}
this.linkCache.set(linkKey, this.absolute)
}
this[HEADER]()
if (this.stat.size === 0)
return this.end()
this[OPENFILE]()
}
[OPENFILE] () {
fs.open(this.absolute, 'r', (er, fd) => {
if (er)
return this.emit('error', er)
this[ONOPENFILE](fd)
})
}
[ONOPENFILE] (fd) {
const blockLen = 512 * Math.ceil(this.stat.size / 512)
const bufLen = Math.min(blockLen, this.maxReadSize)
const buf = Buffer.allocUnsafe(bufLen)
this[READ](fd, buf, 0, buf.length, 0, this.stat.size, blockLen)
}
[READ] (fd, buf, offset, length, pos, remain, blockRemain) {
fs.read(fd, buf, offset, length, pos, (er, bytesRead) => {
if (er)
return this[CLOSE](fd, _ => this.emit('error', er))
this[ONREAD](fd, buf, offset, length, pos, remain, blockRemain, bytesRead)
})
}
[CLOSE] (fd, cb) {
fs.close(fd, cb)
}
[ONREAD] (fd, buf, offset, length, pos, remain, blockRemain, bytesRead) {
if (bytesRead <= 0 && remain > 0) {
const er = new Error('unexpected EOF')
er.path = this.absolute
er.syscall = 'read'
er.code = 'EOF'
this.emit('error', er)
}
// null out the rest of the buffer, if we could fit the block padding
if (bytesRead === remain) {
for (let i = bytesRead; i < length && bytesRead < blockRemain; i++) {
buf[i + offset] = 0
bytesRead ++
remain ++
}
}
const writeBuf = offset === 0 && bytesRead === buf.length ?
buf : buf.slice(offset, offset + bytesRead)
remain -= bytesRead
blockRemain -= bytesRead
pos += bytesRead
offset += bytesRead
this.write(writeBuf)
if (!remain) {
if (blockRemain)
this.write(Buffer.alloc(blockRemain))
this.end()
this[CLOSE](fd, _ => _)
return
}
if (offset >= length) {
buf = Buffer.allocUnsafe(length)
offset = 0
}
length = buf.length - offset
this[READ](fd, buf, offset, length, pos, remain, blockRemain)
}
})
class WriteEntrySync extends WriteEntry {
constructor (path, opt) {
super(path, opt)
}
[LSTAT] () {
this[ONLSTAT](fs.lstatSync(this.absolute))
}
[SYMLINK] () {
this[ONREADLINK](fs.readlinkSync(this.absolute))
}
[OPENFILE] () {
this[ONOPENFILE](fs.openSync(this.absolute, 'r'))
}
[READ] (fd, buf, offset, length, pos, remain, blockRemain) {
let threw = true
try {
const bytesRead = fs.readSync(fd, buf, offset, length, pos)
this[ONREAD](fd, buf, offset, length, pos, remain, blockRemain, bytesRead)
threw = false
} finally {
if (threw)
try { this[CLOSE](fd) } catch (er) {}
}
}
[CLOSE] (fd) {
fs.closeSync(fd)
}
}
const WriteEntryTar = warner(class WriteEntryTar extends MiniPass {
constructor (readEntry, opt) {
opt = opt || {}
super(opt)
this.preservePaths = !!opt.preservePaths
this.portable = !!opt.portable
this.strict = !!opt.strict
this.noPax = !!opt.noPax
this.noMtime = !!opt.noMtime
this.readEntry = readEntry
this.type = readEntry.type
if (this.type === 'Directory' && this.portable)
this.noMtime = true
this.path = readEntry.path
this.mode = this[MODE](readEntry.mode)
this.uid = this.portable ? null : readEntry.uid
this.gid = this.portable ? null : readEntry.gid
this.uname = this.portable ? null : readEntry.uname
this.gname = this.portable ? null : readEntry.gname
this.size = readEntry.size
this.mtime = this.noMtime ? null : opt.mtime || readEntry.mtime
this.atime = this.portable ? null : readEntry.atime
this.ctime = this.portable ? null : readEntry.ctime
this.linkpath = readEntry.linkpath
if (typeof opt.onwarn === 'function')
this.on('warn', opt.onwarn)
if (path.isAbsolute(this.path) && !this.preservePaths) {
const parsed = path.parse(this.path)
this.warn(
'stripping ' + parsed.root + ' from absolute path',
this.path
)
this.path = this.path.substr(parsed.root.length)
}
this.remain = readEntry.size
this.blockRemain = readEntry.startBlockSize
this.header = new Header({
path: this.path,
linkpath: this.linkpath,
// only the permissions and setuid/setgid/sticky bitflags
// not the higher-order bits that specify file type
mode: this.mode,
uid: this.portable ? null : this.uid,
gid: this.portable ? null : this.gid,
size: this.size,
mtime: this.noMtime ? null : this.mtime,
type: this.type,
uname: this.portable ? null : this.uname,
atime: this.portable ? null : this.atime,
ctime: this.portable ? null : this.ctime
})
if (this.header.encode() && !this.noPax)
super.write(new Pax({
atime: this.portable ? null : this.atime,
ctime: this.portable ? null : this.ctime,
gid: this.portable ? null : this.gid,
mtime: this.noMtime ? null : this.mtime,
path: this.path,
linkpath: this.linkpath,
size: this.size,
uid: this.portable ? null : this.uid,
uname: this.portable ? null : this.uname,
dev: this.portable ? null : this.readEntry.dev,
ino: this.portable ? null : this.readEntry.ino,
nlink: this.portable ? null : this.readEntry.nlink
}).encode())
super.write(this.header.block)
readEntry.pipe(this)
}
[MODE] (mode) {
return modeFix(mode, this.type === 'Directory')
}
write (data) {
const writeLen = data.length
if (writeLen > this.blockRemain)
throw new Error('writing more to entry than is appropriate')
this.blockRemain -= writeLen
return super.write(data)
}
end () {
if (this.blockRemain)
this.write(Buffer.alloc(this.blockRemain))
return super.end()
}
})
WriteEntry.Sync = WriteEntrySync
WriteEntry.Tar = WriteEntryTar
const getType = stat =>
stat.isFile() ? 'File'
: stat.isDirectory() ? 'Directory'
: stat.isSymbolicLink() ? 'SymbolicLink'
: 'Unsupported'
module.exports = WriteEntry
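The module above is node-tar's internal per-entry writer: given a path it lstats the file, emits a 512-byte header block (plus an optional pax extended header), then streams the file body padded to a 512-byte boundary. The following sketch is illustrative only and is not taken from the antSword source; it assumes node-tar's internal file layout (`tar/lib/write-entry.js` is not a documented public entry point) and uses only the constructor and stream events shown above.

```javascript
// Illustrative sketch: stream a single file as a raw tar entry using the
// WriteEntry class defined above (an internal node-tar module, assumed path).
const WriteEntry = require('tar/lib/write-entry.js')

const entry = new WriteEntry('package.json', { cwd: process.cwd() })
const chunks = []
entry.on('data', chunk => chunks.push(chunk))
entry.on('end', () => {
  // One 512-byte header block plus the file body padded to a 512-byte boundary.
  console.log('entry bytes:', Buffer.concat(chunks).length)
})
entry.on('error', err => console.error(err))
```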
# yallist
Yet Another Linked List
There are many doubly-linked list implementations like it, but this
one is mine.
For when an array would be too big, and a Map can't be iterated in
reverse order.
[![Build Status](https://travis-ci.org/isaacs/yallist.svg?branch=master)](https://travis-ci.org/isaacs/yallist) [![Coverage Status](https://coveralls.io/repos/isaacs/yallist/badge.svg?service=github)](https://coveralls.io/github/isaacs/yallist)
## basic usage
```javascript
var yallist = require('yallist')
var myList = yallist.create([1, 2, 3])
myList.push('foo')
myList.unshift('bar')
// of course pop() and shift() are there, too
console.log(myList.toArray()) // ['bar', 1, 2, 3, 'foo']
myList.forEach(function (k) {
// walk the list head to tail
})
myList.forEachReverse(function (k, index, list) {
// walk the list tail to head
})
var myDoubledList = myList.map(function (k) {
return k + k
})
// now myDoubledList contains ['barbar', 2, 4, 6, 'foofoo']
// mapReverse is also a thing
var myDoubledListReverse = myList.mapReverse(function (k) {
return k + k
}) // ['foofoo', 6, 4, 2, 'barbar']
var reduced = myList.reduce(function (set, entry) {
set += entry
return set
}, 'start')
console.log(reduced) // 'startbar123foo'
```
## api
The whole API is considered "public".
Functions with the same name as an Array method work more or less the
same way.
There are reverse versions of most things, because that's the point.
### Yallist
Default export, the class that holds and manages a list.
Call it with either a forEach-able (like an array) or a set of
arguments, to initialize the list.
The Array-ish methods all act like you'd expect. No magic length,
though, so if you change that it won't automatically prune or add
empty spots.
### Yallist.create(..)
Alias for Yallist function. Some people like factories.
#### yallist.head
The first node in the list
#### yallist.tail
The last node in the list
#### yallist.length
The number of nodes in the list. (Change this at your peril. It is
not magic like Array length.)
#### yallist.forEach(fn, [thisp])
Call a function on each item in the list.
#### yallist.forEachReverse(fn, [thisp])
Call a function on each item in the list, in reverse order.
#### yallist.get(n)
Get the data at position `n` in the list. If you use this a lot, you're
probably better off just using an Array.
#### yallist.getReverse(n)
Get the data at position `n`, counting from the tail.
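A quick illustration of the difference (a sketch, not from the upstream README):

```javascript
var list = require('yallist').create(['a', 'b', 'c', 'd'])
list.get(1)        // 'b'  (position 1 counting from the head)
list.getReverse(1) // 'c'  (position 1 counting from the tail)
```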
#### yallist.map(fn, thisp)
Create a new Yallist with the result of calling the function on each
item.
#### yallist.mapReverse(fn, thisp)
Same as `map`, but in reverse.
#### yallist.pop()
Get the data from the list tail, and remove the tail from the list.
#### yallist.push(item, ...)
Insert one or more items to the tail of the list.
#### yallist.reduce(fn, initialValue)
Like Array.reduce.
#### yallist.reduceReverse(fn, initialValue)
Like Array.reduce, but in reverse.
#### yallist.reverse()
Reverse the list in place.
#### yallist.shift()
Get the data from the list head, and remove the head from the list.
#### yallist.slice([from], [to])
Just like Array.slice, but returns a new Yallist.
#### yallist.sliceReverse([from], [to])
Just like yallist.slice, but the result is returned in reverse.
#### yallist.toArray()
Create an array representation of the list.
#### yallist.toArrayReverse()
Create a reversed array representation of the list.
#### yallist.unshift(item, ...)
Insert one or more items to the head of the list.
#### yallist.unshiftNode(node)
Move a Node object to the front of the list. (That is, pull it out of
wherever it lives, and make it the new head.)
If the node belongs to a different list, then that list will remove it
first.
#### yallist.pushNode(node)
Move a Node object to the end of the list. (That is, pull it out of
wherever it lives, and make it the new tail.)
If the node belongs to a list already, then that list will remove it
first.
#### yallist.removeNode(node)
Remove a node from the list, preserving referential integrity of head
and tail and other nodes.
Will throw an error if you try to have a list remove a node that
doesn't belong to it.
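A small sketch of the node-level operations (illustrative, not from the upstream README):

```javascript
var Yallist = require('yallist')
var list = Yallist.create([1, 2, 3])
var other = Yallist.create(['x'])

// Moving a node between lists removes it from its old list first.
other.pushNode(list.head)     // list is now [2, 3], other is ['x', 1]
other.unshiftNode(other.tail) // move the tail node to the front: [1, 'x']
other.removeNode(other.head)  // drop the node holding 1: ['x']
```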
### Yallist.Node
The class that holds the data; the linked Node objects are what actually
make up the list.
Call with `var n = new Node(value, previousNode, nextNode)`
Note that if you do direct operations on Nodes themselves, it's very
easy to get into weird states where the list is broken. Be careful :)
#### node.next
The next node in the list.
#### node.prev
The previous node in the list.
#### node.value
The data the node contains.
#### node.list
The list to which this node belongs. (Null if it does not belong to
any list.)
'use strict'
var Yallist = require('./yallist.js')
Yallist.prototype[Symbol.iterator] = function* () {
for (let walker = this.head; walker; walker = walker.next) {
yield walker.value
}
}
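The iterator.js module above patches `Symbol.iterator` onto the prototype, so a list can be consumed with `for...of` or spread syntax. A short sketch of assumed usage (the main `yallist` entry loads iterator.js when the runtime supports it, as shown at the end of yallist.js below):

```javascript
var Yallist = require('yallist')

var list = Yallist.create(['a', 'b', 'c'])
for (var value of list) {
  console.log(value) // 'a', then 'b', then 'c'
}
console.log([...list]) // ['a', 'b', 'c']
```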
{
"_from": "yallist@^3.0.2",
"_id": "yallist@3.0.2",
"_inBundle": false,
"_integrity": "sha1-hFK0u36Dx8GI2AQcGoN8dz1ti7k=",
"_location": "/tar/yallist",
"_phantomChildren": {},
"_requested": {
"type": "range",
"registry": true,
"raw": "yallist@^3.0.2",
"name": "yallist",
"escapedName": "yallist",
"rawSpec": "^3.0.2",
"saveSpec": null,
"fetchSpec": "^3.0.2"
},
"_requiredBy": [
"/tar"
],
"_resolved": "http://registry.npm.taobao.org/yallist/download/yallist-3.0.2.tgz",
"_shasum": "8452b4bb7e83c7c188d8041c1a837c773d6d8bb9",
"_spec": "yallist@^3.0.2",
"_where": "/Users/medicean/workspace/antSword/node_modules/tar",
"author": {
"name": "Isaac Z. Schlueter",
"email": "i@izs.me",
"url": "http://blog.izs.me/"
},
"bugs": {
"url": "https://github.com/isaacs/yallist/issues"
},
"bundleDependencies": false,
"dependencies": {},
"deprecated": false,
"description": "Yet Another Linked List",
"devDependencies": {
"tap": "^10.3.0"
},
"directories": {
"test": "test"
},
"files": [
"yallist.js",
"iterator.js"
],
"homepage": "https://github.com/isaacs/yallist#readme",
"license": "ISC",
"main": "yallist.js",
"name": "yallist",
"repository": {
"type": "git",
"url": "git+https://github.com/isaacs/yallist.git"
},
"scripts": {
"postpublish": "git push origin --all; git push origin --tags",
"postversion": "npm publish",
"preversion": "npm test",
"test": "tap test/*.js --100"
},
"version": "3.0.2"
}
'use strict'
module.exports = Yallist
Yallist.Node = Node
Yallist.create = Yallist
function Yallist (list) {
var self = this
if (!(self instanceof Yallist)) {
self = new Yallist()
}
self.tail = null
self.head = null
self.length = 0
if (list && typeof list.forEach === 'function') {
list.forEach(function (item) {
self.push(item)
})
} else if (arguments.length > 0) {
for (var i = 0, l = arguments.length; i < l; i++) {
self.push(arguments[i])
}
}
return self
}
Yallist.prototype.removeNode = function (node) {
if (node.list !== this) {
throw new Error('removing node which does not belong to this list')
}
var next = node.next
var prev = node.prev
if (next) {
next.prev = prev
}
if (prev) {
prev.next = next
}
if (node === this.head) {
this.head = next
}
if (node === this.tail) {
this.tail = prev
}
node.list.length--
node.next = null
node.prev = null
node.list = null
}
Yallist.prototype.unshiftNode = function (node) {
if (node === this.head) {
return
}
if (node.list) {
node.list.removeNode(node)
}
var head = this.head
node.list = this
node.next = head
if (head) {
head.prev = node
}
this.head = node
if (!this.tail) {
this.tail = node
}
this.length++
}
Yallist.prototype.pushNode = function (node) {
if (node === this.tail) {
return
}
if (node.list) {
node.list.removeNode(node)
}
var tail = this.tail
node.list = this
node.prev = tail
if (tail) {
tail.next = node
}
this.tail = node
if (!this.head) {
this.head = node
}
this.length++
}
Yallist.prototype.push = function () {
for (var i = 0, l = arguments.length; i < l; i++) {
push(this, arguments[i])
}
return this.length
}
Yallist.prototype.unshift = function () {
for (var i = 0, l = arguments.length; i < l; i++) {
unshift(this, arguments[i])
}
return this.length
}
Yallist.prototype.pop = function () {
if (!this.tail) {
return undefined
}
var res = this.tail.value
this.tail = this.tail.prev
if (this.tail) {
this.tail.next = null
} else {
this.head = null
}
this.length--
return res
}
Yallist.prototype.shift = function () {
if (!this.head) {
return undefined
}
var res = this.head.value
this.head = this.head.next
if (this.head) {
this.head.prev = null
} else {
this.tail = null
}
this.length--
return res
}
Yallist.prototype.forEach = function (fn, thisp) {
thisp = thisp || this
for (var walker = this.head, i = 0; walker !== null; i++) {
fn.call(thisp, walker.value, i, this)
walker = walker.next
}
}
Yallist.prototype.forEachReverse = function (fn, thisp) {
thisp = thisp || this
for (var walker = this.tail, i = this.length - 1; walker !== null; i--) {
fn.call(thisp, walker.value, i, this)
walker = walker.prev
}
}
Yallist.prototype.get = function (n) {
for (var i = 0, walker = this.head; walker !== null && i < n; i++) {
// abort out of the list early if we hit a cycle
walker = walker.next
}
if (i === n && walker !== null) {
return walker.value
}
}
Yallist.prototype.getReverse = function (n) {
for (var i = 0, walker = this.tail; walker !== null && i < n; i++) {
// abort out of the list early if we hit a cycle
walker = walker.prev
}
if (i === n && walker !== null) {
return walker.value
}
}
Yallist.prototype.map = function (fn, thisp) {
thisp = thisp || this
var res = new Yallist()
for (var walker = this.head; walker !== null;) {
res.push(fn.call(thisp, walker.value, this))
walker = walker.next
}
return res
}
Yallist.prototype.mapReverse = function (fn, thisp) {
thisp = thisp || this
var res = new Yallist()
for (var walker = this.tail; walker !== null;) {
res.push(fn.call(thisp, walker.value, this))
walker = walker.prev
}
return res
}
Yallist.prototype.reduce = function (fn, initial) {
var acc
var walker = this.head
if (arguments.length > 1) {
acc = initial
} else if (this.head) {
walker = this.head.next
acc = this.head.value
} else {
throw new TypeError('Reduce of empty list with no initial value')
}
for (var i = 0; walker !== null; i++) {
acc = fn(acc, walker.value, i)
walker = walker.next
}
return acc
}
Yallist.prototype.reduceReverse = function (fn, initial) {
var acc
var walker = this.tail
if (arguments.length > 1) {
acc = initial
} else if (this.tail) {
walker = this.tail.prev
acc = this.tail.value
} else {
throw new TypeError('Reduce of empty list with no initial value')
}
for (var i = this.length - 1; walker !== null; i--) {
acc = fn(acc, walker.value, i)
walker = walker.prev
}
return acc
}
Yallist.prototype.toArray = function () {
var arr = new Array(this.length)
for (var i = 0, walker = this.head; walker !== null; i++) {
arr[i] = walker.value
walker = walker.next
}
return arr
}
Yallist.prototype.toArrayReverse = function () {
var arr = new Array(this.length)
for (var i = 0, walker = this.tail; walker !== null; i++) {
arr[i] = walker.value
walker = walker.prev
}
return arr
}
Yallist.prototype.slice = function (from, to) {
to = to || this.length
if (to < 0) {
to += this.length
}
from = from || 0
if (from < 0) {
from += this.length
}
var ret = new Yallist()
if (to < from || to < 0) {
return ret
}
if (from < 0) {
from = 0
}
if (to > this.length) {
to = this.length
}
for (var i = 0, walker = this.head; walker !== null && i < from; i++) {
walker = walker.next
}
for (; walker !== null && i < to; i++, walker = walker.next) {
ret.push(walker.value)
}
return ret
}
Yallist.prototype.sliceReverse = function (from, to) {
to = to || this.length
if (to < 0) {
to += this.length
}
from = from || 0
if (from < 0) {
from += this.length
}
var ret = new Yallist()
if (to < from || to < 0) {
return ret
}
if (from < 0) {
from = 0
}
if (to > this.length) {
to = this.length
}
for (var i = this.length, walker = this.tail; walker !== null && i > to; i--) {
walker = walker.prev
}
for (; walker !== null && i > from; i--, walker = walker.prev) {
ret.push(walker.value)
}
return ret
}
Yallist.prototype.reverse = function () {
var head = this.head
var tail = this.tail
for (var walker = head; walker !== null; walker = walker.prev) {
var p = walker.prev
walker.prev = walker.next
walker.next = p
}
this.head = tail
this.tail = head
return this
}
function push (self, item) {
self.tail = new Node(item, self.tail, null, self)
if (!self.head) {
self.head = self.tail
}
self.length++
}
function unshift (self, item) {
self.head = new Node(item, null, self.head, self)
if (!self.tail) {
self.tail = self.head
}
self.length++
}
function Node (value, prev, next, list) {
if (!(this instanceof Node)) {
return new Node(value, prev, next, list)
}
this.list = list
this.value = value
if (prev) {
prev.next = this
this.prev = prev
} else {
this.prev = null
}
if (next) {
next.prev = this
this.next = next
} else {
this.next = null
}
}
try {
// add if support or Symbol.iterator is present
require('./iterator.js')
} catch (er) {}
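To make the slicing and reversal semantics implemented above concrete, a short sketch (illustrative, not part of the package):

```javascript
var Yallist = require('yallist')
var list = Yallist.create([1, 2, 3, 4, 5])

list.slice(1, 4).toArray()        // [2, 3, 4]        -- same bounds as Array.slice
list.sliceReverse(1, 4).toArray() // [4, 3, 2]        -- same elements, tail-first
list.reverse().toArray()          // [5, 4, 3, 2, 1]  -- reverses the list in place
```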
{
"_from": "tar",
"_id": "tar@4.4.6",
"_inBundle": false,
"_integrity": "sha1-YxEPCcALTmCsi8/hvzyGYCNfvJs=",
"_location": "/tar",
"_phantomChildren": {},
"_requested": {
"type": "tag",
"registry": true,
"raw": "tar",
"name": "tar",
"escapedName": "tar",
"rawSpec": "",
"saveSpec": null,
"fetchSpec": "latest"
},
"_requiredBy": [
"#USER",
"/"
],
"_resolved": "http://registry.npm.taobao.org/tar/download/tar-4.4.6.tgz",
"_shasum": "63110f09c00b4e60ac8bcfe1bf3c8660235fbc9b",
"_spec": "tar",
"_where": "/Users/medicean/workspace/antSword",
"author": {
"name": "Isaac Z. Schlueter",
"email": "i@izs.me",
"url": "http://blog.izs.me/"
},
"bugs": {
"url": "https://github.com/npm/node-tar/issues"
},
"bundleDependencies": false,
"dependencies": {
"chownr": "^1.0.1",
"fs-minipass": "^1.2.5",
"minipass": "^2.3.3",
"minizlib": "^1.1.0",
"mkdirp": "^0.5.0",
"safe-buffer": "^5.1.2",
"yallist": "^3.0.2"
},
"deprecated": false,
"description": "tar for node",
"devDependencies": {
"chmodr": "^1.0.2",
"end-of-stream": "^1.4.1",
"events-to-array": "^1.1.2",
"mutate-fs": "^2.1.1",
"rimraf": "^2.6.2",
"tap": "^12.0.1",
"tar-fs": "^1.16.2",
"tar-stream": "^1.6.0"
},
"engines": {
"node": ">=4.5"
},
"files": [
"index.js",
"lib/"
],
"homepage": "https://github.com/npm/node-tar#readme",
"license": "ISC",
"name": "tar",
"repository": {
"type": "git",
"url": "git+https://github.com/npm/node-tar.git"
},
"scripts": {
"bench": "for i in benchmarks/*/*.js; do echo $i; for j in {1..5}; do node $i || break; done; done",
"genparse": "node scripts/generate-parse-fixtures.js",
"postpublish": "git push origin --all; git push origin --tags",
"postversion": "npm publish",
"preversion": "npm test",
"test": "tap test/*.js --100 -J --coverage-report=text -c"
},
"version": "4.4.6"
}
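This is the dependency antSword pulls in; application code is expected to use the package's high-level create/extract API rather than the internal classes dumped above. A hedged sketch of typical usage (placeholder paths, not taken from the antSword source):

```javascript
const tar = require('tar')

// Create a gzipped archive of a directory, then extract it elsewhere.
// 'backup.tgz', '/tmp/data' and '/tmp/restore' are placeholder paths.
tar.c({ gzip: true, file: 'backup.tgz', cwd: '/tmp/data' }, ['.'])
  .then(() => tar.x({ file: 'backup.tgz', cwd: '/tmp/restore' }))
  .catch(err => console.error(err))
```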
{
"name": "antsword",
"version": "1.2.0",
"version": "2.0.0",
"lockfileVersion": 1,
"requires": true,
"dependencies": {
@@ -60,6 +60,11 @@
"resolved": "http://registry.npm.taobao.org/bytes/download/bytes-3.0.0.tgz",
"integrity": "sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg="
},
"chownr": {
"version": "1.0.1",
"resolved": "http://registry.npm.taobao.org/chownr/download/chownr-1.0.1.tgz",
"integrity": "sha1-4qdQQqlVGQi+vSW4Uj1fl2nXkYE="
},
"co": {
"version": "4.6.0",
"resolved": "http://registry.npm.taobao.org/co/download/co-4.6.0.tgz",
@@ -291,6 +296,14 @@
"resolved": "http://registry.npm.taobao.org/formidable/download/formidable-1.2.1.tgz",
"integrity": "sha1-cPt8oCkO5v+WEJBBX0s989IIJlk="
},
"fs-minipass": {
"version": "1.2.5",
"resolved": "http://registry.npm.taobao.org/fs-minipass/download/fs-minipass-1.2.5.tgz",
"integrity": "sha1-BsJ3IYRU7CiN93raVKA7hwKqy50=",
"requires": {
"minipass": "^2.2.1"
}
},
"ftp": {
"version": "0.3.10",
"resolved": "http://registry.npm.taobao.org/ftp/download/ftp-0.3.10.tgz",
@@ -508,6 +521,30 @@
"resolved": "https://registry.npmjs.org/minimist/-/minimist-0.0.8.tgz",
"integrity": "sha1-hX/Kv8M5fSYluCKCYuhqp6ARsF0="
},
"minipass": {
"version": "2.3.4",
"resolved": "http://registry.npm.taobao.org/minipass/download/minipass-2.3.4.tgz",
"integrity": "sha1-R2jXYF7WGU1tV2FpueEu9x6dmVc=",
"requires": {
"safe-buffer": "^5.1.2",
"yallist": "^3.0.0"
},
"dependencies": {
"yallist": {
"version": "3.0.2",
"resolved": "http://registry.npm.taobao.org/yallist/download/yallist-3.0.2.tgz",
"integrity": "sha1-hFK0u36Dx8GI2AQcGoN8dz1ti7k="
}
}
},
"minizlib": {
"version": "1.1.0",
"resolved": "http://registry.npm.taobao.org/minizlib/download/minizlib-1.1.0.tgz",
"integrity": "sha1-EeE2WM5GvDpwomeqxYNZ0eDCnOs=",
"requires": {
"minipass": "^2.2.1"
}
},
"mkdirp": {
"version": "0.5.1",
"resolved": "http://registry.npm.taobao.org/mkdirp/download/mkdirp-0.5.1.tgz",
@@ -828,6 +865,27 @@
}
}
},
"tar": {
"version": "4.4.6",
"resolved": "http://registry.npm.taobao.org/tar/download/tar-4.4.6.tgz",
"integrity": "sha1-YxEPCcALTmCsi8/hvzyGYCNfvJs=",
"requires": {
"chownr": "^1.0.1",
"fs-minipass": "^1.2.5",
"minipass": "^2.3.3",
"minizlib": "^1.1.0",
"mkdirp": "^0.5.0",
"safe-buffer": "^5.1.2",
"yallist": "^3.0.2"
},
"dependencies": {
"yallist": {
"version": "3.0.2",
"resolved": "http://registry.npm.taobao.org/yallist/download/yallist-3.0.2.tgz",
"integrity": "sha1-hFK0u36Dx8GI2AQcGoN8dz1ti7k="
}
}
},
"through": {
"version": "2.3.8",
"resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz",
......
@@ -10,6 +10,7 @@
"nedb": "^1.5.1",
"superagent": "^3.8.3",
"superagent-proxy": "^1.0.3",
"tar": "^4.4.6",
"through": "^2.3.8"
},
"scripts": {
......