diff --git "a/luckyexcel.umd.js" "b/luckyexcel.umd.js" new file mode 100644--- /dev/null +++ "b/luckyexcel.umd.js" @@ -0,0 +1,22903 @@ +(function(f){if(typeof exports==="object"&&typeof module!=="undefined"){module.exports=f()}else if(typeof define==="function"&&define.amd){define([],f)}else{var g;if(typeof window!=="undefined"){g=window}else if(typeof global!=="undefined"){g=global}else if(typeof self!=="undefined"){g=self}else{g=this}g.LuckyExcel = f()}})(function(){var define,module,exports;return (function(){function r(e,n,t){function o(i,f){if(!n[i]){if(!e[i]){var c="function"==typeof require&&require;if(!f&&c)return c(i,!0);if(u)return u(i,!0);var a=new Error("Cannot find module '"+i+"'");throw a.code="MODULE_NOT_FOUND",a}var p=n[i]={exports:{}};e[i][0].call(p.exports,function(r){var n=e[i][1][r];return o(n||r)},p,p.exports,r,e,n,t)}return n[i].exports}for(var u="function"==typeof require&&require,i=0;i<t.length;i++)o(t[i]);return o}return r})()({1:[function(require,module,exports){ +'use strict' + +exports.byteLength = byteLength +exports.toByteArray = toByteArray +exports.fromByteArray = fromByteArray + +var lookup = [] +var revLookup = [] +var Arr = typeof Uint8Array !== 'undefined' ? Uint8Array : Array + +var code = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/' +for (var i = 0, len = code.length; i < len; ++i) { + lookup[i] = code[i] + revLookup[code.charCodeAt(i)] = i +} + +// Support decoding URL-safe base64 strings, as Node.js does. +// See: https://en.wikipedia.org/wiki/Base64#URL_applications +revLookup['-'.charCodeAt(0)] = 62 +revLookup['_'.charCodeAt(0)] = 63 + +function getLens (b64) { + var len = b64.length + + if (len % 4 > 0) { + throw new Error('Invalid string. Length must be a multiple of 4') + } + + // Trim off extra bytes after placeholder bytes are found + // See: https://github.com/beatgammit/base64-js/issues/42 + var validLen = b64.indexOf('=') + if (validLen === -1) validLen = len + + var placeHoldersLen = validLen === len + ? 0 + : 4 - (validLen % 4) + + return [validLen, placeHoldersLen] +} + +// base64 is 4/3 + up to two characters of the original data +function byteLength (b64) { + var lens = getLens(b64) + var validLen = lens[0] + var placeHoldersLen = lens[1] + return ((validLen + placeHoldersLen) * 3 / 4) - placeHoldersLen +} + +function _byteLength (b64, validLen, placeHoldersLen) { + return ((validLen + placeHoldersLen) * 3 / 4) - placeHoldersLen +} + +function toByteArray (b64) { + var tmp + var lens = getLens(b64) + var validLen = lens[0] + var placeHoldersLen = lens[1] + + var arr = new Arr(_byteLength(b64, validLen, placeHoldersLen)) + + var curByte = 0 + + // if there are placeholders, only get up to the last complete 4 chars + var len = placeHoldersLen > 0 + ? 
validLen - 4 + : validLen + + var i + for (i = 0; i < len; i += 4) { + tmp = + (revLookup[b64.charCodeAt(i)] << 18) | + (revLookup[b64.charCodeAt(i + 1)] << 12) | + (revLookup[b64.charCodeAt(i + 2)] << 6) | + revLookup[b64.charCodeAt(i + 3)] + arr[curByte++] = (tmp >> 16) & 0xFF + arr[curByte++] = (tmp >> 8) & 0xFF + arr[curByte++] = tmp & 0xFF + } + + if (placeHoldersLen === 2) { + tmp = + (revLookup[b64.charCodeAt(i)] << 2) | + (revLookup[b64.charCodeAt(i + 1)] >> 4) + arr[curByte++] = tmp & 0xFF + } + + if (placeHoldersLen === 1) { + tmp = + (revLookup[b64.charCodeAt(i)] << 10) | + (revLookup[b64.charCodeAt(i + 1)] << 4) | + (revLookup[b64.charCodeAt(i + 2)] >> 2) + arr[curByte++] = (tmp >> 8) & 0xFF + arr[curByte++] = tmp & 0xFF + } + + return arr +} + +function tripletToBase64 (num) { + return lookup[num >> 18 & 0x3F] + + lookup[num >> 12 & 0x3F] + + lookup[num >> 6 & 0x3F] + + lookup[num & 0x3F] +} + +function encodeChunk (uint8, start, end) { + var tmp + var output = [] + for (var i = start; i < end; i += 3) { + tmp = + ((uint8[i] << 16) & 0xFF0000) + + ((uint8[i + 1] << 8) & 0xFF00) + + (uint8[i + 2] & 0xFF) + output.push(tripletToBase64(tmp)) + } + return output.join('') +} + +function fromByteArray (uint8) { + var tmp + var len = uint8.length + var extraBytes = len % 3 // if we have 1 byte left, pad 2 bytes + var parts = [] + var maxChunkLength = 16383 // must be multiple of 3 + + // go through the array every three bytes, we'll deal with trailing stuff later + for (var i = 0, len2 = len - extraBytes; i < len2; i += maxChunkLength) { + parts.push(encodeChunk( + uint8, i, (i + maxChunkLength) > len2 ? len2 : (i + maxChunkLength) + )) + } + + // pad the end with zeros, but make sure to not forget the extra bytes + if (extraBytes === 1) { + tmp = uint8[len - 1] + parts.push( + lookup[tmp >> 2] + + lookup[(tmp << 4) & 0x3F] + + '==' + ) + } else if (extraBytes === 2) { + tmp = (uint8[len - 2] << 8) + uint8[len - 1] + parts.push( + lookup[tmp >> 10] + + lookup[(tmp >> 4) & 0x3F] + + lookup[(tmp << 2) & 0x3F] + + '=' + ) + } + + return parts.join('') +} + +},{}],2:[function(require,module,exports){ + +},{}],3:[function(require,module,exports){ +(function (Buffer){ +/*! + * The buffer module from node.js, for the browser. + * + * @author Feross Aboukhadijeh <https://feross.org> + * @license MIT + */ +/* eslint-disable no-proto */ + +'use strict' + +var base64 = require('base64-js') +var ieee754 = require('ieee754') + +exports.Buffer = Buffer +exports.SlowBuffer = SlowBuffer +exports.INSPECT_MAX_BYTES = 50 + +var K_MAX_LENGTH = 0x7fffffff +exports.kMaxLength = K_MAX_LENGTH + +/** + * If `Buffer.TYPED_ARRAY_SUPPORT`: + * === true Use Uint8Array implementation (fastest) + * === false Print warning and recommend using `buffer` v4.x which has an Object + * implementation (most compatible, even IE6) + * + * Browsers that support typed arrays are IE 10+, Firefox 4+, Chrome 7+, Safari 5.1+, + * Opera 11.6+, iOS 4.2+. + * + * We report that the browser does not support typed arrays if the are not subclassable + * using __proto__. Firefox 4-29 lacks support for adding new properties to `Uint8Array` + * (See: https://bugzilla.mozilla.org/show_bug.cgi?id=695438). IE 10 lacks support + * for __proto__ and has a buggy typed array implementation. 
+ */ +Buffer.TYPED_ARRAY_SUPPORT = typedArraySupport() + +if (!Buffer.TYPED_ARRAY_SUPPORT && typeof console !== 'undefined' && + typeof console.error === 'function') { + console.error( + 'This browser lacks typed array (Uint8Array) support which is required by ' + + '`buffer` v5.x. Use `buffer` v4.x if you require old browser support.' + ) +} + +function typedArraySupport () { + // Can typed array instances can be augmented? + try { + var arr = new Uint8Array(1) + arr.__proto__ = { __proto__: Uint8Array.prototype, foo: function () { return 42 } } + return arr.foo() === 42 + } catch (e) { + return false + } +} + +Object.defineProperty(Buffer.prototype, 'parent', { + enumerable: true, + get: function () { + if (!Buffer.isBuffer(this)) return undefined + return this.buffer + } +}) + +Object.defineProperty(Buffer.prototype, 'offset', { + enumerable: true, + get: function () { + if (!Buffer.isBuffer(this)) return undefined + return this.byteOffset + } +}) + +function createBuffer (length) { + if (length > K_MAX_LENGTH) { + throw new RangeError('The value "' + length + '" is invalid for option "size"') + } + // Return an augmented `Uint8Array` instance + var buf = new Uint8Array(length) + buf.__proto__ = Buffer.prototype + return buf +} + +/** + * The Buffer constructor returns instances of `Uint8Array` that have their + * prototype changed to `Buffer.prototype`. Furthermore, `Buffer` is a subclass of + * `Uint8Array`, so the returned instances will have all the node `Buffer` methods + * and the `Uint8Array` methods. Square bracket notation works as expected -- it + * returns a single octet. + * + * The `Uint8Array` prototype remains unmodified. + */ + +function Buffer (arg, encodingOrOffset, length) { + // Common case. + if (typeof arg === 'number') { + if (typeof encodingOrOffset === 'string') { + throw new TypeError( + 'The "string" argument must be of type string. Received type number' + ) + } + return allocUnsafe(arg) + } + return from(arg, encodingOrOffset, length) +} + +// Fix subarray() in ES2016. See: https://github.com/feross/buffer/pull/97 +if (typeof Symbol !== 'undefined' && Symbol.species != null && + Buffer[Symbol.species] === Buffer) { + Object.defineProperty(Buffer, Symbol.species, { + value: null, + configurable: true, + enumerable: false, + writable: false + }) +} + +Buffer.poolSize = 8192 // not used by this implementation + +function from (value, encodingOrOffset, length) { + if (typeof value === 'string') { + return fromString(value, encodingOrOffset) + } + + if (ArrayBuffer.isView(value)) { + return fromArrayLike(value) + } + + if (value == null) { + throw TypeError( + 'The first argument must be one of type string, Buffer, ArrayBuffer, Array, ' + + 'or Array-like Object. Received type ' + (typeof value) + ) + } + + if (isInstance(value, ArrayBuffer) || + (value && isInstance(value.buffer, ArrayBuffer))) { + return fromArrayBuffer(value, encodingOrOffset, length) + } + + if (typeof value === 'number') { + throw new TypeError( + 'The "value" argument must not be of type number. 
Received type number' + ) + } + + var valueOf = value.valueOf && value.valueOf() + if (valueOf != null && valueOf !== value) { + return Buffer.from(valueOf, encodingOrOffset, length) + } + + var b = fromObject(value) + if (b) return b + + if (typeof Symbol !== 'undefined' && Symbol.toPrimitive != null && + typeof value[Symbol.toPrimitive] === 'function') { + return Buffer.from( + value[Symbol.toPrimitive]('string'), encodingOrOffset, length + ) + } + + throw new TypeError( + 'The first argument must be one of type string, Buffer, ArrayBuffer, Array, ' + + 'or Array-like Object. Received type ' + (typeof value) + ) +} + +/** + * Functionally equivalent to Buffer(arg, encoding) but throws a TypeError + * if value is a number. + * Buffer.from(str[, encoding]) + * Buffer.from(array) + * Buffer.from(buffer) + * Buffer.from(arrayBuffer[, byteOffset[, length]]) + **/ +Buffer.from = function (value, encodingOrOffset, length) { + return from(value, encodingOrOffset, length) +} + +// Note: Change prototype *after* Buffer.from is defined to workaround Chrome bug: +// https://github.com/feross/buffer/pull/148 +Buffer.prototype.__proto__ = Uint8Array.prototype +Buffer.__proto__ = Uint8Array + +function assertSize (size) { + if (typeof size !== 'number') { + throw new TypeError('"size" argument must be of type number') + } else if (size < 0) { + throw new RangeError('The value "' + size + '" is invalid for option "size"') + } +} + +function alloc (size, fill, encoding) { + assertSize(size) + if (size <= 0) { + return createBuffer(size) + } + if (fill !== undefined) { + // Only pay attention to encoding if it's a string. This + // prevents accidentally sending in a number that would + // be interpretted as a start offset. + return typeof encoding === 'string' + ? createBuffer(size).fill(fill, encoding) + : createBuffer(size).fill(fill) + } + return createBuffer(size) +} + +/** + * Creates a new filled Buffer instance. + * alloc(size[, fill[, encoding]]) + **/ +Buffer.alloc = function (size, fill, encoding) { + return alloc(size, fill, encoding) +} + +function allocUnsafe (size) { + assertSize(size) + return createBuffer(size < 0 ? 0 : checked(size) | 0) +} + +/** + * Equivalent to Buffer(num), by default creates a non-zero-filled Buffer instance. + * */ +Buffer.allocUnsafe = function (size) { + return allocUnsafe(size) +} +/** + * Equivalent to SlowBuffer(num), by default creates a non-zero-filled Buffer instance. + */ +Buffer.allocUnsafeSlow = function (size) { + return allocUnsafe(size) +} + +function fromString (string, encoding) { + if (typeof encoding !== 'string' || encoding === '') { + encoding = 'utf8' + } + + if (!Buffer.isEncoding(encoding)) { + throw new TypeError('Unknown encoding: ' + encoding) + } + + var length = byteLength(string, encoding) | 0 + var buf = createBuffer(length) + + var actual = buf.write(string, encoding) + + if (actual !== length) { + // Writing a hex string, for example, that contains invalid characters will + // cause everything after the first invalid character to be ignored. (e.g. + // 'abxxcd' will be treated as 'ab') + buf = buf.slice(0, actual) + } + + return buf +} + +function fromArrayLike (array) { + var length = array.length < 0 ? 
0 : checked(array.length) | 0 + var buf = createBuffer(length) + for (var i = 0; i < length; i += 1) { + buf[i] = array[i] & 255 + } + return buf +} + +function fromArrayBuffer (array, byteOffset, length) { + if (byteOffset < 0 || array.byteLength < byteOffset) { + throw new RangeError('"offset" is outside of buffer bounds') + } + + if (array.byteLength < byteOffset + (length || 0)) { + throw new RangeError('"length" is outside of buffer bounds') + } + + var buf + if (byteOffset === undefined && length === undefined) { + buf = new Uint8Array(array) + } else if (length === undefined) { + buf = new Uint8Array(array, byteOffset) + } else { + buf = new Uint8Array(array, byteOffset, length) + } + + // Return an augmented `Uint8Array` instance + buf.__proto__ = Buffer.prototype + return buf +} + +function fromObject (obj) { + if (Buffer.isBuffer(obj)) { + var len = checked(obj.length) | 0 + var buf = createBuffer(len) + + if (buf.length === 0) { + return buf + } + + obj.copy(buf, 0, 0, len) + return buf + } + + if (obj.length !== undefined) { + if (typeof obj.length !== 'number' || numberIsNaN(obj.length)) { + return createBuffer(0) + } + return fromArrayLike(obj) + } + + if (obj.type === 'Buffer' && Array.isArray(obj.data)) { + return fromArrayLike(obj.data) + } +} + +function checked (length) { + // Note: cannot use `length < K_MAX_LENGTH` here because that fails when + // length is NaN (which is otherwise coerced to zero.) + if (length >= K_MAX_LENGTH) { + throw new RangeError('Attempt to allocate Buffer larger than maximum ' + + 'size: 0x' + K_MAX_LENGTH.toString(16) + ' bytes') + } + return length | 0 +} + +function SlowBuffer (length) { + if (+length != length) { // eslint-disable-line eqeqeq + length = 0 + } + return Buffer.alloc(+length) +} + +Buffer.isBuffer = function isBuffer (b) { + return b != null && b._isBuffer === true && + b !== Buffer.prototype // so Buffer.isBuffer(Buffer.prototype) will be false +} + +Buffer.compare = function compare (a, b) { + if (isInstance(a, Uint8Array)) a = Buffer.from(a, a.offset, a.byteLength) + if (isInstance(b, Uint8Array)) b = Buffer.from(b, b.offset, b.byteLength) + if (!Buffer.isBuffer(a) || !Buffer.isBuffer(b)) { + throw new TypeError( + 'The "buf1", "buf2" arguments must be one of type Buffer or Uint8Array' + ) + } + + if (a === b) return 0 + + var x = a.length + var y = b.length + + for (var i = 0, len = Math.min(x, y); i < len; ++i) { + if (a[i] !== b[i]) { + x = a[i] + y = b[i] + break + } + } + + if (x < y) return -1 + if (y < x) return 1 + return 0 +} + +Buffer.isEncoding = function isEncoding (encoding) { + switch (String(encoding).toLowerCase()) { + case 'hex': + case 'utf8': + case 'utf-8': + case 'ascii': + case 'latin1': + case 'binary': + case 'base64': + case 'ucs2': + case 'ucs-2': + case 'utf16le': + case 'utf-16le': + return true + default: + return false + } +} + +Buffer.concat = function concat (list, length) { + if (!Array.isArray(list)) { + throw new TypeError('"list" argument must be an Array of Buffers') + } + + if (list.length === 0) { + return Buffer.alloc(0) + } + + var i + if (length === undefined) { + length = 0 + for (i = 0; i < list.length; ++i) { + length += list[i].length + } + } + + var buffer = Buffer.allocUnsafe(length) + var pos = 0 + for (i = 0; i < list.length; ++i) { + var buf = list[i] + if (isInstance(buf, Uint8Array)) { + buf = Buffer.from(buf) + } + if (!Buffer.isBuffer(buf)) { + throw new TypeError('"list" argument must be an Array of Buffers') + } + buf.copy(buffer, pos) + pos += buf.length + } + 
return buffer +} + +function byteLength (string, encoding) { + if (Buffer.isBuffer(string)) { + return string.length + } + if (ArrayBuffer.isView(string) || isInstance(string, ArrayBuffer)) { + return string.byteLength + } + if (typeof string !== 'string') { + throw new TypeError( + 'The "string" argument must be one of type string, Buffer, or ArrayBuffer. ' + + 'Received type ' + typeof string + ) + } + + var len = string.length + var mustMatch = (arguments.length > 2 && arguments[2] === true) + if (!mustMatch && len === 0) return 0 + + // Use a for loop to avoid recursion + var loweredCase = false + for (;;) { + switch (encoding) { + case 'ascii': + case 'latin1': + case 'binary': + return len + case 'utf8': + case 'utf-8': + return utf8ToBytes(string).length + case 'ucs2': + case 'ucs-2': + case 'utf16le': + case 'utf-16le': + return len * 2 + case 'hex': + return len >>> 1 + case 'base64': + return base64ToBytes(string).length + default: + if (loweredCase) { + return mustMatch ? -1 : utf8ToBytes(string).length // assume utf8 + } + encoding = ('' + encoding).toLowerCase() + loweredCase = true + } + } +} +Buffer.byteLength = byteLength + +function slowToString (encoding, start, end) { + var loweredCase = false + + // No need to verify that "this.length <= MAX_UINT32" since it's a read-only + // property of a typed array. + + // This behaves neither like String nor Uint8Array in that we set start/end + // to their upper/lower bounds if the value passed is out of range. + // undefined is handled specially as per ECMA-262 6th Edition, + // Section 13.3.3.7 Runtime Semantics: KeyedBindingInitialization. + if (start === undefined || start < 0) { + start = 0 + } + // Return early if start > this.length. Done here to prevent potential uint32 + // coercion fail below. + if (start > this.length) { + return '' + } + + if (end === undefined || end > this.length) { + end = this.length + } + + if (end <= 0) { + return '' + } + + // Force coersion to uint32. This will also coerce falsey/NaN values to 0. + end >>>= 0 + start >>>= 0 + + if (end <= start) { + return '' + } + + if (!encoding) encoding = 'utf8' + + while (true) { + switch (encoding) { + case 'hex': + return hexSlice(this, start, end) + + case 'utf8': + case 'utf-8': + return utf8Slice(this, start, end) + + case 'ascii': + return asciiSlice(this, start, end) + + case 'latin1': + case 'binary': + return latin1Slice(this, start, end) + + case 'base64': + return base64Slice(this, start, end) + + case 'ucs2': + case 'ucs-2': + case 'utf16le': + case 'utf-16le': + return utf16leSlice(this, start, end) + + default: + if (loweredCase) throw new TypeError('Unknown encoding: ' + encoding) + encoding = (encoding + '').toLowerCase() + loweredCase = true + } + } +} + +// This property is used by `Buffer.isBuffer` (and the `is-buffer` npm package) +// to detect a Buffer instance. It's not possible to use `instanceof Buffer` +// reliably in a browserify context because there could be multiple different +// copies of the 'buffer' package in use. This method works even for Buffer +// instances that were created from another copy of the `buffer` package. 
+// See: https://github.com/feross/buffer/issues/154 +Buffer.prototype._isBuffer = true + +function swap (b, n, m) { + var i = b[n] + b[n] = b[m] + b[m] = i +} + +Buffer.prototype.swap16 = function swap16 () { + var len = this.length + if (len % 2 !== 0) { + throw new RangeError('Buffer size must be a multiple of 16-bits') + } + for (var i = 0; i < len; i += 2) { + swap(this, i, i + 1) + } + return this +} + +Buffer.prototype.swap32 = function swap32 () { + var len = this.length + if (len % 4 !== 0) { + throw new RangeError('Buffer size must be a multiple of 32-bits') + } + for (var i = 0; i < len; i += 4) { + swap(this, i, i + 3) + swap(this, i + 1, i + 2) + } + return this +} + +Buffer.prototype.swap64 = function swap64 () { + var len = this.length + if (len % 8 !== 0) { + throw new RangeError('Buffer size must be a multiple of 64-bits') + } + for (var i = 0; i < len; i += 8) { + swap(this, i, i + 7) + swap(this, i + 1, i + 6) + swap(this, i + 2, i + 5) + swap(this, i + 3, i + 4) + } + return this +} + +Buffer.prototype.toString = function toString () { + var length = this.length + if (length === 0) return '' + if (arguments.length === 0) return utf8Slice(this, 0, length) + return slowToString.apply(this, arguments) +} + +Buffer.prototype.toLocaleString = Buffer.prototype.toString + +Buffer.prototype.equals = function equals (b) { + if (!Buffer.isBuffer(b)) throw new TypeError('Argument must be a Buffer') + if (this === b) return true + return Buffer.compare(this, b) === 0 +} + +Buffer.prototype.inspect = function inspect () { + var str = '' + var max = exports.INSPECT_MAX_BYTES + str = this.toString('hex', 0, max).replace(/(.{2})/g, '$1 ').trim() + if (this.length > max) str += ' ... ' + return '<Buffer ' + str + '>' +} + +Buffer.prototype.compare = function compare (target, start, end, thisStart, thisEnd) { + if (isInstance(target, Uint8Array)) { + target = Buffer.from(target, target.offset, target.byteLength) + } + if (!Buffer.isBuffer(target)) { + throw new TypeError( + 'The "target" argument must be one of type Buffer or Uint8Array. ' + + 'Received type ' + (typeof target) + ) + } + + if (start === undefined) { + start = 0 + } + if (end === undefined) { + end = target ? target.length : 0 + } + if (thisStart === undefined) { + thisStart = 0 + } + if (thisEnd === undefined) { + thisEnd = this.length + } + + if (start < 0 || end > target.length || thisStart < 0 || thisEnd > this.length) { + throw new RangeError('out of range index') + } + + if (thisStart >= thisEnd && start >= end) { + return 0 + } + if (thisStart >= thisEnd) { + return -1 + } + if (start >= end) { + return 1 + } + + start >>>= 0 + end >>>= 0 + thisStart >>>= 0 + thisEnd >>>= 0 + + if (this === target) return 0 + + var x = thisEnd - thisStart + var y = end - start + var len = Math.min(x, y) + + var thisCopy = this.slice(thisStart, thisEnd) + var targetCopy = target.slice(start, end) + + for (var i = 0; i < len; ++i) { + if (thisCopy[i] !== targetCopy[i]) { + x = thisCopy[i] + y = targetCopy[i] + break + } + } + + if (x < y) return -1 + if (y < x) return 1 + return 0 +} + +// Finds either the first index of `val` in `buffer` at offset >= `byteOffset`, +// OR the last index of `val` in `buffer` at offset <= `byteOffset`. 
+// +// Arguments: +// - buffer - a Buffer to search +// - val - a string, Buffer, or number +// - byteOffset - an index into `buffer`; will be clamped to an int32 +// - encoding - an optional encoding, relevant is val is a string +// - dir - true for indexOf, false for lastIndexOf +function bidirectionalIndexOf (buffer, val, byteOffset, encoding, dir) { + // Empty buffer means no match + if (buffer.length === 0) return -1 + + // Normalize byteOffset + if (typeof byteOffset === 'string') { + encoding = byteOffset + byteOffset = 0 + } else if (byteOffset > 0x7fffffff) { + byteOffset = 0x7fffffff + } else if (byteOffset < -0x80000000) { + byteOffset = -0x80000000 + } + byteOffset = +byteOffset // Coerce to Number. + if (numberIsNaN(byteOffset)) { + // byteOffset: it it's undefined, null, NaN, "foo", etc, search whole buffer + byteOffset = dir ? 0 : (buffer.length - 1) + } + + // Normalize byteOffset: negative offsets start from the end of the buffer + if (byteOffset < 0) byteOffset = buffer.length + byteOffset + if (byteOffset >= buffer.length) { + if (dir) return -1 + else byteOffset = buffer.length - 1 + } else if (byteOffset < 0) { + if (dir) byteOffset = 0 + else return -1 + } + + // Normalize val + if (typeof val === 'string') { + val = Buffer.from(val, encoding) + } + + // Finally, search either indexOf (if dir is true) or lastIndexOf + if (Buffer.isBuffer(val)) { + // Special case: looking for empty string/buffer always fails + if (val.length === 0) { + return -1 + } + return arrayIndexOf(buffer, val, byteOffset, encoding, dir) + } else if (typeof val === 'number') { + val = val & 0xFF // Search for a byte value [0-255] + if (typeof Uint8Array.prototype.indexOf === 'function') { + if (dir) { + return Uint8Array.prototype.indexOf.call(buffer, val, byteOffset) + } else { + return Uint8Array.prototype.lastIndexOf.call(buffer, val, byteOffset) + } + } + return arrayIndexOf(buffer, [ val ], byteOffset, encoding, dir) + } + + throw new TypeError('val must be string, number or Buffer') +} + +function arrayIndexOf (arr, val, byteOffset, encoding, dir) { + var indexSize = 1 + var arrLength = arr.length + var valLength = val.length + + if (encoding !== undefined) { + encoding = String(encoding).toLowerCase() + if (encoding === 'ucs2' || encoding === 'ucs-2' || + encoding === 'utf16le' || encoding === 'utf-16le') { + if (arr.length < 2 || val.length < 2) { + return -1 + } + indexSize = 2 + arrLength /= 2 + valLength /= 2 + byteOffset /= 2 + } + } + + function read (buf, i) { + if (indexSize === 1) { + return buf[i] + } else { + return buf.readUInt16BE(i * indexSize) + } + } + + var i + if (dir) { + var foundIndex = -1 + for (i = byteOffset; i < arrLength; i++) { + if (read(arr, i) === read(val, foundIndex === -1 ? 
0 : i - foundIndex)) { + if (foundIndex === -1) foundIndex = i + if (i - foundIndex + 1 === valLength) return foundIndex * indexSize + } else { + if (foundIndex !== -1) i -= i - foundIndex + foundIndex = -1 + } + } + } else { + if (byteOffset + valLength > arrLength) byteOffset = arrLength - valLength + for (i = byteOffset; i >= 0; i--) { + var found = true + for (var j = 0; j < valLength; j++) { + if (read(arr, i + j) !== read(val, j)) { + found = false + break + } + } + if (found) return i + } + } + + return -1 +} + +Buffer.prototype.includes = function includes (val, byteOffset, encoding) { + return this.indexOf(val, byteOffset, encoding) !== -1 +} + +Buffer.prototype.indexOf = function indexOf (val, byteOffset, encoding) { + return bidirectionalIndexOf(this, val, byteOffset, encoding, true) +} + +Buffer.prototype.lastIndexOf = function lastIndexOf (val, byteOffset, encoding) { + return bidirectionalIndexOf(this, val, byteOffset, encoding, false) +} + +function hexWrite (buf, string, offset, length) { + offset = Number(offset) || 0 + var remaining = buf.length - offset + if (!length) { + length = remaining + } else { + length = Number(length) + if (length > remaining) { + length = remaining + } + } + + var strLen = string.length + + if (length > strLen / 2) { + length = strLen / 2 + } + for (var i = 0; i < length; ++i) { + var parsed = parseInt(string.substr(i * 2, 2), 16) + if (numberIsNaN(parsed)) return i + buf[offset + i] = parsed + } + return i +} + +function utf8Write (buf, string, offset, length) { + return blitBuffer(utf8ToBytes(string, buf.length - offset), buf, offset, length) +} + +function asciiWrite (buf, string, offset, length) { + return blitBuffer(asciiToBytes(string), buf, offset, length) +} + +function latin1Write (buf, string, offset, length) { + return asciiWrite(buf, string, offset, length) +} + +function base64Write (buf, string, offset, length) { + return blitBuffer(base64ToBytes(string), buf, offset, length) +} + +function ucs2Write (buf, string, offset, length) { + return blitBuffer(utf16leToBytes(string, buf.length - offset), buf, offset, length) +} + +Buffer.prototype.write = function write (string, offset, length, encoding) { + // Buffer#write(string) + if (offset === undefined) { + encoding = 'utf8' + length = this.length + offset = 0 + // Buffer#write(string, encoding) + } else if (length === undefined && typeof offset === 'string') { + encoding = offset + length = this.length + offset = 0 + // Buffer#write(string, offset[, length][, encoding]) + } else if (isFinite(offset)) { + offset = offset >>> 0 + if (isFinite(length)) { + length = length >>> 0 + if (encoding === undefined) encoding = 'utf8' + } else { + encoding = length + length = undefined + } + } else { + throw new Error( + 'Buffer.write(string, encoding, offset[, length]) is no longer supported' + ) + } + + var remaining = this.length - offset + if (length === undefined || length > remaining) length = remaining + + if ((string.length > 0 && (length < 0 || offset < 0)) || offset > this.length) { + throw new RangeError('Attempt to write outside buffer bounds') + } + + if (!encoding) encoding = 'utf8' + + var loweredCase = false + for (;;) { + switch (encoding) { + case 'hex': + return hexWrite(this, string, offset, length) + + case 'utf8': + case 'utf-8': + return utf8Write(this, string, offset, length) + + case 'ascii': + return asciiWrite(this, string, offset, length) + + case 'latin1': + case 'binary': + return latin1Write(this, string, offset, length) + + case 'base64': + // Warning: maxLength 
not taken into account in base64Write + return base64Write(this, string, offset, length) + + case 'ucs2': + case 'ucs-2': + case 'utf16le': + case 'utf-16le': + return ucs2Write(this, string, offset, length) + + default: + if (loweredCase) throw new TypeError('Unknown encoding: ' + encoding) + encoding = ('' + encoding).toLowerCase() + loweredCase = true + } + } +} + +Buffer.prototype.toJSON = function toJSON () { + return { + type: 'Buffer', + data: Array.prototype.slice.call(this._arr || this, 0) + } +} + +function base64Slice (buf, start, end) { + if (start === 0 && end === buf.length) { + return base64.fromByteArray(buf) + } else { + return base64.fromByteArray(buf.slice(start, end)) + } +} + +function utf8Slice (buf, start, end) { + end = Math.min(buf.length, end) + var res = [] + + var i = start + while (i < end) { + var firstByte = buf[i] + var codePoint = null + var bytesPerSequence = (firstByte > 0xEF) ? 4 + : (firstByte > 0xDF) ? 3 + : (firstByte > 0xBF) ? 2 + : 1 + + if (i + bytesPerSequence <= end) { + var secondByte, thirdByte, fourthByte, tempCodePoint + + switch (bytesPerSequence) { + case 1: + if (firstByte < 0x80) { + codePoint = firstByte + } + break + case 2: + secondByte = buf[i + 1] + if ((secondByte & 0xC0) === 0x80) { + tempCodePoint = (firstByte & 0x1F) << 0x6 | (secondByte & 0x3F) + if (tempCodePoint > 0x7F) { + codePoint = tempCodePoint + } + } + break + case 3: + secondByte = buf[i + 1] + thirdByte = buf[i + 2] + if ((secondByte & 0xC0) === 0x80 && (thirdByte & 0xC0) === 0x80) { + tempCodePoint = (firstByte & 0xF) << 0xC | (secondByte & 0x3F) << 0x6 | (thirdByte & 0x3F) + if (tempCodePoint > 0x7FF && (tempCodePoint < 0xD800 || tempCodePoint > 0xDFFF)) { + codePoint = tempCodePoint + } + } + break + case 4: + secondByte = buf[i + 1] + thirdByte = buf[i + 2] + fourthByte = buf[i + 3] + if ((secondByte & 0xC0) === 0x80 && (thirdByte & 0xC0) === 0x80 && (fourthByte & 0xC0) === 0x80) { + tempCodePoint = (firstByte & 0xF) << 0x12 | (secondByte & 0x3F) << 0xC | (thirdByte & 0x3F) << 0x6 | (fourthByte & 0x3F) + if (tempCodePoint > 0xFFFF && tempCodePoint < 0x110000) { + codePoint = tempCodePoint + } + } + } + } + + if (codePoint === null) { + // we did not generate a valid codePoint so insert a + // replacement char (U+FFFD) and advance only 1 byte + codePoint = 0xFFFD + bytesPerSequence = 1 + } else if (codePoint > 0xFFFF) { + // encode to utf16 (surrogate pair dance) + codePoint -= 0x10000 + res.push(codePoint >>> 10 & 0x3FF | 0xD800) + codePoint = 0xDC00 | codePoint & 0x3FF + } + + res.push(codePoint) + i += bytesPerSequence + } + + return decodeCodePointsArray(res) +} + +// Based on http://stackoverflow.com/a/22747272/680742, the browser with +// the lowest limit is Chrome, with 0x10000 args. +// We go 1 magnitude less, for safety +var MAX_ARGUMENTS_LENGTH = 0x1000 + +function decodeCodePointsArray (codePoints) { + var len = codePoints.length + if (len <= MAX_ARGUMENTS_LENGTH) { + return String.fromCharCode.apply(String, codePoints) // avoid extra slice() + } + + // Decode in chunks to avoid "call stack size exceeded". 
+ var res = '' + var i = 0 + while (i < len) { + res += String.fromCharCode.apply( + String, + codePoints.slice(i, i += MAX_ARGUMENTS_LENGTH) + ) + } + return res +} + +function asciiSlice (buf, start, end) { + var ret = '' + end = Math.min(buf.length, end) + + for (var i = start; i < end; ++i) { + ret += String.fromCharCode(buf[i] & 0x7F) + } + return ret +} + +function latin1Slice (buf, start, end) { + var ret = '' + end = Math.min(buf.length, end) + + for (var i = start; i < end; ++i) { + ret += String.fromCharCode(buf[i]) + } + return ret +} + +function hexSlice (buf, start, end) { + var len = buf.length + + if (!start || start < 0) start = 0 + if (!end || end < 0 || end > len) end = len + + var out = '' + for (var i = start; i < end; ++i) { + out += toHex(buf[i]) + } + return out +} + +function utf16leSlice (buf, start, end) { + var bytes = buf.slice(start, end) + var res = '' + for (var i = 0; i < bytes.length; i += 2) { + res += String.fromCharCode(bytes[i] + (bytes[i + 1] * 256)) + } + return res +} + +Buffer.prototype.slice = function slice (start, end) { + var len = this.length + start = ~~start + end = end === undefined ? len : ~~end + + if (start < 0) { + start += len + if (start < 0) start = 0 + } else if (start > len) { + start = len + } + + if (end < 0) { + end += len + if (end < 0) end = 0 + } else if (end > len) { + end = len + } + + if (end < start) end = start + + var newBuf = this.subarray(start, end) + // Return an augmented `Uint8Array` instance + newBuf.__proto__ = Buffer.prototype + return newBuf +} + +/* + * Need to make sure that buffer isn't trying to write out of bounds. + */ +function checkOffset (offset, ext, length) { + if ((offset % 1) !== 0 || offset < 0) throw new RangeError('offset is not uint') + if (offset + ext > length) throw new RangeError('Trying to access beyond buffer length') +} + +Buffer.prototype.readUIntLE = function readUIntLE (offset, byteLength, noAssert) { + offset = offset >>> 0 + byteLength = byteLength >>> 0 + if (!noAssert) checkOffset(offset, byteLength, this.length) + + var val = this[offset] + var mul = 1 + var i = 0 + while (++i < byteLength && (mul *= 0x100)) { + val += this[offset + i] * mul + } + + return val +} + +Buffer.prototype.readUIntBE = function readUIntBE (offset, byteLength, noAssert) { + offset = offset >>> 0 + byteLength = byteLength >>> 0 + if (!noAssert) { + checkOffset(offset, byteLength, this.length) + } + + var val = this[offset + --byteLength] + var mul = 1 + while (byteLength > 0 && (mul *= 0x100)) { + val += this[offset + --byteLength] * mul + } + + return val +} + +Buffer.prototype.readUInt8 = function readUInt8 (offset, noAssert) { + offset = offset >>> 0 + if (!noAssert) checkOffset(offset, 1, this.length) + return this[offset] +} + +Buffer.prototype.readUInt16LE = function readUInt16LE (offset, noAssert) { + offset = offset >>> 0 + if (!noAssert) checkOffset(offset, 2, this.length) + return this[offset] | (this[offset + 1] << 8) +} + +Buffer.prototype.readUInt16BE = function readUInt16BE (offset, noAssert) { + offset = offset >>> 0 + if (!noAssert) checkOffset(offset, 2, this.length) + return (this[offset] << 8) | this[offset + 1] +} + +Buffer.prototype.readUInt32LE = function readUInt32LE (offset, noAssert) { + offset = offset >>> 0 + if (!noAssert) checkOffset(offset, 4, this.length) + + return ((this[offset]) | + (this[offset + 1] << 8) | + (this[offset + 2] << 16)) + + (this[offset + 3] * 0x1000000) +} + +Buffer.prototype.readUInt32BE = function readUInt32BE (offset, noAssert) { + offset = offset >>> 
0 + if (!noAssert) checkOffset(offset, 4, this.length) + + return (this[offset] * 0x1000000) + + ((this[offset + 1] << 16) | + (this[offset + 2] << 8) | + this[offset + 3]) +} + +Buffer.prototype.readIntLE = function readIntLE (offset, byteLength, noAssert) { + offset = offset >>> 0 + byteLength = byteLength >>> 0 + if (!noAssert) checkOffset(offset, byteLength, this.length) + + var val = this[offset] + var mul = 1 + var i = 0 + while (++i < byteLength && (mul *= 0x100)) { + val += this[offset + i] * mul + } + mul *= 0x80 + + if (val >= mul) val -= Math.pow(2, 8 * byteLength) + + return val +} + +Buffer.prototype.readIntBE = function readIntBE (offset, byteLength, noAssert) { + offset = offset >>> 0 + byteLength = byteLength >>> 0 + if (!noAssert) checkOffset(offset, byteLength, this.length) + + var i = byteLength + var mul = 1 + var val = this[offset + --i] + while (i > 0 && (mul *= 0x100)) { + val += this[offset + --i] * mul + } + mul *= 0x80 + + if (val >= mul) val -= Math.pow(2, 8 * byteLength) + + return val +} + +Buffer.prototype.readInt8 = function readInt8 (offset, noAssert) { + offset = offset >>> 0 + if (!noAssert) checkOffset(offset, 1, this.length) + if (!(this[offset] & 0x80)) return (this[offset]) + return ((0xff - this[offset] + 1) * -1) +} + +Buffer.prototype.readInt16LE = function readInt16LE (offset, noAssert) { + offset = offset >>> 0 + if (!noAssert) checkOffset(offset, 2, this.length) + var val = this[offset] | (this[offset + 1] << 8) + return (val & 0x8000) ? val | 0xFFFF0000 : val +} + +Buffer.prototype.readInt16BE = function readInt16BE (offset, noAssert) { + offset = offset >>> 0 + if (!noAssert) checkOffset(offset, 2, this.length) + var val = this[offset + 1] | (this[offset] << 8) + return (val & 0x8000) ? val | 0xFFFF0000 : val +} + +Buffer.prototype.readInt32LE = function readInt32LE (offset, noAssert) { + offset = offset >>> 0 + if (!noAssert) checkOffset(offset, 4, this.length) + + return (this[offset]) | + (this[offset + 1] << 8) | + (this[offset + 2] << 16) | + (this[offset + 3] << 24) +} + +Buffer.prototype.readInt32BE = function readInt32BE (offset, noAssert) { + offset = offset >>> 0 + if (!noAssert) checkOffset(offset, 4, this.length) + + return (this[offset] << 24) | + (this[offset + 1] << 16) | + (this[offset + 2] << 8) | + (this[offset + 3]) +} + +Buffer.prototype.readFloatLE = function readFloatLE (offset, noAssert) { + offset = offset >>> 0 + if (!noAssert) checkOffset(offset, 4, this.length) + return ieee754.read(this, offset, true, 23, 4) +} + +Buffer.prototype.readFloatBE = function readFloatBE (offset, noAssert) { + offset = offset >>> 0 + if (!noAssert) checkOffset(offset, 4, this.length) + return ieee754.read(this, offset, false, 23, 4) +} + +Buffer.prototype.readDoubleLE = function readDoubleLE (offset, noAssert) { + offset = offset >>> 0 + if (!noAssert) checkOffset(offset, 8, this.length) + return ieee754.read(this, offset, true, 52, 8) +} + +Buffer.prototype.readDoubleBE = function readDoubleBE (offset, noAssert) { + offset = offset >>> 0 + if (!noAssert) checkOffset(offset, 8, this.length) + return ieee754.read(this, offset, false, 52, 8) +} + +function checkInt (buf, value, offset, ext, max, min) { + if (!Buffer.isBuffer(buf)) throw new TypeError('"buffer" argument must be a Buffer instance') + if (value > max || value < min) throw new RangeError('"value" argument is out of bounds') + if (offset + ext > buf.length) throw new RangeError('Index out of range') +} + +Buffer.prototype.writeUIntLE = function writeUIntLE (value, offset, 
byteLength, noAssert) { + value = +value + offset = offset >>> 0 + byteLength = byteLength >>> 0 + if (!noAssert) { + var maxBytes = Math.pow(2, 8 * byteLength) - 1 + checkInt(this, value, offset, byteLength, maxBytes, 0) + } + + var mul = 1 + var i = 0 + this[offset] = value & 0xFF + while (++i < byteLength && (mul *= 0x100)) { + this[offset + i] = (value / mul) & 0xFF + } + + return offset + byteLength +} + +Buffer.prototype.writeUIntBE = function writeUIntBE (value, offset, byteLength, noAssert) { + value = +value + offset = offset >>> 0 + byteLength = byteLength >>> 0 + if (!noAssert) { + var maxBytes = Math.pow(2, 8 * byteLength) - 1 + checkInt(this, value, offset, byteLength, maxBytes, 0) + } + + var i = byteLength - 1 + var mul = 1 + this[offset + i] = value & 0xFF + while (--i >= 0 && (mul *= 0x100)) { + this[offset + i] = (value / mul) & 0xFF + } + + return offset + byteLength +} + +Buffer.prototype.writeUInt8 = function writeUInt8 (value, offset, noAssert) { + value = +value + offset = offset >>> 0 + if (!noAssert) checkInt(this, value, offset, 1, 0xff, 0) + this[offset] = (value & 0xff) + return offset + 1 +} + +Buffer.prototype.writeUInt16LE = function writeUInt16LE (value, offset, noAssert) { + value = +value + offset = offset >>> 0 + if (!noAssert) checkInt(this, value, offset, 2, 0xffff, 0) + this[offset] = (value & 0xff) + this[offset + 1] = (value >>> 8) + return offset + 2 +} + +Buffer.prototype.writeUInt16BE = function writeUInt16BE (value, offset, noAssert) { + value = +value + offset = offset >>> 0 + if (!noAssert) checkInt(this, value, offset, 2, 0xffff, 0) + this[offset] = (value >>> 8) + this[offset + 1] = (value & 0xff) + return offset + 2 +} + +Buffer.prototype.writeUInt32LE = function writeUInt32LE (value, offset, noAssert) { + value = +value + offset = offset >>> 0 + if (!noAssert) checkInt(this, value, offset, 4, 0xffffffff, 0) + this[offset + 3] = (value >>> 24) + this[offset + 2] = (value >>> 16) + this[offset + 1] = (value >>> 8) + this[offset] = (value & 0xff) + return offset + 4 +} + +Buffer.prototype.writeUInt32BE = function writeUInt32BE (value, offset, noAssert) { + value = +value + offset = offset >>> 0 + if (!noAssert) checkInt(this, value, offset, 4, 0xffffffff, 0) + this[offset] = (value >>> 24) + this[offset + 1] = (value >>> 16) + this[offset + 2] = (value >>> 8) + this[offset + 3] = (value & 0xff) + return offset + 4 +} + +Buffer.prototype.writeIntLE = function writeIntLE (value, offset, byteLength, noAssert) { + value = +value + offset = offset >>> 0 + if (!noAssert) { + var limit = Math.pow(2, (8 * byteLength) - 1) + + checkInt(this, value, offset, byteLength, limit - 1, -limit) + } + + var i = 0 + var mul = 1 + var sub = 0 + this[offset] = value & 0xFF + while (++i < byteLength && (mul *= 0x100)) { + if (value < 0 && sub === 0 && this[offset + i - 1] !== 0) { + sub = 1 + } + this[offset + i] = ((value / mul) >> 0) - sub & 0xFF + } + + return offset + byteLength +} + +Buffer.prototype.writeIntBE = function writeIntBE (value, offset, byteLength, noAssert) { + value = +value + offset = offset >>> 0 + if (!noAssert) { + var limit = Math.pow(2, (8 * byteLength) - 1) + + checkInt(this, value, offset, byteLength, limit - 1, -limit) + } + + var i = byteLength - 1 + var mul = 1 + var sub = 0 + this[offset + i] = value & 0xFF + while (--i >= 0 && (mul *= 0x100)) { + if (value < 0 && sub === 0 && this[offset + i + 1] !== 0) { + sub = 1 + } + this[offset + i] = ((value / mul) >> 0) - sub & 0xFF + } + + return offset + byteLength +} + 
+Buffer.prototype.writeInt8 = function writeInt8 (value, offset, noAssert) { + value = +value + offset = offset >>> 0 + if (!noAssert) checkInt(this, value, offset, 1, 0x7f, -0x80) + if (value < 0) value = 0xff + value + 1 + this[offset] = (value & 0xff) + return offset + 1 +} + +Buffer.prototype.writeInt16LE = function writeInt16LE (value, offset, noAssert) { + value = +value + offset = offset >>> 0 + if (!noAssert) checkInt(this, value, offset, 2, 0x7fff, -0x8000) + this[offset] = (value & 0xff) + this[offset + 1] = (value >>> 8) + return offset + 2 +} + +Buffer.prototype.writeInt16BE = function writeInt16BE (value, offset, noAssert) { + value = +value + offset = offset >>> 0 + if (!noAssert) checkInt(this, value, offset, 2, 0x7fff, -0x8000) + this[offset] = (value >>> 8) + this[offset + 1] = (value & 0xff) + return offset + 2 +} + +Buffer.prototype.writeInt32LE = function writeInt32LE (value, offset, noAssert) { + value = +value + offset = offset >>> 0 + if (!noAssert) checkInt(this, value, offset, 4, 0x7fffffff, -0x80000000) + this[offset] = (value & 0xff) + this[offset + 1] = (value >>> 8) + this[offset + 2] = (value >>> 16) + this[offset + 3] = (value >>> 24) + return offset + 4 +} + +Buffer.prototype.writeInt32BE = function writeInt32BE (value, offset, noAssert) { + value = +value + offset = offset >>> 0 + if (!noAssert) checkInt(this, value, offset, 4, 0x7fffffff, -0x80000000) + if (value < 0) value = 0xffffffff + value + 1 + this[offset] = (value >>> 24) + this[offset + 1] = (value >>> 16) + this[offset + 2] = (value >>> 8) + this[offset + 3] = (value & 0xff) + return offset + 4 +} + +function checkIEEE754 (buf, value, offset, ext, max, min) { + if (offset + ext > buf.length) throw new RangeError('Index out of range') + if (offset < 0) throw new RangeError('Index out of range') +} + +function writeFloat (buf, value, offset, littleEndian, noAssert) { + value = +value + offset = offset >>> 0 + if (!noAssert) { + checkIEEE754(buf, value, offset, 4, 3.4028234663852886e+38, -3.4028234663852886e+38) + } + ieee754.write(buf, value, offset, littleEndian, 23, 4) + return offset + 4 +} + +Buffer.prototype.writeFloatLE = function writeFloatLE (value, offset, noAssert) { + return writeFloat(this, value, offset, true, noAssert) +} + +Buffer.prototype.writeFloatBE = function writeFloatBE (value, offset, noAssert) { + return writeFloat(this, value, offset, false, noAssert) +} + +function writeDouble (buf, value, offset, littleEndian, noAssert) { + value = +value + offset = offset >>> 0 + if (!noAssert) { + checkIEEE754(buf, value, offset, 8, 1.7976931348623157E+308, -1.7976931348623157E+308) + } + ieee754.write(buf, value, offset, littleEndian, 52, 8) + return offset + 8 +} + +Buffer.prototype.writeDoubleLE = function writeDoubleLE (value, offset, noAssert) { + return writeDouble(this, value, offset, true, noAssert) +} + +Buffer.prototype.writeDoubleBE = function writeDoubleBE (value, offset, noAssert) { + return writeDouble(this, value, offset, false, noAssert) +} + +// copy(targetBuffer, targetStart=0, sourceStart=0, sourceEnd=buffer.length) +Buffer.prototype.copy = function copy (target, targetStart, start, end) { + if (!Buffer.isBuffer(target)) throw new TypeError('argument should be a Buffer') + if (!start) start = 0 + if (!end && end !== 0) end = this.length + if (targetStart >= target.length) targetStart = target.length + if (!targetStart) targetStart = 0 + if (end > 0 && end < start) end = start + + // Copy 0 bytes; we're done + if (end === start) return 0 + if (target.length === 0 || 
this.length === 0) return 0 + + // Fatal error conditions + if (targetStart < 0) { + throw new RangeError('targetStart out of bounds') + } + if (start < 0 || start >= this.length) throw new RangeError('Index out of range') + if (end < 0) throw new RangeError('sourceEnd out of bounds') + + // Are we oob? + if (end > this.length) end = this.length + if (target.length - targetStart < end - start) { + end = target.length - targetStart + start + } + + var len = end - start + + if (this === target && typeof Uint8Array.prototype.copyWithin === 'function') { + // Use built-in when available, missing from IE11 + this.copyWithin(targetStart, start, end) + } else if (this === target && start < targetStart && targetStart < end) { + // descending copy from end + for (var i = len - 1; i >= 0; --i) { + target[i + targetStart] = this[i + start] + } + } else { + Uint8Array.prototype.set.call( + target, + this.subarray(start, end), + targetStart + ) + } + + return len +} + +// Usage: +// buffer.fill(number[, offset[, end]]) +// buffer.fill(buffer[, offset[, end]]) +// buffer.fill(string[, offset[, end]][, encoding]) +Buffer.prototype.fill = function fill (val, start, end, encoding) { + // Handle string cases: + if (typeof val === 'string') { + if (typeof start === 'string') { + encoding = start + start = 0 + end = this.length + } else if (typeof end === 'string') { + encoding = end + end = this.length + } + if (encoding !== undefined && typeof encoding !== 'string') { + throw new TypeError('encoding must be a string') + } + if (typeof encoding === 'string' && !Buffer.isEncoding(encoding)) { + throw new TypeError('Unknown encoding: ' + encoding) + } + if (val.length === 1) { + var code = val.charCodeAt(0) + if ((encoding === 'utf8' && code < 128) || + encoding === 'latin1') { + // Fast path: If `val` fits into a single byte, use that numeric value. + val = code + } + } + } else if (typeof val === 'number') { + val = val & 255 + } + + // Invalid ranges are not set to a default, so can range check early. + if (start < 0 || this.length < start || this.length < end) { + throw new RangeError('Out of range index') + } + + if (end <= start) { + return this + } + + start = start >>> 0 + end = end === undefined ? this.length : end >>> 0 + + if (!val) val = 0 + + var i + if (typeof val === 'number') { + for (i = start; i < end; ++i) { + this[i] = val + } + } else { + var bytes = Buffer.isBuffer(val) + ? 
val + : Buffer.from(val, encoding) + var len = bytes.length + if (len === 0) { + throw new TypeError('The value "' + val + + '" is invalid for argument "value"') + } + for (i = 0; i < end - start; ++i) { + this[i + start] = bytes[i % len] + } + } + + return this +} + +// HELPER FUNCTIONS +// ================ + +var INVALID_BASE64_RE = /[^+/0-9A-Za-z-_]/g + +function base64clean (str) { + // Node takes equal signs as end of the Base64 encoding + str = str.split('=')[0] + // Node strips out invalid characters like \n and \t from the string, base64-js does not + str = str.trim().replace(INVALID_BASE64_RE, '') + // Node converts strings with length < 2 to '' + if (str.length < 2) return '' + // Node allows for non-padded base64 strings (missing trailing ===), base64-js does not + while (str.length % 4 !== 0) { + str = str + '=' + } + return str +} + +function toHex (n) { + if (n < 16) return '0' + n.toString(16) + return n.toString(16) +} + +function utf8ToBytes (string, units) { + units = units || Infinity + var codePoint + var length = string.length + var leadSurrogate = null + var bytes = [] + + for (var i = 0; i < length; ++i) { + codePoint = string.charCodeAt(i) + + // is surrogate component + if (codePoint > 0xD7FF && codePoint < 0xE000) { + // last char was a lead + if (!leadSurrogate) { + // no lead yet + if (codePoint > 0xDBFF) { + // unexpected trail + if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD) + continue + } else if (i + 1 === length) { + // unpaired lead + if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD) + continue + } + + // valid lead + leadSurrogate = codePoint + + continue + } + + // 2 leads in a row + if (codePoint < 0xDC00) { + if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD) + leadSurrogate = codePoint + continue + } + + // valid surrogate pair + codePoint = (leadSurrogate - 0xD800 << 10 | codePoint - 0xDC00) + 0x10000 + } else if (leadSurrogate) { + // valid bmp char, but last char was a lead + if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD) + } + + leadSurrogate = null + + // encode utf8 + if (codePoint < 0x80) { + if ((units -= 1) < 0) break + bytes.push(codePoint) + } else if (codePoint < 0x800) { + if ((units -= 2) < 0) break + bytes.push( + codePoint >> 0x6 | 0xC0, + codePoint & 0x3F | 0x80 + ) + } else if (codePoint < 0x10000) { + if ((units -= 3) < 0) break + bytes.push( + codePoint >> 0xC | 0xE0, + codePoint >> 0x6 & 0x3F | 0x80, + codePoint & 0x3F | 0x80 + ) + } else if (codePoint < 0x110000) { + if ((units -= 4) < 0) break + bytes.push( + codePoint >> 0x12 | 0xF0, + codePoint >> 0xC & 0x3F | 0x80, + codePoint >> 0x6 & 0x3F | 0x80, + codePoint & 0x3F | 0x80 + ) + } else { + throw new Error('Invalid code point') + } + } + + return bytes +} + +function asciiToBytes (str) { + var byteArray = [] + for (var i = 0; i < str.length; ++i) { + // Node's code seems to be doing this and not & 0x7F.. 
+ byteArray.push(str.charCodeAt(i) & 0xFF) + } + return byteArray +} + +function utf16leToBytes (str, units) { + var c, hi, lo + var byteArray = [] + for (var i = 0; i < str.length; ++i) { + if ((units -= 2) < 0) break + + c = str.charCodeAt(i) + hi = c >> 8 + lo = c % 256 + byteArray.push(lo) + byteArray.push(hi) + } + + return byteArray +} + +function base64ToBytes (str) { + return base64.toByteArray(base64clean(str)) +} + +function blitBuffer (src, dst, offset, length) { + for (var i = 0; i < length; ++i) { + if ((i + offset >= dst.length) || (i >= src.length)) break + dst[i + offset] = src[i] + } + return i +} + +// ArrayBuffer or Uint8Array objects from other contexts (i.e. iframes) do not pass +// the `instanceof` check but they should be treated as of that type. +// See: https://github.com/feross/buffer/issues/166 +function isInstance (obj, type) { + return obj instanceof type || + (obj != null && obj.constructor != null && obj.constructor.name != null && + obj.constructor.name === type.name) +} +function numberIsNaN (obj) { + // For IE11 support + return obj !== obj // eslint-disable-line no-self-compare +} + +}).call(this,require("buffer").Buffer) +},{"base64-js":1,"buffer":3,"ieee754":6}],4:[function(require,module,exports){ +(function (Buffer){ +// Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. + +// NOTE: These type checking functions intentionally don't use `instanceof` +// because it is fragile and can be easily faked with `Object.create()`. 
+ +function isArray(arg) { + if (Array.isArray) { + return Array.isArray(arg); + } + return objectToString(arg) === '[object Array]'; +} +exports.isArray = isArray; + +function isBoolean(arg) { + return typeof arg === 'boolean'; +} +exports.isBoolean = isBoolean; + +function isNull(arg) { + return arg === null; +} +exports.isNull = isNull; + +function isNullOrUndefined(arg) { + return arg == null; +} +exports.isNullOrUndefined = isNullOrUndefined; + +function isNumber(arg) { + return typeof arg === 'number'; +} +exports.isNumber = isNumber; + +function isString(arg) { + return typeof arg === 'string'; +} +exports.isString = isString; + +function isSymbol(arg) { + return typeof arg === 'symbol'; +} +exports.isSymbol = isSymbol; + +function isUndefined(arg) { + return arg === void 0; +} +exports.isUndefined = isUndefined; + +function isRegExp(re) { + return objectToString(re) === '[object RegExp]'; +} +exports.isRegExp = isRegExp; + +function isObject(arg) { + return typeof arg === 'object' && arg !== null; +} +exports.isObject = isObject; + +function isDate(d) { + return objectToString(d) === '[object Date]'; +} +exports.isDate = isDate; + +function isError(e) { + return (objectToString(e) === '[object Error]' || e instanceof Error); +} +exports.isError = isError; + +function isFunction(arg) { + return typeof arg === 'function'; +} +exports.isFunction = isFunction; + +function isPrimitive(arg) { + return arg === null || + typeof arg === 'boolean' || + typeof arg === 'number' || + typeof arg === 'string' || + typeof arg === 'symbol' || // ES6 symbol + typeof arg === 'undefined'; +} +exports.isPrimitive = isPrimitive; + +exports.isBuffer = Buffer.isBuffer; + +function objectToString(o) { + return Object.prototype.toString.call(o); +} + +}).call(this,{"isBuffer":require("../../is-buffer/index.js")}) +},{"../../is-buffer/index.js":9}],5:[function(require,module,exports){ +// Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +var objectCreate = Object.create || objectCreatePolyfill +var objectKeys = Object.keys || objectKeysPolyfill +var bind = Function.prototype.bind || functionBindPolyfill + +function EventEmitter() { + if (!this._events || !Object.prototype.hasOwnProperty.call(this, '_events')) { + this._events = objectCreate(null); + this._eventsCount = 0; + } + + this._maxListeners = this._maxListeners || undefined; +} +module.exports = EventEmitter; + +// Backwards-compat with node 0.10.x +EventEmitter.EventEmitter = EventEmitter; + +EventEmitter.prototype._events = undefined; +EventEmitter.prototype._maxListeners = undefined; + +// By default EventEmitters will print a warning if more than 10 listeners are +// added to it. This is a useful default which helps finding memory leaks. +var defaultMaxListeners = 10; + +var hasDefineProperty; +try { + var o = {}; + if (Object.defineProperty) Object.defineProperty(o, 'x', { value: 0 }); + hasDefineProperty = o.x === 0; +} catch (err) { hasDefineProperty = false } +if (hasDefineProperty) { + Object.defineProperty(EventEmitter, 'defaultMaxListeners', { + enumerable: true, + get: function() { + return defaultMaxListeners; + }, + set: function(arg) { + // check whether the input is a positive number (whose value is zero or + // greater and not a NaN). + if (typeof arg !== 'number' || arg < 0 || arg !== arg) + throw new TypeError('"defaultMaxListeners" must be a positive number'); + defaultMaxListeners = arg; + } + }); +} else { + EventEmitter.defaultMaxListeners = defaultMaxListeners; +} + +// Obviously not all Emitters should be limited to 10. This function allows +// that to be increased. Set to zero for unlimited. +EventEmitter.prototype.setMaxListeners = function setMaxListeners(n) { + if (typeof n !== 'number' || n < 0 || isNaN(n)) + throw new TypeError('"n" argument must be a positive number'); + this._maxListeners = n; + return this; +}; + +function $getMaxListeners(that) { + if (that._maxListeners === undefined) + return EventEmitter.defaultMaxListeners; + return that._maxListeners; +} + +EventEmitter.prototype.getMaxListeners = function getMaxListeners() { + return $getMaxListeners(this); +}; + +// These standalone emit* functions are used to optimize calling of event +// handlers for fast cases because emit() itself often has a variable number of +// arguments and can be deoptimized because of that. These functions always have +// the same number of arguments and thus do not get deoptimized, so the code +// inside them can execute faster. 
+function emitNone(handler, isFn, self) { + if (isFn) + handler.call(self); + else { + var len = handler.length; + var listeners = arrayClone(handler, len); + for (var i = 0; i < len; ++i) + listeners[i].call(self); + } +} +function emitOne(handler, isFn, self, arg1) { + if (isFn) + handler.call(self, arg1); + else { + var len = handler.length; + var listeners = arrayClone(handler, len); + for (var i = 0; i < len; ++i) + listeners[i].call(self, arg1); + } +} +function emitTwo(handler, isFn, self, arg1, arg2) { + if (isFn) + handler.call(self, arg1, arg2); + else { + var len = handler.length; + var listeners = arrayClone(handler, len); + for (var i = 0; i < len; ++i) + listeners[i].call(self, arg1, arg2); + } +} +function emitThree(handler, isFn, self, arg1, arg2, arg3) { + if (isFn) + handler.call(self, arg1, arg2, arg3); + else { + var len = handler.length; + var listeners = arrayClone(handler, len); + for (var i = 0; i < len; ++i) + listeners[i].call(self, arg1, arg2, arg3); + } +} + +function emitMany(handler, isFn, self, args) { + if (isFn) + handler.apply(self, args); + else { + var len = handler.length; + var listeners = arrayClone(handler, len); + for (var i = 0; i < len; ++i) + listeners[i].apply(self, args); + } +} + +EventEmitter.prototype.emit = function emit(type) { + var er, handler, len, args, i, events; + var doError = (type === 'error'); + + events = this._events; + if (events) + doError = (doError && events.error == null); + else if (!doError) + return false; + + // If there is no 'error' event listener then throw. + if (doError) { + if (arguments.length > 1) + er = arguments[1]; + if (er instanceof Error) { + throw er; // Unhandled 'error' event + } else { + // At least give some kind of context to the user + var err = new Error('Unhandled "error" event. (' + er + ')'); + err.context = er; + throw err; + } + return false; + } + + handler = events[type]; + + if (!handler) + return false; + + var isFn = typeof handler === 'function'; + len = arguments.length; + switch (len) { + // fast cases + case 1: + emitNone(handler, isFn, this); + break; + case 2: + emitOne(handler, isFn, this, arguments[1]); + break; + case 3: + emitTwo(handler, isFn, this, arguments[1], arguments[2]); + break; + case 4: + emitThree(handler, isFn, this, arguments[1], arguments[2], arguments[3]); + break; + // slower + default: + args = new Array(len - 1); + for (i = 1; i < len; i++) + args[i - 1] = arguments[i]; + emitMany(handler, isFn, this, args); + } + + return true; +}; + +function _addListener(target, type, listener, prepend) { + var m; + var events; + var existing; + + if (typeof listener !== 'function') + throw new TypeError('"listener" argument must be a function'); + + events = target._events; + if (!events) { + events = target._events = objectCreate(null); + target._eventsCount = 0; + } else { + // To avoid recursion in the case that type === "newListener"! Before + // adding it to the listeners, first emit "newListener". + if (events.newListener) { + target.emit('newListener', type, + listener.listener ? listener.listener : listener); + + // Re-assign `events` because a newListener handler could have caused the + // this._events to be assigned to a new object + events = target._events; + } + existing = events[type]; + } + + if (!existing) { + // Optimize the case of one listener. Don't need the extra array object. + existing = events[type] = listener; + ++target._eventsCount; + } else { + if (typeof existing === 'function') { + // Adding the second element, need to change to array. 
+ existing = events[type] = + prepend ? [listener, existing] : [existing, listener]; + } else { + // If we've already got an array, just append. + if (prepend) { + existing.unshift(listener); + } else { + existing.push(listener); + } + } + + // Check for listener leak + if (!existing.warned) { + m = $getMaxListeners(target); + if (m && m > 0 && existing.length > m) { + existing.warned = true; + var w = new Error('Possible EventEmitter memory leak detected. ' + + existing.length + ' "' + String(type) + '" listeners ' + + 'added. Use emitter.setMaxListeners() to ' + + 'increase limit.'); + w.name = 'MaxListenersExceededWarning'; + w.emitter = target; + w.type = type; + w.count = existing.length; + if (typeof console === 'object' && console.warn) { + console.warn('%s: %s', w.name, w.message); + } + } + } + } + + return target; +} + +EventEmitter.prototype.addListener = function addListener(type, listener) { + return _addListener(this, type, listener, false); +}; + +EventEmitter.prototype.on = EventEmitter.prototype.addListener; + +EventEmitter.prototype.prependListener = + function prependListener(type, listener) { + return _addListener(this, type, listener, true); + }; + +function onceWrapper() { + if (!this.fired) { + this.target.removeListener(this.type, this.wrapFn); + this.fired = true; + switch (arguments.length) { + case 0: + return this.listener.call(this.target); + case 1: + return this.listener.call(this.target, arguments[0]); + case 2: + return this.listener.call(this.target, arguments[0], arguments[1]); + case 3: + return this.listener.call(this.target, arguments[0], arguments[1], + arguments[2]); + default: + var args = new Array(arguments.length); + for (var i = 0; i < args.length; ++i) + args[i] = arguments[i]; + this.listener.apply(this.target, args); + } + } +} + +function _onceWrap(target, type, listener) { + var state = { fired: false, wrapFn: undefined, target: target, type: type, listener: listener }; + var wrapped = bind.call(onceWrapper, state); + wrapped.listener = listener; + state.wrapFn = wrapped; + return wrapped; +} + +EventEmitter.prototype.once = function once(type, listener) { + if (typeof listener !== 'function') + throw new TypeError('"listener" argument must be a function'); + this.on(type, _onceWrap(this, type, listener)); + return this; +}; + +EventEmitter.prototype.prependOnceListener = + function prependOnceListener(type, listener) { + if (typeof listener !== 'function') + throw new TypeError('"listener" argument must be a function'); + this.prependListener(type, _onceWrap(this, type, listener)); + return this; + }; + +// Emits a 'removeListener' event if and only if the listener was removed. 
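+// Illustrative note (comment only, not in the original module): _onceWrap()
+// above keeps the original callback on wrapped.listener, so a listener added
+// with once() can still be removed through its original reference, e.g.
+//
+//   emitter.once('end', onEnd);                // hypothetical emitter/callback
+//   emitter.removeListener('end', onEnd);      // matched via the .listener check below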
+EventEmitter.prototype.removeListener = + function removeListener(type, listener) { + var list, events, position, i, originalListener; + + if (typeof listener !== 'function') + throw new TypeError('"listener" argument must be a function'); + + events = this._events; + if (!events) + return this; + + list = events[type]; + if (!list) + return this; + + if (list === listener || list.listener === listener) { + if (--this._eventsCount === 0) + this._events = objectCreate(null); + else { + delete events[type]; + if (events.removeListener) + this.emit('removeListener', type, list.listener || listener); + } + } else if (typeof list !== 'function') { + position = -1; + + for (i = list.length - 1; i >= 0; i--) { + if (list[i] === listener || list[i].listener === listener) { + originalListener = list[i].listener; + position = i; + break; + } + } + + if (position < 0) + return this; + + if (position === 0) + list.shift(); + else + spliceOne(list, position); + + if (list.length === 1) + events[type] = list[0]; + + if (events.removeListener) + this.emit('removeListener', type, originalListener || listener); + } + + return this; + }; + +EventEmitter.prototype.removeAllListeners = + function removeAllListeners(type) { + var listeners, events, i; + + events = this._events; + if (!events) + return this; + + // not listening for removeListener, no need to emit + if (!events.removeListener) { + if (arguments.length === 0) { + this._events = objectCreate(null); + this._eventsCount = 0; + } else if (events[type]) { + if (--this._eventsCount === 0) + this._events = objectCreate(null); + else + delete events[type]; + } + return this; + } + + // emit removeListener for all listeners on all events + if (arguments.length === 0) { + var keys = objectKeys(events); + var key; + for (i = 0; i < keys.length; ++i) { + key = keys[i]; + if (key === 'removeListener') continue; + this.removeAllListeners(key); + } + this.removeAllListeners('removeListener'); + this._events = objectCreate(null); + this._eventsCount = 0; + return this; + } + + listeners = events[type]; + + if (typeof listeners === 'function') { + this.removeListener(type, listeners); + } else if (listeners) { + // LIFO order + for (i = listeners.length - 1; i >= 0; i--) { + this.removeListener(type, listeners[i]); + } + } + + return this; + }; + +function _listeners(target, type, unwrap) { + var events = target._events; + + if (!events) + return []; + + var evlistener = events[type]; + if (!evlistener) + return []; + + if (typeof evlistener === 'function') + return unwrap ? [evlistener.listener || evlistener] : [evlistener]; + + return unwrap ? unwrapListeners(evlistener) : arrayClone(evlistener, evlistener.length); +} + +EventEmitter.prototype.listeners = function listeners(type) { + return _listeners(this, type, true); +}; + +EventEmitter.prototype.rawListeners = function rawListeners(type) { + return _listeners(this, type, false); +}; + +EventEmitter.listenerCount = function(emitter, type) { + if (typeof emitter.listenerCount === 'function') { + return emitter.listenerCount(type); + } else { + return listenerCount.call(emitter, type); + } +}; + +EventEmitter.prototype.listenerCount = listenerCount; +function listenerCount(type) { + var events = this._events; + + if (events) { + var evlistener = events[type]; + + if (typeof evlistener === 'function') { + return 1; + } else if (evlistener) { + return evlistener.length; + } + } + + return 0; +} + +EventEmitter.prototype.eventNames = function eventNames() { + return this._eventsCount > 0 ? 
Reflect.ownKeys(this._events) : [];
+};
+
+// About 1.5x faster than the two-arg version of Array#splice().
+function spliceOne(list, index) {
+  for (var i = index, k = i + 1, n = list.length; k < n; i += 1, k += 1)
+    list[i] = list[k];
+  list.pop();
+}
+
+function arrayClone(arr, n) {
+  var copy = new Array(n);
+  for (var i = 0; i < n; ++i)
+    copy[i] = arr[i];
+  return copy;
+}
+
+function unwrapListeners(arr) {
+  var ret = new Array(arr.length);
+  for (var i = 0; i < ret.length; ++i) {
+    ret[i] = arr[i].listener || arr[i];
+  }
+  return ret;
+}
+
+function objectCreatePolyfill(proto) {
+  var F = function() {};
+  F.prototype = proto;
+  return new F;
+}
+function objectKeysPolyfill(obj) {
+  var keys = [];
+  for (var k in obj) if (Object.prototype.hasOwnProperty.call(obj, k)) {
+    keys.push(k);
+  }
+  return keys;
+}
+function functionBindPolyfill(context) {
+  var fn = this;
+  return function () {
+    return fn.apply(context, arguments);
+  };
+}
+
+},{}],6:[function(require,module,exports){
+exports.read = function (buffer, offset, isLE, mLen, nBytes) {
+  var e, m
+  var eLen = (nBytes * 8) - mLen - 1
+  var eMax = (1 << eLen) - 1
+  var eBias = eMax >> 1
+  var nBits = -7
+  var i = isLE ? (nBytes - 1) : 0
+  var d = isLE ? -1 : 1
+  var s = buffer[offset + i]
+
+  i += d
+
+  e = s & ((1 << (-nBits)) - 1)
+  s >>= (-nBits)
+  nBits += eLen
+  for (; nBits > 0; e = (e * 256) + buffer[offset + i], i += d, nBits -= 8) {}
+
+  m = e & ((1 << (-nBits)) - 1)
+  e >>= (-nBits)
+  nBits += mLen
+  for (; nBits > 0; m = (m * 256) + buffer[offset + i], i += d, nBits -= 8) {}
+
+  if (e === 0) {
+    e = 1 - eBias
+  } else if (e === eMax) {
+    return m ? NaN : ((s ? -1 : 1) * Infinity)
+  } else {
+    m = m + Math.pow(2, mLen)
+    e = e - eBias
+  }
+  return (s ? -1 : 1) * m * Math.pow(2, e - mLen)
+}
+
+exports.write = function (buffer, value, offset, isLE, mLen, nBytes) {
+  var e, m, c
+  var eLen = (nBytes * 8) - mLen - 1
+  var eMax = (1 << eLen) - 1
+  var eBias = eMax >> 1
+  var rt = (mLen === 23 ? Math.pow(2, -24) - Math.pow(2, -77) : 0)
+  var i = isLE ? 0 : (nBytes - 1)
+  var d = isLE ? 1 : -1
+  var s = value < 0 || (value === 0 && 1 / value < 0) ? 1 : 0
+
+  value = Math.abs(value)
+
+  if (isNaN(value) || value === Infinity) {
+    m = isNaN(value) ?
1 : 0 + e = eMax + } else { + e = Math.floor(Math.log(value) / Math.LN2) + if (value * (c = Math.pow(2, -e)) < 1) { + e-- + c *= 2 + } + if (e + eBias >= 1) { + value += rt / c + } else { + value += rt * Math.pow(2, 1 - eBias) + } + if (value * c >= 2) { + e++ + c /= 2 + } + + if (e + eBias >= eMax) { + m = 0 + e = eMax + } else if (e + eBias >= 1) { + m = ((value * c) - 1) * Math.pow(2, mLen) + e = e + eBias + } else { + m = value * Math.pow(2, eBias - 1) * Math.pow(2, mLen) + e = 0 + } + } + + for (; mLen >= 8; buffer[offset + i] = m & 0xff, i += d, m /= 256, mLen -= 8) {} + + e = (e << mLen) | m + eLen += mLen + for (; eLen > 0; buffer[offset + i] = e & 0xff, i += d, e /= 256, eLen -= 8) {} + + buffer[offset + i - d] |= s * 128 +} + +},{}],7:[function(require,module,exports){ +(function (global){ +'use strict'; +var Mutation = global.MutationObserver || global.WebKitMutationObserver; + +var scheduleDrain; + +{ + if (Mutation) { + var called = 0; + var observer = new Mutation(nextTick); + var element = global.document.createTextNode(''); + observer.observe(element, { + characterData: true + }); + scheduleDrain = function () { + element.data = (called = ++called % 2); + }; + } else if (!global.setImmediate && typeof global.MessageChannel !== 'undefined') { + var channel = new global.MessageChannel(); + channel.port1.onmessage = nextTick; + scheduleDrain = function () { + channel.port2.postMessage(0); + }; + } else if ('document' in global && 'onreadystatechange' in global.document.createElement('script')) { + scheduleDrain = function () { + + // Create a <script> element; its readystatechange event will be fired asynchronously once it is inserted + // into the document. Do so, thus queuing up the task. Remember to clean up once it's been called. + var scriptEl = global.document.createElement('script'); + scriptEl.onreadystatechange = function () { + nextTick(); + + scriptEl.onreadystatechange = null; + scriptEl.parentNode.removeChild(scriptEl); + scriptEl = null; + }; + global.document.documentElement.appendChild(scriptEl); + }; + } else { + scheduleDrain = function () { + setTimeout(nextTick, 0); + }; + } +} + +var draining; +var queue = []; +//named nextTick for less confusing stack traces +function nextTick() { + draining = true; + var i, oldQueue; + var len = queue.length; + while (len) { + oldQueue = queue; + queue = []; + i = -1; + while (++i < len) { + oldQueue[i](); + } + len = queue.length; + } + draining = false; +} + +module.exports = immediate; +function immediate(task) { + if (queue.push(task) === 1 && !draining) { + scheduleDrain(); + } +} + +}).call(this,typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {}) +},{}],8:[function(require,module,exports){ +if (typeof Object.create === 'function') { + // implementation from standard node.js 'util' module + module.exports = function inherits(ctor, superCtor) { + if (superCtor) { + ctor.super_ = superCtor + ctor.prototype = Object.create(superCtor.prototype, { + constructor: { + value: ctor, + enumerable: false, + writable: true, + configurable: true + } + }) + } + }; +} else { + // old school shim for old browsers + module.exports = function inherits(ctor, superCtor) { + if (superCtor) { + ctor.super_ = superCtor + var TempCtor = function () {} + TempCtor.prototype = superCtor.prototype + ctor.prototype = new TempCtor() + ctor.prototype.constructor = ctor + } + } +} + +},{}],9:[function(require,module,exports){ +/*! 
+ * Determine if an object is a Buffer + * + * @author Feross Aboukhadijeh <https://feross.org> + * @license MIT + */ + +// The _isBuffer check is for Safari 5-7 support, because it's missing +// Object.prototype.constructor. Remove this eventually +module.exports = function (obj) { + return obj != null && (isBuffer(obj) || isSlowBuffer(obj) || !!obj._isBuffer) +} + +function isBuffer (obj) { + return !!obj.constructor && typeof obj.constructor.isBuffer === 'function' && obj.constructor.isBuffer(obj) +} + +// For Node v0.10 support. Remove this eventually. +function isSlowBuffer (obj) { + return typeof obj.readFloatLE === 'function' && typeof obj.slice === 'function' && isBuffer(obj.slice(0, 0)) +} + +},{}],10:[function(require,module,exports){ +var toString = {}.toString; + +module.exports = Array.isArray || function (arr) { + return toString.call(arr) == '[object Array]'; +}; + +},{}],11:[function(require,module,exports){ +'use strict'; +var utils = require('./utils'); +var support = require('./support'); +// private property +var _keyStr = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="; + + +// public method for encoding +exports.encode = function(input) { + var output = []; + var chr1, chr2, chr3, enc1, enc2, enc3, enc4; + var i = 0, len = input.length, remainingBytes = len; + + var isArray = utils.getTypeOf(input) !== "string"; + while (i < input.length) { + remainingBytes = len - i; + + if (!isArray) { + chr1 = input.charCodeAt(i++); + chr2 = i < len ? input.charCodeAt(i++) : 0; + chr3 = i < len ? input.charCodeAt(i++) : 0; + } else { + chr1 = input[i++]; + chr2 = i < len ? input[i++] : 0; + chr3 = i < len ? input[i++] : 0; + } + + enc1 = chr1 >> 2; + enc2 = ((chr1 & 3) << 4) | (chr2 >> 4); + enc3 = remainingBytes > 1 ? (((chr2 & 15) << 2) | (chr3 >> 6)) : 64; + enc4 = remainingBytes > 2 ? (chr3 & 63) : 64; + + output.push(_keyStr.charAt(enc1) + _keyStr.charAt(enc2) + _keyStr.charAt(enc3) + _keyStr.charAt(enc4)); + + } + + return output.join(""); +}; + +// public method for decoding +exports.decode = function(input) { + var chr1, chr2, chr3; + var enc1, enc2, enc3, enc4; + var i = 0, resultIndex = 0; + + var dataUrlPrefix = "data:"; + + if (input.substr(0, dataUrlPrefix.length) === dataUrlPrefix) { + // This is a common error: people give a data url + // (data:image/png;base64,iVBOR...) with a {base64: true} and + // wonders why things don't work. + // We can detect that the string input looks like a data url but we + // *can't* be sure it is one: removing everything up to the comma would + // be too dangerous. + throw new Error("Invalid base64 input, it looks like a data url."); + } + + input = input.replace(/[^A-Za-z0-9\+\/\=]/g, ""); + + var totalLength = input.length * 3 / 4; + if(input.charAt(input.length - 1) === _keyStr.charAt(64)) { + totalLength--; + } + if(input.charAt(input.length - 2) === _keyStr.charAt(64)) { + totalLength--; + } + if (totalLength % 1 !== 0) { + // totalLength is not an integer, the length does not match a valid + // base64 content. 
That can happen if: + // - the input is not a base64 content + // - the input is *almost* a base64 content, with a extra chars at the + // beginning or at the end + // - the input uses a base64 variant (base64url for example) + throw new Error("Invalid base64 input, bad content length."); + } + var output; + if (support.uint8array) { + output = new Uint8Array(totalLength|0); + } else { + output = new Array(totalLength|0); + } + + while (i < input.length) { + + enc1 = _keyStr.indexOf(input.charAt(i++)); + enc2 = _keyStr.indexOf(input.charAt(i++)); + enc3 = _keyStr.indexOf(input.charAt(i++)); + enc4 = _keyStr.indexOf(input.charAt(i++)); + + chr1 = (enc1 << 2) | (enc2 >> 4); + chr2 = ((enc2 & 15) << 4) | (enc3 >> 2); + chr3 = ((enc3 & 3) << 6) | enc4; + + output[resultIndex++] = chr1; + + if (enc3 !== 64) { + output[resultIndex++] = chr2; + } + if (enc4 !== 64) { + output[resultIndex++] = chr3; + } + + } + + return output; +}; + +},{"./support":40,"./utils":42}],12:[function(require,module,exports){ +'use strict'; + +var external = require("./external"); +var DataWorker = require('./stream/DataWorker'); +var DataLengthProbe = require('./stream/DataLengthProbe'); +var Crc32Probe = require('./stream/Crc32Probe'); +var DataLengthProbe = require('./stream/DataLengthProbe'); + +/** + * Represent a compressed object, with everything needed to decompress it. + * @constructor + * @param {number} compressedSize the size of the data compressed. + * @param {number} uncompressedSize the size of the data after decompression. + * @param {number} crc32 the crc32 of the decompressed file. + * @param {object} compression the type of compression, see lib/compressions.js. + * @param {String|ArrayBuffer|Uint8Array|Buffer} data the compressed data. + */ +function CompressedObject(compressedSize, uncompressedSize, crc32, compression, data) { + this.compressedSize = compressedSize; + this.uncompressedSize = uncompressedSize; + this.crc32 = crc32; + this.compression = compression; + this.compressedContent = data; +} + +CompressedObject.prototype = { + /** + * Create a worker to get the uncompressed content. + * @return {GenericWorker} the worker. + */ + getContentWorker : function () { + var worker = new DataWorker(external.Promise.resolve(this.compressedContent)) + .pipe(this.compression.uncompressWorker()) + .pipe(new DataLengthProbe("data_length")); + + var that = this; + worker.on("end", function () { + if(this.streamInfo['data_length'] !== that.uncompressedSize) { + throw new Error("Bug : uncompressed data size mismatch"); + } + }); + return worker; + }, + /** + * Create a worker to get the compressed content. + * @return {GenericWorker} the worker. + */ + getCompressedWorker : function () { + return new DataWorker(external.Promise.resolve(this.compressedContent)) + .withStreamInfo("compressedSize", this.compressedSize) + .withStreamInfo("uncompressedSize", this.uncompressedSize) + .withStreamInfo("crc32", this.crc32) + .withStreamInfo("compression", this.compression) + ; + } +}; + +/** + * Chain the given worker with other workers to compress the content with the + * given compression. + * @param {GenericWorker} uncompressedWorker the worker to pipe. + * @param {Object} compression the compression object. + * @param {Object} compressionOptions the options to use when compressing. + * @return {GenericWorker} the new worker compressing the content. 
+ */ +CompressedObject.createWorkerFrom = function (uncompressedWorker, compression, compressionOptions) { + return uncompressedWorker + .pipe(new Crc32Probe()) + .pipe(new DataLengthProbe("uncompressedSize")) + .pipe(compression.compressWorker(compressionOptions)) + .pipe(new DataLengthProbe("compressedSize")) + .withStreamInfo("compression", compression); +}; + +module.exports = CompressedObject; + +},{"./external":16,"./stream/Crc32Probe":35,"./stream/DataLengthProbe":36,"./stream/DataWorker":37}],13:[function(require,module,exports){ +'use strict'; + +var GenericWorker = require("./stream/GenericWorker"); + +exports.STORE = { + magic: "\x00\x00", + compressWorker : function (compressionOptions) { + return new GenericWorker("STORE compression"); + }, + uncompressWorker : function () { + return new GenericWorker("STORE decompression"); + } +}; +exports.DEFLATE = require('./flate'); + +},{"./flate":17,"./stream/GenericWorker":38}],14:[function(require,module,exports){ +'use strict'; + +var utils = require('./utils'); + +/** + * The following functions come from pako, from pako/lib/zlib/crc32.js + * released under the MIT license, see pako https://github.com/nodeca/pako/ + */ + +// Use ordinary array, since untyped makes no boost here +function makeTable() { + var c, table = []; + + for(var n =0; n < 256; n++){ + c = n; + for(var k =0; k < 8; k++){ + c = ((c&1) ? (0xEDB88320 ^ (c >>> 1)) : (c >>> 1)); + } + table[n] = c; + } + + return table; +} + +// Create table on load. Just 255 signed longs. Not a problem. +var crcTable = makeTable(); + + +function crc32(crc, buf, len, pos) { + var t = crcTable, end = pos + len; + + crc = crc ^ (-1); + + for (var i = pos; i < end; i++ ) { + crc = (crc >>> 8) ^ t[(crc ^ buf[i]) & 0xFF]; + } + + return (crc ^ (-1)); // >>> 0; +} + +// That's all for the pako functions. + +/** + * Compute the crc32 of a string. + * This is almost the same as the function crc32, but for strings. Using the + * same function for the two use cases leads to horrible performances. + * @param {Number} crc the starting value of the crc. + * @param {String} str the string to use. + * @param {Number} len the length of the string. + * @param {Number} pos the starting position for the crc32 computation. + * @return {Number} the computed crc32. 
+ */ +function crc32str(crc, str, len, pos) { + var t = crcTable, end = pos + len; + + crc = crc ^ (-1); + + for (var i = pos; i < end; i++ ) { + crc = (crc >>> 8) ^ t[(crc ^ str.charCodeAt(i)) & 0xFF]; + } + + return (crc ^ (-1)); // >>> 0; +} + +module.exports = function crc32wrapper(input, crc) { + if (typeof input === "undefined" || !input.length) { + return 0; + } + + var isArray = utils.getTypeOf(input) !== "string"; + + if(isArray) { + return crc32(crc|0, input, input.length, 0); + } else { + return crc32str(crc|0, input, input.length, 0); + } +}; + +},{"./utils":42}],15:[function(require,module,exports){ +'use strict'; +exports.base64 = false; +exports.binary = false; +exports.dir = false; +exports.createFolders = true; +exports.date = null; +exports.compression = null; +exports.compressionOptions = null; +exports.comment = null; +exports.unixPermissions = null; +exports.dosPermissions = null; + +},{}],16:[function(require,module,exports){ +/* global Promise */ +'use strict'; + +// load the global object first: +// - it should be better integrated in the system (unhandledRejection in node) +// - the environment may have a custom Promise implementation (see zone.js) +var ES6Promise = null; +if (typeof Promise !== "undefined") { + ES6Promise = Promise; +} else { + ES6Promise = require("lie"); +} + +/** + * Let the user use/change some implementations. + */ +module.exports = { + Promise: ES6Promise +}; + +},{"lie":46}],17:[function(require,module,exports){ +'use strict'; +var USE_TYPEDARRAY = (typeof Uint8Array !== 'undefined') && (typeof Uint16Array !== 'undefined') && (typeof Uint32Array !== 'undefined'); + +var pako = require("pako"); +var utils = require("./utils"); +var GenericWorker = require("./stream/GenericWorker"); + +var ARRAY_TYPE = USE_TYPEDARRAY ? "uint8array" : "array"; + +exports.magic = "\x08\x00"; + +/** + * Create a worker that uses pako to inflate/deflate. + * @constructor + * @param {String} action the name of the pako function to call : either "Deflate" or "Inflate". + * @param {Object} options the options to use when (de)compressing. + */ +function FlateWorker(action, options) { + GenericWorker.call(this, "FlateWorker/" + action); + + this._pako = null; + this._pakoAction = action; + this._pakoOptions = options; + // the `meta` object from the last chunk received + // this allow this worker to pass around metadata + this.meta = {}; +} + +utils.inherits(FlateWorker, GenericWorker); + +/** + * @see GenericWorker.processChunk + */ +FlateWorker.prototype.processChunk = function (chunk) { + this.meta = chunk.meta; + if (this._pako === null) { + this._createPako(); + } + this._pako.push(utils.transformTo(ARRAY_TYPE, chunk.data), false); +}; + +/** + * @see GenericWorker.flush + */ +FlateWorker.prototype.flush = function () { + GenericWorker.prototype.flush.call(this); + if (this._pako === null) { + this._createPako(); + } + this._pako.push([], true); +}; +/** + * @see GenericWorker.cleanUp + */ +FlateWorker.prototype.cleanUp = function () { + GenericWorker.prototype.cleanUp.call(this); + this._pako = null; +}; + +/** + * Create the _pako object. + * TODO: lazy-loading this object isn't the best solution but it's the + * quickest. The best solution is to lazy-load the worker list. See also the + * issue #446. 
+ */ +FlateWorker.prototype._createPako = function () { + this._pako = new pako[this._pakoAction]({ + raw: true, + level: this._pakoOptions.level || -1 // default compression + }); + var self = this; + this._pako.onData = function(data) { + self.push({ + data : data, + meta : self.meta + }); + }; +}; + +exports.compressWorker = function (compressionOptions) { + return new FlateWorker("Deflate", compressionOptions); +}; +exports.uncompressWorker = function () { + return new FlateWorker("Inflate", {}); +}; + +},{"./stream/GenericWorker":38,"./utils":42,"pako":47}],18:[function(require,module,exports){ +'use strict'; + +var utils = require('../utils'); +var GenericWorker = require('../stream/GenericWorker'); +var utf8 = require('../utf8'); +var crc32 = require('../crc32'); +var signature = require('../signature'); + +/** + * Transform an integer into a string in hexadecimal. + * @private + * @param {number} dec the number to convert. + * @param {number} bytes the number of bytes to generate. + * @returns {string} the result. + */ +var decToHex = function(dec, bytes) { + var hex = "", i; + for (i = 0; i < bytes; i++) { + hex += String.fromCharCode(dec & 0xff); + dec = dec >>> 8; + } + return hex; +}; + +/** + * Generate the UNIX part of the external file attributes. + * @param {Object} unixPermissions the unix permissions or null. + * @param {Boolean} isDir true if the entry is a directory, false otherwise. + * @return {Number} a 32 bit integer. + * + * adapted from http://unix.stackexchange.com/questions/14705/the-zip-formats-external-file-attribute : + * + * TTTTsstrwxrwxrwx0000000000ADVSHR + * ^^^^____________________________ file type, see zipinfo.c (UNX_*) + * ^^^_________________________ setuid, setgid, sticky + * ^^^^^^^^^________________ permissions + * ^^^^^^^^^^______ not used ? + * ^^^^^^ DOS attribute bits : Archive, Directory, Volume label, System file, Hidden, Read only + */ +var generateUnixExternalFileAttr = function (unixPermissions, isDir) { + + var result = unixPermissions; + if (!unixPermissions) { + // I can't use octal values in strict mode, hence the hexa. + // 040775 => 0x41fd + // 0100664 => 0x81b4 + result = isDir ? 0x41fd : 0x81b4; + } + return (result & 0xFFFF) << 16; +}; + +/** + * Generate the DOS part of the external file attributes. + * @param {Object} dosPermissions the dos permissions or null. + * @param {Boolean} isDir true if the entry is a directory, false otherwise. + * @return {Number} a 32 bit integer. + * + * Bit 0 Read-Only + * Bit 1 Hidden + * Bit 2 System + * Bit 3 Volume Label + * Bit 4 Directory + * Bit 5 Archive + */ +var generateDosExternalFileAttr = function (dosPermissions, isDir) { + + // the dir flag is already set for compatibility + return (dosPermissions || 0) & 0x3F; +}; + +/** + * Generate the various parts used in the construction of the final zip file. + * @param {Object} streamInfo the hash with information about the compressed file. + * @param {Boolean} streamedContent is the content streamed ? + * @param {Boolean} streamingEnded is the stream finished ? + * @param {number} offset the current offset from the start of the zip file. + * @param {String} platform let's pretend we are this platform (change platform dependents fields) + * @param {Function} encodeFileName the function to encode the file name / comment. + * @return {Object} the zip parts. 
+ */ +var generateZipParts = function(streamInfo, streamedContent, streamingEnded, offset, platform, encodeFileName) { + var file = streamInfo['file'], + compression = streamInfo['compression'], + useCustomEncoding = encodeFileName !== utf8.utf8encode, + encodedFileName = utils.transformTo("string", encodeFileName(file.name)), + utfEncodedFileName = utils.transformTo("string", utf8.utf8encode(file.name)), + comment = file.comment, + encodedComment = utils.transformTo("string", encodeFileName(comment)), + utfEncodedComment = utils.transformTo("string", utf8.utf8encode(comment)), + useUTF8ForFileName = utfEncodedFileName.length !== file.name.length, + useUTF8ForComment = utfEncodedComment.length !== comment.length, + dosTime, + dosDate, + extraFields = "", + unicodePathExtraField = "", + unicodeCommentExtraField = "", + dir = file.dir, + date = file.date; + + + var dataInfo = { + crc32 : 0, + compressedSize : 0, + uncompressedSize : 0 + }; + + // if the content is streamed, the sizes/crc32 are only available AFTER + // the end of the stream. + if (!streamedContent || streamingEnded) { + dataInfo.crc32 = streamInfo['crc32']; + dataInfo.compressedSize = streamInfo['compressedSize']; + dataInfo.uncompressedSize = streamInfo['uncompressedSize']; + } + + var bitflag = 0; + if (streamedContent) { + // Bit 3: the sizes/crc32 are set to zero in the local header. + // The correct values are put in the data descriptor immediately + // following the compressed data. + bitflag |= 0x0008; + } + if (!useCustomEncoding && (useUTF8ForFileName || useUTF8ForComment)) { + // Bit 11: Language encoding flag (EFS). + bitflag |= 0x0800; + } + + + var extFileAttr = 0; + var versionMadeBy = 0; + if (dir) { + // dos or unix, we set the dos dir flag + extFileAttr |= 0x00010; + } + if(platform === "UNIX") { + versionMadeBy = 0x031E; // UNIX, version 3.0 + extFileAttr |= generateUnixExternalFileAttr(file.unixPermissions, dir); + } else { // DOS or other, fallback to DOS + versionMadeBy = 0x0014; // DOS, version 2.0 + extFileAttr |= generateDosExternalFileAttr(file.dosPermissions, dir); + } + + // date + // @see http://www.delorie.com/djgpp/doc/rbinter/it/52/13.html + // @see http://www.delorie.com/djgpp/doc/rbinter/it/65/16.html + // @see http://www.delorie.com/djgpp/doc/rbinter/it/66/16.html + + dosTime = date.getUTCHours(); + dosTime = dosTime << 6; + dosTime = dosTime | date.getUTCMinutes(); + dosTime = dosTime << 5; + dosTime = dosTime | date.getUTCSeconds() / 2; + + dosDate = date.getUTCFullYear() - 1980; + dosDate = dosDate << 4; + dosDate = dosDate | (date.getUTCMonth() + 1); + dosDate = dosDate << 5; + dosDate = dosDate | date.getUTCDate(); + + if (useUTF8ForFileName) { + // set the unicode path extra field. unzip needs at least one extra + // field to correctly handle unicode path, so using the path is as good + // as any other information. This could improve the situation with + // other archive managers too. + // This field is usually used without the utf8 flag, with a non + // unicode path in the header (winrar, winzip). This helps (a bit) + // with the messy Windows' default compressed folders feature but + // breaks on p7zip which doesn't seek the unicode path extra field. + // So for now, UTF-8 everywhere ! 
+ unicodePathExtraField = + // Version + decToHex(1, 1) + + // NameCRC32 + decToHex(crc32(encodedFileName), 4) + + // UnicodeName + utfEncodedFileName; + + extraFields += + // Info-ZIP Unicode Path Extra Field + "\x75\x70" + + // size + decToHex(unicodePathExtraField.length, 2) + + // content + unicodePathExtraField; + } + + if(useUTF8ForComment) { + + unicodeCommentExtraField = + // Version + decToHex(1, 1) + + // CommentCRC32 + decToHex(crc32(encodedComment), 4) + + // UnicodeName + utfEncodedComment; + + extraFields += + // Info-ZIP Unicode Path Extra Field + "\x75\x63" + + // size + decToHex(unicodeCommentExtraField.length, 2) + + // content + unicodeCommentExtraField; + } + + var header = ""; + + // version needed to extract + header += "\x0A\x00"; + // general purpose bit flag + header += decToHex(bitflag, 2); + // compression method + header += compression.magic; + // last mod file time + header += decToHex(dosTime, 2); + // last mod file date + header += decToHex(dosDate, 2); + // crc-32 + header += decToHex(dataInfo.crc32, 4); + // compressed size + header += decToHex(dataInfo.compressedSize, 4); + // uncompressed size + header += decToHex(dataInfo.uncompressedSize, 4); + // file name length + header += decToHex(encodedFileName.length, 2); + // extra field length + header += decToHex(extraFields.length, 2); + + + var fileRecord = signature.LOCAL_FILE_HEADER + header + encodedFileName + extraFields; + + var dirRecord = signature.CENTRAL_FILE_HEADER + + // version made by (00: DOS) + decToHex(versionMadeBy, 2) + + // file header (common to file and central directory) + header + + // file comment length + decToHex(encodedComment.length, 2) + + // disk number start + "\x00\x00" + + // internal file attributes TODO + "\x00\x00" + + // external file attributes + decToHex(extFileAttr, 4) + + // relative offset of local header + decToHex(offset, 4) + + // file name + encodedFileName + + // extra field + extraFields + + // file comment + encodedComment; + + return { + fileRecord: fileRecord, + dirRecord: dirRecord + }; +}; + +/** + * Generate the EOCD record. + * @param {Number} entriesCount the number of entries in the zip file. + * @param {Number} centralDirLength the length (in bytes) of the central dir. + * @param {Number} localDirLength the length (in bytes) of the local dir. + * @param {String} comment the zip file comment as a binary string. + * @param {Function} encodeFileName the function to encode the comment. + * @return {String} the EOCD record. + */ +var generateCentralDirectoryEnd = function (entriesCount, centralDirLength, localDirLength, comment, encodeFileName) { + var dirEnd = ""; + var encodedComment = utils.transformTo("string", encodeFileName(comment)); + + // end of central dir signature + dirEnd = signature.CENTRAL_DIRECTORY_END + + // number of this disk + "\x00\x00" + + // number of the disk with the start of the central directory + "\x00\x00" + + // total number of entries in the central directory on this disk + decToHex(entriesCount, 2) + + // total number of entries in the central directory + decToHex(entriesCount, 2) + + // size of the central directory 4 bytes + decToHex(centralDirLength, 4) + + // offset of start of central directory with respect to the starting disk number + decToHex(localDirLength, 4) + + // .ZIP file comment length + decToHex(encodedComment.length, 2) + + // .ZIP file comment + encodedComment; + + return dirEnd; +}; + +/** + * Generate data descriptors for a file entry. 
+ * @param {Object} streamInfo the hash generated by a worker, containing information + * on the file entry. + * @return {String} the data descriptors. + */ +var generateDataDescriptors = function (streamInfo) { + var descriptor = ""; + descriptor = signature.DATA_DESCRIPTOR + + // crc-32 4 bytes + decToHex(streamInfo['crc32'], 4) + + // compressed size 4 bytes + decToHex(streamInfo['compressedSize'], 4) + + // uncompressed size 4 bytes + decToHex(streamInfo['uncompressedSize'], 4); + + return descriptor; +}; + + +/** + * A worker to concatenate other workers to create a zip file. + * @param {Boolean} streamFiles `true` to stream the content of the files, + * `false` to accumulate it. + * @param {String} comment the comment to use. + * @param {String} platform the platform to use, "UNIX" or "DOS". + * @param {Function} encodeFileName the function to encode file names and comments. + */ +function ZipFileWorker(streamFiles, comment, platform, encodeFileName) { + GenericWorker.call(this, "ZipFileWorker"); + // The number of bytes written so far. This doesn't count accumulated chunks. + this.bytesWritten = 0; + // The comment of the zip file + this.zipComment = comment; + // The platform "generating" the zip file. + this.zipPlatform = platform; + // the function to encode file names and comments. + this.encodeFileName = encodeFileName; + // Should we stream the content of the files ? + this.streamFiles = streamFiles; + // If `streamFiles` is false, we will need to accumulate the content of the + // files to calculate sizes / crc32 (and write them *before* the content). + // This boolean indicates if we are accumulating chunks (it will change a lot + // during the lifetime of this worker). + this.accumulate = false; + // The buffer receiving chunks when accumulating content. + this.contentBuffer = []; + // The list of generated directory records. + this.dirRecords = []; + // The offset (in bytes) from the beginning of the zip file for the current source. + this.currentSourceOffset = 0; + // The total number of entries in this zip file. + this.entriesCount = 0; + // the name of the file currently being added, null when handling the end of the zip file. + // Used for the emitted metadata. + this.currentFile = null; + + + + this._sources = []; +} +utils.inherits(ZipFileWorker, GenericWorker); + +/** + * @see GenericWorker.push + */ +ZipFileWorker.prototype.push = function (chunk) { + + var currentFilePercent = chunk.meta.percent || 0; + var entriesCount = this.entriesCount; + var remainingFiles = this._sources.length; + + if(this.accumulate) { + this.contentBuffer.push(chunk); + } else { + this.bytesWritten += chunk.data.length; + + GenericWorker.prototype.push.call(this, { + data : chunk.data, + meta : { + currentFile : this.currentFile, + percent : entriesCount ? (currentFilePercent + 100 * (entriesCount - remainingFiles - 1)) / entriesCount : 100 + } + }); + } +}; + +/** + * The worker started a new source (an other worker). + * @param {Object} streamInfo the streamInfo object from the new source. 
+ */ +ZipFileWorker.prototype.openedSource = function (streamInfo) { + this.currentSourceOffset = this.bytesWritten; + this.currentFile = streamInfo['file'].name; + + var streamedContent = this.streamFiles && !streamInfo['file'].dir; + + // don't stream folders (because they don't have any content) + if(streamedContent) { + var record = generateZipParts(streamInfo, streamedContent, false, this.currentSourceOffset, this.zipPlatform, this.encodeFileName); + this.push({ + data : record.fileRecord, + meta : {percent:0} + }); + } else { + // we need to wait for the whole file before pushing anything + this.accumulate = true; + } +}; + +/** + * The worker finished a source (an other worker). + * @param {Object} streamInfo the streamInfo object from the finished source. + */ +ZipFileWorker.prototype.closedSource = function (streamInfo) { + this.accumulate = false; + var streamedContent = this.streamFiles && !streamInfo['file'].dir; + var record = generateZipParts(streamInfo, streamedContent, true, this.currentSourceOffset, this.zipPlatform, this.encodeFileName); + + this.dirRecords.push(record.dirRecord); + if(streamedContent) { + // after the streamed file, we put data descriptors + this.push({ + data : generateDataDescriptors(streamInfo), + meta : {percent:100} + }); + } else { + // the content wasn't streamed, we need to push everything now + // first the file record, then the content + this.push({ + data : record.fileRecord, + meta : {percent:0} + }); + while(this.contentBuffer.length) { + this.push(this.contentBuffer.shift()); + } + } + this.currentFile = null; +}; + +/** + * @see GenericWorker.flush + */ +ZipFileWorker.prototype.flush = function () { + + var localDirLength = this.bytesWritten; + for(var i = 0; i < this.dirRecords.length; i++) { + this.push({ + data : this.dirRecords[i], + meta : {percent:100} + }); + } + var centralDirLength = this.bytesWritten - localDirLength; + + var dirEnd = generateCentralDirectoryEnd(this.dirRecords.length, centralDirLength, localDirLength, this.zipComment, this.encodeFileName); + + this.push({ + data : dirEnd, + meta : {percent:100} + }); +}; + +/** + * Prepare the next source to be read. 
+ */ +ZipFileWorker.prototype.prepareNextSource = function () { + this.previous = this._sources.shift(); + this.openedSource(this.previous.streamInfo); + if (this.isPaused) { + this.previous.pause(); + } else { + this.previous.resume(); + } +}; + +/** + * @see GenericWorker.registerPrevious + */ +ZipFileWorker.prototype.registerPrevious = function (previous) { + this._sources.push(previous); + var self = this; + + previous.on('data', function (chunk) { + self.processChunk(chunk); + }); + previous.on('end', function () { + self.closedSource(self.previous.streamInfo); + if(self._sources.length) { + self.prepareNextSource(); + } else { + self.end(); + } + }); + previous.on('error', function (e) { + self.error(e); + }); + return this; +}; + +/** + * @see GenericWorker.resume + */ +ZipFileWorker.prototype.resume = function () { + if(!GenericWorker.prototype.resume.call(this)) { + return false; + } + + if (!this.previous && this._sources.length) { + this.prepareNextSource(); + return true; + } + if (!this.previous && !this._sources.length && !this.generatedError) { + this.end(); + return true; + } +}; + +/** + * @see GenericWorker.error + */ +ZipFileWorker.prototype.error = function (e) { + var sources = this._sources; + if(!GenericWorker.prototype.error.call(this, e)) { + return false; + } + for(var i = 0; i < sources.length; i++) { + try { + sources[i].error(e); + } catch(e) { + // the `error` exploded, nothing to do + } + } + return true; +}; + +/** + * @see GenericWorker.lock + */ +ZipFileWorker.prototype.lock = function () { + GenericWorker.prototype.lock.call(this); + var sources = this._sources; + for(var i = 0; i < sources.length; i++) { + sources[i].lock(); + } +}; + +module.exports = ZipFileWorker; + +},{"../crc32":14,"../signature":33,"../stream/GenericWorker":38,"../utf8":41,"../utils":42}],19:[function(require,module,exports){ +'use strict'; + +var compressions = require('../compressions'); +var ZipFileWorker = require('./ZipFileWorker'); + +/** + * Find the compression to use. + * @param {String} fileCompression the compression defined at the file level, if any. + * @param {String} zipCompression the compression defined at the load() level. + * @return {Object} the compression object to use. + */ +var getCompression = function (fileCompression, zipCompression) { + + var compressionName = fileCompression || zipCompression; + var compression = compressions[compressionName]; + if (!compression) { + throw new Error(compressionName + " is not a valid compression method !"); + } + return compression; +}; + +/** + * Create a worker to generate a zip file. + * @param {JSZip} zip the JSZip instance at the right root level. + * @param {Object} options to generate the zip file. + * @param {String} comment the comment to use. 
+ */ +exports.generateWorker = function (zip, options, comment) { + + var zipFileWorker = new ZipFileWorker(options.streamFiles, comment, options.platform, options.encodeFileName); + var entriesCount = 0; + try { + + zip.forEach(function (relativePath, file) { + entriesCount++; + var compression = getCompression(file.options.compression, options.compression); + var compressionOptions = file.options.compressionOptions || options.compressionOptions || {}; + var dir = file.dir, date = file.date; + + file._compressWorker(compression, compressionOptions) + .withStreamInfo("file", { + name : relativePath, + dir : dir, + date : date, + comment : file.comment || "", + unixPermissions : file.unixPermissions, + dosPermissions : file.dosPermissions + }) + .pipe(zipFileWorker); + }); + zipFileWorker.entriesCount = entriesCount; + } catch (e) { + zipFileWorker.error(e); + } + + return zipFileWorker; +}; + +},{"../compressions":13,"./ZipFileWorker":18}],20:[function(require,module,exports){ +'use strict'; + +/** + * Representation a of zip file in js + * @constructor + */ +function JSZip() { + // if this constructor is used without `new`, it adds `new` before itself: + if(!(this instanceof JSZip)) { + return new JSZip(); + } + + if(arguments.length) { + throw new Error("The constructor with parameters has been removed in JSZip 3.0, please check the upgrade guide."); + } + + // object containing the files : + // { + // "folder/" : {...}, + // "folder/data.txt" : {...} + // } + this.files = {}; + + this.comment = null; + + // Where we are in the hierarchy + this.root = ""; + this.clone = function() { + var newObj = new JSZip(); + for (var i in this) { + if (typeof this[i] !== "function") { + newObj[i] = this[i]; + } + } + return newObj; + }; +} +JSZip.prototype = require('./object'); +JSZip.prototype.loadAsync = require('./load'); +JSZip.support = require('./support'); +JSZip.defaults = require('./defaults'); + +// TODO find a better way to handle this version, +// a require('package.json').version doesn't work with webpack, see #327 +JSZip.version = "3.5.0"; + +JSZip.loadAsync = function (content, options) { + return new JSZip().loadAsync(content, options); +}; + +JSZip.external = require("./external"); +module.exports = JSZip; + +},{"./defaults":15,"./external":16,"./load":21,"./object":25,"./support":40}],21:[function(require,module,exports){ +'use strict'; +var utils = require('./utils'); +var external = require("./external"); +var utf8 = require('./utf8'); +var utils = require('./utils'); +var ZipEntries = require('./zipEntries'); +var Crc32Probe = require('./stream/Crc32Probe'); +var nodejsUtils = require("./nodejsUtils"); + +/** + * Check the CRC32 of an entry. + * @param {ZipEntry} zipEntry the zip entry to check. + * @return {Promise} the result. 
+ */ +function checkEntryCRC32(zipEntry) { + return new external.Promise(function (resolve, reject) { + var worker = zipEntry.decompressed.getContentWorker().pipe(new Crc32Probe()); + worker.on("error", function (e) { + reject(e); + }) + .on("end", function () { + if (worker.streamInfo.crc32 !== zipEntry.decompressed.crc32) { + reject(new Error("Corrupted zip : CRC32 mismatch")); + } else { + resolve(); + } + }) + .resume(); + }); +} + +module.exports = function(data, options) { + var zip = this; + options = utils.extend(options || {}, { + base64: false, + checkCRC32: false, + optimizedBinaryString: false, + createFolders: false, + decodeFileName: utf8.utf8decode + }); + + if (nodejsUtils.isNode && nodejsUtils.isStream(data)) { + return external.Promise.reject(new Error("JSZip can't accept a stream when loading a zip file.")); + } + + return utils.prepareContent("the loaded zip file", data, true, options.optimizedBinaryString, options.base64) + .then(function(data) { + var zipEntries = new ZipEntries(options); + zipEntries.load(data); + return zipEntries; + }).then(function checkCRC32(zipEntries) { + var promises = [external.Promise.resolve(zipEntries)]; + var files = zipEntries.files; + if (options.checkCRC32) { + for (var i = 0; i < files.length; i++) { + promises.push(checkEntryCRC32(files[i])); + } + } + return external.Promise.all(promises); + }).then(function addFiles(results) { + var zipEntries = results.shift(); + var files = zipEntries.files; + for (var i = 0; i < files.length; i++) { + var input = files[i]; + zip.file(input.fileNameStr, input.decompressed, { + binary: true, + optimizedBinaryString: true, + date: input.date, + dir: input.dir, + comment : input.fileCommentStr.length ? input.fileCommentStr : null, + unixPermissions : input.unixPermissions, + dosPermissions : input.dosPermissions, + createFolders: options.createFolders + }); + } + if (zipEntries.zipComment.length) { + zip.comment = zipEntries.zipComment; + } + + return zip; + }); +}; + +},{"./external":16,"./nodejsUtils":22,"./stream/Crc32Probe":35,"./utf8":41,"./utils":42,"./zipEntries":43}],22:[function(require,module,exports){ +(function (Buffer){ +'use strict'; + +module.exports = { + /** + * True if this is running in Nodejs, will be undefined in a browser. + * In a browser, browserify won't include this file and the whole module + * will be resolved an empty object. + */ + isNode : typeof Buffer !== "undefined", + /** + * Create a new nodejs Buffer from an existing content. + * @param {Object} data the data to pass to the constructor. + * @param {String} encoding the encoding to use. + * @return {Buffer} a new Buffer. + */ + newBufferFrom: function(data, encoding) { + if (Buffer.from && Buffer.from !== Uint8Array.from) { + return Buffer.from(data, encoding); + } else { + if (typeof data === "number") { + // Safeguard for old Node.js versions. On newer versions, + // Buffer.from(number) / Buffer(number, encoding) already throw. + throw new Error("The \"data\" argument must not be a number"); + } + return new Buffer(data, encoding); + } + }, + /** + * Create a new nodejs Buffer with the specified size. + * @param {Integer} size the size of the buffer. + * @return {Buffer} a new Buffer. + */ + allocBuffer: function (size) { + if (Buffer.alloc) { + return Buffer.alloc(size); + } else { + var buf = new Buffer(size); + buf.fill(0); + return buf; + } + }, + /** + * Find out if an object is a Buffer. + * @param {Object} b the object to test. + * @return {Boolean} true if the object is a Buffer, false otherwise. 
+ */ + isBuffer : function(b){ + return Buffer.isBuffer(b); + }, + + isStream : function (obj) { + return obj && + typeof obj.on === "function" && + typeof obj.pause === "function" && + typeof obj.resume === "function"; + } +}; + +}).call(this,require("buffer").Buffer) +},{"buffer":3}],23:[function(require,module,exports){ +"use strict"; + +var utils = require('../utils'); +var GenericWorker = require('../stream/GenericWorker'); + +/** + * A worker that use a nodejs stream as source. + * @constructor + * @param {String} filename the name of the file entry for this stream. + * @param {Readable} stream the nodejs stream. + */ +function NodejsStreamInputAdapter(filename, stream) { + GenericWorker.call(this, "Nodejs stream input adapter for " + filename); + this._upstreamEnded = false; + this._bindStream(stream); +} + +utils.inherits(NodejsStreamInputAdapter, GenericWorker); + +/** + * Prepare the stream and bind the callbacks on it. + * Do this ASAP on node 0.10 ! A lazy binding doesn't always work. + * @param {Stream} stream the nodejs stream to use. + */ +NodejsStreamInputAdapter.prototype._bindStream = function (stream) { + var self = this; + this._stream = stream; + stream.pause(); + stream + .on("data", function (chunk) { + self.push({ + data: chunk, + meta : { + percent : 0 + } + }); + }) + .on("error", function (e) { + if(self.isPaused) { + this.generatedError = e; + } else { + self.error(e); + } + }) + .on("end", function () { + if(self.isPaused) { + self._upstreamEnded = true; + } else { + self.end(); + } + }); +}; +NodejsStreamInputAdapter.prototype.pause = function () { + if(!GenericWorker.prototype.pause.call(this)) { + return false; + } + this._stream.pause(); + return true; +}; +NodejsStreamInputAdapter.prototype.resume = function () { + if(!GenericWorker.prototype.resume.call(this)) { + return false; + } + + if(this._upstreamEnded) { + this.end(); + } else { + this._stream.resume(); + } + + return true; +}; + +module.exports = NodejsStreamInputAdapter; + +},{"../stream/GenericWorker":38,"../utils":42}],24:[function(require,module,exports){ +'use strict'; + +var Readable = require('readable-stream').Readable; + +var utils = require('../utils'); +utils.inherits(NodejsStreamOutputAdapter, Readable); + +/** +* A nodejs stream using a worker as source. +* @see the SourceWrapper in http://nodejs.org/api/stream.html +* @constructor +* @param {StreamHelper} helper the helper wrapping the worker +* @param {Object} options the nodejs stream options +* @param {Function} updateCb the update callback. 
+*/ +function NodejsStreamOutputAdapter(helper, options, updateCb) { + Readable.call(this, options); + this._helper = helper; + + var self = this; + helper.on("data", function (data, meta) { + if (!self.push(data)) { + self._helper.pause(); + } + if(updateCb) { + updateCb(meta); + } + }) + .on("error", function(e) { + self.emit('error', e); + }) + .on("end", function () { + self.push(null); + }); +} + + +NodejsStreamOutputAdapter.prototype._read = function() { + this._helper.resume(); +}; + +module.exports = NodejsStreamOutputAdapter; + +},{"../utils":42,"readable-stream":26}],25:[function(require,module,exports){ +'use strict'; +var utf8 = require('./utf8'); +var utils = require('./utils'); +var GenericWorker = require('./stream/GenericWorker'); +var StreamHelper = require('./stream/StreamHelper'); +var defaults = require('./defaults'); +var CompressedObject = require('./compressedObject'); +var ZipObject = require('./zipObject'); +var generate = require("./generate"); +var nodejsUtils = require("./nodejsUtils"); +var NodejsStreamInputAdapter = require("./nodejs/NodejsStreamInputAdapter"); + + +/** + * Add a file in the current folder. + * @private + * @param {string} name the name of the file + * @param {String|ArrayBuffer|Uint8Array|Buffer} data the data of the file + * @param {Object} originalOptions the options of the file + * @return {Object} the new file. + */ +var fileAdd = function(name, data, originalOptions) { + // be sure sub folders exist + var dataType = utils.getTypeOf(data), + parent; + + + /* + * Correct options. + */ + + var o = utils.extend(originalOptions || {}, defaults); + o.date = o.date || new Date(); + if (o.compression !== null) { + o.compression = o.compression.toUpperCase(); + } + + if (typeof o.unixPermissions === "string") { + o.unixPermissions = parseInt(o.unixPermissions, 8); + } + + // UNX_IFDIR 0040000 see zipinfo.c + if (o.unixPermissions && (o.unixPermissions & 0x4000)) { + o.dir = true; + } + // Bit 4 Directory + if (o.dosPermissions && (o.dosPermissions & 0x0010)) { + o.dir = true; + } + + if (o.dir) { + name = forceTrailingSlash(name); + } + if (o.createFolders && (parent = parentFolder(name))) { + folderAdd.call(this, parent, true); + } + + var isUnicodeString = dataType === "string" && o.binary === false && o.base64 === false; + if (!originalOptions || typeof originalOptions.binary === "undefined") { + o.binary = !isUnicodeString; + } + + + var isCompressedEmpty = (data instanceof CompressedObject) && data.uncompressedSize === 0; + + if (isCompressedEmpty || o.dir || !data || data.length === 0) { + o.base64 = false; + o.binary = true; + data = ""; + o.compression = "STORE"; + dataType = "string"; + } + + /* + * Convert content to fit. + */ + + var zipObjectContent = null; + if (data instanceof CompressedObject || data instanceof GenericWorker) { + zipObjectContent = data; + } else if (nodejsUtils.isNode && nodejsUtils.isStream(data)) { + zipObjectContent = new NodejsStreamInputAdapter(name, data); + } else { + zipObjectContent = utils.prepareContent(name, data, o.binary, o.optimizedBinaryString, o.base64); + } + + var object = new ZipObject(name, zipObjectContent, o); + this.files[name] = object; + /* + TODO: we can't throw an exception because we have async promises + (we can have a promise of a Date() for example) but returning a + promise is useless because file(name, data) returns the JSZip + object for chaining. Should we break that to allow the user + to catch the error ? 
+ + return external.Promise.resolve(zipObjectContent) + .then(function () { + return object; + }); + */ +}; + +/** + * Find the parent folder of the path. + * @private + * @param {string} path the path to use + * @return {string} the parent folder, or "" + */ +var parentFolder = function (path) { + if (path.slice(-1) === '/') { + path = path.substring(0, path.length - 1); + } + var lastSlash = path.lastIndexOf('/'); + return (lastSlash > 0) ? path.substring(0, lastSlash) : ""; +}; + +/** + * Returns the path with a slash at the end. + * @private + * @param {String} path the path to check. + * @return {String} the path with a trailing slash. + */ +var forceTrailingSlash = function(path) { + // Check the name ends with a / + if (path.slice(-1) !== "/") { + path += "/"; // IE doesn't like substr(-1) + } + return path; +}; + +/** + * Add a (sub) folder in the current folder. + * @private + * @param {string} name the folder's name + * @param {boolean=} [createFolders] If true, automatically create sub + * folders. Defaults to false. + * @return {Object} the new folder. + */ +var folderAdd = function(name, createFolders) { + createFolders = (typeof createFolders !== 'undefined') ? createFolders : defaults.createFolders; + + name = forceTrailingSlash(name); + + // Does this folder already exist? + if (!this.files[name]) { + fileAdd.call(this, name, null, { + dir: true, + createFolders: createFolders + }); + } + return this.files[name]; +}; + +/** +* Cross-window, cross-Node-context regular expression detection +* @param {Object} object Anything +* @return {Boolean} true if the object is a regular expression, +* false otherwise +*/ +function isRegExp(object) { + return Object.prototype.toString.call(object) === "[object RegExp]"; +} + +// return the actual prototype of JSZip +var out = { + /** + * @see loadAsync + */ + load: function() { + throw new Error("This method has been removed in JSZip 3.0, please check the upgrade guide."); + }, + + + /** + * Call a callback function for each entry at this folder level. + * @param {Function} cb the callback function: + * function (relativePath, file) {...} + * It takes 2 arguments : the relative path and the file. + */ + forEach: function(cb) { + var filename, relativePath, file; + for (filename in this.files) { + if (!this.files.hasOwnProperty(filename)) { + continue; + } + file = this.files[filename]; + relativePath = filename.slice(this.root.length, filename.length); + if (relativePath && filename.slice(0, this.root.length) === this.root) { // the file is in the current root + cb(relativePath, file); // TODO reverse the parameters ? need to be clean AND consistent with the filter search fn... + } + } + }, + + /** + * Filter nested files/folders with the specified function. + * @param {Function} search the predicate to use : + * function (relativePath, file) {...} + * It takes 2 arguments : the relative path and the file. + * @return {Array} An array of matching elements. + */ + filter: function(search) { + var result = []; + this.forEach(function (relativePath, entry) { + if (search(relativePath, entry)) { // the file matches the function + result.push(entry); + } + + }); + return result; + }, + + /** + * Add a file to the zip file, or search a file. + * @param {string|RegExp} name The name of the file to add (if data is defined), + * the name of the file to find (if no data) or a regex to match files. 
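+     *
+     * Illustrative calls (a sketch of the three supported forms, assuming a JSZip
+     * instance named `zip`; file names and contents are hypothetical):
+     *
+     *   zip.file("hello.txt", "Hello World\n");   // add (or overwrite) a file
+     *   var f = zip.file("hello.txt");            // look a file up by exact name (or null)
+     *   var txts = zip.file(/\.txt$/);            // collect files whose path matches a RegExp
+     *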
+ * @param {String|ArrayBuffer|Uint8Array|Buffer} data The file data, either raw or base64 encoded + * @param {Object} o File options + * @return {JSZip|Object|Array} this JSZip object (when adding a file), + * a file (when searching by string) or an array of files (when searching by regex). + */ + file: function(name, data, o) { + if (arguments.length === 1) { + if (isRegExp(name)) { + var regexp = name; + return this.filter(function(relativePath, file) { + return !file.dir && regexp.test(relativePath); + }); + } + else { // text + var obj = this.files[this.root + name]; + if (obj && !obj.dir) { + return obj; + } else { + return null; + } + } + } + else { // more than one argument : we have data ! + name = this.root + name; + fileAdd.call(this, name, data, o); + } + return this; + }, + + /** + * Add a directory to the zip file, or search. + * @param {String|RegExp} arg The name of the directory to add, or a regex to search folders. + * @return {JSZip} an object with the new directory as the root, or an array containing matching folders. + */ + folder: function(arg) { + if (!arg) { + return this; + } + + if (isRegExp(arg)) { + return this.filter(function(relativePath, file) { + return file.dir && arg.test(relativePath); + }); + } + + // else, name is a new folder + var name = this.root + arg; + var newFolder = folderAdd.call(this, name); + + // Allow chaining by returning a new object with this folder as the root + var ret = this.clone(); + ret.root = newFolder.name; + return ret; + }, + + /** + * Delete a file, or a directory and all sub-files, from the zip + * @param {string} name the name of the file to delete + * @return {JSZip} this JSZip object + */ + remove: function(name) { + name = this.root + name; + var file = this.files[name]; + if (!file) { + // Look for any folders + if (name.slice(-1) !== "/") { + name += "/"; + } + file = this.files[name]; + } + + if (file && !file.dir) { + // file + delete this.files[name]; + } else { + // maybe a folder, delete recursively + var kids = this.filter(function(relativePath, file) { + return file.name.slice(0, name.length) === name; + }); + for (var i = 0; i < kids.length; i++) { + delete this.files[kids[i].name]; + } + } + + return this; + }, + + /** + * Generate the complete zip file + * @param {Object} options the options to generate the zip file : + * - compression, "STORE" by default. + * - type, "base64" by default. Values are : string, base64, uint8array, arraybuffer, blob. + * @return {String|Uint8Array|ArrayBuffer|Buffer|Blob} the zip file + */ + generate: function(options) { + throw new Error("This method has been removed in JSZip 3.0, please check the upgrade guide."); + }, + + /** + * Generate the complete zip file as an internal stream. + * @param {Object} options the options to generate the zip file : + * - compression, "STORE" by default. + * - type, "base64" by default. Values are : string, base64, uint8array, arraybuffer, blob. + * @return {StreamHelper} the streamed zip file. + */ + generateInternalStream: function(options) { + var worker, opts = {}; + try { + opts = utils.extend(options || {}, { + streamFiles: false, + compression: "STORE", + compressionOptions : null, + type: "", + platform: "DOS", + comment: null, + mimeType: 'application/zip', + encodeFileName: utf8.utf8encode + }); + + opts.type = opts.type.toLowerCase(); + opts.compression = opts.compression.toUpperCase(); + + // "binarystring" is preferred but the internals use "string". 
+ if(opts.type === "binarystring") { + opts.type = "string"; + } + + if (!opts.type) { + throw new Error("No output type specified."); + } + + utils.checkSupport(opts.type); + + // accept nodejs `process.platform` + if( + opts.platform === 'darwin' || + opts.platform === 'freebsd' || + opts.platform === 'linux' || + opts.platform === 'sunos' + ) { + opts.platform = "UNIX"; + } + if (opts.platform === 'win32') { + opts.platform = "DOS"; + } + + var comment = opts.comment || this.comment || ""; + worker = generate.generateWorker(this, opts, comment); + } catch (e) { + worker = new GenericWorker("error"); + worker.error(e); + } + return new StreamHelper(worker, opts.type || "string", opts.mimeType); + }, + /** + * Generate the complete zip file asynchronously. + * @see generateInternalStream + */ + generateAsync: function(options, onUpdate) { + return this.generateInternalStream(options).accumulate(onUpdate); + }, + /** + * Generate the complete zip file asynchronously. + * @see generateInternalStream + */ + generateNodeStream: function(options, onUpdate) { + options = options || {}; + if (!options.type) { + options.type = "nodebuffer"; + } + return this.generateInternalStream(options).toNodejsStream(onUpdate); + } +}; +module.exports = out; + +},{"./compressedObject":12,"./defaults":15,"./generate":19,"./nodejs/NodejsStreamInputAdapter":23,"./nodejsUtils":22,"./stream/GenericWorker":38,"./stream/StreamHelper":39,"./utf8":41,"./utils":42,"./zipObject":45}],26:[function(require,module,exports){ +/* + * This file is used by module bundlers (browserify/webpack/etc) when + * including a stream implementation. We use "readable-stream" to get a + * consistent behavior between nodejs versions but bundlers often have a shim + * for "stream". Using this shim greatly improve the compatibility and greatly + * reduce the final size of the bundle (only one stream implementation, not + * two). 
+ */ +module.exports = require("stream"); + +},{"stream":80}],27:[function(require,module,exports){ +'use strict'; +var DataReader = require('./DataReader'); +var utils = require('../utils'); + +function ArrayReader(data) { + DataReader.call(this, data); + for(var i = 0; i < this.data.length; i++) { + data[i] = data[i] & 0xFF; + } +} +utils.inherits(ArrayReader, DataReader); +/** + * @see DataReader.byteAt + */ +ArrayReader.prototype.byteAt = function(i) { + return this.data[this.zero + i]; +}; +/** + * @see DataReader.lastIndexOfSignature + */ +ArrayReader.prototype.lastIndexOfSignature = function(sig) { + var sig0 = sig.charCodeAt(0), + sig1 = sig.charCodeAt(1), + sig2 = sig.charCodeAt(2), + sig3 = sig.charCodeAt(3); + for (var i = this.length - 4; i >= 0; --i) { + if (this.data[i] === sig0 && this.data[i + 1] === sig1 && this.data[i + 2] === sig2 && this.data[i + 3] === sig3) { + return i - this.zero; + } + } + + return -1; +}; +/** + * @see DataReader.readAndCheckSignature + */ +ArrayReader.prototype.readAndCheckSignature = function (sig) { + var sig0 = sig.charCodeAt(0), + sig1 = sig.charCodeAt(1), + sig2 = sig.charCodeAt(2), + sig3 = sig.charCodeAt(3), + data = this.readData(4); + return sig0 === data[0] && sig1 === data[1] && sig2 === data[2] && sig3 === data[3]; +}; +/** + * @see DataReader.readData + */ +ArrayReader.prototype.readData = function(size) { + this.checkOffset(size); + if(size === 0) { + return []; + } + var result = this.data.slice(this.zero + this.index, this.zero + this.index + size); + this.index += size; + return result; +}; +module.exports = ArrayReader; + +},{"../utils":42,"./DataReader":28}],28:[function(require,module,exports){ +'use strict'; +var utils = require('../utils'); + +function DataReader(data) { + this.data = data; // type : see implementation + this.length = data.length; + this.index = 0; + this.zero = 0; +} +DataReader.prototype = { + /** + * Check that the offset will not go too far. + * @param {string} offset the additional offset to check. + * @throws {Error} an Error if the offset is out of bounds. + */ + checkOffset: function(offset) { + this.checkIndex(this.index + offset); + }, + /** + * Check that the specified index will not be too far. + * @param {string} newIndex the index to check. + * @throws {Error} an Error if the index is out of bounds. + */ + checkIndex: function(newIndex) { + if (this.length < this.zero + newIndex || newIndex < 0) { + throw new Error("End of data reached (data length = " + this.length + ", asked index = " + (newIndex) + "). Corrupted zip ?"); + } + }, + /** + * Change the index. + * @param {number} newIndex The new index. + * @throws {Error} if the new index is out of the data. + */ + setIndex: function(newIndex) { + this.checkIndex(newIndex); + this.index = newIndex; + }, + /** + * Skip the next n bytes. + * @param {number} n the number of bytes to skip. + * @throws {Error} if the new index is out of the data. + */ + skip: function(n) { + this.setIndex(this.index + n); + }, + /** + * Get the byte at the specified index. + * @param {number} i the index to use. + * @return {number} a byte. + */ + byteAt: function(i) { + // see implementations + }, + /** + * Get the next number with a given byte size. + * @param {number} size the number of bytes to read. + * @return {number} the corresponding number. 
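+     *
+     * ZIP numbers are stored little-endian, so the byte at the highest offset is
+     * the most significant. Worked example (bytes chosen for illustration only):
+     * reading the two bytes 0x04 0x03 with readInt(2) gives (0x03 << 8) + 0x04 = 0x0304 = 772.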
+ */ + readInt: function(size) { + var result = 0, + i; + this.checkOffset(size); + for (i = this.index + size - 1; i >= this.index; i--) { + result = (result << 8) + this.byteAt(i); + } + this.index += size; + return result; + }, + /** + * Get the next string with a given byte size. + * @param {number} size the number of bytes to read. + * @return {string} the corresponding string. + */ + readString: function(size) { + return utils.transformTo("string", this.readData(size)); + }, + /** + * Get raw data without conversion, <size> bytes. + * @param {number} size the number of bytes to read. + * @return {Object} the raw data, implementation specific. + */ + readData: function(size) { + // see implementations + }, + /** + * Find the last occurrence of a zip signature (4 bytes). + * @param {string} sig the signature to find. + * @return {number} the index of the last occurrence, -1 if not found. + */ + lastIndexOfSignature: function(sig) { + // see implementations + }, + /** + * Read the signature (4 bytes) at the current position and compare it with sig. + * @param {string} sig the expected signature + * @return {boolean} true if the signature matches, false otherwise. + */ + readAndCheckSignature: function(sig) { + // see implementations + }, + /** + * Get the next date. + * @return {Date} the date. + */ + readDate: function() { + var dostime = this.readInt(4); + return new Date(Date.UTC( + ((dostime >> 25) & 0x7f) + 1980, // year + ((dostime >> 21) & 0x0f) - 1, // month + (dostime >> 16) & 0x1f, // day + (dostime >> 11) & 0x1f, // hour + (dostime >> 5) & 0x3f, // minute + (dostime & 0x1f) << 1)); // second + } +}; +module.exports = DataReader; + +},{"../utils":42}],29:[function(require,module,exports){ +'use strict'; +var Uint8ArrayReader = require('./Uint8ArrayReader'); +var utils = require('../utils'); + +function NodeBufferReader(data) { + Uint8ArrayReader.call(this, data); +} +utils.inherits(NodeBufferReader, Uint8ArrayReader); + +/** + * @see DataReader.readData + */ +NodeBufferReader.prototype.readData = function(size) { + this.checkOffset(size); + var result = this.data.slice(this.zero + this.index, this.zero + this.index + size); + this.index += size; + return result; +}; +module.exports = NodeBufferReader; + +},{"../utils":42,"./Uint8ArrayReader":31}],30:[function(require,module,exports){ +'use strict'; +var DataReader = require('./DataReader'); +var utils = require('../utils'); + +function StringReader(data) { + DataReader.call(this, data); +} +utils.inherits(StringReader, DataReader); +/** + * @see DataReader.byteAt + */ +StringReader.prototype.byteAt = function(i) { + return this.data.charCodeAt(this.zero + i); +}; +/** + * @see DataReader.lastIndexOfSignature + */ +StringReader.prototype.lastIndexOfSignature = function(sig) { + return this.data.lastIndexOf(sig) - this.zero; +}; +/** + * @see DataReader.readAndCheckSignature + */ +StringReader.prototype.readAndCheckSignature = function (sig) { + var data = this.readData(4); + return sig === data; +}; +/** + * @see DataReader.readData + */ +StringReader.prototype.readData = function(size) { + this.checkOffset(size); + // this will work because the constructor applied the "& 0xff" mask. 
+ var result = this.data.slice(this.zero + this.index, this.zero + this.index + size); + this.index += size; + return result; +}; +module.exports = StringReader; + +},{"../utils":42,"./DataReader":28}],31:[function(require,module,exports){ +'use strict'; +var ArrayReader = require('./ArrayReader'); +var utils = require('../utils'); + +function Uint8ArrayReader(data) { + ArrayReader.call(this, data); +} +utils.inherits(Uint8ArrayReader, ArrayReader); +/** + * @see DataReader.readData + */ +Uint8ArrayReader.prototype.readData = function(size) { + this.checkOffset(size); + if(size === 0) { + // in IE10, when using subarray(idx, idx), we get the array [0x00] instead of []. + return new Uint8Array(0); + } + var result = this.data.subarray(this.zero + this.index, this.zero + this.index + size); + this.index += size; + return result; +}; +module.exports = Uint8ArrayReader; + +},{"../utils":42,"./ArrayReader":27}],32:[function(require,module,exports){ +'use strict'; + +var utils = require('../utils'); +var support = require('../support'); +var ArrayReader = require('./ArrayReader'); +var StringReader = require('./StringReader'); +var NodeBufferReader = require('./NodeBufferReader'); +var Uint8ArrayReader = require('./Uint8ArrayReader'); + +/** + * Create a reader adapted to the data. + * @param {String|ArrayBuffer|Uint8Array|Buffer} data the data to read. + * @return {DataReader} the data reader. + */ +module.exports = function (data) { + var type = utils.getTypeOf(data); + utils.checkSupport(type); + if (type === "string" && !support.uint8array) { + return new StringReader(data); + } + if (type === "nodebuffer") { + return new NodeBufferReader(data); + } + if (support.uint8array) { + return new Uint8ArrayReader(utils.transformTo("uint8array", data)); + } + return new ArrayReader(utils.transformTo("array", data)); +}; + +},{"../support":40,"../utils":42,"./ArrayReader":27,"./NodeBufferReader":29,"./StringReader":30,"./Uint8ArrayReader":31}],33:[function(require,module,exports){ +'use strict'; +exports.LOCAL_FILE_HEADER = "PK\x03\x04"; +exports.CENTRAL_FILE_HEADER = "PK\x01\x02"; +exports.CENTRAL_DIRECTORY_END = "PK\x05\x06"; +exports.ZIP64_CENTRAL_DIRECTORY_LOCATOR = "PK\x06\x07"; +exports.ZIP64_CENTRAL_DIRECTORY_END = "PK\x06\x06"; +exports.DATA_DESCRIPTOR = "PK\x07\x08"; + +},{}],34:[function(require,module,exports){ +'use strict'; + +var GenericWorker = require('./GenericWorker'); +var utils = require('../utils'); + +/** + * A worker which convert chunks to a specified type. + * @constructor + * @param {String} destType the destination type. + */ +function ConvertWorker(destType) { + GenericWorker.call(this, "ConvertWorker to " + destType); + this.destType = destType; +} +utils.inherits(ConvertWorker, GenericWorker); + +/** + * @see GenericWorker.processChunk + */ +ConvertWorker.prototype.processChunk = function (chunk) { + this.push({ + data : utils.transformTo(this.destType, chunk.data), + meta : chunk.meta + }); +}; +module.exports = ConvertWorker; + +},{"../utils":42,"./GenericWorker":38}],35:[function(require,module,exports){ +'use strict'; + +var GenericWorker = require('./GenericWorker'); +var crc32 = require('../crc32'); +var utils = require('../utils'); + +/** + * A worker which calculate the crc32 of the data flowing through. 
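+ * Probes like this one are meant to be piped into a worker chain; a minimal
+ * sketch (a hypothetical chain, not necessarily the exact wiring used elsewhere
+ * in this bundle):
+ *
+ *   sourceWorker
+ *       .pipe(new Crc32Probe())                        // keeps a running crc32 in streamInfo
+ *       .pipe(new DataLengthProbe("uncompressedSize")) // keeps a running byte count
+ *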
+ * @constructor + */ +function Crc32Probe() { + GenericWorker.call(this, "Crc32Probe"); + this.withStreamInfo("crc32", 0); +} +utils.inherits(Crc32Probe, GenericWorker); + +/** + * @see GenericWorker.processChunk + */ +Crc32Probe.prototype.processChunk = function (chunk) { + this.streamInfo.crc32 = crc32(chunk.data, this.streamInfo.crc32 || 0); + this.push(chunk); +}; +module.exports = Crc32Probe; + +},{"../crc32":14,"../utils":42,"./GenericWorker":38}],36:[function(require,module,exports){ +'use strict'; + +var utils = require('../utils'); +var GenericWorker = require('./GenericWorker'); + +/** + * A worker which calculate the total length of the data flowing through. + * @constructor + * @param {String} propName the name used to expose the length + */ +function DataLengthProbe(propName) { + GenericWorker.call(this, "DataLengthProbe for " + propName); + this.propName = propName; + this.withStreamInfo(propName, 0); +} +utils.inherits(DataLengthProbe, GenericWorker); + +/** + * @see GenericWorker.processChunk + */ +DataLengthProbe.prototype.processChunk = function (chunk) { + if(chunk) { + var length = this.streamInfo[this.propName] || 0; + this.streamInfo[this.propName] = length + chunk.data.length; + } + GenericWorker.prototype.processChunk.call(this, chunk); +}; +module.exports = DataLengthProbe; + + +},{"../utils":42,"./GenericWorker":38}],37:[function(require,module,exports){ +'use strict'; + +var utils = require('../utils'); +var GenericWorker = require('./GenericWorker'); + +// the size of the generated chunks +// TODO expose this as a public variable +var DEFAULT_BLOCK_SIZE = 16 * 1024; + +/** + * A worker that reads a content and emits chunks. + * @constructor + * @param {Promise} dataP the promise of the data to split + */ +function DataWorker(dataP) { + GenericWorker.call(this, "DataWorker"); + var self = this; + this.dataIsReady = false; + this.index = 0; + this.max = 0; + this.data = null; + this.type = ""; + + this._tickScheduled = false; + + dataP.then(function (data) { + self.dataIsReady = true; + self.data = data; + self.max = data && data.length || 0; + self.type = utils.getTypeOf(data); + if(!self.isPaused) { + self._tickAndRepeat(); + } + }, function (e) { + self.error(e); + }); +} + +utils.inherits(DataWorker, GenericWorker); + +/** + * @see GenericWorker.cleanUp + */ +DataWorker.prototype.cleanUp = function () { + GenericWorker.prototype.cleanUp.call(this); + this.data = null; +}; + +/** + * @see GenericWorker.resume + */ +DataWorker.prototype.resume = function () { + if(!GenericWorker.prototype.resume.call(this)) { + return false; + } + + if (!this._tickScheduled && this.dataIsReady) { + this._tickScheduled = true; + utils.delay(this._tickAndRepeat, [], this); + } + return true; +}; + +/** + * Trigger a tick a schedule an other call to this function. + */ +DataWorker.prototype._tickAndRepeat = function() { + this._tickScheduled = false; + if(this.isPaused || this.isFinished) { + return; + } + this._tick(); + if(!this.isFinished) { + utils.delay(this._tickAndRepeat, [], this); + this._tickScheduled = true; + } +}; + +/** + * Read and push a chunk. 
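+ * Each call slices at most DEFAULT_BLOCK_SIZE (16 KiB) from the source and pushes
+ * a chunk shaped like this (values are illustrative only):
+ *
+ *   { data: <string | array | uint8array | nodebuffer slice>,
+ *     meta: { percent: 42.5 } }   // share of the source consumed so far
+ *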
+ */ +DataWorker.prototype._tick = function() { + + if(this.isPaused || this.isFinished) { + return false; + } + + var size = DEFAULT_BLOCK_SIZE; + var data = null, nextIndex = Math.min(this.max, this.index + size); + if (this.index >= this.max) { + // EOF + return this.end(); + } else { + switch(this.type) { + case "string": + data = this.data.substring(this.index, nextIndex); + break; + case "uint8array": + data = this.data.subarray(this.index, nextIndex); + break; + case "array": + case "nodebuffer": + data = this.data.slice(this.index, nextIndex); + break; + } + this.index = nextIndex; + return this.push({ + data : data, + meta : { + percent : this.max ? this.index / this.max * 100 : 0 + } + }); + } +}; + +module.exports = DataWorker; + +},{"../utils":42,"./GenericWorker":38}],38:[function(require,module,exports){ +'use strict'; + +/** + * A worker that does nothing but passing chunks to the next one. This is like + * a nodejs stream but with some differences. On the good side : + * - it works on IE 6-9 without any issue / polyfill + * - it weights less than the full dependencies bundled with browserify + * - it forwards errors (no need to declare an error handler EVERYWHERE) + * + * A chunk is an object with 2 attributes : `meta` and `data`. The former is an + * object containing anything (`percent` for example), see each worker for more + * details. The latter is the real data (String, Uint8Array, etc). + * + * @constructor + * @param {String} name the name of the stream (mainly used for debugging purposes) + */ +function GenericWorker(name) { + // the name of the worker + this.name = name || "default"; + // an object containing metadata about the workers chain + this.streamInfo = {}; + // an error which happened when the worker was paused + this.generatedError = null; + // an object containing metadata to be merged by this worker into the general metadata + this.extraStreamInfo = {}; + // true if the stream is paused (and should not do anything), false otherwise + this.isPaused = true; + // true if the stream is finished (and should not do anything), false otherwise + this.isFinished = false; + // true if the stream is locked to prevent further structure updates (pipe), false otherwise + this.isLocked = false; + // the event listeners + this._listeners = { + 'data':[], + 'end':[], + 'error':[] + }; + // the previous worker, if any + this.previous = null; +} + +GenericWorker.prototype = { + /** + * Push a chunk to the next workers. + * @param {Object} chunk the chunk to push + */ + push : function (chunk) { + this.emit("data", chunk); + }, + /** + * End the stream. + * @return {Boolean} true if this call ended the worker, false otherwise. + */ + end : function () { + if (this.isFinished) { + return false; + } + + this.flush(); + try { + this.emit("end"); + this.cleanUp(); + this.isFinished = true; + } catch (e) { + this.emit("error", e); + } + return true; + }, + /** + * End the stream with an error. + * @param {Error} e the error which caused the premature end. + * @return {Boolean} true if this call ended the worker with an error, false otherwise. + */ + error : function (e) { + if (this.isFinished) { + return false; + } + + if(this.isPaused) { + this.generatedError = e; + } else { + this.isFinished = true; + + this.emit("error", e); + + // in the workers chain exploded in the middle of the chain, + // the error event will go downward but we also need to notify + // workers upward that there has been an error. 
+ if(this.previous) { + this.previous.error(e); + } + + this.cleanUp(); + } + return true; + }, + /** + * Add a callback on an event. + * @param {String} name the name of the event (data, end, error) + * @param {Function} listener the function to call when the event is triggered + * @return {GenericWorker} the current object for chainability + */ + on : function (name, listener) { + this._listeners[name].push(listener); + return this; + }, + /** + * Clean any references when a worker is ending. + */ + cleanUp : function () { + this.streamInfo = this.generatedError = this.extraStreamInfo = null; + this._listeners = []; + }, + /** + * Trigger an event. This will call registered callback with the provided arg. + * @param {String} name the name of the event (data, end, error) + * @param {Object} arg the argument to call the callback with. + */ + emit : function (name, arg) { + if (this._listeners[name]) { + for(var i = 0; i < this._listeners[name].length; i++) { + this._listeners[name][i].call(this, arg); + } + } + }, + /** + * Chain a worker with an other. + * @param {Worker} next the worker receiving events from the current one. + * @return {worker} the next worker for chainability + */ + pipe : function (next) { + return next.registerPrevious(this); + }, + /** + * Same as `pipe` in the other direction. + * Using an API with `pipe(next)` is very easy. + * Implementing the API with the point of view of the next one registering + * a source is easier, see the ZipFileWorker. + * @param {Worker} previous the previous worker, sending events to this one + * @return {Worker} the current worker for chainability + */ + registerPrevious : function (previous) { + if (this.isLocked) { + throw new Error("The stream '" + this + "' has already been used."); + } + + // sharing the streamInfo... + this.streamInfo = previous.streamInfo; + // ... and adding our own bits + this.mergeStreamInfo(); + this.previous = previous; + var self = this; + previous.on('data', function (chunk) { + self.processChunk(chunk); + }); + previous.on('end', function () { + self.end(); + }); + previous.on('error', function (e) { + self.error(e); + }); + return this; + }, + /** + * Pause the stream so it doesn't send events anymore. + * @return {Boolean} true if this call paused the worker, false otherwise. + */ + pause : function () { + if(this.isPaused || this.isFinished) { + return false; + } + this.isPaused = true; + + if(this.previous) { + this.previous.pause(); + } + return true; + }, + /** + * Resume a paused stream. + * @return {Boolean} true if this call resumed the worker, false otherwise. + */ + resume : function () { + if(!this.isPaused || this.isFinished) { + return false; + } + this.isPaused = false; + + // if true, the worker tried to resume but failed + var withError = false; + if(this.generatedError) { + this.error(this.generatedError); + withError = true; + } + if(this.previous) { + this.previous.resume(); + } + + return !withError; + }, + /** + * Flush any remaining bytes as the stream is ending. + */ + flush : function () {}, + /** + * Process a chunk. This is usually the method overridden. + * @param {Object} chunk the chunk to process. + */ + processChunk : function(chunk) { + this.push(chunk); + }, + /** + * Add a key/value to be added in the workers chain streamInfo once activated. 
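+     * A minimal sketch with a hypothetical key name (see Crc32Probe and
+     * DataLengthProbe earlier in this bundle for typical callers):
+     *
+     *   worker.withStreamInfo("uncompressedSize", 0); // visible to every worker sharing streamInfo
+     *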
+ * @param {String} key the key to use + * @param {Object} value the associated value + * @return {Worker} the current worker for chainability + */ + withStreamInfo : function (key, value) { + this.extraStreamInfo[key] = value; + this.mergeStreamInfo(); + return this; + }, + /** + * Merge this worker's streamInfo into the chain's streamInfo. + */ + mergeStreamInfo : function () { + for(var key in this.extraStreamInfo) { + if (!this.extraStreamInfo.hasOwnProperty(key)) { + continue; + } + this.streamInfo[key] = this.extraStreamInfo[key]; + } + }, + + /** + * Lock the stream to prevent further updates on the workers chain. + * After calling this method, all calls to pipe will fail. + */ + lock: function () { + if (this.isLocked) { + throw new Error("The stream '" + this + "' has already been used."); + } + this.isLocked = true; + if (this.previous) { + this.previous.lock(); + } + }, + + /** + * + * Pretty print the workers chain. + */ + toString : function () { + var me = "Worker " + this.name; + if (this.previous) { + return this.previous + " -> " + me; + } else { + return me; + } + } +}; + +module.exports = GenericWorker; + +},{}],39:[function(require,module,exports){ +(function (Buffer){ +'use strict'; + +var utils = require('../utils'); +var ConvertWorker = require('./ConvertWorker'); +var GenericWorker = require('./GenericWorker'); +var base64 = require('../base64'); +var support = require("../support"); +var external = require("../external"); + +var NodejsStreamOutputAdapter = null; +if (support.nodestream) { + try { + NodejsStreamOutputAdapter = require('../nodejs/NodejsStreamOutputAdapter'); + } catch(e) {} +} + +/** + * Apply the final transformation of the data. If the user wants a Blob for + * example, it's easier to work with an U8intArray and finally do the + * ArrayBuffer/Blob conversion. + * @param {String} type the name of the final type + * @param {String|Uint8Array|Buffer} content the content to transform + * @param {String} mimeType the mime type of the content, if applicable. + * @return {String|Uint8Array|ArrayBuffer|Buffer|Blob} the content in the right format. + */ +function transformZipOutput(type, content, mimeType) { + switch(type) { + case "blob" : + return utils.newBlob(utils.transformTo("arraybuffer", content), mimeType); + case "base64" : + return base64.encode(content); + default : + return utils.transformTo(type, content); + } +} + +/** + * Concatenate an array of data of the given type. + * @param {String} type the type of the data in the given array. + * @param {Array} dataArray the array containing the data chunks to concatenate + * @return {String|Uint8Array|Buffer} the concatenated data + * @throws Error if the asked type is unsupported + */ +function concat (type, dataArray) { + var i, index = 0, res = null, totalLength = 0; + for(i = 0; i < dataArray.length; i++) { + totalLength += dataArray[i].length; + } + switch(type) { + case "string": + return dataArray.join(""); + case "array": + return Array.prototype.concat.apply([], dataArray); + case "uint8array": + res = new Uint8Array(totalLength); + for(i = 0; i < dataArray.length; i++) { + res.set(dataArray[i], index); + index += dataArray[i].length; + } + return res; + case "nodebuffer": + return Buffer.concat(dataArray); + default: + throw new Error("concat : unsupported type '" + type + "'"); + } +} + +/** + * Listen a StreamHelper, accumulate its content and concatenate it into a + * complete block. + * @param {StreamHelper} helper the helper to use. 
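+ * This function backs StreamHelper#accumulate and, through it, generateAsync.
+ * An illustrative, hypothetical call from user code (assuming a JSZip instance
+ * named `zip`):
+ *
+ *   zip.generateAsync({ type: "blob" }, function (meta) {
+ *       console.log(meta.percent + "%");  // the metadata forwarded on each 'data' event
+ *   }).then(function (blob) { ... });
+ *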
+ * @param {Function} updateCallback a callback called on each update. Called + * with one arg : + * - the metadata linked to the update received. + * @return Promise the promise for the accumulation. + */ +function accumulate(helper, updateCallback) { + return new external.Promise(function (resolve, reject){ + var dataArray = []; + var chunkType = helper._internalType, + resultType = helper._outputType, + mimeType = helper._mimeType; + helper + .on('data', function (data, meta) { + dataArray.push(data); + if(updateCallback) { + updateCallback(meta); + } + }) + .on('error', function(err) { + dataArray = []; + reject(err); + }) + .on('end', function (){ + try { + var result = transformZipOutput(resultType, concat(chunkType, dataArray), mimeType); + resolve(result); + } catch (e) { + reject(e); + } + dataArray = []; + }) + .resume(); + }); +} + +/** + * An helper to easily use workers outside of JSZip. + * @constructor + * @param {Worker} worker the worker to wrap + * @param {String} outputType the type of data expected by the use + * @param {String} mimeType the mime type of the content, if applicable. + */ +function StreamHelper(worker, outputType, mimeType) { + var internalType = outputType; + switch(outputType) { + case "blob": + case "arraybuffer": + internalType = "uint8array"; + break; + case "base64": + internalType = "string"; + break; + } + + try { + // the type used internally + this._internalType = internalType; + // the type used to output results + this._outputType = outputType; + // the mime type + this._mimeType = mimeType; + utils.checkSupport(internalType); + this._worker = worker.pipe(new ConvertWorker(internalType)); + // the last workers can be rewired without issues but we need to + // prevent any updates on previous workers. + worker.lock(); + } catch(e) { + this._worker = new GenericWorker("error"); + this._worker.error(e); + } +} + +StreamHelper.prototype = { + /** + * Listen a StreamHelper, accumulate its content and concatenate it into a + * complete block. + * @param {Function} updateCb the update callback. + * @return Promise the promise for the accumulation. + */ + accumulate : function (updateCb) { + return accumulate(this, updateCb); + }, + /** + * Add a listener on an event triggered on a stream. + * @param {String} evt the name of the event + * @param {Function} fn the listener + * @return {StreamHelper} the current helper. + */ + on : function (evt, fn) { + var self = this; + + if(evt === "data") { + this._worker.on(evt, function (chunk) { + fn.call(self, chunk.data, chunk.meta); + }); + } else { + this._worker.on(evt, function () { + utils.delay(fn, arguments, self); + }); + } + return this; + }, + /** + * Resume the flow of chunks. + * @return {StreamHelper} the current helper. + */ + resume : function () { + utils.delay(this._worker.resume, [], this._worker); + return this; + }, + /** + * Pause the flow of chunks. + * @return {StreamHelper} the current helper. + */ + pause : function () { + this._worker.pause(); + return this; + }, + /** + * Return a nodejs stream for this helper. + * @param {Function} updateCb the update callback. + * @return {NodejsStreamOutputAdapter} the nodejs stream. + */ + toNodejsStream : function (updateCb) { + utils.checkSupport("nodestream"); + if (this._outputType !== "nodebuffer") { + // an object stream containing blob/arraybuffer/uint8array/string + // is strange and I don't know if it would be useful. + // I you find this comment and have a good usecase, please open a + // bug report ! 
+ throw new Error(this._outputType + " is not supported by this method"); + } + + return new NodejsStreamOutputAdapter(this, { + objectMode : this._outputType !== "nodebuffer" + }, updateCb); + } +}; + + +module.exports = StreamHelper; + +}).call(this,require("buffer").Buffer) +},{"../base64":11,"../external":16,"../nodejs/NodejsStreamOutputAdapter":24,"../support":40,"../utils":42,"./ConvertWorker":34,"./GenericWorker":38,"buffer":3}],40:[function(require,module,exports){ +(function (Buffer){ +'use strict'; + +exports.base64 = true; +exports.array = true; +exports.string = true; +exports.arraybuffer = typeof ArrayBuffer !== "undefined" && typeof Uint8Array !== "undefined"; +exports.nodebuffer = typeof Buffer !== "undefined"; +// contains true if JSZip can read/generate Uint8Array, false otherwise. +exports.uint8array = typeof Uint8Array !== "undefined"; + +if (typeof ArrayBuffer === "undefined") { + exports.blob = false; +} +else { + var buffer = new ArrayBuffer(0); + try { + exports.blob = new Blob([buffer], { + type: "application/zip" + }).size === 0; + } + catch (e) { + try { + var Builder = self.BlobBuilder || self.WebKitBlobBuilder || self.MozBlobBuilder || self.MSBlobBuilder; + var builder = new Builder(); + builder.append(buffer); + exports.blob = builder.getBlob('application/zip').size === 0; + } + catch (e) { + exports.blob = false; + } + } +} + +try { + exports.nodestream = !!require('readable-stream').Readable; +} catch(e) { + exports.nodestream = false; +} + +}).call(this,require("buffer").Buffer) +},{"buffer":3,"readable-stream":26}],41:[function(require,module,exports){ +'use strict'; + +var utils = require('./utils'); +var support = require('./support'); +var nodejsUtils = require('./nodejsUtils'); +var GenericWorker = require('./stream/GenericWorker'); + +/** + * The following functions come from pako, from pako/lib/utils/strings + * released under the MIT license, see pako https://github.com/nodeca/pako/ + */ + +// Table with utf8 lengths (calculated by first byte of sequence) +// Note, that 5 & 6-byte values and some 4-byte values can not be represented in JS, +// because max possible codepoint is 0x10ffff +var _utf8len = new Array(256); +for (var i=0; i<256; i++) { + _utf8len[i] = (i >= 252 ? 6 : i >= 248 ? 5 : i >= 240 ? 4 : i >= 224 ? 3 : i >= 192 ? 2 : 1); +} +_utf8len[254]=_utf8len[254]=1; // Invalid sequence start + +// convert string to array (typed, when possible) +var string2buf = function (str) { + var buf, c, c2, m_pos, i, str_len = str.length, buf_len = 0; + + // count binary size + for (m_pos = 0; m_pos < str_len; m_pos++) { + c = str.charCodeAt(m_pos); + if ((c & 0xfc00) === 0xd800 && (m_pos+1 < str_len)) { + c2 = str.charCodeAt(m_pos+1); + if ((c2 & 0xfc00) === 0xdc00) { + c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00); + m_pos++; + } + } + buf_len += c < 0x80 ? 1 : c < 0x800 ? 2 : c < 0x10000 ? 
3 : 4; + } + + // allocate buffer + if (support.uint8array) { + buf = new Uint8Array(buf_len); + } else { + buf = new Array(buf_len); + } + + // convert + for (i=0, m_pos = 0; i < buf_len; m_pos++) { + c = str.charCodeAt(m_pos); + if ((c & 0xfc00) === 0xd800 && (m_pos+1 < str_len)) { + c2 = str.charCodeAt(m_pos+1); + if ((c2 & 0xfc00) === 0xdc00) { + c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00); + m_pos++; + } + } + if (c < 0x80) { + /* one byte */ + buf[i++] = c; + } else if (c < 0x800) { + /* two bytes */ + buf[i++] = 0xC0 | (c >>> 6); + buf[i++] = 0x80 | (c & 0x3f); + } else if (c < 0x10000) { + /* three bytes */ + buf[i++] = 0xE0 | (c >>> 12); + buf[i++] = 0x80 | (c >>> 6 & 0x3f); + buf[i++] = 0x80 | (c & 0x3f); + } else { + /* four bytes */ + buf[i++] = 0xf0 | (c >>> 18); + buf[i++] = 0x80 | (c >>> 12 & 0x3f); + buf[i++] = 0x80 | (c >>> 6 & 0x3f); + buf[i++] = 0x80 | (c & 0x3f); + } + } + + return buf; +}; + +// Calculate max possible position in utf8 buffer, +// that will not break sequence. If that's not possible +// - (very small limits) return max size as is. +// +// buf[] - utf8 bytes array +// max - length limit (mandatory); +var utf8border = function(buf, max) { + var pos; + + max = max || buf.length; + if (max > buf.length) { max = buf.length; } + + // go back from last position, until start of sequence found + pos = max-1; + while (pos >= 0 && (buf[pos] & 0xC0) === 0x80) { pos--; } + + // Fuckup - very small and broken sequence, + // return max, because we should return something anyway. + if (pos < 0) { return max; } + + // If we came to start of buffer - that means vuffer is too small, + // return max too. + if (pos === 0) { return max; } + + return (pos + _utf8len[buf[pos]] > max) ? pos : max; +}; + +// convert array to string +var buf2string = function (buf) { + var str, i, out, c, c_len; + var len = buf.length; + + // Reserve max possible length (2 words per char) + // NB: by unknown reasons, Array is significantly faster for + // String.fromCharCode.apply than Uint16Array. + var utf16buf = new Array(len*2); + + for (out=0, i=0; i<len;) { + c = buf[i++]; + // quick process ascii + if (c < 0x80) { utf16buf[out++] = c; continue; } + + c_len = _utf8len[c]; + // skip 5 & 6 byte codes + if (c_len > 4) { utf16buf[out++] = 0xfffd; i += c_len-1; continue; } + + // apply mask on first byte + c &= c_len === 2 ? 0x1f : c_len === 3 ? 0x0f : 0x07; + // join the rest + while (c_len > 1 && i < len) { + c = (c << 6) | (buf[i++] & 0x3f); + c_len--; + } + + // terminated by end of string? + if (c_len > 1) { utf16buf[out++] = 0xfffd; continue; } + + if (c < 0x10000) { + utf16buf[out++] = c; + } else { + c -= 0x10000; + utf16buf[out++] = 0xd800 | ((c >> 10) & 0x3ff); + utf16buf[out++] = 0xdc00 | (c & 0x3ff); + } + } + + // shrinkBuf(utf16buf, out) + if (utf16buf.length !== out) { + if(utf16buf.subarray) { + utf16buf = utf16buf.subarray(0, out); + } else { + utf16buf.length = out; + } + } + + // return String.fromCharCode.apply(null, utf16buf); + return utils.applyFromCharCode(utf16buf); +}; + + +// That's all for the pako functions. + + +/** + * Transform a javascript string into an array (typed if possible) of bytes, + * UTF-8 encoded. + * @param {String} str the string to encode + * @return {Array|Uint8Array|Buffer} the UTF-8 encoded string. 
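+ *
+ * Worked example: utf8encode("A\u00e9") yields the bytes 0x41, 0xC3, 0xA9
+ * ("A" is a single byte, U+00E9 encodes as the two-byte sequence C3 A9).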
+ */ +exports.utf8encode = function utf8encode(str) { + if (support.nodebuffer) { + return nodejsUtils.newBufferFrom(str, "utf-8"); + } + + return string2buf(str); +}; + + +/** + * Transform a bytes array (or a representation) representing an UTF-8 encoded + * string into a javascript string. + * @param {Array|Uint8Array|Buffer} buf the data de decode + * @return {String} the decoded string. + */ +exports.utf8decode = function utf8decode(buf) { + if (support.nodebuffer) { + return utils.transformTo("nodebuffer", buf).toString("utf-8"); + } + + buf = utils.transformTo(support.uint8array ? "uint8array" : "array", buf); + + return buf2string(buf); +}; + +/** + * A worker to decode utf8 encoded binary chunks into string chunks. + * @constructor + */ +function Utf8DecodeWorker() { + GenericWorker.call(this, "utf-8 decode"); + // the last bytes if a chunk didn't end with a complete codepoint. + this.leftOver = null; +} +utils.inherits(Utf8DecodeWorker, GenericWorker); + +/** + * @see GenericWorker.processChunk + */ +Utf8DecodeWorker.prototype.processChunk = function (chunk) { + + var data = utils.transformTo(support.uint8array ? "uint8array" : "array", chunk.data); + + // 1st step, re-use what's left of the previous chunk + if (this.leftOver && this.leftOver.length) { + if(support.uint8array) { + var previousData = data; + data = new Uint8Array(previousData.length + this.leftOver.length); + data.set(this.leftOver, 0); + data.set(previousData, this.leftOver.length); + } else { + data = this.leftOver.concat(data); + } + this.leftOver = null; + } + + var nextBoundary = utf8border(data); + var usableData = data; + if (nextBoundary !== data.length) { + if (support.uint8array) { + usableData = data.subarray(0, nextBoundary); + this.leftOver = data.subarray(nextBoundary, data.length); + } else { + usableData = data.slice(0, nextBoundary); + this.leftOver = data.slice(nextBoundary, data.length); + } + } + + this.push({ + data : exports.utf8decode(usableData), + meta : chunk.meta + }); +}; + +/** + * @see GenericWorker.flush + */ +Utf8DecodeWorker.prototype.flush = function () { + if(this.leftOver && this.leftOver.length) { + this.push({ + data : exports.utf8decode(this.leftOver), + meta : {} + }); + this.leftOver = null; + } +}; +exports.Utf8DecodeWorker = Utf8DecodeWorker; + +/** + * A worker to endcode string chunks into utf8 encoded binary chunks. + * @constructor + */ +function Utf8EncodeWorker() { + GenericWorker.call(this, "utf-8 encode"); +} +utils.inherits(Utf8EncodeWorker, GenericWorker); + +/** + * @see GenericWorker.processChunk + */ +Utf8EncodeWorker.prototype.processChunk = function (chunk) { + this.push({ + data : exports.utf8encode(chunk.data), + meta : chunk.meta + }); +}; +exports.Utf8EncodeWorker = Utf8EncodeWorker; + +},{"./nodejsUtils":22,"./stream/GenericWorker":38,"./support":40,"./utils":42}],42:[function(require,module,exports){ +'use strict'; + +var support = require('./support'); +var base64 = require('./base64'); +var nodejsUtils = require('./nodejsUtils'); +var setImmediate = require('set-immediate-shim'); +var external = require("./external"); + + +/** + * Convert a string that pass as a "binary string": it should represent a byte + * array but may have > 255 char codes. Be sure to take only the first byte + * and returns the byte array. + * @param {String} str the string to transform. + * @return {Array|Uint8Array} the string in a binary format. 
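+ *
+ * Worked example: string2binary("A\u2603") keeps only the low byte of each char
+ * code, producing [0x41, 0x03] (since 0x2603 & 0xFF === 0x03).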
+ */ +function string2binary(str) { + var result = null; + if (support.uint8array) { + result = new Uint8Array(str.length); + } else { + result = new Array(str.length); + } + return stringToArrayLike(str, result); +} + +/** + * Create a new blob with the given content and the given type. + * @param {String|ArrayBuffer} part the content to put in the blob. DO NOT use + * an Uint8Array because the stock browser of android 4 won't accept it (it + * will be silently converted to a string, "[object Uint8Array]"). + * + * Use only ONE part to build the blob to avoid a memory leak in IE11 / Edge: + * when a large amount of Array is used to create the Blob, the amount of + * memory consumed is nearly 100 times the original data amount. + * + * @param {String} type the mime type of the blob. + * @return {Blob} the created blob. + */ +exports.newBlob = function(part, type) { + exports.checkSupport("blob"); + + try { + // Blob constructor + return new Blob([part], { + type: type + }); + } + catch (e) { + + try { + // deprecated, browser only, old way + var Builder = self.BlobBuilder || self.WebKitBlobBuilder || self.MozBlobBuilder || self.MSBlobBuilder; + var builder = new Builder(); + builder.append(part); + return builder.getBlob(type); + } + catch (e) { + + // well, fuck ?! + throw new Error("Bug : can't construct the Blob."); + } + } + + +}; +/** + * The identity function. + * @param {Object} input the input. + * @return {Object} the same input. + */ +function identity(input) { + return input; +} + +/** + * Fill in an array with a string. + * @param {String} str the string to use. + * @param {Array|ArrayBuffer|Uint8Array|Buffer} array the array to fill in (will be mutated). + * @return {Array|ArrayBuffer|Uint8Array|Buffer} the updated array. + */ +function stringToArrayLike(str, array) { + for (var i = 0; i < str.length; ++i) { + array[i] = str.charCodeAt(i) & 0xFF; + } + return array; +} + +/** + * An helper for the function arrayLikeToString. + * This contains static information and functions that + * can be optimized by the browser JIT compiler. + */ +var arrayToStringHelper = { + /** + * Transform an array of int into a string, chunk by chunk. + * See the performances notes on arrayLikeToString. + * @param {Array|ArrayBuffer|Uint8Array|Buffer} array the array to transform. + * @param {String} type the type of the array. + * @param {Integer} chunk the chunk size. + * @return {String} the resulting string. + * @throws Error if the chunk is too big for the stack. + */ + stringifyByChunk: function(array, type, chunk) { + var result = [], k = 0, len = array.length; + // shortcut + if (len <= chunk) { + return String.fromCharCode.apply(null, array); + } + while (k < len) { + if (type === "array" || type === "nodebuffer") { + result.push(String.fromCharCode.apply(null, array.slice(k, Math.min(k + chunk, len)))); + } + else { + result.push(String.fromCharCode.apply(null, array.subarray(k, Math.min(k + chunk, len)))); + } + k += chunk; + } + return result.join(""); + }, + /** + * Call String.fromCharCode on every item in the array. + * This is the naive implementation, which generate A LOT of intermediate string. + * This should be used when everything else fail. + * @param {Array|ArrayBuffer|Uint8Array|Buffer} array the array to transform. + * @return {String} the result. 
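+     *
+     * Worked example: stringifyByChar([72, 105]) returns "Hi" (one fromCharCode
+     * call per byte, which is why this is only the fallback path).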
+ */ + stringifyByChar: function(array){ + var resultStr = ""; + for(var i = 0; i < array.length; i++) { + resultStr += String.fromCharCode(array[i]); + } + return resultStr; + }, + applyCanBeUsed : { + /** + * true if the browser accepts to use String.fromCharCode on Uint8Array + */ + uint8array : (function () { + try { + return support.uint8array && String.fromCharCode.apply(null, new Uint8Array(1)).length === 1; + } catch (e) { + return false; + } + })(), + /** + * true if the browser accepts to use String.fromCharCode on nodejs Buffer. + */ + nodebuffer : (function () { + try { + return support.nodebuffer && String.fromCharCode.apply(null, nodejsUtils.allocBuffer(1)).length === 1; + } catch (e) { + return false; + } + })() + } +}; + +/** + * Transform an array-like object to a string. + * @param {Array|ArrayBuffer|Uint8Array|Buffer} array the array to transform. + * @return {String} the result. + */ +function arrayLikeToString(array) { + // Performances notes : + // -------------------- + // String.fromCharCode.apply(null, array) is the fastest, see + // see http://jsperf.com/converting-a-uint8array-to-a-string/2 + // but the stack is limited (and we can get huge arrays !). + // + // result += String.fromCharCode(array[i]); generate too many strings ! + // + // This code is inspired by http://jsperf.com/arraybuffer-to-string-apply-performance/2 + // TODO : we now have workers that split the work. Do we still need that ? + var chunk = 65536, + type = exports.getTypeOf(array), + canUseApply = true; + if (type === "uint8array") { + canUseApply = arrayToStringHelper.applyCanBeUsed.uint8array; + } else if (type === "nodebuffer") { + canUseApply = arrayToStringHelper.applyCanBeUsed.nodebuffer; + } + + if (canUseApply) { + while (chunk > 1) { + try { + return arrayToStringHelper.stringifyByChunk(array, type, chunk); + } catch (e) { + chunk = Math.floor(chunk / 2); + } + } + } + + // no apply or chunk error : slow and painful algorithm + // default browser on android 4.* + return arrayToStringHelper.stringifyByChar(array); +} + +exports.applyFromCharCode = arrayLikeToString; + + +/** + * Copy the data from an array-like to an other array-like. + * @param {Array|ArrayBuffer|Uint8Array|Buffer} arrayFrom the origin array. + * @param {Array|ArrayBuffer|Uint8Array|Buffer} arrayTo the destination array which will be mutated. + * @return {Array|ArrayBuffer|Uint8Array|Buffer} the updated destination array. + */ +function arrayLikeToArrayLike(arrayFrom, arrayTo) { + for (var i = 0; i < arrayFrom.length; i++) { + arrayTo[i] = arrayFrom[i]; + } + return arrayTo; +} + +// a matrix containing functions to transform everything into everything. +var transform = {}; + +// string to ? +transform["string"] = { + "string": identity, + "array": function(input) { + return stringToArrayLike(input, new Array(input.length)); + }, + "arraybuffer": function(input) { + return transform["string"]["uint8array"](input).buffer; + }, + "uint8array": function(input) { + return stringToArrayLike(input, new Uint8Array(input.length)); + }, + "nodebuffer": function(input) { + return stringToArrayLike(input, nodejsUtils.allocBuffer(input.length)); + } +}; + +// array to ? +transform["array"] = { + "string": arrayLikeToString, + "array": identity, + "arraybuffer": function(input) { + return (new Uint8Array(input)).buffer; + }, + "uint8array": function(input) { + return new Uint8Array(input); + }, + "nodebuffer": function(input) { + return nodejsUtils.newBufferFrom(input); + } +}; + +// arraybuffer to ? 
+transform["arraybuffer"] = { + "string": function(input) { + return arrayLikeToString(new Uint8Array(input)); + }, + "array": function(input) { + return arrayLikeToArrayLike(new Uint8Array(input), new Array(input.byteLength)); + }, + "arraybuffer": identity, + "uint8array": function(input) { + return new Uint8Array(input); + }, + "nodebuffer": function(input) { + return nodejsUtils.newBufferFrom(new Uint8Array(input)); + } +}; + +// uint8array to ? +transform["uint8array"] = { + "string": arrayLikeToString, + "array": function(input) { + return arrayLikeToArrayLike(input, new Array(input.length)); + }, + "arraybuffer": function(input) { + return input.buffer; + }, + "uint8array": identity, + "nodebuffer": function(input) { + return nodejsUtils.newBufferFrom(input); + } +}; + +// nodebuffer to ? +transform["nodebuffer"] = { + "string": arrayLikeToString, + "array": function(input) { + return arrayLikeToArrayLike(input, new Array(input.length)); + }, + "arraybuffer": function(input) { + return transform["nodebuffer"]["uint8array"](input).buffer; + }, + "uint8array": function(input) { + return arrayLikeToArrayLike(input, new Uint8Array(input.length)); + }, + "nodebuffer": identity +}; + +/** + * Transform an input into any type. + * The supported output type are : string, array, uint8array, arraybuffer, nodebuffer. + * If no output type is specified, the unmodified input will be returned. + * @param {String} outputType the output type. + * @param {String|Array|ArrayBuffer|Uint8Array|Buffer} input the input to convert. + * @throws {Error} an Error if the browser doesn't support the requested output type. + */ +exports.transformTo = function(outputType, input) { + if (!input) { + // undefined, null, etc + // an empty string won't harm. + input = ""; + } + if (!outputType) { + return input; + } + exports.checkSupport(outputType); + var inputType = exports.getTypeOf(input); + var result = transform[inputType][outputType](input); + return result; +}; + +/** + * Return the type of the input. + * The type will be in a format valid for JSZip.utils.transformTo : string, array, uint8array, arraybuffer. + * @param {Object} input the input to identify. + * @return {String} the (lowercase) type of the input. + */ +exports.getTypeOf = function(input) { + if (typeof input === "string") { + return "string"; + } + if (Object.prototype.toString.call(input) === "[object Array]") { + return "array"; + } + if (support.nodebuffer && nodejsUtils.isBuffer(input)) { + return "nodebuffer"; + } + if (support.uint8array && input instanceof Uint8Array) { + return "uint8array"; + } + if (support.arraybuffer && input instanceof ArrayBuffer) { + return "arraybuffer"; + } +}; + +/** + * Throw an exception if the type is not supported. + * @param {String} type the type to check. + * @throws {Error} an Error if the browser doesn't support the requested type. + */ +exports.checkSupport = function(type) { + var supported = support[type.toLowerCase()]; + if (!supported) { + throw new Error(type + " is not supported by this platform"); + } +}; + +exports.MAX_VALUE_16BITS = 65535; +exports.MAX_VALUE_32BITS = -1; // well, "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF" is parsed as -1 + +/** + * Prettify a string read as binary. + * @param {string} str the string to prettify. + * @return {string} a pretty string. + */ +exports.pretty = function(str) { + var res = '', + code, i; + for (i = 0; i < (str || "").length; i++) { + code = str.charCodeAt(i); + res += '\\x' + (code < 16 ? 
"0" : "") + code.toString(16).toUpperCase(); + } + return res; +}; + +/** + * Defer the call of a function. + * @param {Function} callback the function to call asynchronously. + * @param {Array} args the arguments to give to the callback. + */ +exports.delay = function(callback, args, self) { + setImmediate(function () { + callback.apply(self || null, args || []); + }); +}; + +/** + * Extends a prototype with an other, without calling a constructor with + * side effects. Inspired by nodejs' `utils.inherits` + * @param {Function} ctor the constructor to augment + * @param {Function} superCtor the parent constructor to use + */ +exports.inherits = function (ctor, superCtor) { + var Obj = function() {}; + Obj.prototype = superCtor.prototype; + ctor.prototype = new Obj(); +}; + +/** + * Merge the objects passed as parameters into a new one. + * @private + * @param {...Object} var_args All objects to merge. + * @return {Object} a new object with the data of the others. + */ +exports.extend = function() { + var result = {}, i, attr; + for (i = 0; i < arguments.length; i++) { // arguments is not enumerable in some browsers + for (attr in arguments[i]) { + if (arguments[i].hasOwnProperty(attr) && typeof result[attr] === "undefined") { + result[attr] = arguments[i][attr]; + } + } + } + return result; +}; + +/** + * Transform arbitrary content into a Promise. + * @param {String} name a name for the content being processed. + * @param {Object} inputData the content to process. + * @param {Boolean} isBinary true if the content is not an unicode string + * @param {Boolean} isOptimizedBinaryString true if the string content only has one byte per character. + * @param {Boolean} isBase64 true if the string content is encoded with base64. + * @return {Promise} a promise in a format usable by JSZip. + */ +exports.prepareContent = function(name, inputData, isBinary, isOptimizedBinaryString, isBase64) { + + // if inputData is already a promise, this flatten it. + var promise = external.Promise.resolve(inputData).then(function(data) { + + + var isBlob = support.blob && (data instanceof Blob || ['[object File]', '[object Blob]'].indexOf(Object.prototype.toString.call(data)) !== -1); + + if (isBlob && typeof FileReader !== "undefined") { + return new external.Promise(function (resolve, reject) { + var reader = new FileReader(); + + reader.onload = function(e) { + resolve(e.target.result); + }; + reader.onerror = function(e) { + reject(e.target.error); + }; + reader.readAsArrayBuffer(data); + }); + } else { + return data; + } + }); + + return promise.then(function(data) { + var dataType = exports.getTypeOf(data); + + if (!dataType) { + return external.Promise.reject( + new Error("Can't read the data of '" + name + "'. Is it " + + "in a supported JavaScript type (String, Blob, ArrayBuffer, etc) ?") + ); + } + // special case : it's way easier to work with Uint8Array than with ArrayBuffer + if (dataType === "arraybuffer") { + data = exports.transformTo("uint8array", data); + } else if (dataType === "string") { + if (isBase64) { + data = base64.decode(data); + } + else if (isBinary) { + // optimizedBinaryString === true means that the file has already been filtered with a 0xFF mask + if (isOptimizedBinaryString !== true) { + // this is a string, not in a base64 format. 
+ // Be sure that this is a correct "binary string" + data = string2binary(data); + } + } + } + return data; + }); +}; + +},{"./base64":11,"./external":16,"./nodejsUtils":22,"./support":40,"set-immediate-shim":79}],43:[function(require,module,exports){ +'use strict'; +var readerFor = require('./reader/readerFor'); +var utils = require('./utils'); +var sig = require('./signature'); +var ZipEntry = require('./zipEntry'); +var utf8 = require('./utf8'); +var support = require('./support'); +// class ZipEntries {{{ +/** + * All the entries in the zip file. + * @constructor + * @param {Object} loadOptions Options for loading the stream. + */ +function ZipEntries(loadOptions) { + this.files = []; + this.loadOptions = loadOptions; +} +ZipEntries.prototype = { + /** + * Check that the reader is on the specified signature. + * @param {string} expectedSignature the expected signature. + * @throws {Error} if it is an other signature. + */ + checkSignature: function(expectedSignature) { + if (!this.reader.readAndCheckSignature(expectedSignature)) { + this.reader.index -= 4; + var signature = this.reader.readString(4); + throw new Error("Corrupted zip or bug: unexpected signature " + "(" + utils.pretty(signature) + ", expected " + utils.pretty(expectedSignature) + ")"); + } + }, + /** + * Check if the given signature is at the given index. + * @param {number} askedIndex the index to check. + * @param {string} expectedSignature the signature to expect. + * @return {boolean} true if the signature is here, false otherwise. + */ + isSignature: function(askedIndex, expectedSignature) { + var currentIndex = this.reader.index; + this.reader.setIndex(askedIndex); + var signature = this.reader.readString(4); + var result = signature === expectedSignature; + this.reader.setIndex(currentIndex); + return result; + }, + /** + * Read the end of the central directory. + */ + readBlockEndOfCentral: function() { + this.diskNumber = this.reader.readInt(2); + this.diskWithCentralDirStart = this.reader.readInt(2); + this.centralDirRecordsOnThisDisk = this.reader.readInt(2); + this.centralDirRecords = this.reader.readInt(2); + this.centralDirSize = this.reader.readInt(4); + this.centralDirOffset = this.reader.readInt(4); + + this.zipCommentLength = this.reader.readInt(2); + // warning : the encoding depends of the system locale + // On a linux machine with LANG=en_US.utf8, this field is utf8 encoded. + // On a windows machine, this field is encoded with the localized windows code page. + var zipComment = this.reader.readData(this.zipCommentLength); + var decodeParamType = support.uint8array ? "uint8array" : "array"; + // To get consistent behavior with the generation part, we will assume that + // this is utf8 encoded unless specified otherwise. + var decodeContent = utils.transformTo(decodeParamType, zipComment); + this.zipComment = this.loadOptions.decodeFileName(decodeContent); + }, + /** + * Read the end of the Zip 64 central directory. + * Not merged with the method readEndOfCentral : + * The end of central can coexist with its Zip64 brother, + * I don't want to read the wrong number of bytes ! 
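+     * (For orientation, and as a non-authoritative summary of the code below: the Zip64
+     * record stores the entry counts, central directory size and offset as 8-byte
+     * integers (disk numbers as 4 bytes), followed by an optional extensible data
+     * sector that is parsed into zip64ExtensibleData, keyed by extra-field id.)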
+ */ + readBlockZip64EndOfCentral: function() { + this.zip64EndOfCentralSize = this.reader.readInt(8); + this.reader.skip(4); + // this.versionMadeBy = this.reader.readString(2); + // this.versionNeeded = this.reader.readInt(2); + this.diskNumber = this.reader.readInt(4); + this.diskWithCentralDirStart = this.reader.readInt(4); + this.centralDirRecordsOnThisDisk = this.reader.readInt(8); + this.centralDirRecords = this.reader.readInt(8); + this.centralDirSize = this.reader.readInt(8); + this.centralDirOffset = this.reader.readInt(8); + + this.zip64ExtensibleData = {}; + var extraDataSize = this.zip64EndOfCentralSize - 44, + index = 0, + extraFieldId, + extraFieldLength, + extraFieldValue; + while (index < extraDataSize) { + extraFieldId = this.reader.readInt(2); + extraFieldLength = this.reader.readInt(4); + extraFieldValue = this.reader.readData(extraFieldLength); + this.zip64ExtensibleData[extraFieldId] = { + id: extraFieldId, + length: extraFieldLength, + value: extraFieldValue + }; + } + }, + /** + * Read the end of the Zip 64 central directory locator. + */ + readBlockZip64EndOfCentralLocator: function() { + this.diskWithZip64CentralDirStart = this.reader.readInt(4); + this.relativeOffsetEndOfZip64CentralDir = this.reader.readInt(8); + this.disksCount = this.reader.readInt(4); + if (this.disksCount > 1) { + throw new Error("Multi-volumes zip are not supported"); + } + }, + /** + * Read the local files, based on the offset read in the central part. + */ + readLocalFiles: function() { + var i, file; + for (i = 0; i < this.files.length; i++) { + file = this.files[i]; + this.reader.setIndex(file.localHeaderOffset); + this.checkSignature(sig.LOCAL_FILE_HEADER); + file.readLocalPart(this.reader); + file.handleUTF8(); + file.processAttributes(); + } + }, + /** + * Read the central directory. + */ + readCentralDir: function() { + var file; + + this.reader.setIndex(this.centralDirOffset); + while (this.reader.readAndCheckSignature(sig.CENTRAL_FILE_HEADER)) { + file = new ZipEntry({ + zip64: this.zip64 + }, this.loadOptions); + file.readCentralPart(this.reader); + this.files.push(file); + } + + if (this.centralDirRecords !== this.files.length) { + if (this.centralDirRecords !== 0 && this.files.length === 0) { + // We expected some records but couldn't find ANY. + // This is really suspicious, as if something went wrong. + throw new Error("Corrupted zip or bug: expected " + this.centralDirRecords + " records in central dir, got " + this.files.length); + } else { + // We found some records but not all. + // Something is wrong but we got something for the user: no error here. + // console.warn("expected", this.centralDirRecords, "records in central dir, got", this.files.length); + } + } + }, + /** + * Read the end of central directory. + */ + readEndOfCentral: function() { + var offset = this.reader.lastIndexOfSignature(sig.CENTRAL_DIRECTORY_END); + if (offset < 0) { + // Check if the content is a truncated zip or complete garbage. + // A "LOCAL_FILE_HEADER" is not required at the beginning (auto + // extractible zip for example) but it can give a good hint. + // If an ajax request was used without responseType, we will also + // get unreadable data. + var isGarbage = !this.isSignature(0, sig.LOCAL_FILE_HEADER); + + if (isGarbage) { + throw new Error("Can't find end of central directory : is this a zip file ? 
" + + "If it is, see https://stuk.github.io/jszip/documentation/howto/read_zip.html"); + } else { + throw new Error("Corrupted zip: can't find end of central directory"); + } + + } + this.reader.setIndex(offset); + var endOfCentralDirOffset = offset; + this.checkSignature(sig.CENTRAL_DIRECTORY_END); + this.readBlockEndOfCentral(); + + + /* extract from the zip spec : + 4) If one of the fields in the end of central directory + record is too small to hold required data, the field + should be set to -1 (0xFFFF or 0xFFFFFFFF) and the + ZIP64 format record should be created. + 5) The end of central directory record and the + Zip64 end of central directory locator record must + reside on the same disk when splitting or spanning + an archive. + */ + if (this.diskNumber === utils.MAX_VALUE_16BITS || this.diskWithCentralDirStart === utils.MAX_VALUE_16BITS || this.centralDirRecordsOnThisDisk === utils.MAX_VALUE_16BITS || this.centralDirRecords === utils.MAX_VALUE_16BITS || this.centralDirSize === utils.MAX_VALUE_32BITS || this.centralDirOffset === utils.MAX_VALUE_32BITS) { + this.zip64 = true; + + /* + Warning : the zip64 extension is supported, but ONLY if the 64bits integer read from + the zip file can fit into a 32bits integer. This cannot be solved : JavaScript represents + all numbers as 64-bit double precision IEEE 754 floating point numbers. + So, we have 53bits for integers and bitwise operations treat everything as 32bits. + see https://developer.mozilla.org/en-US/docs/JavaScript/Reference/Operators/Bitwise_Operators + and http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-262.pdf section 8.5 + */ + + // should look for a zip64 EOCD locator + offset = this.reader.lastIndexOfSignature(sig.ZIP64_CENTRAL_DIRECTORY_LOCATOR); + if (offset < 0) { + throw new Error("Corrupted zip: can't find the ZIP64 end of central directory locator"); + } + this.reader.setIndex(offset); + this.checkSignature(sig.ZIP64_CENTRAL_DIRECTORY_LOCATOR); + this.readBlockZip64EndOfCentralLocator(); + + // now the zip64 EOCD record + if (!this.isSignature(this.relativeOffsetEndOfZip64CentralDir, sig.ZIP64_CENTRAL_DIRECTORY_END)) { + // console.warn("ZIP64 end of central directory not where expected."); + this.relativeOffsetEndOfZip64CentralDir = this.reader.lastIndexOfSignature(sig.ZIP64_CENTRAL_DIRECTORY_END); + if (this.relativeOffsetEndOfZip64CentralDir < 0) { + throw new Error("Corrupted zip: can't find the ZIP64 end of central directory"); + } + } + this.reader.setIndex(this.relativeOffsetEndOfZip64CentralDir); + this.checkSignature(sig.ZIP64_CENTRAL_DIRECTORY_END); + this.readBlockZip64EndOfCentral(); + } + + var expectedEndOfCentralDirOffset = this.centralDirOffset + this.centralDirSize; + if (this.zip64) { + expectedEndOfCentralDirOffset += 20; // end of central dir 64 locator + expectedEndOfCentralDirOffset += 12 /* should not include the leading 12 bytes */ + this.zip64EndOfCentralSize; + } + + var extraBytes = endOfCentralDirOffset - expectedEndOfCentralDirOffset; + + if (extraBytes > 0) { + // console.warn(extraBytes, "extra bytes at beginning or within zipfile"); + if (this.isSignature(endOfCentralDirOffset, sig.CENTRAL_FILE_HEADER)) { + // The offsets seem wrong, but we have something at the specified offset. + // So… we keep it. 
+ } else { + // the offset is wrong, update the "zero" of the reader + // this happens if data has been prepended (crx files for example) + this.reader.zero = extraBytes; + } + } else if (extraBytes < 0) { + throw new Error("Corrupted zip: missing " + Math.abs(extraBytes) + " bytes."); + } + }, + prepareReader: function(data) { + this.reader = readerFor(data); + }, + /** + * Read a zip file and create ZipEntries. + * @param {String|ArrayBuffer|Uint8Array|Buffer} data the binary string representing a zip file. + */ + load: function(data) { + this.prepareReader(data); + this.readEndOfCentral(); + this.readCentralDir(); + this.readLocalFiles(); + } +}; +// }}} end of ZipEntries +module.exports = ZipEntries; + +},{"./reader/readerFor":32,"./signature":33,"./support":40,"./utf8":41,"./utils":42,"./zipEntry":44}],44:[function(require,module,exports){ +'use strict'; +var readerFor = require('./reader/readerFor'); +var utils = require('./utils'); +var CompressedObject = require('./compressedObject'); +var crc32fn = require('./crc32'); +var utf8 = require('./utf8'); +var compressions = require('./compressions'); +var support = require('./support'); + +var MADE_BY_DOS = 0x00; +var MADE_BY_UNIX = 0x03; + +/** + * Find a compression registered in JSZip. + * @param {string} compressionMethod the method magic to find. + * @return {Object|null} the JSZip compression object, null if none found. + */ +var findCompression = function(compressionMethod) { + for (var method in compressions) { + if (!compressions.hasOwnProperty(method)) { + continue; + } + if (compressions[method].magic === compressionMethod) { + return compressions[method]; + } + } + return null; +}; + +// class ZipEntry {{{ +/** + * An entry in the zip file. + * @constructor + * @param {Object} options Options of the current file. + * @param {Object} loadOptions Options for loading the stream. + */ +function ZipEntry(options, loadOptions) { + this.options = options; + this.loadOptions = loadOptions; +} +ZipEntry.prototype = { + /** + * say if the file is encrypted. + * @return {boolean} true if the file is encrypted, false otherwise. + */ + isEncrypted: function() { + // bit 1 is set + return (this.bitFlag & 0x0001) === 0x0001; + }, + /** + * say if the file has utf-8 filename/comment. + * @return {boolean} true if the filename/comment is in utf-8, false otherwise. + */ + useUTF8: function() { + // bit 11 is set + return (this.bitFlag & 0x0800) === 0x0800; + }, + /** + * Read the local part of a zip file and add the info in this object. + * @param {DataReader} reader the reader to use. + */ + readLocalPart: function(reader) { + var compression, localExtraFieldsLength; + + // we already know everything from the central dir ! + // If the central dir data are false, we are doomed. + // On the bright side, the local part is scary : zip64, data descriptors, both, etc. + // The less data we get here, the more reliable this should be. + // Let's skip the whole header and dash to the data ! + reader.skip(22); + // in some zip created on windows, the filename stored in the central dir contains \ instead of /. + // Strangely, the filename here is OK. + // I would love to treat these zip files as corrupted (see http://www.info-zip.org/FAQ.html#backslashes + // or APPNOTE#4.4.17.1, "All slashes MUST be forward slashes '/'") but there are a lot of bad zip generators... + // Search "unzip mismatching "local" filename continuing with "central" filename version" on + // the internet. 
+ // + // I think I see the logic here : the central directory is used to display + // content and the local directory is used to extract the files. Mixing / and \ + // may be used to display \ to windows users and use / when extracting the files. + // Unfortunately, this lead also to some issues : http://seclists.org/fulldisclosure/2009/Sep/394 + this.fileNameLength = reader.readInt(2); + localExtraFieldsLength = reader.readInt(2); // can't be sure this will be the same as the central dir + // the fileName is stored as binary data, the handleUTF8 method will take care of the encoding. + this.fileName = reader.readData(this.fileNameLength); + reader.skip(localExtraFieldsLength); + + if (this.compressedSize === -1 || this.uncompressedSize === -1) { + throw new Error("Bug or corrupted zip : didn't get enough information from the central directory " + "(compressedSize === -1 || uncompressedSize === -1)"); + } + + compression = findCompression(this.compressionMethod); + if (compression === null) { // no compression found + throw new Error("Corrupted zip : compression " + utils.pretty(this.compressionMethod) + " unknown (inner file : " + utils.transformTo("string", this.fileName) + ")"); + } + this.decompressed = new CompressedObject(this.compressedSize, this.uncompressedSize, this.crc32, compression, reader.readData(this.compressedSize)); + }, + + /** + * Read the central part of a zip file and add the info in this object. + * @param {DataReader} reader the reader to use. + */ + readCentralPart: function(reader) { + this.versionMadeBy = reader.readInt(2); + reader.skip(2); + // this.versionNeeded = reader.readInt(2); + this.bitFlag = reader.readInt(2); + this.compressionMethod = reader.readString(2); + this.date = reader.readDate(); + this.crc32 = reader.readInt(4); + this.compressedSize = reader.readInt(4); + this.uncompressedSize = reader.readInt(4); + var fileNameLength = reader.readInt(2); + this.extraFieldsLength = reader.readInt(2); + this.fileCommentLength = reader.readInt(2); + this.diskNumberStart = reader.readInt(2); + this.internalFileAttributes = reader.readInt(2); + this.externalFileAttributes = reader.readInt(4); + this.localHeaderOffset = reader.readInt(4); + + if (this.isEncrypted()) { + throw new Error("Encrypted zip are not supported"); + } + + // will be read in the local part, see the comments there + reader.skip(fileNameLength); + this.readExtraFields(reader); + this.parseZIP64ExtraField(reader); + this.fileComment = reader.readData(this.fileCommentLength); + }, + + /** + * Parse the external file attributes and get the unix/dos permissions. + */ + processAttributes: function () { + this.unixPermissions = null; + this.dosPermissions = null; + var madeBy = this.versionMadeBy >> 8; + + // Check if we have the DOS directory flag set. + // We look for it in the DOS and UNIX permissions + // but some unknown platform could set it as a compatibility flag. + this.dir = this.externalFileAttributes & 0x0010 ? true : false; + + if(madeBy === MADE_BY_DOS) { + // first 6 bits (0 to 5) + this.dosPermissions = this.externalFileAttributes & 0x3F; + } + + if(madeBy === MADE_BY_UNIX) { + this.unixPermissions = (this.externalFileAttributes >> 16) & 0xFFFF; + // the octal permissions are in (this.unixPermissions & 0x01FF).toString(8); + } + + // fail safe : if the name ends with a / it probably means a folder + if (!this.dir && this.fileNameStr.slice(-1) === '/') { + this.dir = true; + } + }, + + /** + * Parse the ZIP64 extra field and merge the info in the current ZipEntry. 
+ * @param {DataReader} reader the reader to use. + */ + parseZIP64ExtraField: function(reader) { + + if (!this.extraFields[0x0001]) { + return; + } + + // should be something, preparing the extra reader + var extraReader = readerFor(this.extraFields[0x0001].value); + + // I really hope that these 64bits integer can fit in 32 bits integer, because js + // won't let us have more. + if (this.uncompressedSize === utils.MAX_VALUE_32BITS) { + this.uncompressedSize = extraReader.readInt(8); + } + if (this.compressedSize === utils.MAX_VALUE_32BITS) { + this.compressedSize = extraReader.readInt(8); + } + if (this.localHeaderOffset === utils.MAX_VALUE_32BITS) { + this.localHeaderOffset = extraReader.readInt(8); + } + if (this.diskNumberStart === utils.MAX_VALUE_32BITS) { + this.diskNumberStart = extraReader.readInt(4); + } + }, + /** + * Read the central part of a zip file and add the info in this object. + * @param {DataReader} reader the reader to use. + */ + readExtraFields: function(reader) { + var end = reader.index + this.extraFieldsLength, + extraFieldId, + extraFieldLength, + extraFieldValue; + + if (!this.extraFields) { + this.extraFields = {}; + } + + while (reader.index + 4 < end) { + extraFieldId = reader.readInt(2); + extraFieldLength = reader.readInt(2); + extraFieldValue = reader.readData(extraFieldLength); + + this.extraFields[extraFieldId] = { + id: extraFieldId, + length: extraFieldLength, + value: extraFieldValue + }; + } + + reader.setIndex(end); + }, + /** + * Apply an UTF8 transformation if needed. + */ + handleUTF8: function() { + var decodeParamType = support.uint8array ? "uint8array" : "array"; + if (this.useUTF8()) { + this.fileNameStr = utf8.utf8decode(this.fileName); + this.fileCommentStr = utf8.utf8decode(this.fileComment); + } else { + var upath = this.findExtraFieldUnicodePath(); + if (upath !== null) { + this.fileNameStr = upath; + } else { + // ASCII text or unsupported code page + var fileNameByteArray = utils.transformTo(decodeParamType, this.fileName); + this.fileNameStr = this.loadOptions.decodeFileName(fileNameByteArray); + } + + var ucomment = this.findExtraFieldUnicodeComment(); + if (ucomment !== null) { + this.fileCommentStr = ucomment; + } else { + // ASCII text or unsupported code page + var commentByteArray = utils.transformTo(decodeParamType, this.fileComment); + this.fileCommentStr = this.loadOptions.decodeFileName(commentByteArray); + } + } + }, + + /** + * Find the unicode path declared in the extra field, if any. + * @return {String} the unicode path, null otherwise. + */ + findExtraFieldUnicodePath: function() { + var upathField = this.extraFields[0x7075]; + if (upathField) { + var extraReader = readerFor(upathField.value); + + // wrong version + if (extraReader.readInt(1) !== 1) { + return null; + } + + // the crc of the filename changed, this field is out of date. + if (crc32fn(this.fileName) !== extraReader.readInt(4)) { + return null; + } + + return utf8.utf8decode(extraReader.readData(upathField.length - 5)); + } + return null; + }, + + /** + * Find the unicode comment declared in the extra field, if any. + * @return {String} the unicode comment, null otherwise. + */ + findExtraFieldUnicodeComment: function() { + var ucommentField = this.extraFields[0x6375]; + if (ucommentField) { + var extraReader = readerFor(ucommentField.value); + + // wrong version + if (extraReader.readInt(1) !== 1) { + return null; + } + + // the crc of the comment changed, this field is out of date. 
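+            // (Field layout of the 0x6375 "Info-ZIP Unicode Comment" extra field: one
+            // version byte, a 4-byte CRC-32 of the raw comment, then the UTF-8 comment
+            // itself, which is why only `length - 5` bytes remain to decode below.)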
+ if (crc32fn(this.fileComment) !== extraReader.readInt(4)) { + return null; + } + + return utf8.utf8decode(extraReader.readData(ucommentField.length - 5)); + } + return null; + } +}; +module.exports = ZipEntry; + +},{"./compressedObject":12,"./compressions":13,"./crc32":14,"./reader/readerFor":32,"./support":40,"./utf8":41,"./utils":42}],45:[function(require,module,exports){ +'use strict'; + +var StreamHelper = require('./stream/StreamHelper'); +var DataWorker = require('./stream/DataWorker'); +var utf8 = require('./utf8'); +var CompressedObject = require('./compressedObject'); +var GenericWorker = require('./stream/GenericWorker'); + +/** + * A simple object representing a file in the zip file. + * @constructor + * @param {string} name the name of the file + * @param {String|ArrayBuffer|Uint8Array|Buffer} data the data + * @param {Object} options the options of the file + */ +var ZipObject = function(name, data, options) { + this.name = name; + this.dir = options.dir; + this.date = options.date; + this.comment = options.comment; + this.unixPermissions = options.unixPermissions; + this.dosPermissions = options.dosPermissions; + + this._data = data; + this._dataBinary = options.binary; + // keep only the compression + this.options = { + compression : options.compression, + compressionOptions : options.compressionOptions + }; +}; + +ZipObject.prototype = { + /** + * Create an internal stream for the content of this object. + * @param {String} type the type of each chunk. + * @return StreamHelper the stream. + */ + internalStream: function (type) { + var result = null, outputType = "string"; + try { + if (!type) { + throw new Error("No output type specified."); + } + outputType = type.toLowerCase(); + var askUnicodeString = outputType === "string" || outputType === "text"; + if (outputType === "binarystring" || outputType === "text") { + outputType = "string"; + } + result = this._decompressWorker(); + + var isUnicodeString = !this._dataBinary; + + if (isUnicodeString && !askUnicodeString) { + result = result.pipe(new utf8.Utf8EncodeWorker()); + } + if (!isUnicodeString && askUnicodeString) { + result = result.pipe(new utf8.Utf8DecodeWorker()); + } + } catch (e) { + result = new GenericWorker("error"); + result.error(e); + } + + return new StreamHelper(result, outputType, ""); + }, + + /** + * Prepare the content in the asked type. + * @param {String} type the type of the result. + * @param {Function} onUpdate a function to call on each internal update. + * @return Promise the promise of the result. + */ + async: function (type, onUpdate) { + return this.internalStream(type).accumulate(onUpdate); + }, + + /** + * Prepare the content as a nodejs stream. + * @param {String} type the type of each chunk. + * @param {Function} onUpdate a function to call on each internal update. + * @return Stream the stream. + */ + nodeStream: function (type, onUpdate) { + return this.internalStream(type || "nodebuffer").toNodejsStream(onUpdate); + }, + + /** + * Return a worker for the compressed content. + * @private + * @param {Object} compression the compression object to use. + * @param {Object} compressionOptions the options to use when compressing. + * @return Worker the worker. 
+ */ + _compressWorker: function (compression, compressionOptions) { + if ( + this._data instanceof CompressedObject && + this._data.compression.magic === compression.magic + ) { + return this._data.getCompressedWorker(); + } else { + var result = this._decompressWorker(); + if(!this._dataBinary) { + result = result.pipe(new utf8.Utf8EncodeWorker()); + } + return CompressedObject.createWorkerFrom(result, compression, compressionOptions); + } + }, + /** + * Return a worker for the decompressed content. + * @private + * @return Worker the worker. + */ + _decompressWorker : function () { + if (this._data instanceof CompressedObject) { + return this._data.getContentWorker(); + } else if (this._data instanceof GenericWorker) { + return this._data; + } else { + return new DataWorker(this._data); + } + } +}; + +var removedMethods = ["asText", "asBinary", "asNodeBuffer", "asUint8Array", "asArrayBuffer"]; +var removedFn = function () { + throw new Error("This method has been removed in JSZip 3.0, please check the upgrade guide."); +}; + +for(var i = 0; i < removedMethods.length; i++) { + ZipObject.prototype[removedMethods[i]] = removedFn; +} +module.exports = ZipObject; + +},{"./compressedObject":12,"./stream/DataWorker":37,"./stream/GenericWorker":38,"./stream/StreamHelper":39,"./utf8":41}],46:[function(require,module,exports){ +'use strict'; +var immediate = require('immediate'); + +/* istanbul ignore next */ +function INTERNAL() {} + +var handlers = {}; + +var REJECTED = ['REJECTED']; +var FULFILLED = ['FULFILLED']; +var PENDING = ['PENDING']; + +module.exports = Promise; + +function Promise(resolver) { + if (typeof resolver !== 'function') { + throw new TypeError('resolver must be a function'); + } + this.state = PENDING; + this.queue = []; + this.outcome = void 0; + if (resolver !== INTERNAL) { + safelyResolveThenable(this, resolver); + } +} + +Promise.prototype["finally"] = function (callback) { + if (typeof callback !== 'function') { + return this; + } + var p = this.constructor; + return this.then(resolve, reject); + + function resolve(value) { + function yes () { + return value; + } + return p.resolve(callback()).then(yes); + } + function reject(reason) { + function no () { + throw reason; + } + return p.resolve(callback()).then(no); + } +}; +Promise.prototype["catch"] = function (onRejected) { + return this.then(null, onRejected); +}; +Promise.prototype.then = function (onFulfilled, onRejected) { + if (typeof onFulfilled !== 'function' && this.state === FULFILLED || + typeof onRejected !== 'function' && this.state === REJECTED) { + return this; + } + var promise = new this.constructor(INTERNAL); + if (this.state !== PENDING) { + var resolver = this.state === FULFILLED ? 
onFulfilled : onRejected; + unwrap(promise, resolver, this.outcome); + } else { + this.queue.push(new QueueItem(promise, onFulfilled, onRejected)); + } + + return promise; +}; +function QueueItem(promise, onFulfilled, onRejected) { + this.promise = promise; + if (typeof onFulfilled === 'function') { + this.onFulfilled = onFulfilled; + this.callFulfilled = this.otherCallFulfilled; + } + if (typeof onRejected === 'function') { + this.onRejected = onRejected; + this.callRejected = this.otherCallRejected; + } +} +QueueItem.prototype.callFulfilled = function (value) { + handlers.resolve(this.promise, value); +}; +QueueItem.prototype.otherCallFulfilled = function (value) { + unwrap(this.promise, this.onFulfilled, value); +}; +QueueItem.prototype.callRejected = function (value) { + handlers.reject(this.promise, value); +}; +QueueItem.prototype.otherCallRejected = function (value) { + unwrap(this.promise, this.onRejected, value); +}; + +function unwrap(promise, func, value) { + immediate(function () { + var returnValue; + try { + returnValue = func(value); + } catch (e) { + return handlers.reject(promise, e); + } + if (returnValue === promise) { + handlers.reject(promise, new TypeError('Cannot resolve promise with itself')); + } else { + handlers.resolve(promise, returnValue); + } + }); +} + +handlers.resolve = function (self, value) { + var result = tryCatch(getThen, value); + if (result.status === 'error') { + return handlers.reject(self, result.value); + } + var thenable = result.value; + + if (thenable) { + safelyResolveThenable(self, thenable); + } else { + self.state = FULFILLED; + self.outcome = value; + var i = -1; + var len = self.queue.length; + while (++i < len) { + self.queue[i].callFulfilled(value); + } + } + return self; +}; +handlers.reject = function (self, error) { + self.state = REJECTED; + self.outcome = error; + var i = -1; + var len = self.queue.length; + while (++i < len) { + self.queue[i].callRejected(error); + } + return self; +}; + +function getThen(obj) { + // Make sure we only access the accessor once as required by the spec + var then = obj && obj.then; + if (obj && (typeof obj === 'object' || typeof obj === 'function') && typeof then === 'function') { + return function appyThen() { + then.apply(obj, arguments); + }; + } +} + +function safelyResolveThenable(self, thenable) { + // Either fulfill, reject or reject with error + var called = false; + function onError(value) { + if (called) { + return; + } + called = true; + handlers.reject(self, value); + } + + function onSuccess(value) { + if (called) { + return; + } + called = true; + handlers.resolve(self, value); + } + + function tryToUnwrap() { + thenable(onSuccess, onError); + } + + var result = tryCatch(tryToUnwrap); + if (result.status === 'error') { + onError(result.value); + } +} + +function tryCatch(func, value) { + var out = {}; + try { + out.value = func(value); + out.status = 'success'; + } catch (e) { + out.status = 'error'; + out.value = e; + } + return out; +} + +Promise.resolve = resolve; +function resolve(value) { + if (value instanceof this) { + return value; + } + return handlers.resolve(new this(INTERNAL), value); +} + +Promise.reject = reject; +function reject(reason) { + var promise = new this(INTERNAL); + return handlers.reject(promise, reason); +} + +Promise.all = all; +function all(iterable) { + var self = this; + if (Object.prototype.toString.call(iterable) !== '[object Array]') { + return this.reject(new TypeError('must be an array')); + } + + var len = iterable.length; + var called = false; + 
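+  // Each entry is resolved independently: `resolved` (declared below) counts fulfilled
+  // values, and `called` guarantees the aggregate promise settles only once, either with
+  // the completed `values` array or with the first rejection reason.
+  // Illustrative usage sketch (names invented):
+  //   Promise.all([fetchA(), fetchB()]).then(function (values) { /* values[0], values[1] */ });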
if (!len) { + return this.resolve([]); + } + + var values = new Array(len); + var resolved = 0; + var i = -1; + var promise = new this(INTERNAL); + + while (++i < len) { + allResolver(iterable[i], i); + } + return promise; + function allResolver(value, i) { + self.resolve(value).then(resolveFromAll, function (error) { + if (!called) { + called = true; + handlers.reject(promise, error); + } + }); + function resolveFromAll(outValue) { + values[i] = outValue; + if (++resolved === len && !called) { + called = true; + handlers.resolve(promise, values); + } + } + } +} + +Promise.race = race; +function race(iterable) { + var self = this; + if (Object.prototype.toString.call(iterable) !== '[object Array]') { + return this.reject(new TypeError('must be an array')); + } + + var len = iterable.length; + var called = false; + if (!len) { + return this.resolve([]); + } + + var i = -1; + var promise = new this(INTERNAL); + + while (++i < len) { + resolver(iterable[i]); + } + return promise; + function resolver(value) { + self.resolve(value).then(function (response) { + if (!called) { + called = true; + handlers.resolve(promise, response); + } + }, function (error) { + if (!called) { + called = true; + handlers.reject(promise, error); + } + }); + } +} + +},{"immediate":7}],47:[function(require,module,exports){ +// Top level file is just a mixin of submodules & constants +'use strict'; + +var assign = require('./lib/utils/common').assign; + +var deflate = require('./lib/deflate'); +var inflate = require('./lib/inflate'); +var constants = require('./lib/zlib/constants'); + +var pako = {}; + +assign(pako, deflate, inflate, constants); + +module.exports = pako; + +},{"./lib/deflate":48,"./lib/inflate":49,"./lib/utils/common":50,"./lib/zlib/constants":53}],48:[function(require,module,exports){ +'use strict'; + + +var zlib_deflate = require('./zlib/deflate'); +var utils = require('./utils/common'); +var strings = require('./utils/strings'); +var msg = require('./zlib/messages'); +var ZStream = require('./zlib/zstream'); + +var toString = Object.prototype.toString; + +/* Public constants ==========================================================*/ +/* ===========================================================================*/ + +var Z_NO_FLUSH = 0; +var Z_FINISH = 4; + +var Z_OK = 0; +var Z_STREAM_END = 1; +var Z_SYNC_FLUSH = 2; + +var Z_DEFAULT_COMPRESSION = -1; + +var Z_DEFAULT_STRATEGY = 0; + +var Z_DEFLATED = 8; + +/* ===========================================================================*/ + + +/** + * class Deflate + * + * Generic JS-style wrapper for zlib calls. If you don't need + * streaming behaviour - use more simple functions: [[deflate]], + * [[deflateRaw]] and [[gzip]]. + **/ + +/* internal + * Deflate.chunks -> Array + * + * Chunks of output data, if [[Deflate#onData]] not overridden. + **/ + +/** + * Deflate.result -> Uint8Array|Array + * + * Compressed result, generated by default [[Deflate#onData]] + * and [[Deflate#onEnd]] handlers. Filled after you push last chunk + * (call [[Deflate#push]] with `Z_FINISH` / `true` param) or if you + * push a chunk with explicit flush (call [[Deflate#push]] with + * `Z_SYNC_FLUSH` param). + **/ + +/** + * Deflate.err -> Number + * + * Error code after deflate finished. 0 (Z_OK) on success. + * You will not need it in real life, because deflate errors + * are possible only on wrong options or bad `onData` / `onEnd` + * custom handlers. 
+ **/ + +/** + * Deflate.msg -> String + * + * Error message, if [[Deflate.err]] != 0 + **/ + + +/** + * new Deflate(options) + * - options (Object): zlib deflate options. + * + * Creates new deflator instance with specified params. Throws exception + * on bad params. Supported options: + * + * - `level` + * - `windowBits` + * - `memLevel` + * - `strategy` + * - `dictionary` + * + * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced) + * for more information on these. + * + * Additional options, for internal needs: + * + * - `chunkSize` - size of generated data chunks (16K by default) + * - `raw` (Boolean) - do raw deflate + * - `gzip` (Boolean) - create gzip wrapper + * - `to` (String) - if equal to 'string', then result will be "binary string" + * (each char code [0..255]) + * - `header` (Object) - custom header for gzip + * - `text` (Boolean) - true if compressed data believed to be text + * - `time` (Number) - modification time, unix timestamp + * - `os` (Number) - operation system code + * - `extra` (Array) - array of bytes with extra data (max 65536) + * - `name` (String) - file name (binary string) + * - `comment` (String) - comment (binary string) + * - `hcrc` (Boolean) - true if header crc should be added + * + * ##### Example: + * + * ```javascript + * var pako = require('pako') + * , chunk1 = Uint8Array([1,2,3,4,5,6,7,8,9]) + * , chunk2 = Uint8Array([10,11,12,13,14,15,16,17,18,19]); + * + * var deflate = new pako.Deflate({ level: 3}); + * + * deflate.push(chunk1, false); + * deflate.push(chunk2, true); // true -> last chunk + * + * if (deflate.err) { throw new Error(deflate.err); } + * + * console.log(deflate.result); + * ``` + **/ +function Deflate(options) { + if (!(this instanceof Deflate)) return new Deflate(options); + + this.options = utils.assign({ + level: Z_DEFAULT_COMPRESSION, + method: Z_DEFLATED, + chunkSize: 16384, + windowBits: 15, + memLevel: 8, + strategy: Z_DEFAULT_STRATEGY, + to: '' + }, options || {}); + + var opt = this.options; + + if (opt.raw && (opt.windowBits > 0)) { + opt.windowBits = -opt.windowBits; + } + + else if (opt.gzip && (opt.windowBits > 0) && (opt.windowBits < 16)) { + opt.windowBits += 16; + } + + this.err = 0; // error code, if happens (0 = Z_OK) + this.msg = ''; // error message + this.ended = false; // used to avoid multiple onEnd() calls + this.chunks = []; // chunks of compressed data + + this.strm = new ZStream(); + this.strm.avail_out = 0; + + var status = zlib_deflate.deflateInit2( + this.strm, + opt.level, + opt.method, + opt.windowBits, + opt.memLevel, + opt.strategy + ); + + if (status !== Z_OK) { + throw new Error(msg[status]); + } + + if (opt.header) { + zlib_deflate.deflateSetHeader(this.strm, opt.header); + } + + if (opt.dictionary) { + var dict; + // Convert data if needed + if (typeof opt.dictionary === 'string') { + // If we need to compress text, change encoding to utf8. + dict = strings.string2buf(opt.dictionary); + } else if (toString.call(opt.dictionary) === '[object ArrayBuffer]') { + dict = new Uint8Array(opt.dictionary); + } else { + dict = opt.dictionary; + } + + status = zlib_deflate.deflateSetDictionary(this.strm, dict); + + if (status !== Z_OK) { + throw new Error(msg[status]); + } + + this._dict_set = true; + } +} + +/** + * Deflate#push(data[, mode]) -> Boolean + * - data (Uint8Array|Array|ArrayBuffer|String): input data. Strings will be + * converted to utf8 byte sequence. + * - mode (Number|Boolean): 0..6 for corresponding Z_NO_FLUSH..Z_TREE modes. + * See constants. 
Skipped or `false` means Z_NO_FLUSH, `true` means Z_FINISH. + * + * Sends input data to deflate pipe, generating [[Deflate#onData]] calls with + * new compressed chunks. Returns `true` on success. The last data block must have + * mode Z_FINISH (or `true`). That will flush internal pending buffers and call + * [[Deflate#onEnd]]. For interim explicit flushes (without ending the stream) you + * can use mode Z_SYNC_FLUSH, keeping the compression context. + * + * On fail call [[Deflate#onEnd]] with error code and return false. + * + * We strongly recommend to use `Uint8Array` on input for best speed (output + * array format is detected automatically). Also, don't skip last param and always + * use the same type in your code (boolean or number). That will improve JS speed. + * + * For regular `Array`-s make sure all elements are [0..255]. + * + * ##### Example + * + * ```javascript + * push(chunk, false); // push one of data chunks + * ... + * push(chunk, true); // push last chunk + * ``` + **/ +Deflate.prototype.push = function (data, mode) { + var strm = this.strm; + var chunkSize = this.options.chunkSize; + var status, _mode; + + if (this.ended) { return false; } + + _mode = (mode === ~~mode) ? mode : ((mode === true) ? Z_FINISH : Z_NO_FLUSH); + + // Convert data if needed + if (typeof data === 'string') { + // If we need to compress text, change encoding to utf8. + strm.input = strings.string2buf(data); + } else if (toString.call(data) === '[object ArrayBuffer]') { + strm.input = new Uint8Array(data); + } else { + strm.input = data; + } + + strm.next_in = 0; + strm.avail_in = strm.input.length; + + do { + if (strm.avail_out === 0) { + strm.output = new utils.Buf8(chunkSize); + strm.next_out = 0; + strm.avail_out = chunkSize; + } + status = zlib_deflate.deflate(strm, _mode); /* no bad return value */ + + if (status !== Z_STREAM_END && status !== Z_OK) { + this.onEnd(status); + this.ended = true; + return false; + } + if (strm.avail_out === 0 || (strm.avail_in === 0 && (_mode === Z_FINISH || _mode === Z_SYNC_FLUSH))) { + if (this.options.to === 'string') { + this.onData(strings.buf2binstring(utils.shrinkBuf(strm.output, strm.next_out))); + } else { + this.onData(utils.shrinkBuf(strm.output, strm.next_out)); + } + } + } while ((strm.avail_in > 0 || strm.avail_out === 0) && status !== Z_STREAM_END); + + // Finalize on the last chunk. + if (_mode === Z_FINISH) { + status = zlib_deflate.deflateEnd(this.strm); + this.onEnd(status); + this.ended = true; + return status === Z_OK; + } + + // callback interim results if Z_SYNC_FLUSH. + if (_mode === Z_SYNC_FLUSH) { + this.onEnd(Z_OK); + strm.avail_out = 0; + return true; + } + + return true; +}; + + +/** + * Deflate#onData(chunk) -> Void + * - chunk (Uint8Array|Array|String): output data. Type of array depends + * on js engine support. When string output requested, each chunk + * will be string. + * + * By default, stores data blocks in `chunks[]` property and glue + * those in `onEnd`. Override this handler, if you need another behaviour. + **/ +Deflate.prototype.onData = function (chunk) { + this.chunks.push(chunk); +}; + + +/** + * Deflate#onEnd(status) -> Void + * - status (Number): deflate status. 0 (Z_OK) on success, + * other if not. + * + * Called once after you tell deflate that the input stream is + * complete (Z_FINISH) or should be flushed (Z_SYNC_FLUSH) + * or if an error happened. By default - join collected chunks, + * free memory and fill `results` / `err` properties. 
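+ * (In this build the joined output is stored on the `result` property and the
+ * status code on `err`; see onEnd below.)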
+ **/ +Deflate.prototype.onEnd = function (status) { + // On success - join + if (status === Z_OK) { + if (this.options.to === 'string') { + this.result = this.chunks.join(''); + } else { + this.result = utils.flattenChunks(this.chunks); + } + } + this.chunks = []; + this.err = status; + this.msg = this.strm.msg; +}; + + +/** + * deflate(data[, options]) -> Uint8Array|Array|String + * - data (Uint8Array|Array|String): input data to compress. + * - options (Object): zlib deflate options. + * + * Compress `data` with deflate algorithm and `options`. + * + * Supported options are: + * + * - level + * - windowBits + * - memLevel + * - strategy + * - dictionary + * + * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced) + * for more information on these. + * + * Sugar (options): + * + * - `raw` (Boolean) - say that we work with raw stream, if you don't wish to specify + * negative windowBits implicitly. + * - `to` (String) - if equal to 'string', then result will be "binary string" + * (each char code [0..255]) + * + * ##### Example: + * + * ```javascript + * var pako = require('pako') + * , data = Uint8Array([1,2,3,4,5,6,7,8,9]); + * + * console.log(pako.deflate(data)); + * ``` + **/ +function deflate(input, options) { + var deflator = new Deflate(options); + + deflator.push(input, true); + + // That will never happens, if you don't cheat with options :) + if (deflator.err) { throw deflator.msg || msg[deflator.err]; } + + return deflator.result; +} + + +/** + * deflateRaw(data[, options]) -> Uint8Array|Array|String + * - data (Uint8Array|Array|String): input data to compress. + * - options (Object): zlib deflate options. + * + * The same as [[deflate]], but creates raw data, without wrapper + * (header and adler32 crc). + **/ +function deflateRaw(input, options) { + options = options || {}; + options.raw = true; + return deflate(input, options); +} + + +/** + * gzip(data[, options]) -> Uint8Array|Array|String + * - data (Uint8Array|Array|String): input data to compress. + * - options (Object): zlib deflate options. + * + * The same as [[deflate]], but create gzip wrapper instead of + * deflate one. + **/ +function gzip(input, options) { + options = options || {}; + options.gzip = true; + return deflate(input, options); +} + + +exports.Deflate = Deflate; +exports.deflate = deflate; +exports.deflateRaw = deflateRaw; +exports.gzip = gzip; + +},{"./utils/common":50,"./utils/strings":51,"./zlib/deflate":55,"./zlib/messages":60,"./zlib/zstream":62}],49:[function(require,module,exports){ +'use strict'; + + +var zlib_inflate = require('./zlib/inflate'); +var utils = require('./utils/common'); +var strings = require('./utils/strings'); +var c = require('./zlib/constants'); +var msg = require('./zlib/messages'); +var ZStream = require('./zlib/zstream'); +var GZheader = require('./zlib/gzheader'); + +var toString = Object.prototype.toString; + +/** + * class Inflate + * + * Generic JS-style wrapper for zlib calls. If you don't need + * streaming behaviour - use more simple functions: [[inflate]] + * and [[inflateRaw]]. + **/ + +/* internal + * inflate.chunks -> Array + * + * Chunks of output data, if [[Inflate#onData]] not overridden. + **/ + +/** + * Inflate.result -> Uint8Array|Array|String + * + * Uncompressed result, generated by default [[Inflate#onData]] + * and [[Inflate#onEnd]] handlers. 
Filled after you push last chunk + * (call [[Inflate#push]] with `Z_FINISH` / `true` param) or if you + * push a chunk with explicit flush (call [[Inflate#push]] with + * `Z_SYNC_FLUSH` param). + **/ + +/** + * Inflate.err -> Number + * + * Error code after inflate finished. 0 (Z_OK) on success. + * Should be checked if broken data possible. + **/ + +/** + * Inflate.msg -> String + * + * Error message, if [[Inflate.err]] != 0 + **/ + + +/** + * new Inflate(options) + * - options (Object): zlib inflate options. + * + * Creates new inflator instance with specified params. Throws exception + * on bad params. Supported options: + * + * - `windowBits` + * - `dictionary` + * + * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced) + * for more information on these. + * + * Additional options, for internal needs: + * + * - `chunkSize` - size of generated data chunks (16K by default) + * - `raw` (Boolean) - do raw inflate + * - `to` (String) - if equal to 'string', then result will be converted + * from utf8 to utf16 (javascript) string. When string output requested, + * chunk length can differ from `chunkSize`, depending on content. + * + * By default, when no options set, autodetect deflate/gzip data format via + * wrapper header. + * + * ##### Example: + * + * ```javascript + * var pako = require('pako') + * , chunk1 = Uint8Array([1,2,3,4,5,6,7,8,9]) + * , chunk2 = Uint8Array([10,11,12,13,14,15,16,17,18,19]); + * + * var inflate = new pako.Inflate({ level: 3}); + * + * inflate.push(chunk1, false); + * inflate.push(chunk2, true); // true -> last chunk + * + * if (inflate.err) { throw new Error(inflate.err); } + * + * console.log(inflate.result); + * ``` + **/ +function Inflate(options) { + if (!(this instanceof Inflate)) return new Inflate(options); + + this.options = utils.assign({ + chunkSize: 16384, + windowBits: 0, + to: '' + }, options || {}); + + var opt = this.options; + + // Force window size for `raw` data, if not set directly, + // because we have no header for autodetect. + if (opt.raw && (opt.windowBits >= 0) && (opt.windowBits < 16)) { + opt.windowBits = -opt.windowBits; + if (opt.windowBits === 0) { opt.windowBits = -15; } + } + + // If `windowBits` not defined (and mode not raw) - set autodetect flag for gzip/deflate + if ((opt.windowBits >= 0) && (opt.windowBits < 16) && + !(options && options.windowBits)) { + opt.windowBits += 32; + } + + // Gzip header has no info about windows size, we can do autodetect only + // for deflate. 
So, if window size not set, force it to max when gzip possible + if ((opt.windowBits > 15) && (opt.windowBits < 48)) { + // bit 3 (16) -> gzipped data + // bit 4 (32) -> autodetect gzip/deflate + if ((opt.windowBits & 15) === 0) { + opt.windowBits |= 15; + } + } + + this.err = 0; // error code, if happens (0 = Z_OK) + this.msg = ''; // error message + this.ended = false; // used to avoid multiple onEnd() calls + this.chunks = []; // chunks of compressed data + + this.strm = new ZStream(); + this.strm.avail_out = 0; + + var status = zlib_inflate.inflateInit2( + this.strm, + opt.windowBits + ); + + if (status !== c.Z_OK) { + throw new Error(msg[status]); + } + + this.header = new GZheader(); + + zlib_inflate.inflateGetHeader(this.strm, this.header); + + // Setup dictionary + if (opt.dictionary) { + // Convert data if needed + if (typeof opt.dictionary === 'string') { + opt.dictionary = strings.string2buf(opt.dictionary); + } else if (toString.call(opt.dictionary) === '[object ArrayBuffer]') { + opt.dictionary = new Uint8Array(opt.dictionary); + } + if (opt.raw) { //In raw mode we need to set the dictionary early + status = zlib_inflate.inflateSetDictionary(this.strm, opt.dictionary); + if (status !== c.Z_OK) { + throw new Error(msg[status]); + } + } + } +} + +/** + * Inflate#push(data[, mode]) -> Boolean + * - data (Uint8Array|Array|ArrayBuffer|String): input data + * - mode (Number|Boolean): 0..6 for corresponding Z_NO_FLUSH..Z_TREE modes. + * See constants. Skipped or `false` means Z_NO_FLUSH, `true` means Z_FINISH. + * + * Sends input data to inflate pipe, generating [[Inflate#onData]] calls with + * new output chunks. Returns `true` on success. The last data block must have + * mode Z_FINISH (or `true`). That will flush internal pending buffers and call + * [[Inflate#onEnd]]. For interim explicit flushes (without ending the stream) you + * can use mode Z_SYNC_FLUSH, keeping the decompression context. + * + * On fail call [[Inflate#onEnd]] with error code and return false. + * + * We strongly recommend to use `Uint8Array` on input for best speed (output + * format is detected automatically). Also, don't skip last param and always + * use the same type in your code (boolean or number). That will improve JS speed. + * + * For regular `Array`-s make sure all elements are [0..255]. + * + * ##### Example + * + * ```javascript + * push(chunk, false); // push one of data chunks + * ... + * push(chunk, true); // push last chunk + * ``` + **/ +Inflate.prototype.push = function (data, mode) { + var strm = this.strm; + var chunkSize = this.options.chunkSize; + var dictionary = this.options.dictionary; + var status, _mode; + var next_out_utf8, tail, utf8str; + + // Flag to properly process Z_BUF_ERROR on testing inflate call + // when we check that all output data was flushed. + var allowBufError = false; + + if (this.ended) { return false; } + _mode = (mode === ~~mode) ? mode : ((mode === true) ? 
c.Z_FINISH : c.Z_NO_FLUSH); + + // Convert data if needed + if (typeof data === 'string') { + // Only binary strings can be decompressed on practice + strm.input = strings.binstring2buf(data); + } else if (toString.call(data) === '[object ArrayBuffer]') { + strm.input = new Uint8Array(data); + } else { + strm.input = data; + } + + strm.next_in = 0; + strm.avail_in = strm.input.length; + + do { + if (strm.avail_out === 0) { + strm.output = new utils.Buf8(chunkSize); + strm.next_out = 0; + strm.avail_out = chunkSize; + } + + status = zlib_inflate.inflate(strm, c.Z_NO_FLUSH); /* no bad return value */ + + if (status === c.Z_NEED_DICT && dictionary) { + status = zlib_inflate.inflateSetDictionary(this.strm, dictionary); + } + + if (status === c.Z_BUF_ERROR && allowBufError === true) { + status = c.Z_OK; + allowBufError = false; + } + + if (status !== c.Z_STREAM_END && status !== c.Z_OK) { + this.onEnd(status); + this.ended = true; + return false; + } + + if (strm.next_out) { + if (strm.avail_out === 0 || status === c.Z_STREAM_END || (strm.avail_in === 0 && (_mode === c.Z_FINISH || _mode === c.Z_SYNC_FLUSH))) { + + if (this.options.to === 'string') { + + next_out_utf8 = strings.utf8border(strm.output, strm.next_out); + + tail = strm.next_out - next_out_utf8; + utf8str = strings.buf2string(strm.output, next_out_utf8); + + // move tail + strm.next_out = tail; + strm.avail_out = chunkSize - tail; + if (tail) { utils.arraySet(strm.output, strm.output, next_out_utf8, tail, 0); } + + this.onData(utf8str); + + } else { + this.onData(utils.shrinkBuf(strm.output, strm.next_out)); + } + } + } + + // When no more input data, we should check that internal inflate buffers + // are flushed. The only way to do it when avail_out = 0 - run one more + // inflate pass. But if output data not exists, inflate return Z_BUF_ERROR. + // Here we set flag to process this error properly. + // + // NOTE. Deflate does not return error in this case and does not needs such + // logic. + if (strm.avail_in === 0 && strm.avail_out === 0) { + allowBufError = true; + } + + } while ((strm.avail_in > 0 || strm.avail_out === 0) && status !== c.Z_STREAM_END); + + if (status === c.Z_STREAM_END) { + _mode = c.Z_FINISH; + } + + // Finalize on the last chunk. + if (_mode === c.Z_FINISH) { + status = zlib_inflate.inflateEnd(this.strm); + this.onEnd(status); + this.ended = true; + return status === c.Z_OK; + } + + // callback interim results if Z_SYNC_FLUSH. + if (_mode === c.Z_SYNC_FLUSH) { + this.onEnd(c.Z_OK); + strm.avail_out = 0; + return true; + } + + return true; +}; + + +/** + * Inflate#onData(chunk) -> Void + * - chunk (Uint8Array|Array|String): output data. Type of array depends + * on js engine support. When string output requested, each chunk + * will be string. + * + * By default, stores data blocks in `chunks[]` property and glue + * those in `onEnd`. Override this handler, if you need another behaviour. + **/ +Inflate.prototype.onData = function (chunk) { + this.chunks.push(chunk); +}; + + +/** + * Inflate#onEnd(status) -> Void + * - status (Number): inflate status. 0 (Z_OK) on success, + * other if not. + * + * Called either after you tell inflate that the input stream is + * complete (Z_FINISH) or should be flushed (Z_SYNC_FLUSH) + * or if an error happened. By default - join collected chunks, + * free memory and fill `results` / `err` properties. 
+ **/ +Inflate.prototype.onEnd = function (status) { + // On success - join + if (status === c.Z_OK) { + if (this.options.to === 'string') { + // Glue & convert here, until we teach pako to send + // utf8 aligned strings to onData + this.result = this.chunks.join(''); + } else { + this.result = utils.flattenChunks(this.chunks); + } + } + this.chunks = []; + this.err = status; + this.msg = this.strm.msg; +}; + + +/** + * inflate(data[, options]) -> Uint8Array|Array|String + * - data (Uint8Array|Array|String): input data to decompress. + * - options (Object): zlib inflate options. + * + * Decompress `data` with inflate/ungzip and `options`. Autodetect + * format via wrapper header by default. That's why we don't provide + * separate `ungzip` method. + * + * Supported options are: + * + * - windowBits + * + * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced) + * for more information. + * + * Sugar (options): + * + * - `raw` (Boolean) - say that we work with raw stream, if you don't wish to specify + * negative windowBits implicitly. + * - `to` (String) - if equal to 'string', then result will be converted + * from utf8 to utf16 (javascript) string. When string output requested, + * chunk length can differ from `chunkSize`, depending on content. + * + * + * ##### Example: + * + * ```javascript + * var pako = require('pako') + * , input = pako.deflate([1,2,3,4,5,6,7,8,9]) + * , output; + * + * try { + * output = pako.inflate(input); + * } catch (err) + * console.log(err); + * } + * ``` + **/ +function inflate(input, options) { + var inflator = new Inflate(options); + + inflator.push(input, true); + + // That will never happens, if you don't cheat with options :) + if (inflator.err) { throw inflator.msg || msg[inflator.err]; } + + return inflator.result; +} + + +/** + * inflateRaw(data[, options]) -> Uint8Array|Array|String + * - data (Uint8Array|Array|String): input data to decompress. + * - options (Object): zlib inflate options. + * + * The same as [[inflate]], but creates raw data, without wrapper + * (header and adler32 crc). + **/ +function inflateRaw(input, options) { + options = options || {}; + options.raw = true; + return inflate(input, options); +} + + +/** + * ungzip(data[, options]) -> Uint8Array|Array|String + * - data (Uint8Array|Array|String): input data to decompress. + * - options (Object): zlib inflate options. + * + * Just shortcut to [[inflate]], because it autodetects format + * by header.content. Done for convenience. 
+ **/ + + +exports.Inflate = Inflate; +exports.inflate = inflate; +exports.inflateRaw = inflateRaw; +exports.ungzip = inflate; + +},{"./utils/common":50,"./utils/strings":51,"./zlib/constants":53,"./zlib/gzheader":56,"./zlib/inflate":58,"./zlib/messages":60,"./zlib/zstream":62}],50:[function(require,module,exports){ +'use strict'; + + +var TYPED_OK = (typeof Uint8Array !== 'undefined') && + (typeof Uint16Array !== 'undefined') && + (typeof Int32Array !== 'undefined'); + +function _has(obj, key) { + return Object.prototype.hasOwnProperty.call(obj, key); +} + +exports.assign = function (obj /*from1, from2, from3, ...*/) { + var sources = Array.prototype.slice.call(arguments, 1); + while (sources.length) { + var source = sources.shift(); + if (!source) { continue; } + + if (typeof source !== 'object') { + throw new TypeError(source + 'must be non-object'); + } + + for (var p in source) { + if (_has(source, p)) { + obj[p] = source[p]; + } + } + } + + return obj; +}; + + +// reduce buffer size, avoiding mem copy +exports.shrinkBuf = function (buf, size) { + if (buf.length === size) { return buf; } + if (buf.subarray) { return buf.subarray(0, size); } + buf.length = size; + return buf; +}; + + +var fnTyped = { + arraySet: function (dest, src, src_offs, len, dest_offs) { + if (src.subarray && dest.subarray) { + dest.set(src.subarray(src_offs, src_offs + len), dest_offs); + return; + } + // Fallback to ordinary array + for (var i = 0; i < len; i++) { + dest[dest_offs + i] = src[src_offs + i]; + } + }, + // Join array of chunks to single array. + flattenChunks: function (chunks) { + var i, l, len, pos, chunk, result; + + // calculate data length + len = 0; + for (i = 0, l = chunks.length; i < l; i++) { + len += chunks[i].length; + } + + // join chunks + result = new Uint8Array(len); + pos = 0; + for (i = 0, l = chunks.length; i < l; i++) { + chunk = chunks[i]; + result.set(chunk, pos); + pos += chunk.length; + } + + return result; + } +}; + +var fnUntyped = { + arraySet: function (dest, src, src_offs, len, dest_offs) { + for (var i = 0; i < len; i++) { + dest[dest_offs + i] = src[src_offs + i]; + } + }, + // Join array of chunks to single array. + flattenChunks: function (chunks) { + return [].concat.apply([], chunks); + } +}; + + +// Enable/Disable typed arrays use, for testing +// +exports.setTyped = function (on) { + if (on) { + exports.Buf8 = Uint8Array; + exports.Buf16 = Uint16Array; + exports.Buf32 = Int32Array; + exports.assign(exports, fnTyped); + } else { + exports.Buf8 = Array; + exports.Buf16 = Array; + exports.Buf32 = Array; + exports.assign(exports, fnUntyped); + } +}; + +exports.setTyped(TYPED_OK); + +},{}],51:[function(require,module,exports){ +// String encode/decode helpers +'use strict'; + + +var utils = require('./common'); + + +// Quick check if we can use fast array to bin string conversion +// +// - apply(Array) can fail on Android 2.2 +// - apply(Uint8Array) can fail on iOS 5.1 Safari +// +var STR_APPLY_OK = true; +var STR_APPLY_UIA_OK = true; + +try { String.fromCharCode.apply(null, [ 0 ]); } catch (__) { STR_APPLY_OK = false; } +try { String.fromCharCode.apply(null, new Uint8Array(1)); } catch (__) { STR_APPLY_UIA_OK = false; } + + +// Table with utf8 lengths (calculated by first byte of sequence) +// Note, that 5 & 6-byte values and some 4-byte values can not be represented in JS, +// because max possible codepoint is 0x10ffff +var _utf8len = new utils.Buf8(256); +for (var q = 0; q < 256; q++) { + _utf8len[q] = (q >= 252 ? 6 : q >= 248 ? 5 : q >= 240 ? 4 : q >= 224 ? 
3 : q >= 192 ? 2 : 1); +} +_utf8len[254] = _utf8len[254] = 1; // Invalid sequence start + + +// convert string to array (typed, when possible) +exports.string2buf = function (str) { + var buf, c, c2, m_pos, i, str_len = str.length, buf_len = 0; + + // count binary size + for (m_pos = 0; m_pos < str_len; m_pos++) { + c = str.charCodeAt(m_pos); + if ((c & 0xfc00) === 0xd800 && (m_pos + 1 < str_len)) { + c2 = str.charCodeAt(m_pos + 1); + if ((c2 & 0xfc00) === 0xdc00) { + c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00); + m_pos++; + } + } + buf_len += c < 0x80 ? 1 : c < 0x800 ? 2 : c < 0x10000 ? 3 : 4; + } + + // allocate buffer + buf = new utils.Buf8(buf_len); + + // convert + for (i = 0, m_pos = 0; i < buf_len; m_pos++) { + c = str.charCodeAt(m_pos); + if ((c & 0xfc00) === 0xd800 && (m_pos + 1 < str_len)) { + c2 = str.charCodeAt(m_pos + 1); + if ((c2 & 0xfc00) === 0xdc00) { + c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00); + m_pos++; + } + } + if (c < 0x80) { + /* one byte */ + buf[i++] = c; + } else if (c < 0x800) { + /* two bytes */ + buf[i++] = 0xC0 | (c >>> 6); + buf[i++] = 0x80 | (c & 0x3f); + } else if (c < 0x10000) { + /* three bytes */ + buf[i++] = 0xE0 | (c >>> 12); + buf[i++] = 0x80 | (c >>> 6 & 0x3f); + buf[i++] = 0x80 | (c & 0x3f); + } else { + /* four bytes */ + buf[i++] = 0xf0 | (c >>> 18); + buf[i++] = 0x80 | (c >>> 12 & 0x3f); + buf[i++] = 0x80 | (c >>> 6 & 0x3f); + buf[i++] = 0x80 | (c & 0x3f); + } + } + + return buf; +}; + +// Helper (used in 2 places) +function buf2binstring(buf, len) { + // On Chrome, the arguments in a function call that are allowed is `65534`. + // If the length of the buffer is smaller than that, we can use this optimization, + // otherwise we will take a slower path. + if (len < 65534) { + if ((buf.subarray && STR_APPLY_UIA_OK) || (!buf.subarray && STR_APPLY_OK)) { + return String.fromCharCode.apply(null, utils.shrinkBuf(buf, len)); + } + } + + var result = ''; + for (var i = 0; i < len; i++) { + result += String.fromCharCode(buf[i]); + } + return result; +} + + +// Convert byte array to binary string +exports.buf2binstring = function (buf) { + return buf2binstring(buf, buf.length); +}; + + +// Convert binary string (typed, when possible) +exports.binstring2buf = function (str) { + var buf = new utils.Buf8(str.length); + for (var i = 0, len = buf.length; i < len; i++) { + buf[i] = str.charCodeAt(i); + } + return buf; +}; + + +// convert array to string +exports.buf2string = function (buf, max) { + var i, out, c, c_len; + var len = max || buf.length; + + // Reserve max possible length (2 words per char) + // NB: by unknown reasons, Array is significantly faster for + // String.fromCharCode.apply than Uint16Array. + var utf16buf = new Array(len * 2); + + for (out = 0, i = 0; i < len;) { + c = buf[i++]; + // quick process ascii + if (c < 0x80) { utf16buf[out++] = c; continue; } + + c_len = _utf8len[c]; + // skip 5 & 6 byte codes + if (c_len > 4) { utf16buf[out++] = 0xfffd; i += c_len - 1; continue; } + + // apply mask on first byte + c &= c_len === 2 ? 0x1f : c_len === 3 ? 0x0f : 0x07; + // join the rest + while (c_len > 1 && i < len) { + c = (c << 6) | (buf[i++] & 0x3f); + c_len--; + } + + // terminated by end of string? 
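+    // (editor's note: c_len > 1 here means the multi-byte sequence was cut
+    // off by `len`, so a U+FFFD replacement character is emitted instead)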
+ if (c_len > 1) { utf16buf[out++] = 0xfffd; continue; } + + if (c < 0x10000) { + utf16buf[out++] = c; + } else { + c -= 0x10000; + utf16buf[out++] = 0xd800 | ((c >> 10) & 0x3ff); + utf16buf[out++] = 0xdc00 | (c & 0x3ff); + } + } + + return buf2binstring(utf16buf, out); +}; + + +// Calculate max possible position in utf8 buffer, +// that will not break sequence. If that's not possible +// - (very small limits) return max size as is. +// +// buf[] - utf8 bytes array +// max - length limit (mandatory); +exports.utf8border = function (buf, max) { + var pos; + + max = max || buf.length; + if (max > buf.length) { max = buf.length; } + + // go back from last position, until start of sequence found + pos = max - 1; + while (pos >= 0 && (buf[pos] & 0xC0) === 0x80) { pos--; } + + // Very small and broken sequence, + // return max, because we should return something anyway. + if (pos < 0) { return max; } + + // If we came to start of buffer - that means buffer is too small, + // return max too. + if (pos === 0) { return max; } + + return (pos + _utf8len[buf[pos]] > max) ? pos : max; +}; + +},{"./common":50}],52:[function(require,module,exports){ +'use strict'; + +// Note: adler32 takes 12% for level 0 and 2% for level 6. +// It isn't worth it to make additional optimizations as in original. +// Small size is preferable. + +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. + +function adler32(adler, buf, len, pos) { + var s1 = (adler & 0xffff) |0, + s2 = ((adler >>> 16) & 0xffff) |0, + n = 0; + + while (len !== 0) { + // Set limit ~ twice less than 5552, to keep + // s2 in 31-bits, because we force signed ints. + // in other case %= will fail. + n = len > 2000 ? 2000 : len; + len -= n; + + do { + s1 = (s1 + buf[pos++]) |0; + s2 = (s2 + s1) |0; + } while (--n); + + s1 %= 65521; + s2 %= 65521; + } + + return (s1 | (s2 << 16)) |0; +} + + +module.exports = adler32; + +},{}],53:[function(require,module,exports){ +'use strict'; + +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. 
If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. + +module.exports = { + + /* Allowed flush values; see deflate() and inflate() below for details */ + Z_NO_FLUSH: 0, + Z_PARTIAL_FLUSH: 1, + Z_SYNC_FLUSH: 2, + Z_FULL_FLUSH: 3, + Z_FINISH: 4, + Z_BLOCK: 5, + Z_TREES: 6, + + /* Return codes for the compression/decompression functions. Negative values + * are errors, positive values are used for special but normal events. + */ + Z_OK: 0, + Z_STREAM_END: 1, + Z_NEED_DICT: 2, + Z_ERRNO: -1, + Z_STREAM_ERROR: -2, + Z_DATA_ERROR: -3, + //Z_MEM_ERROR: -4, + Z_BUF_ERROR: -5, + //Z_VERSION_ERROR: -6, + + /* compression levels */ + Z_NO_COMPRESSION: 0, + Z_BEST_SPEED: 1, + Z_BEST_COMPRESSION: 9, + Z_DEFAULT_COMPRESSION: -1, + + + Z_FILTERED: 1, + Z_HUFFMAN_ONLY: 2, + Z_RLE: 3, + Z_FIXED: 4, + Z_DEFAULT_STRATEGY: 0, + + /* Possible values of the data_type field (though see inflate()) */ + Z_BINARY: 0, + Z_TEXT: 1, + //Z_ASCII: 1, // = Z_TEXT (deprecated) + Z_UNKNOWN: 2, + + /* The deflate compression method */ + Z_DEFLATED: 8 + //Z_NULL: null // Use -1 or null inline, depending on var type +}; + +},{}],54:[function(require,module,exports){ +'use strict'; + +// Note: we can't get significant speed boost here. +// So write code to minimize size - no pregenerated tables +// and array tools dependencies. + +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. + +// Use ordinary array, since untyped makes no boost here +function makeTable() { + var c, table = []; + + for (var n = 0; n < 256; n++) { + c = n; + for (var k = 0; k < 8; k++) { + c = ((c & 1) ? (0xEDB88320 ^ (c >>> 1)) : (c >>> 1)); + } + table[n] = c; + } + + return table; +} + +// Create table on load. Just 255 signed longs. Not a problem. +var crcTable = makeTable(); + + +function crc32(crc, buf, len, pos) { + var t = crcTable, + end = pos + len; + + crc ^= -1; + + for (var i = pos; i < end; i++) { + crc = (crc >>> 8) ^ t[(crc ^ buf[i]) & 0xFF]; + } + + return (crc ^ (-1)); // >>> 0; +} + + +module.exports = crc32; + +},{}],55:[function(require,module,exports){ +'use strict'; + +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. 
In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. + +var utils = require('../utils/common'); +var trees = require('./trees'); +var adler32 = require('./adler32'); +var crc32 = require('./crc32'); +var msg = require('./messages'); + +/* Public constants ==========================================================*/ +/* ===========================================================================*/ + + +/* Allowed flush values; see deflate() and inflate() below for details */ +var Z_NO_FLUSH = 0; +var Z_PARTIAL_FLUSH = 1; +//var Z_SYNC_FLUSH = 2; +var Z_FULL_FLUSH = 3; +var Z_FINISH = 4; +var Z_BLOCK = 5; +//var Z_TREES = 6; + + +/* Return codes for the compression/decompression functions. Negative values + * are errors, positive values are used for special but normal events. + */ +var Z_OK = 0; +var Z_STREAM_END = 1; +//var Z_NEED_DICT = 2; +//var Z_ERRNO = -1; +var Z_STREAM_ERROR = -2; +var Z_DATA_ERROR = -3; +//var Z_MEM_ERROR = -4; +var Z_BUF_ERROR = -5; +//var Z_VERSION_ERROR = -6; + + +/* compression levels */ +//var Z_NO_COMPRESSION = 0; +//var Z_BEST_SPEED = 1; +//var Z_BEST_COMPRESSION = 9; +var Z_DEFAULT_COMPRESSION = -1; + + +var Z_FILTERED = 1; +var Z_HUFFMAN_ONLY = 2; +var Z_RLE = 3; +var Z_FIXED = 4; +var Z_DEFAULT_STRATEGY = 0; + +/* Possible values of the data_type field (though see inflate()) */ +//var Z_BINARY = 0; +//var Z_TEXT = 1; +//var Z_ASCII = 1; // = Z_TEXT +var Z_UNKNOWN = 2; + + +/* The deflate compression method */ +var Z_DEFLATED = 8; + +/*============================================================================*/ + + +var MAX_MEM_LEVEL = 9; +/* Maximum value for memLevel in deflateInit2 */ +var MAX_WBITS = 15; +/* 32K LZ77 window */ +var DEF_MEM_LEVEL = 8; + + +var LENGTH_CODES = 29; +/* number of length codes, not counting the special END_BLOCK code */ +var LITERALS = 256; +/* number of literal bytes 0..255 */ +var L_CODES = LITERALS + 1 + LENGTH_CODES; +/* number of Literal or Length codes, including the END_BLOCK code */ +var D_CODES = 30; +/* number of distance codes */ +var BL_CODES = 19; +/* number of codes used to transfer the bit lengths */ +var HEAP_SIZE = 2 * L_CODES + 1; +/* maximum heap size */ +var MAX_BITS = 15; +/* All codes must not exceed MAX_BITS bits */ + +var MIN_MATCH = 3; +var MAX_MATCH = 258; +var MIN_LOOKAHEAD = (MAX_MATCH + MIN_MATCH + 1); + +var PRESET_DICT = 0x20; + +var INIT_STATE = 42; +var EXTRA_STATE = 69; +var NAME_STATE = 73; +var COMMENT_STATE = 91; +var HCRC_STATE = 103; +var BUSY_STATE = 113; +var FINISH_STATE = 666; + +var BS_NEED_MORE = 1; /* block not completed, need more input or more output */ +var BS_BLOCK_DONE = 2; /* block flush performed */ +var BS_FINISH_STARTED = 3; /* finish started, need only more output at next deflate */ +var BS_FINISH_DONE = 4; /* finish done, accept no more input or output */ + 
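+// Editor's note (not in upstream pako): a small worked example of the two
+// checksum helpers required above, using the ASCII bytes "abc"
+// ([0x61, 0x62, 0x63]) and the initial values this file uses
+// (adler = 1 for a zlib wrapper, crc = 0 for a gzip wrapper):
+//
+//   adler32(1, [0x61, 0x62, 0x63], 3, 0) === 0x024D0127  // s1 = 0x127, s2 = 0x24D
+//   crc32(0, [0x61, 0x62, 0x63], 3, 0)   === 0x352441C2
+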
+var OS_CODE = 0x03; // Unix :) . Don't detect, use this default. + +function err(strm, errorCode) { + strm.msg = msg[errorCode]; + return errorCode; +} + +function rank(f) { + return ((f) << 1) - ((f) > 4 ? 9 : 0); +} + +function zero(buf) { var len = buf.length; while (--len >= 0) { buf[len] = 0; } } + + +/* ========================================================================= + * Flush as much pending output as possible. All deflate() output goes + * through this function so some applications may wish to modify it + * to avoid allocating a large strm->output buffer and copying into it. + * (See also read_buf()). + */ +function flush_pending(strm) { + var s = strm.state; + + //_tr_flush_bits(s); + var len = s.pending; + if (len > strm.avail_out) { + len = strm.avail_out; + } + if (len === 0) { return; } + + utils.arraySet(strm.output, s.pending_buf, s.pending_out, len, strm.next_out); + strm.next_out += len; + s.pending_out += len; + strm.total_out += len; + strm.avail_out -= len; + s.pending -= len; + if (s.pending === 0) { + s.pending_out = 0; + } +} + + +function flush_block_only(s, last) { + trees._tr_flush_block(s, (s.block_start >= 0 ? s.block_start : -1), s.strstart - s.block_start, last); + s.block_start = s.strstart; + flush_pending(s.strm); +} + + +function put_byte(s, b) { + s.pending_buf[s.pending++] = b; +} + + +/* ========================================================================= + * Put a short in the pending buffer. The 16-bit value is put in MSB order. + * IN assertion: the stream state is correct and there is enough room in + * pending_buf. + */ +function putShortMSB(s, b) { +// put_byte(s, (Byte)(b >> 8)); +// put_byte(s, (Byte)(b & 0xff)); + s.pending_buf[s.pending++] = (b >>> 8) & 0xff; + s.pending_buf[s.pending++] = b & 0xff; +} + + +/* =========================================================================== + * Read a new buffer from the current input stream, update the adler32 + * and total number of bytes read. All deflate() input goes through + * this function so some applications may wish to modify it to avoid + * allocating a large strm->input buffer and copying from it. + * (See also flush_pending()). + */ +function read_buf(strm, buf, start, size) { + var len = strm.avail_in; + + if (len > size) { len = size; } + if (len === 0) { return 0; } + + strm.avail_in -= len; + + // zmemcpy(buf, strm->next_in, len); + utils.arraySet(buf, strm.input, strm.next_in, len, start); + if (strm.state.wrap === 1) { + strm.adler = adler32(strm.adler, buf, len, start); + } + + else if (strm.state.wrap === 2) { + strm.adler = crc32(strm.adler, buf, len, start); + } + + strm.next_in += len; + strm.total_in += len; + + return len; +} + + +/* =========================================================================== + * Set match_start to the longest match starting at the given string and + * return its length. Matches shorter or equal to prev_length are discarded, + * in which case the result is equal to prev_length and match_start is + * garbage. + * IN assertions: cur_match is the head of the hash chain for the current + * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1 + * OUT assertion: the match length is not greater than s->lookahead. 
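+ *
+ * Editorial note (not in upstream zlib/pako): with the default 15-bit window
+ * (w_size = 32768) and MIN_LOOKAHEAD = MAX_MATCH + MIN_MATCH + 1 = 262, the
+ * `limit` computed below keeps the hash-chain walk within the most recent
+ * w_size - MIN_LOOKAHEAD = 32768 - 262 = 32506 window positions (zlib's
+ * MAX_DIST), so emitted match distances always fit inside the window.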
+ */ +function longest_match(s, cur_match) { + var chain_length = s.max_chain_length; /* max hash chain length */ + var scan = s.strstart; /* current string */ + var match; /* matched string */ + var len; /* length of current match */ + var best_len = s.prev_length; /* best match length so far */ + var nice_match = s.nice_match; /* stop if match long enough */ + var limit = (s.strstart > (s.w_size - MIN_LOOKAHEAD)) ? + s.strstart - (s.w_size - MIN_LOOKAHEAD) : 0/*NIL*/; + + var _win = s.window; // shortcut + + var wmask = s.w_mask; + var prev = s.prev; + + /* Stop when cur_match becomes <= limit. To simplify the code, + * we prevent matches with the string of window index 0. + */ + + var strend = s.strstart + MAX_MATCH; + var scan_end1 = _win[scan + best_len - 1]; + var scan_end = _win[scan + best_len]; + + /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16. + * It is easy to get rid of this optimization if necessary. + */ + // Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever"); + + /* Do not waste too much time if we already have a good match: */ + if (s.prev_length >= s.good_match) { + chain_length >>= 2; + } + /* Do not look for matches beyond the end of the input. This is necessary + * to make deflate deterministic. + */ + if (nice_match > s.lookahead) { nice_match = s.lookahead; } + + // Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead"); + + do { + // Assert(cur_match < s->strstart, "no future"); + match = cur_match; + + /* Skip to next match if the match length cannot increase + * or if the match length is less than 2. Note that the checks below + * for insufficient lookahead only occur occasionally for performance + * reasons. Therefore uninitialized memory will be accessed, and + * conditional jumps will be made that depend on those values. + * However the length of the match is limited to the lookahead, so + * the output of deflate is not affected by the uninitialized values. + */ + + if (_win[match + best_len] !== scan_end || + _win[match + best_len - 1] !== scan_end1 || + _win[match] !== _win[scan] || + _win[++match] !== _win[scan + 1]) { + continue; + } + + /* The check at best_len-1 can be removed because it will be made + * again later. (This heuristic is not always a win.) + * It is not necessary to compare scan[2] and match[2] since they + * are always equal when the other bytes match, given that + * the hash keys are equal and that HASH_BITS >= 8. + */ + scan += 2; + match++; + // Assert(*scan == *match, "match[2]?"); + + /* We check for insufficient lookahead only every 8th comparison; + * the 256th check will be made at strstart+258. 
+ */ + do { + /*jshint noempty:false*/ + } while (_win[++scan] === _win[++match] && _win[++scan] === _win[++match] && + _win[++scan] === _win[++match] && _win[++scan] === _win[++match] && + _win[++scan] === _win[++match] && _win[++scan] === _win[++match] && + _win[++scan] === _win[++match] && _win[++scan] === _win[++match] && + scan < strend); + + // Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan"); + + len = MAX_MATCH - (strend - scan); + scan = strend - MAX_MATCH; + + if (len > best_len) { + s.match_start = cur_match; + best_len = len; + if (len >= nice_match) { + break; + } + scan_end1 = _win[scan + best_len - 1]; + scan_end = _win[scan + best_len]; + } + } while ((cur_match = prev[cur_match & wmask]) > limit && --chain_length !== 0); + + if (best_len <= s.lookahead) { + return best_len; + } + return s.lookahead; +} + + +/* =========================================================================== + * Fill the window when the lookahead becomes insufficient. + * Updates strstart and lookahead. + * + * IN assertion: lookahead < MIN_LOOKAHEAD + * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD + * At least one byte has been read, or avail_in == 0; reads are + * performed for at least two bytes (required for the zip translate_eol + * option -- not supported here). + */ +function fill_window(s) { + var _w_size = s.w_size; + var p, n, m, more, str; + + //Assert(s->lookahead < MIN_LOOKAHEAD, "already enough lookahead"); + + do { + more = s.window_size - s.lookahead - s.strstart; + + // JS ints have 32 bit, block below not needed + /* Deal with !@#$% 64K limit: */ + //if (sizeof(int) <= 2) { + // if (more == 0 && s->strstart == 0 && s->lookahead == 0) { + // more = wsize; + // + // } else if (more == (unsigned)(-1)) { + // /* Very unlikely, but possible on 16 bit machine if + // * strstart == 0 && lookahead == 1 (input done a byte at time) + // */ + // more--; + // } + //} + + + /* If the window is almost full and there is insufficient lookahead, + * move the upper half to the lower one to make room in the upper half. + */ + if (s.strstart >= _w_size + (_w_size - MIN_LOOKAHEAD)) { + + utils.arraySet(s.window, s.window, _w_size, _w_size, 0); + s.match_start -= _w_size; + s.strstart -= _w_size; + /* we now have strstart >= MAX_DIST */ + s.block_start -= _w_size; + + /* Slide the hash table (could be avoided with 32 bit values + at the expense of memory usage). We slide even when level == 0 + to keep the hash table consistent if we switch back to level > 0 + later. (Using level 0 permanently is not an optimal usage of + zlib, so we don't care about this pathological case.) + */ + + n = s.hash_size; + p = n; + do { + m = s.head[--p]; + s.head[p] = (m >= _w_size ? m - _w_size : 0); + } while (--n); + + n = _w_size; + p = n; + do { + m = s.prev[--p]; + s.prev[p] = (m >= _w_size ? m - _w_size : 0); + /* If n is not on any hash chain, prev[n] is garbage but + * its value will never be used. + */ + } while (--n); + + more += _w_size; + } + if (s.strm.avail_in === 0) { + break; + } + + /* If there was no sliding: + * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 && + * more == window_size - lookahead - strstart + * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1) + * => more >= window_size - 2*WSIZE + 2 + * In the BIG_MEM or MMAP case (not yet supported), + * window_size == input_size + MIN_LOOKAHEAD && + * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD. + * Otherwise, window_size == 2*WSIZE so more >= 2. 
+ * If there was sliding, more >= WSIZE. So in all cases, more >= 2. + */ + //Assert(more >= 2, "more < 2"); + n = read_buf(s.strm, s.window, s.strstart + s.lookahead, more); + s.lookahead += n; + + /* Initialize the hash value now that we have some input: */ + if (s.lookahead + s.insert >= MIN_MATCH) { + str = s.strstart - s.insert; + s.ins_h = s.window[str]; + + /* UPDATE_HASH(s, s->ins_h, s->window[str + 1]); */ + s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[str + 1]) & s.hash_mask; +//#if MIN_MATCH != 3 +// Call update_hash() MIN_MATCH-3 more times +//#endif + while (s.insert) { + /* UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); */ + s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[str + MIN_MATCH - 1]) & s.hash_mask; + + s.prev[str & s.w_mask] = s.head[s.ins_h]; + s.head[s.ins_h] = str; + str++; + s.insert--; + if (s.lookahead + s.insert < MIN_MATCH) { + break; + } + } + } + /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage, + * but this is not important since only literal bytes will be emitted. + */ + + } while (s.lookahead < MIN_LOOKAHEAD && s.strm.avail_in !== 0); + + /* If the WIN_INIT bytes after the end of the current data have never been + * written, then zero those bytes in order to avoid memory check reports of + * the use of uninitialized (or uninitialised as Julian writes) bytes by + * the longest match routines. Update the high water mark for the next + * time through here. WIN_INIT is set to MAX_MATCH since the longest match + * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead. + */ +// if (s.high_water < s.window_size) { +// var curr = s.strstart + s.lookahead; +// var init = 0; +// +// if (s.high_water < curr) { +// /* Previous high water mark below current data -- zero WIN_INIT +// * bytes or up to end of window, whichever is less. +// */ +// init = s.window_size - curr; +// if (init > WIN_INIT) +// init = WIN_INIT; +// zmemzero(s->window + curr, (unsigned)init); +// s->high_water = curr + init; +// } +// else if (s->high_water < (ulg)curr + WIN_INIT) { +// /* High water mark at or above current data, but below current data +// * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up +// * to end of window, whichever is less. +// */ +// init = (ulg)curr + WIN_INIT - s->high_water; +// if (init > s->window_size - s->high_water) +// init = s->window_size - s->high_water; +// zmemzero(s->window + s->high_water, (unsigned)init); +// s->high_water += init; +// } +// } +// +// Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD, +// "not enough room for search"); +} + +/* =========================================================================== + * Copy without compression as much as possible from the input stream, return + * the current block state. + * This function does not insert new strings in the dictionary since + * uncompressible data is probably not useful. This function is used + * only for the level=0 compression option. + * NOTE: this function should be optimized to avoid extra copying from + * window to pending_buf. 
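+ *
+ * Editorial sketch (not part of upstream pako): compression level 0 maps to
+ * this function via configuration_table below, so with pako's high-level
+ * wrapper (assumed to be bundled alongside this module):
+ *
+ *   pako.deflate(data, { level: 0 });   // "store only" -> deflate_stored()
+ *
+ * The result is the input split into stored blocks of at most 0xffff bytes,
+ * each preceded by a 5-byte block header, plus the stream wrapper.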
+ */ +function deflate_stored(s, flush) { + /* Stored blocks are limited to 0xffff bytes, pending_buf is limited + * to pending_buf_size, and each stored block has a 5 byte header: + */ + var max_block_size = 0xffff; + + if (max_block_size > s.pending_buf_size - 5) { + max_block_size = s.pending_buf_size - 5; + } + + /* Copy as much as possible from input to output: */ + for (;;) { + /* Fill the window as much as possible: */ + if (s.lookahead <= 1) { + + //Assert(s->strstart < s->w_size+MAX_DIST(s) || + // s->block_start >= (long)s->w_size, "slide too late"); +// if (!(s.strstart < s.w_size + (s.w_size - MIN_LOOKAHEAD) || +// s.block_start >= s.w_size)) { +// throw new Error("slide too late"); +// } + + fill_window(s); + if (s.lookahead === 0 && flush === Z_NO_FLUSH) { + return BS_NEED_MORE; + } + + if (s.lookahead === 0) { + break; + } + /* flush the current block */ + } + //Assert(s->block_start >= 0L, "block gone"); +// if (s.block_start < 0) throw new Error("block gone"); + + s.strstart += s.lookahead; + s.lookahead = 0; + + /* Emit a stored block if pending_buf will be full: */ + var max_start = s.block_start + max_block_size; + + if (s.strstart === 0 || s.strstart >= max_start) { + /* strstart == 0 is possible when wraparound on 16-bit machine */ + s.lookahead = s.strstart - max_start; + s.strstart = max_start; + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + + + } + /* Flush if we may have to slide, otherwise block_start may become + * negative and the data will be gone: + */ + if (s.strstart - s.block_start >= (s.w_size - MIN_LOOKAHEAD)) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + } + } + + s.insert = 0; + + if (flush === Z_FINISH) { + /*** FLUSH_BLOCK(s, 1); ***/ + flush_block_only(s, true); + if (s.strm.avail_out === 0) { + return BS_FINISH_STARTED; + } + /***/ + return BS_FINISH_DONE; + } + + if (s.strstart > s.block_start) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + } + + return BS_NEED_MORE; +} + +/* =========================================================================== + * Compress as much as possible from the input stream, return the current + * block state. + * This function does not perform lazy evaluation of matches and inserts + * new strings in the dictionary only for unmatched strings or for short + * matches. It is used only for the fast compression options. + */ +function deflate_fast(s, flush) { + var hash_head; /* head of the hash chain */ + var bflush; /* set if current block must be flushed */ + + for (;;) { + /* Make sure that we always have enough lookahead, except + * at the end of the input file. We need MAX_MATCH bytes + * for the next match, plus MIN_MATCH bytes to insert the + * string following the next match. + */ + if (s.lookahead < MIN_LOOKAHEAD) { + fill_window(s); + if (s.lookahead < MIN_LOOKAHEAD && flush === Z_NO_FLUSH) { + return BS_NEED_MORE; + } + if (s.lookahead === 0) { + break; /* flush the current block */ + } + } + + /* Insert the string window[strstart .. 
strstart+2] in the + * dictionary, and set hash_head to the head of the hash chain: + */ + hash_head = 0/*NIL*/; + if (s.lookahead >= MIN_MATCH) { + /*** INSERT_STRING(s, s.strstart, hash_head); ***/ + s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask; + hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h]; + s.head[s.ins_h] = s.strstart; + /***/ + } + + /* Find the longest match, discarding those <= prev_length. + * At this point we have always match_length < MIN_MATCH + */ + if (hash_head !== 0/*NIL*/ && ((s.strstart - hash_head) <= (s.w_size - MIN_LOOKAHEAD))) { + /* To simplify the code, we prevent matches with the string + * of window index 0 (in particular we have to avoid a match + * of the string with itself at the start of the input file). + */ + s.match_length = longest_match(s, hash_head); + /* longest_match() sets match_start */ + } + if (s.match_length >= MIN_MATCH) { + // check_match(s, s.strstart, s.match_start, s.match_length); // for debug only + + /*** _tr_tally_dist(s, s.strstart - s.match_start, + s.match_length - MIN_MATCH, bflush); ***/ + bflush = trees._tr_tally(s, s.strstart - s.match_start, s.match_length - MIN_MATCH); + + s.lookahead -= s.match_length; + + /* Insert new strings in the hash table only if the match length + * is not too large. This saves time but degrades compression. + */ + if (s.match_length <= s.max_lazy_match/*max_insert_length*/ && s.lookahead >= MIN_MATCH) { + s.match_length--; /* string at strstart already in table */ + do { + s.strstart++; + /*** INSERT_STRING(s, s.strstart, hash_head); ***/ + s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask; + hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h]; + s.head[s.ins_h] = s.strstart; + /***/ + /* strstart never exceeds WSIZE-MAX_MATCH, so there are + * always MIN_MATCH bytes ahead. + */ + } while (--s.match_length !== 0); + s.strstart++; + } else + { + s.strstart += s.match_length; + s.match_length = 0; + s.ins_h = s.window[s.strstart]; + /* UPDATE_HASH(s, s.ins_h, s.window[s.strstart+1]); */ + s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + 1]) & s.hash_mask; + +//#if MIN_MATCH != 3 +// Call UPDATE_HASH() MIN_MATCH-3 more times +//#endif + /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not + * matter since it will be recomputed at next deflate call. + */ + } + } else { + /* No match, output a literal byte */ + //Tracevv((stderr,"%c", s.window[s.strstart])); + /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/ + bflush = trees._tr_tally(s, 0, s.window[s.strstart]); + + s.lookahead--; + s.strstart++; + } + if (bflush) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + } + } + s.insert = ((s.strstart < (MIN_MATCH - 1)) ? s.strstart : MIN_MATCH - 1); + if (flush === Z_FINISH) { + /*** FLUSH_BLOCK(s, 1); ***/ + flush_block_only(s, true); + if (s.strm.avail_out === 0) { + return BS_FINISH_STARTED; + } + /***/ + return BS_FINISH_DONE; + } + if (s.last_lit) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + } + return BS_BLOCK_DONE; +} + +/* =========================================================================== + * Same as above, but achieves better compression. We use a lazy + * evaluation for matches: a match is finally adopted only if there is + * no better match at the next window position. 
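+ *
+ * Editorial note (not in upstream pako): configuration_table below routes
+ * compression levels 4..9 here, including level 6, which deflateInit2
+ * substitutes for Z_DEFAULT_COMPRESSION. A plain `pako.deflate(data)` call
+ * on the bundled high-level wrapper (assumed present) therefore ends up in
+ * this lazy-matching strategy.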
+ */ +function deflate_slow(s, flush) { + var hash_head; /* head of hash chain */ + var bflush; /* set if current block must be flushed */ + + var max_insert; + + /* Process the input block. */ + for (;;) { + /* Make sure that we always have enough lookahead, except + * at the end of the input file. We need MAX_MATCH bytes + * for the next match, plus MIN_MATCH bytes to insert the + * string following the next match. + */ + if (s.lookahead < MIN_LOOKAHEAD) { + fill_window(s); + if (s.lookahead < MIN_LOOKAHEAD && flush === Z_NO_FLUSH) { + return BS_NEED_MORE; + } + if (s.lookahead === 0) { break; } /* flush the current block */ + } + + /* Insert the string window[strstart .. strstart+2] in the + * dictionary, and set hash_head to the head of the hash chain: + */ + hash_head = 0/*NIL*/; + if (s.lookahead >= MIN_MATCH) { + /*** INSERT_STRING(s, s.strstart, hash_head); ***/ + s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask; + hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h]; + s.head[s.ins_h] = s.strstart; + /***/ + } + + /* Find the longest match, discarding those <= prev_length. + */ + s.prev_length = s.match_length; + s.prev_match = s.match_start; + s.match_length = MIN_MATCH - 1; + + if (hash_head !== 0/*NIL*/ && s.prev_length < s.max_lazy_match && + s.strstart - hash_head <= (s.w_size - MIN_LOOKAHEAD)/*MAX_DIST(s)*/) { + /* To simplify the code, we prevent matches with the string + * of window index 0 (in particular we have to avoid a match + * of the string with itself at the start of the input file). + */ + s.match_length = longest_match(s, hash_head); + /* longest_match() sets match_start */ + + if (s.match_length <= 5 && + (s.strategy === Z_FILTERED || (s.match_length === MIN_MATCH && s.strstart - s.match_start > 4096/*TOO_FAR*/))) { + + /* If prev_match is also MIN_MATCH, match_start is garbage + * but we will ignore the current match anyway. + */ + s.match_length = MIN_MATCH - 1; + } + } + /* If there was a match at the previous step and the current + * match is not better, output the previous match: + */ + if (s.prev_length >= MIN_MATCH && s.match_length <= s.prev_length) { + max_insert = s.strstart + s.lookahead - MIN_MATCH; + /* Do not insert strings in hash table beyond this. */ + + //check_match(s, s.strstart-1, s.prev_match, s.prev_length); + + /***_tr_tally_dist(s, s.strstart - 1 - s.prev_match, + s.prev_length - MIN_MATCH, bflush);***/ + bflush = trees._tr_tally(s, s.strstart - 1 - s.prev_match, s.prev_length - MIN_MATCH); + /* Insert in hash table all strings up to the end of the match. + * strstart-1 and strstart are already inserted. If there is not + * enough lookahead, the last two strings are not inserted in + * the hash table. + */ + s.lookahead -= s.prev_length - 1; + s.prev_length -= 2; + do { + if (++s.strstart <= max_insert) { + /*** INSERT_STRING(s, s.strstart, hash_head); ***/ + s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[s.strstart + MIN_MATCH - 1]) & s.hash_mask; + hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h]; + s.head[s.ins_h] = s.strstart; + /***/ + } + } while (--s.prev_length !== 0); + s.match_available = 0; + s.match_length = MIN_MATCH - 1; + s.strstart++; + + if (bflush) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + } + + } else if (s.match_available) { + /* If there was no match at the previous position, output a + * single literal. 
If there was a match but the current match + * is longer, truncate the previous match to a single literal. + */ + //Tracevv((stderr,"%c", s->window[s->strstart-1])); + /*** _tr_tally_lit(s, s.window[s.strstart-1], bflush); ***/ + bflush = trees._tr_tally(s, 0, s.window[s.strstart - 1]); + + if (bflush) { + /*** FLUSH_BLOCK_ONLY(s, 0) ***/ + flush_block_only(s, false); + /***/ + } + s.strstart++; + s.lookahead--; + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + } else { + /* There is no previous match to compare with, wait for + * the next step to decide. + */ + s.match_available = 1; + s.strstart++; + s.lookahead--; + } + } + //Assert (flush != Z_NO_FLUSH, "no flush?"); + if (s.match_available) { + //Tracevv((stderr,"%c", s->window[s->strstart-1])); + /*** _tr_tally_lit(s, s.window[s.strstart-1], bflush); ***/ + bflush = trees._tr_tally(s, 0, s.window[s.strstart - 1]); + + s.match_available = 0; + } + s.insert = s.strstart < MIN_MATCH - 1 ? s.strstart : MIN_MATCH - 1; + if (flush === Z_FINISH) { + /*** FLUSH_BLOCK(s, 1); ***/ + flush_block_only(s, true); + if (s.strm.avail_out === 0) { + return BS_FINISH_STARTED; + } + /***/ + return BS_FINISH_DONE; + } + if (s.last_lit) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + } + + return BS_BLOCK_DONE; +} + + +/* =========================================================================== + * For Z_RLE, simply look for runs of bytes, generate matches only of distance + * one. Do not maintain a hash table. (It will be regenerated if this run of + * deflate switches away from Z_RLE.) + */ +function deflate_rle(s, flush) { + var bflush; /* set if current block must be flushed */ + var prev; /* byte at distance one to match */ + var scan, strend; /* scan goes up to strend for length of run */ + + var _win = s.window; + + for (;;) { + /* Make sure that we always have enough lookahead, except + * at the end of the input file. We need MAX_MATCH bytes + * for the longest run, plus one for the unrolled loop. 
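+     * (Editorial note: MAX_MATCH = 258, so the check below refills the
+     * window whenever the lookahead has dropped to 258 bytes or fewer.)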
+ */ + if (s.lookahead <= MAX_MATCH) { + fill_window(s); + if (s.lookahead <= MAX_MATCH && flush === Z_NO_FLUSH) { + return BS_NEED_MORE; + } + if (s.lookahead === 0) { break; } /* flush the current block */ + } + + /* See how many times the previous byte repeats */ + s.match_length = 0; + if (s.lookahead >= MIN_MATCH && s.strstart > 0) { + scan = s.strstart - 1; + prev = _win[scan]; + if (prev === _win[++scan] && prev === _win[++scan] && prev === _win[++scan]) { + strend = s.strstart + MAX_MATCH; + do { + /*jshint noempty:false*/ + } while (prev === _win[++scan] && prev === _win[++scan] && + prev === _win[++scan] && prev === _win[++scan] && + prev === _win[++scan] && prev === _win[++scan] && + prev === _win[++scan] && prev === _win[++scan] && + scan < strend); + s.match_length = MAX_MATCH - (strend - scan); + if (s.match_length > s.lookahead) { + s.match_length = s.lookahead; + } + } + //Assert(scan <= s->window+(uInt)(s->window_size-1), "wild scan"); + } + + /* Emit match if have run of MIN_MATCH or longer, else emit literal */ + if (s.match_length >= MIN_MATCH) { + //check_match(s, s.strstart, s.strstart - 1, s.match_length); + + /*** _tr_tally_dist(s, 1, s.match_length - MIN_MATCH, bflush); ***/ + bflush = trees._tr_tally(s, 1, s.match_length - MIN_MATCH); + + s.lookahead -= s.match_length; + s.strstart += s.match_length; + s.match_length = 0; + } else { + /* No match, output a literal byte */ + //Tracevv((stderr,"%c", s->window[s->strstart])); + /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/ + bflush = trees._tr_tally(s, 0, s.window[s.strstart]); + + s.lookahead--; + s.strstart++; + } + if (bflush) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + } + } + s.insert = 0; + if (flush === Z_FINISH) { + /*** FLUSH_BLOCK(s, 1); ***/ + flush_block_only(s, true); + if (s.strm.avail_out === 0) { + return BS_FINISH_STARTED; + } + /***/ + return BS_FINISH_DONE; + } + if (s.last_lit) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + } + return BS_BLOCK_DONE; +} + +/* =========================================================================== + * For Z_HUFFMAN_ONLY, do not look for matches. Do not maintain a hash table. + * (It will be regenerated if this run of deflate switches away from Huffman.) + */ +function deflate_huff(s, flush) { + var bflush; /* set if current block must be flushed */ + + for (;;) { + /* Make sure that we have a literal to write. 
*/ + if (s.lookahead === 0) { + fill_window(s); + if (s.lookahead === 0) { + if (flush === Z_NO_FLUSH) { + return BS_NEED_MORE; + } + break; /* flush the current block */ + } + } + + /* Output a literal byte */ + s.match_length = 0; + //Tracevv((stderr,"%c", s->window[s->strstart])); + /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/ + bflush = trees._tr_tally(s, 0, s.window[s.strstart]); + s.lookahead--; + s.strstart++; + if (bflush) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + } + } + s.insert = 0; + if (flush === Z_FINISH) { + /*** FLUSH_BLOCK(s, 1); ***/ + flush_block_only(s, true); + if (s.strm.avail_out === 0) { + return BS_FINISH_STARTED; + } + /***/ + return BS_FINISH_DONE; + } + if (s.last_lit) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + } + return BS_BLOCK_DONE; +} + +/* Values for max_lazy_match, good_match and max_chain_length, depending on + * the desired pack level (0..9). The values given below have been tuned to + * exclude worst case performance for pathological files. Better values may be + * found for specific files. + */ +function Config(good_length, max_lazy, nice_length, max_chain, func) { + this.good_length = good_length; + this.max_lazy = max_lazy; + this.nice_length = nice_length; + this.max_chain = max_chain; + this.func = func; +} + +var configuration_table; + +configuration_table = [ + /* good lazy nice chain */ + new Config(0, 0, 0, 0, deflate_stored), /* 0 store only */ + new Config(4, 4, 8, 4, deflate_fast), /* 1 max speed, no lazy matches */ + new Config(4, 5, 16, 8, deflate_fast), /* 2 */ + new Config(4, 6, 32, 32, deflate_fast), /* 3 */ + + new Config(4, 4, 16, 16, deflate_slow), /* 4 lazy matches */ + new Config(8, 16, 32, 32, deflate_slow), /* 5 */ + new Config(8, 16, 128, 128, deflate_slow), /* 6 */ + new Config(8, 32, 128, 256, deflate_slow), /* 7 */ + new Config(32, 128, 258, 1024, deflate_slow), /* 8 */ + new Config(32, 258, 258, 4096, deflate_slow) /* 9 max compression */ +]; + + +/* =========================================================================== + * Initialize the "longest match" routines for a new zlib stream + */ +function lm_init(s) { + s.window_size = 2 * s.w_size; + + /*** CLEAR_HASH(s); ***/ + zero(s.head); // Fill with NIL (= 0); + + /* Set the default configuration parameters: + */ + s.max_lazy_match = configuration_table[s.level].max_lazy; + s.good_match = configuration_table[s.level].good_length; + s.nice_match = configuration_table[s.level].nice_length; + s.max_chain_length = configuration_table[s.level].max_chain; + + s.strstart = 0; + s.block_start = 0; + s.lookahead = 0; + s.insert = 0; + s.match_length = s.prev_length = MIN_MATCH - 1; + s.match_available = 0; + s.ins_h = 0; +} + + +function DeflateState() { + this.strm = null; /* pointer back to this zlib stream */ + this.status = 0; /* as the name implies */ + this.pending_buf = null; /* output still pending */ + this.pending_buf_size = 0; /* size of pending_buf */ + this.pending_out = 0; /* next pending byte to output to the stream */ + this.pending = 0; /* nb of bytes in the pending buffer */ + this.wrap = 0; /* bit 0 true for zlib, bit 1 true for gzip */ + this.gzhead = null; /* gzip header information to write */ + this.gzindex = 0; /* where in extra, name, or comment */ + this.method = Z_DEFLATED; /* can only be DEFLATED */ + this.last_flush = -1; /* value of flush param for previous 
deflate call */ + + this.w_size = 0; /* LZ77 window size (32K by default) */ + this.w_bits = 0; /* log2(w_size) (8..16) */ + this.w_mask = 0; /* w_size - 1 */ + + this.window = null; + /* Sliding window. Input bytes are read into the second half of the window, + * and move to the first half later to keep a dictionary of at least wSize + * bytes. With this organization, matches are limited to a distance of + * wSize-MAX_MATCH bytes, but this ensures that IO is always + * performed with a length multiple of the block size. + */ + + this.window_size = 0; + /* Actual size of window: 2*wSize, except when the user input buffer + * is directly used as sliding window. + */ + + this.prev = null; + /* Link to older string with same hash index. To limit the size of this + * array to 64K, this link is maintained only for the last 32K strings. + * An index in this array is thus a window index modulo 32K. + */ + + this.head = null; /* Heads of the hash chains or NIL. */ + + this.ins_h = 0; /* hash index of string to be inserted */ + this.hash_size = 0; /* number of elements in hash table */ + this.hash_bits = 0; /* log2(hash_size) */ + this.hash_mask = 0; /* hash_size-1 */ + + this.hash_shift = 0; + /* Number of bits by which ins_h must be shifted at each input + * step. It must be such that after MIN_MATCH steps, the oldest + * byte no longer takes part in the hash key, that is: + * hash_shift * MIN_MATCH >= hash_bits + */ + + this.block_start = 0; + /* Window position at the beginning of the current output block. Gets + * negative when the window is moved backwards. + */ + + this.match_length = 0; /* length of best match */ + this.prev_match = 0; /* previous match */ + this.match_available = 0; /* set if previous match exists */ + this.strstart = 0; /* start of string to insert */ + this.match_start = 0; /* start of matching string */ + this.lookahead = 0; /* number of valid bytes ahead in window */ + + this.prev_length = 0; + /* Length of the best match at previous step. Matches not greater than this + * are discarded. This is used in the lazy match evaluation. + */ + + this.max_chain_length = 0; + /* To speed up deflation, hash chains are never searched beyond this + * length. A higher limit improves compression ratio but degrades the + * speed. + */ + + this.max_lazy_match = 0; + /* Attempt to find a better match only when the current match is strictly + * smaller than this value. This mechanism is used only for compression + * levels >= 4. + */ + // That's alias to max_lazy_match, don't use directly + //this.max_insert_length = 0; + /* Insert new strings in the hash table only if the match length is not + * greater than this length. This saves time but degrades compression. + * max_insert_length is used only for compression levels <= 3. 
+ */ + + this.level = 0; /* compression level (1..9) */ + this.strategy = 0; /* favor or force Huffman coding*/ + + this.good_match = 0; + /* Use a faster search when the previous match is longer than this */ + + this.nice_match = 0; /* Stop searching when current match exceeds this */ + + /* used by trees.c: */ + + /* Didn't use ct_data typedef below to suppress compiler warning */ + + // struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */ + // struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */ + // struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */ + + // Use flat array of DOUBLE size, with interleaved fata, + // because JS does not support effective + this.dyn_ltree = new utils.Buf16(HEAP_SIZE * 2); + this.dyn_dtree = new utils.Buf16((2 * D_CODES + 1) * 2); + this.bl_tree = new utils.Buf16((2 * BL_CODES + 1) * 2); + zero(this.dyn_ltree); + zero(this.dyn_dtree); + zero(this.bl_tree); + + this.l_desc = null; /* desc. for literal tree */ + this.d_desc = null; /* desc. for distance tree */ + this.bl_desc = null; /* desc. for bit length tree */ + + //ush bl_count[MAX_BITS+1]; + this.bl_count = new utils.Buf16(MAX_BITS + 1); + /* number of codes at each bit length for an optimal tree */ + + //int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */ + this.heap = new utils.Buf16(2 * L_CODES + 1); /* heap used to build the Huffman trees */ + zero(this.heap); + + this.heap_len = 0; /* number of elements in the heap */ + this.heap_max = 0; /* element of largest frequency */ + /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used. + * The same heap array is used to build all trees. + */ + + this.depth = new utils.Buf16(2 * L_CODES + 1); //uch depth[2*L_CODES+1]; + zero(this.depth); + /* Depth of each subtree used as tie breaker for trees of equal frequency + */ + + this.l_buf = 0; /* buffer index for literals or lengths */ + + this.lit_bufsize = 0; + /* Size of match buffer for literals/lengths. There are 4 reasons for + * limiting lit_bufsize to 64K: + * - frequencies can be kept in 16 bit counters + * - if compression is not successful for the first block, all input + * data is still in the window so we can still emit a stored block even + * when input comes from standard input. (This can also be done for + * all blocks if lit_bufsize is not greater than 32K.) + * - if compression is not successful for a file smaller than 64K, we can + * even emit a stored file instead of a stored block (saving 5 bytes). + * This is applicable only for zip (not gzip or zlib). + * - creating new Huffman trees less frequently may not provide fast + * adaptation to changes in the input data statistics. (Take for + * example a binary file with poorly compressible code followed by + * a highly compressible string table.) Smaller buffer sizes give + * fast adaptation but have of course the overhead of transmitting + * trees more frequently. + * - I can't count above 4 + */ + + this.last_lit = 0; /* running index in l_buf */ + + this.d_buf = 0; + /* Buffer index for distances. To simplify the code, d_buf and l_buf have + * the same number of elements. To use different lengths, an extra flag + * array would be necessary. 
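+   * (Editor's note, not in upstream pako: deflateInit2 below places both
+   * inside pending_buf, which is lit_bufsize * 4 bytes long; with the
+   * default memLevel of 8, lit_bufsize = 1 << (8 + 6) = 16384, so d_buf
+   * starts at offset 16384 and l_buf at offset 3 * 16384 = 49152.)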
+ */ + + this.opt_len = 0; /* bit length of current block with optimal trees */ + this.static_len = 0; /* bit length of current block with static trees */ + this.matches = 0; /* number of string matches in current block */ + this.insert = 0; /* bytes at end of window left to insert */ + + + this.bi_buf = 0; + /* Output buffer. bits are inserted starting at the bottom (least + * significant bits). + */ + this.bi_valid = 0; + /* Number of valid bits in bi_buf. All bits above the last valid bit + * are always zero. + */ + + // Used for window memory init. We safely ignore it for JS. That makes + // sense only for pointers and memory check tools. + //this.high_water = 0; + /* High water mark offset in window for initialized bytes -- bytes above + * this are set to zero in order to avoid memory check warnings when + * longest match routines access bytes past the input. This is then + * updated to the new high water mark. + */ +} + + +function deflateResetKeep(strm) { + var s; + + if (!strm || !strm.state) { + return err(strm, Z_STREAM_ERROR); + } + + strm.total_in = strm.total_out = 0; + strm.data_type = Z_UNKNOWN; + + s = strm.state; + s.pending = 0; + s.pending_out = 0; + + if (s.wrap < 0) { + s.wrap = -s.wrap; + /* was made negative by deflate(..., Z_FINISH); */ + } + s.status = (s.wrap ? INIT_STATE : BUSY_STATE); + strm.adler = (s.wrap === 2) ? + 0 // crc32(0, Z_NULL, 0) + : + 1; // adler32(0, Z_NULL, 0) + s.last_flush = Z_NO_FLUSH; + trees._tr_init(s); + return Z_OK; +} + + +function deflateReset(strm) { + var ret = deflateResetKeep(strm); + if (ret === Z_OK) { + lm_init(strm.state); + } + return ret; +} + + +function deflateSetHeader(strm, head) { + if (!strm || !strm.state) { return Z_STREAM_ERROR; } + if (strm.state.wrap !== 2) { return Z_STREAM_ERROR; } + strm.state.gzhead = head; + return Z_OK; +} + + +function deflateInit2(strm, level, method, windowBits, memLevel, strategy) { + if (!strm) { // === Z_NULL + return Z_STREAM_ERROR; + } + var wrap = 1; + + if (level === Z_DEFAULT_COMPRESSION) { + level = 6; + } + + if (windowBits < 0) { /* suppress zlib wrapper */ + wrap = 0; + windowBits = -windowBits; + } + + else if (windowBits > 15) { + wrap = 2; /* write gzip wrapper instead */ + windowBits -= 16; + } + + + if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method !== Z_DEFLATED || + windowBits < 8 || windowBits > 15 || level < 0 || level > 9 || + strategy < 0 || strategy > Z_FIXED) { + return err(strm, Z_STREAM_ERROR); + } + + + if (windowBits === 8) { + windowBits = 9; + } + /* until 256-byte window bug fixed */ + + var s = new DeflateState(); + + strm.state = s; + s.strm = strm; + + s.wrap = wrap; + s.gzhead = null; + s.w_bits = windowBits; + s.w_size = 1 << s.w_bits; + s.w_mask = s.w_size - 1; + + s.hash_bits = memLevel + 7; + s.hash_size = 1 << s.hash_bits; + s.hash_mask = s.hash_size - 1; + s.hash_shift = ~~((s.hash_bits + MIN_MATCH - 1) / MIN_MATCH); + + s.window = new utils.Buf8(s.w_size * 2); + s.head = new utils.Buf16(s.hash_size); + s.prev = new utils.Buf16(s.w_size); + + // Don't need mem init magic for JS. 
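+
+  // Editor's note (not in upstream pako): with the defaults used by
+  // deflateInit() (windowBits = MAX_WBITS = 15, memLevel = DEF_MEM_LEVEL = 8)
+  // the sizes above work out to:
+  //   w_size     = 1 << 15 = 32768  -> window holds 2 * w_size = 65536 bytes
+  //   hash_bits  = 8 + 7   = 15     -> head and prev each hold 32768 entries
+  //   hash_shift = ~~((15 + 3 - 1) / 3) = 5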
+ //s.high_water = 0; /* nothing written to s->window yet */ + + s.lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */ + + s.pending_buf_size = s.lit_bufsize * 4; + + //overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2); + //s->pending_buf = (uchf *) overlay; + s.pending_buf = new utils.Buf8(s.pending_buf_size); + + // It is offset from `s.pending_buf` (size is `s.lit_bufsize * 2`) + //s->d_buf = overlay + s->lit_bufsize/sizeof(ush); + s.d_buf = 1 * s.lit_bufsize; + + //s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize; + s.l_buf = (1 + 2) * s.lit_bufsize; + + s.level = level; + s.strategy = strategy; + s.method = method; + + return deflateReset(strm); +} + +function deflateInit(strm, level) { + return deflateInit2(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY); +} + + +function deflate(strm, flush) { + var old_flush, s; + var beg, val; // for gzip header write only + + if (!strm || !strm.state || + flush > Z_BLOCK || flush < 0) { + return strm ? err(strm, Z_STREAM_ERROR) : Z_STREAM_ERROR; + } + + s = strm.state; + + if (!strm.output || + (!strm.input && strm.avail_in !== 0) || + (s.status === FINISH_STATE && flush !== Z_FINISH)) { + return err(strm, (strm.avail_out === 0) ? Z_BUF_ERROR : Z_STREAM_ERROR); + } + + s.strm = strm; /* just in case */ + old_flush = s.last_flush; + s.last_flush = flush; + + /* Write the header */ + if (s.status === INIT_STATE) { + + if (s.wrap === 2) { // GZIP header + strm.adler = 0; //crc32(0L, Z_NULL, 0); + put_byte(s, 31); + put_byte(s, 139); + put_byte(s, 8); + if (!s.gzhead) { // s->gzhead == Z_NULL + put_byte(s, 0); + put_byte(s, 0); + put_byte(s, 0); + put_byte(s, 0); + put_byte(s, 0); + put_byte(s, s.level === 9 ? 2 : + (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2 ? + 4 : 0)); + put_byte(s, OS_CODE); + s.status = BUSY_STATE; + } + else { + put_byte(s, (s.gzhead.text ? 1 : 0) + + (s.gzhead.hcrc ? 2 : 0) + + (!s.gzhead.extra ? 0 : 4) + + (!s.gzhead.name ? 0 : 8) + + (!s.gzhead.comment ? 0 : 16) + ); + put_byte(s, s.gzhead.time & 0xff); + put_byte(s, (s.gzhead.time >> 8) & 0xff); + put_byte(s, (s.gzhead.time >> 16) & 0xff); + put_byte(s, (s.gzhead.time >> 24) & 0xff); + put_byte(s, s.level === 9 ? 2 : + (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2 ? 
+ 4 : 0)); + put_byte(s, s.gzhead.os & 0xff); + if (s.gzhead.extra && s.gzhead.extra.length) { + put_byte(s, s.gzhead.extra.length & 0xff); + put_byte(s, (s.gzhead.extra.length >> 8) & 0xff); + } + if (s.gzhead.hcrc) { + strm.adler = crc32(strm.adler, s.pending_buf, s.pending, 0); + } + s.gzindex = 0; + s.status = EXTRA_STATE; + } + } + else // DEFLATE header + { + var header = (Z_DEFLATED + ((s.w_bits - 8) << 4)) << 8; + var level_flags = -1; + + if (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2) { + level_flags = 0; + } else if (s.level < 6) { + level_flags = 1; + } else if (s.level === 6) { + level_flags = 2; + } else { + level_flags = 3; + } + header |= (level_flags << 6); + if (s.strstart !== 0) { header |= PRESET_DICT; } + header += 31 - (header % 31); + + s.status = BUSY_STATE; + putShortMSB(s, header); + + /* Save the adler32 of the preset dictionary: */ + if (s.strstart !== 0) { + putShortMSB(s, strm.adler >>> 16); + putShortMSB(s, strm.adler & 0xffff); + } + strm.adler = 1; // adler32(0L, Z_NULL, 0); + } + } + +//#ifdef GZIP + if (s.status === EXTRA_STATE) { + if (s.gzhead.extra/* != Z_NULL*/) { + beg = s.pending; /* start of bytes to update crc */ + + while (s.gzindex < (s.gzhead.extra.length & 0xffff)) { + if (s.pending === s.pending_buf_size) { + if (s.gzhead.hcrc && s.pending > beg) { + strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg); + } + flush_pending(strm); + beg = s.pending; + if (s.pending === s.pending_buf_size) { + break; + } + } + put_byte(s, s.gzhead.extra[s.gzindex] & 0xff); + s.gzindex++; + } + if (s.gzhead.hcrc && s.pending > beg) { + strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg); + } + if (s.gzindex === s.gzhead.extra.length) { + s.gzindex = 0; + s.status = NAME_STATE; + } + } + else { + s.status = NAME_STATE; + } + } + if (s.status === NAME_STATE) { + if (s.gzhead.name/* != Z_NULL*/) { + beg = s.pending; /* start of bytes to update crc */ + //int val; + + do { + if (s.pending === s.pending_buf_size) { + if (s.gzhead.hcrc && s.pending > beg) { + strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg); + } + flush_pending(strm); + beg = s.pending; + if (s.pending === s.pending_buf_size) { + val = 1; + break; + } + } + // JS specific: little magic to add zero terminator to end of string + if (s.gzindex < s.gzhead.name.length) { + val = s.gzhead.name.charCodeAt(s.gzindex++) & 0xff; + } else { + val = 0; + } + put_byte(s, val); + } while (val !== 0); + + if (s.gzhead.hcrc && s.pending > beg) { + strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg); + } + if (val === 0) { + s.gzindex = 0; + s.status = COMMENT_STATE; + } + } + else { + s.status = COMMENT_STATE; + } + } + if (s.status === COMMENT_STATE) { + if (s.gzhead.comment/* != Z_NULL*/) { + beg = s.pending; /* start of bytes to update crc */ + //int val; + + do { + if (s.pending === s.pending_buf_size) { + if (s.gzhead.hcrc && s.pending > beg) { + strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg); + } + flush_pending(strm); + beg = s.pending; + if (s.pending === s.pending_buf_size) { + val = 1; + break; + } + } + // JS specific: little magic to add zero terminator to end of string + if (s.gzindex < s.gzhead.comment.length) { + val = s.gzhead.comment.charCodeAt(s.gzindex++) & 0xff; + } else { + val = 0; + } + put_byte(s, val); + } while (val !== 0); + + if (s.gzhead.hcrc && s.pending > beg) { + strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg); + } + if (val === 0) { + s.status = HCRC_STATE; + } + } + else { + 
s.status = HCRC_STATE; + } + } + if (s.status === HCRC_STATE) { + if (s.gzhead.hcrc) { + if (s.pending + 2 > s.pending_buf_size) { + flush_pending(strm); + } + if (s.pending + 2 <= s.pending_buf_size) { + put_byte(s, strm.adler & 0xff); + put_byte(s, (strm.adler >> 8) & 0xff); + strm.adler = 0; //crc32(0L, Z_NULL, 0); + s.status = BUSY_STATE; + } + } + else { + s.status = BUSY_STATE; + } + } +//#endif + + /* Flush as much pending output as possible */ + if (s.pending !== 0) { + flush_pending(strm); + if (strm.avail_out === 0) { + /* Since avail_out is 0, deflate will be called again with + * more output space, but possibly with both pending and + * avail_in equal to zero. There won't be anything to do, + * but this is not an error situation so make sure we + * return OK instead of BUF_ERROR at next call of deflate: + */ + s.last_flush = -1; + return Z_OK; + } + + /* Make sure there is something to do and avoid duplicate consecutive + * flushes. For repeated and useless calls with Z_FINISH, we keep + * returning Z_STREAM_END instead of Z_BUF_ERROR. + */ + } else if (strm.avail_in === 0 && rank(flush) <= rank(old_flush) && + flush !== Z_FINISH) { + return err(strm, Z_BUF_ERROR); + } + + /* User must not provide more input after the first FINISH: */ + if (s.status === FINISH_STATE && strm.avail_in !== 0) { + return err(strm, Z_BUF_ERROR); + } + + /* Start a new block or continue the current one. + */ + if (strm.avail_in !== 0 || s.lookahead !== 0 || + (flush !== Z_NO_FLUSH && s.status !== FINISH_STATE)) { + var bstate = (s.strategy === Z_HUFFMAN_ONLY) ? deflate_huff(s, flush) : + (s.strategy === Z_RLE ? deflate_rle(s, flush) : + configuration_table[s.level].func(s, flush)); + + if (bstate === BS_FINISH_STARTED || bstate === BS_FINISH_DONE) { + s.status = FINISH_STATE; + } + if (bstate === BS_NEED_MORE || bstate === BS_FINISH_STARTED) { + if (strm.avail_out === 0) { + s.last_flush = -1; + /* avoid BUF_ERROR next call, see above */ + } + return Z_OK; + /* If flush != Z_NO_FLUSH && avail_out == 0, the next call + * of deflate should use the same flush parameter to make sure + * that the flush is complete. So we don't have to output an + * empty block here, this will be done at next call. This also + * ensures that for a very small output buffer, we emit at most + * one empty block. + */ + } + if (bstate === BS_BLOCK_DONE) { + if (flush === Z_PARTIAL_FLUSH) { + trees._tr_align(s); + } + else if (flush !== Z_BLOCK) { /* FULL_FLUSH or SYNC_FLUSH */ + + trees._tr_stored_block(s, 0, 0, false); + /* For a full flush, this empty block will be recognized + * as a special marker by inflate_sync(). 
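+           * (For reference: once flushed, this empty stored block shows up
+           * in the byte stream as the familiar 00 00 FF FF marker, i.e. the
+           * zero stored-block length followed by its one's complement,
+           * which is the pattern inflate_sync() scans for.)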
+ */ + if (flush === Z_FULL_FLUSH) { + /*** CLEAR_HASH(s); ***/ /* forget history */ + zero(s.head); // Fill with NIL (= 0); + + if (s.lookahead === 0) { + s.strstart = 0; + s.block_start = 0; + s.insert = 0; + } + } + } + flush_pending(strm); + if (strm.avail_out === 0) { + s.last_flush = -1; /* avoid BUF_ERROR at next call, see above */ + return Z_OK; + } + } + } + //Assert(strm->avail_out > 0, "bug2"); + //if (strm.avail_out <= 0) { throw new Error("bug2");} + + if (flush !== Z_FINISH) { return Z_OK; } + if (s.wrap <= 0) { return Z_STREAM_END; } + + /* Write the trailer */ + if (s.wrap === 2) { + put_byte(s, strm.adler & 0xff); + put_byte(s, (strm.adler >> 8) & 0xff); + put_byte(s, (strm.adler >> 16) & 0xff); + put_byte(s, (strm.adler >> 24) & 0xff); + put_byte(s, strm.total_in & 0xff); + put_byte(s, (strm.total_in >> 8) & 0xff); + put_byte(s, (strm.total_in >> 16) & 0xff); + put_byte(s, (strm.total_in >> 24) & 0xff); + } + else + { + putShortMSB(s, strm.adler >>> 16); + putShortMSB(s, strm.adler & 0xffff); + } + + flush_pending(strm); + /* If avail_out is zero, the application will call deflate again + * to flush the rest. + */ + if (s.wrap > 0) { s.wrap = -s.wrap; } + /* write the trailer only once! */ + return s.pending !== 0 ? Z_OK : Z_STREAM_END; +} + +function deflateEnd(strm) { + var status; + + if (!strm/*== Z_NULL*/ || !strm.state/*== Z_NULL*/) { + return Z_STREAM_ERROR; + } + + status = strm.state.status; + if (status !== INIT_STATE && + status !== EXTRA_STATE && + status !== NAME_STATE && + status !== COMMENT_STATE && + status !== HCRC_STATE && + status !== BUSY_STATE && + status !== FINISH_STATE + ) { + return err(strm, Z_STREAM_ERROR); + } + + strm.state = null; + + return status === BUSY_STATE ? err(strm, Z_DATA_ERROR) : Z_OK; +} + + +/* ========================================================================= + * Initializes the compression dictionary from the given byte + * sequence without producing any compressed output. 
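+ *
+ * Call-order sketch (illustrative only; the plain-object stream shape is
+ * inferred from the checks in deflate() above and is an assumption, not an
+ * official API): with a zlib wrapper the dictionary has to be supplied right
+ * after init, before the first deflate() call, e.g.
+ *
+ *   var strm = { input: null, next_in: 0, avail_in: 0, total_in: 0,
+ *                output: null, next_out: 0, avail_out: 0, total_out: 0,
+ *                msg: '', state: null, data_type: 2, adler: 0 };
+ *   deflateInit(strm, 6);                     // zlib wrapper, level 6
+ *   deflateSetDictionary(strm, dictBytes);    // dictBytes: a Uint8Array (hypothetical name)
+ *   ...then feed input and call deflate(strm, flush) as usual.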
+ */ +function deflateSetDictionary(strm, dictionary) { + var dictLength = dictionary.length; + + var s; + var str, n; + var wrap; + var avail; + var next; + var input; + var tmpDict; + + if (!strm/*== Z_NULL*/ || !strm.state/*== Z_NULL*/) { + return Z_STREAM_ERROR; + } + + s = strm.state; + wrap = s.wrap; + + if (wrap === 2 || (wrap === 1 && s.status !== INIT_STATE) || s.lookahead) { + return Z_STREAM_ERROR; + } + + /* when using zlib wrappers, compute Adler-32 for provided dictionary */ + if (wrap === 1) { + /* adler32(strm->adler, dictionary, dictLength); */ + strm.adler = adler32(strm.adler, dictionary, dictLength, 0); + } + + s.wrap = 0; /* avoid computing Adler-32 in read_buf */ + + /* if dictionary would fill window, just replace the history */ + if (dictLength >= s.w_size) { + if (wrap === 0) { /* already empty otherwise */ + /*** CLEAR_HASH(s); ***/ + zero(s.head); // Fill with NIL (= 0); + s.strstart = 0; + s.block_start = 0; + s.insert = 0; + } + /* use the tail */ + // dictionary = dictionary.slice(dictLength - s.w_size); + tmpDict = new utils.Buf8(s.w_size); + utils.arraySet(tmpDict, dictionary, dictLength - s.w_size, s.w_size, 0); + dictionary = tmpDict; + dictLength = s.w_size; + } + /* insert dictionary into window and hash */ + avail = strm.avail_in; + next = strm.next_in; + input = strm.input; + strm.avail_in = dictLength; + strm.next_in = 0; + strm.input = dictionary; + fill_window(s); + while (s.lookahead >= MIN_MATCH) { + str = s.strstart; + n = s.lookahead - (MIN_MATCH - 1); + do { + /* UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); */ + s.ins_h = ((s.ins_h << s.hash_shift) ^ s.window[str + MIN_MATCH - 1]) & s.hash_mask; + + s.prev[str & s.w_mask] = s.head[s.ins_h]; + + s.head[s.ins_h] = str; + str++; + } while (--n); + s.strstart = str; + s.lookahead = MIN_MATCH - 1; + fill_window(s); + } + s.strstart += s.lookahead; + s.block_start = s.strstart; + s.insert = s.lookahead; + s.lookahead = 0; + s.match_length = s.prev_length = MIN_MATCH - 1; + s.match_available = 0; + strm.next_in = next; + strm.input = input; + strm.avail_in = avail; + s.wrap = wrap; + return Z_OK; +} + + +exports.deflateInit = deflateInit; +exports.deflateInit2 = deflateInit2; +exports.deflateReset = deflateReset; +exports.deflateResetKeep = deflateResetKeep; +exports.deflateSetHeader = deflateSetHeader; +exports.deflate = deflate; +exports.deflateEnd = deflateEnd; +exports.deflateSetDictionary = deflateSetDictionary; +exports.deflateInfo = 'pako deflate (from Nodeca project)'; + +/* Not implemented +exports.deflateBound = deflateBound; +exports.deflateCopy = deflateCopy; +exports.deflateParams = deflateParams; +exports.deflatePending = deflatePending; +exports.deflatePrime = deflatePrime; +exports.deflateTune = deflateTune; +*/ + +},{"../utils/common":50,"./adler32":52,"./crc32":54,"./messages":60,"./trees":61}],56:[function(require,module,exports){ +'use strict'; + +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. 
If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. + +function GZheader() { + /* true if compressed data believed to be text */ + this.text = 0; + /* modification time */ + this.time = 0; + /* extra flags (not used when writing a gzip file) */ + this.xflags = 0; + /* operating system */ + this.os = 0; + /* pointer to extra field or Z_NULL if none */ + this.extra = null; + /* extra field length (valid if extra != Z_NULL) */ + this.extra_len = 0; // Actually, we don't need it in JS, + // but leave for few code modifications + + // + // Setup limits is not necessary because in js we should not preallocate memory + // for inflate use constant limit in 65536 bytes + // + + /* space at extra (only when reading header) */ + // this.extra_max = 0; + /* pointer to zero-terminated file name or Z_NULL */ + this.name = ''; + /* space at name (only when reading header) */ + // this.name_max = 0; + /* pointer to zero-terminated comment or Z_NULL */ + this.comment = ''; + /* space at comment (only when reading header) */ + // this.comm_max = 0; + /* true if there was or will be a header crc */ + this.hcrc = 0; + /* true when done reading gzip header (not used when writing a gzip file) */ + this.done = false; +} + +module.exports = GZheader; + +},{}],57:[function(require,module,exports){ +'use strict'; + +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. + +// See state defs from inflate.js +var BAD = 30; /* got a data error -- remain here until reset */ +var TYPE = 12; /* i: waiting for type bits, including last-flag bit */ + +/* + Decode literal, length, and distance codes and write out the resulting + literal and match bytes until either not enough input or output is + available, an end-of-block is encountered, or a data error is encountered. + When large enough input and output buffers are supplied to inflate(), for + example, a 16K input buffer and a 64K output buffer, more than 95% of the + inflate execution time is spent in this routine. 
+ + Entry assumptions: + + state.mode === LEN + strm.avail_in >= 6 + strm.avail_out >= 258 + start >= strm.avail_out + state.bits < 8 + + On return, state.mode is one of: + + LEN -- ran out of enough output space or enough available input + TYPE -- reached end of block code, inflate() to interpret next block + BAD -- error in block data + + Notes: + + - The maximum input bits used by a length/distance pair is 15 bits for the + length code, 5 bits for the length extra, 15 bits for the distance code, + and 13 bits for the distance extra. This totals 48 bits, or six bytes. + Therefore if strm.avail_in >= 6, then there is enough input to avoid + checking for available input while decoding. + + - The maximum bytes that a single length/distance pair can output is 258 + bytes, which is the maximum length that can be coded. inflate_fast() + requires strm.avail_out >= 258 for each loop to avoid checking for + output space. + */ +module.exports = function inflate_fast(strm, start) { + var state; + var _in; /* local strm.input */ + var last; /* have enough input while in < last */ + var _out; /* local strm.output */ + var beg; /* inflate()'s initial strm.output */ + var end; /* while out < end, enough space available */ +//#ifdef INFLATE_STRICT + var dmax; /* maximum distance from zlib header */ +//#endif + var wsize; /* window size or zero if not using window */ + var whave; /* valid bytes in the window */ + var wnext; /* window write index */ + // Use `s_window` instead `window`, avoid conflict with instrumentation tools + var s_window; /* allocated sliding window, if wsize != 0 */ + var hold; /* local strm.hold */ + var bits; /* local strm.bits */ + var lcode; /* local strm.lencode */ + var dcode; /* local strm.distcode */ + var lmask; /* mask for first level of length codes */ + var dmask; /* mask for first level of distance codes */ + var here; /* retrieved table entry */ + var op; /* code bits, operation, extra bits, or */ + /* window position, window bytes to copy */ + var len; /* match length, unused bytes */ + var dist; /* match distance */ + var from; /* where to copy match from */ + var from_source; + + + var input, output; // JS specific, because we have no pointers + + /* copy state to local variables */ + state = strm.state; + //here = state.here; + _in = strm.next_in; + input = strm.input; + last = _in + (strm.avail_in - 5); + _out = strm.next_out; + output = strm.output; + beg = _out - (start - strm.avail_out); + end = _out + (strm.avail_out - 257); +//#ifdef INFLATE_STRICT + dmax = state.dmax; +//#endif + wsize = state.wsize; + whave = state.whave; + wnext = state.wnext; + s_window = state.window; + hold = state.hold; + bits = state.bits; + lcode = state.lencode; + dcode = state.distcode; + lmask = (1 << state.lenbits) - 1; + dmask = (1 << state.distbits) - 1; + + + /* decode literals and length/distances until end-of-block or not enough + input data or output space */ + + top: + do { + if (bits < 15) { + hold += input[_in++] << bits; + bits += 8; + hold += input[_in++] << bits; + bits += 8; + } + + here = lcode[hold & lmask]; + + dolen: + for (;;) { // Goto emulation + op = here >>> 24/*here.bits*/; + hold >>>= op; + bits -= op; + op = (here >>> 16) & 0xff/*here.op*/; + if (op === 0) { /* literal */ + //Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ? 
+ // "inflate: literal '%c'\n" : + // "inflate: literal 0x%02x\n", here.val)); + output[_out++] = here & 0xffff/*here.val*/; + } + else if (op & 16) { /* length base */ + len = here & 0xffff/*here.val*/; + op &= 15; /* number of extra bits */ + if (op) { + if (bits < op) { + hold += input[_in++] << bits; + bits += 8; + } + len += hold & ((1 << op) - 1); + hold >>>= op; + bits -= op; + } + //Tracevv((stderr, "inflate: length %u\n", len)); + if (bits < 15) { + hold += input[_in++] << bits; + bits += 8; + hold += input[_in++] << bits; + bits += 8; + } + here = dcode[hold & dmask]; + + dodist: + for (;;) { // goto emulation + op = here >>> 24/*here.bits*/; + hold >>>= op; + bits -= op; + op = (here >>> 16) & 0xff/*here.op*/; + + if (op & 16) { /* distance base */ + dist = here & 0xffff/*here.val*/; + op &= 15; /* number of extra bits */ + if (bits < op) { + hold += input[_in++] << bits; + bits += 8; + if (bits < op) { + hold += input[_in++] << bits; + bits += 8; + } + } + dist += hold & ((1 << op) - 1); +//#ifdef INFLATE_STRICT + if (dist > dmax) { + strm.msg = 'invalid distance too far back'; + state.mode = BAD; + break top; + } +//#endif + hold >>>= op; + bits -= op; + //Tracevv((stderr, "inflate: distance %u\n", dist)); + op = _out - beg; /* max distance in output */ + if (dist > op) { /* see if copy from window */ + op = dist - op; /* distance back in window */ + if (op > whave) { + if (state.sane) { + strm.msg = 'invalid distance too far back'; + state.mode = BAD; + break top; + } + +// (!) This block is disabled in zlib defaults, +// don't enable it for binary compatibility +//#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR +// if (len <= op - whave) { +// do { +// output[_out++] = 0; +// } while (--len); +// continue top; +// } +// len -= op - whave; +// do { +// output[_out++] = 0; +// } while (--op > whave); +// if (op === 0) { +// from = _out - dist; +// do { +// output[_out++] = output[from++]; +// } while (--len); +// continue top; +// } +//#endif + } + from = 0; // window index + from_source = s_window; + if (wnext === 0) { /* very common case */ + from += wsize - op; + if (op < len) { /* some from window */ + len -= op; + do { + output[_out++] = s_window[from++]; + } while (--op); + from = _out - dist; /* rest from output */ + from_source = output; + } + } + else if (wnext < op) { /* wrap around window */ + from += wsize + wnext - op; + op -= wnext; + if (op < len) { /* some from end of window */ + len -= op; + do { + output[_out++] = s_window[from++]; + } while (--op); + from = 0; + if (wnext < len) { /* some from start of window */ + op = wnext; + len -= op; + do { + output[_out++] = s_window[from++]; + } while (--op); + from = _out - dist; /* rest from output */ + from_source = output; + } + } + } + else { /* contiguous in window */ + from += wnext - op; + if (op < len) { /* some from window */ + len -= op; + do { + output[_out++] = s_window[from++]; + } while (--op); + from = _out - dist; /* rest from output */ + from_source = output; + } + } + while (len > 2) { + output[_out++] = from_source[from++]; + output[_out++] = from_source[from++]; + output[_out++] = from_source[from++]; + len -= 3; + } + if (len) { + output[_out++] = from_source[from++]; + if (len > 1) { + output[_out++] = from_source[from++]; + } + } + } + else { + from = _out - dist; /* copy direct from output */ + do { /* minimum length is three */ + output[_out++] = output[from++]; + output[_out++] = output[from++]; + output[_out++] = output[from++]; + len -= 3; + } while (len > 2); + if (len) { + 
output[_out++] = output[from++]; + if (len > 1) { + output[_out++] = output[from++]; + } + } + } + } + else if ((op & 64) === 0) { /* 2nd level distance code */ + here = dcode[(here & 0xffff)/*here.val*/ + (hold & ((1 << op) - 1))]; + continue dodist; + } + else { + strm.msg = 'invalid distance code'; + state.mode = BAD; + break top; + } + + break; // need to emulate goto via "continue" + } + } + else if ((op & 64) === 0) { /* 2nd level length code */ + here = lcode[(here & 0xffff)/*here.val*/ + (hold & ((1 << op) - 1))]; + continue dolen; + } + else if (op & 32) { /* end-of-block */ + //Tracevv((stderr, "inflate: end of block\n")); + state.mode = TYPE; + break top; + } + else { + strm.msg = 'invalid literal/length code'; + state.mode = BAD; + break top; + } + + break; // need to emulate goto via "continue" + } + } while (_in < last && _out < end); + + /* return unused bytes (on entry, bits < 8, so in won't go too far back) */ + len = bits >> 3; + _in -= len; + bits -= len << 3; + hold &= (1 << bits) - 1; + + /* update state and return */ + strm.next_in = _in; + strm.next_out = _out; + strm.avail_in = (_in < last ? 5 + (last - _in) : 5 - (_in - last)); + strm.avail_out = (_out < end ? 257 + (end - _out) : 257 - (_out - end)); + state.hold = hold; + state.bits = bits; + return; +}; + +},{}],58:[function(require,module,exports){ +'use strict'; + +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. + +var utils = require('../utils/common'); +var adler32 = require('./adler32'); +var crc32 = require('./crc32'); +var inflate_fast = require('./inffast'); +var inflate_table = require('./inftrees'); + +var CODES = 0; +var LENS = 1; +var DISTS = 2; + +/* Public constants ==========================================================*/ +/* ===========================================================================*/ + + +/* Allowed flush values; see deflate() and inflate() below for details */ +//var Z_NO_FLUSH = 0; +//var Z_PARTIAL_FLUSH = 1; +//var Z_SYNC_FLUSH = 2; +//var Z_FULL_FLUSH = 3; +var Z_FINISH = 4; +var Z_BLOCK = 5; +var Z_TREES = 6; + + +/* Return codes for the compression/decompression functions. Negative values + * are errors, positive values are used for special but normal events. 
+ */ +var Z_OK = 0; +var Z_STREAM_END = 1; +var Z_NEED_DICT = 2; +//var Z_ERRNO = -1; +var Z_STREAM_ERROR = -2; +var Z_DATA_ERROR = -3; +var Z_MEM_ERROR = -4; +var Z_BUF_ERROR = -5; +//var Z_VERSION_ERROR = -6; + +/* The deflate compression method */ +var Z_DEFLATED = 8; + + +/* STATES ====================================================================*/ +/* ===========================================================================*/ + + +var HEAD = 1; /* i: waiting for magic header */ +var FLAGS = 2; /* i: waiting for method and flags (gzip) */ +var TIME = 3; /* i: waiting for modification time (gzip) */ +var OS = 4; /* i: waiting for extra flags and operating system (gzip) */ +var EXLEN = 5; /* i: waiting for extra length (gzip) */ +var EXTRA = 6; /* i: waiting for extra bytes (gzip) */ +var NAME = 7; /* i: waiting for end of file name (gzip) */ +var COMMENT = 8; /* i: waiting for end of comment (gzip) */ +var HCRC = 9; /* i: waiting for header crc (gzip) */ +var DICTID = 10; /* i: waiting for dictionary check value */ +var DICT = 11; /* waiting for inflateSetDictionary() call */ +var TYPE = 12; /* i: waiting for type bits, including last-flag bit */ +var TYPEDO = 13; /* i: same, but skip check to exit inflate on new block */ +var STORED = 14; /* i: waiting for stored size (length and complement) */ +var COPY_ = 15; /* i/o: same as COPY below, but only first time in */ +var COPY = 16; /* i/o: waiting for input or output to copy stored block */ +var TABLE = 17; /* i: waiting for dynamic block table lengths */ +var LENLENS = 18; /* i: waiting for code length code lengths */ +var CODELENS = 19; /* i: waiting for length/lit and distance code lengths */ +var LEN_ = 20; /* i: same as LEN below, but only first time in */ +var LEN = 21; /* i: waiting for length/lit/eob code */ +var LENEXT = 22; /* i: waiting for length extra bits */ +var DIST = 23; /* i: waiting for distance code */ +var DISTEXT = 24; /* i: waiting for distance extra bits */ +var MATCH = 25; /* o: waiting for output space to copy string */ +var LIT = 26; /* o: waiting for output space to write literal */ +var CHECK = 27; /* i: waiting for 32-bit check value */ +var LENGTH = 28; /* i: waiting for 32-bit length (gzip) */ +var DONE = 29; /* finished check, done -- remain here until reset */ +var BAD = 30; /* got a data error -- remain here until reset */ +var MEM = 31; /* got an inflate() memory error -- remain here until reset */ +var SYNC = 32; /* looking for synchronization bytes to restart inflate() */ + +/* ===========================================================================*/ + + + +var ENOUGH_LENS = 852; +var ENOUGH_DISTS = 592; +//var ENOUGH = (ENOUGH_LENS+ENOUGH_DISTS); + +var MAX_WBITS = 15; +/* 32K LZ77 window */ +var DEF_WBITS = MAX_WBITS; + + +function zswap32(q) { + return (((q >>> 24) & 0xff) + + ((q >>> 8) & 0xff00) + + ((q & 0xff00) << 8) + + ((q & 0xff) << 24)); +} + + +function InflateState() { + this.mode = 0; /* current inflate mode */ + this.last = false; /* true if processing last block */ + this.wrap = 0; /* bit 0 true for zlib, bit 1 true for gzip */ + this.havedict = false; /* true if dictionary provided */ + this.flags = 0; /* gzip header method and flags (0 if zlib) */ + this.dmax = 0; /* zlib header max distance (INFLATE_STRICT) */ + this.check = 0; /* protected copy of check value */ + this.total = 0; /* protected copy of output count */ + // TODO: may be {} + this.head = null; /* where to save gzip header information */ + + /* sliding window */ + this.wbits = 0; /* log base 2 of requested 
window size */ + this.wsize = 0; /* window size or zero if not using window */ + this.whave = 0; /* valid bytes in the window */ + this.wnext = 0; /* window write index */ + this.window = null; /* allocated sliding window, if needed */ + + /* bit accumulator */ + this.hold = 0; /* input bit accumulator */ + this.bits = 0; /* number of bits in "in" */ + + /* for string and stored block copying */ + this.length = 0; /* literal or length of data to copy */ + this.offset = 0; /* distance back to copy string from */ + + /* for table and code decoding */ + this.extra = 0; /* extra bits needed */ + + /* fixed and dynamic code tables */ + this.lencode = null; /* starting table for length/literal codes */ + this.distcode = null; /* starting table for distance codes */ + this.lenbits = 0; /* index bits for lencode */ + this.distbits = 0; /* index bits for distcode */ + + /* dynamic table building */ + this.ncode = 0; /* number of code length code lengths */ + this.nlen = 0; /* number of length code lengths */ + this.ndist = 0; /* number of distance code lengths */ + this.have = 0; /* number of code lengths in lens[] */ + this.next = null; /* next available space in codes[] */ + + this.lens = new utils.Buf16(320); /* temporary storage for code lengths */ + this.work = new utils.Buf16(288); /* work area for code table building */ + + /* + because we don't have pointers in js, we use lencode and distcode directly + as buffers so we don't need codes + */ + //this.codes = new utils.Buf32(ENOUGH); /* space for code tables */ + this.lendyn = null; /* dynamic table for length/literal codes (JS specific) */ + this.distdyn = null; /* dynamic table for distance codes (JS specific) */ + this.sane = 0; /* if false, allow invalid distance too far */ + this.back = 0; /* bits back of last unprocessed length/lit */ + this.was = 0; /* initial length of match */ +} + +function inflateResetKeep(strm) { + var state; + + if (!strm || !strm.state) { return Z_STREAM_ERROR; } + state = strm.state; + strm.total_in = strm.total_out = state.total = 0; + strm.msg = ''; /*Z_NULL*/ + if (state.wrap) { /* to support ill-conceived Java test suite */ + strm.adler = state.wrap & 1; + } + state.mode = HEAD; + state.last = 0; + state.havedict = 0; + state.dmax = 32768; + state.head = null/*Z_NULL*/; + state.hold = 0; + state.bits = 0; + //state.lencode = state.distcode = state.next = state.codes; + state.lencode = state.lendyn = new utils.Buf32(ENOUGH_LENS); + state.distcode = state.distdyn = new utils.Buf32(ENOUGH_DISTS); + + state.sane = 1; + state.back = -1; + //Tracev((stderr, "inflate: reset\n")); + return Z_OK; +} + +function inflateReset(strm) { + var state; + + if (!strm || !strm.state) { return Z_STREAM_ERROR; } + state = strm.state; + state.wsize = 0; + state.whave = 0; + state.wnext = 0; + return inflateResetKeep(strm); + +} + +function inflateReset2(strm, windowBits) { + var wrap; + var state; + + /* get the state */ + if (!strm || !strm.state) { return Z_STREAM_ERROR; } + state = strm.state; + + /* extract wrap request from windowBits parameter */ + if (windowBits < 0) { + wrap = 0; + windowBits = -windowBits; + } + else { + wrap = (windowBits >> 4) + 1; + if (windowBits < 48) { + windowBits &= 15; + } + } + + /* set number of window bits, free window if different */ + if (windowBits && (windowBits < 8 || windowBits > 15)) { + return Z_STREAM_ERROR; + } + if (state.window !== null && state.wbits !== windowBits) { + state.window = null; + } + + /* update state and reset the rest of it */ + state.wrap = wrap; + state.wbits = 
windowBits; + return inflateReset(strm); +} + +function inflateInit2(strm, windowBits) { + var ret; + var state; + + if (!strm) { return Z_STREAM_ERROR; } + //strm.msg = Z_NULL; /* in case we return an error */ + + state = new InflateState(); + + //if (state === Z_NULL) return Z_MEM_ERROR; + //Tracev((stderr, "inflate: allocated\n")); + strm.state = state; + state.window = null/*Z_NULL*/; + ret = inflateReset2(strm, windowBits); + if (ret !== Z_OK) { + strm.state = null/*Z_NULL*/; + } + return ret; +} + +function inflateInit(strm) { + return inflateInit2(strm, DEF_WBITS); +} + + +/* + Return state with length and distance decoding tables and index sizes set to + fixed code decoding. Normally this returns fixed tables from inffixed.h. + If BUILDFIXED is defined, then instead this routine builds the tables the + first time it's called, and returns those tables the first time and + thereafter. This reduces the size of the code by about 2K bytes, in + exchange for a little execution time. However, BUILDFIXED should not be + used for threaded applications, since the rewriting of the tables and virgin + may not be thread-safe. + */ +var virgin = true; + +var lenfix, distfix; // We have no pointers in JS, so keep tables separate + +function fixedtables(state) { + /* build fixed huffman tables if first call (may not be thread safe) */ + if (virgin) { + var sym; + + lenfix = new utils.Buf32(512); + distfix = new utils.Buf32(32); + + /* literal/length table */ + sym = 0; + while (sym < 144) { state.lens[sym++] = 8; } + while (sym < 256) { state.lens[sym++] = 9; } + while (sym < 280) { state.lens[sym++] = 7; } + while (sym < 288) { state.lens[sym++] = 8; } + + inflate_table(LENS, state.lens, 0, 288, lenfix, 0, state.work, { bits: 9 }); + + /* distance table */ + sym = 0; + while (sym < 32) { state.lens[sym++] = 5; } + + inflate_table(DISTS, state.lens, 0, 32, distfix, 0, state.work, { bits: 5 }); + + /* do this just once */ + virgin = false; + } + + state.lencode = lenfix; + state.lenbits = 9; + state.distcode = distfix; + state.distbits = 5; +} + + +/* + Update the window with the last wsize (normally 32K) bytes written before + returning. If window does not exist yet, create it. This is only called + when a window is already in use, or when output has been written during this + inflate call, but the end of the deflate stream has not been reached yet. + It is also called to create a window for dictionary data when a dictionary + is loaded. + + Providing output buffers larger than 32K to inflate() should provide a speed + advantage, since only the last 32K of output is copied to the sliding window + upon return from inflate(), and since all distances after the first 32K of + output will fall in the output data, making match copies simpler and faster. + The advantage may be dependent on the size of the processor's data caches. 
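+
+  Worked example of the wrap-around branch below (numbers are illustrative):
+  with wsize = 32768 and wnext = 30000, copying 5000 freshly written output
+  bytes first places their first 2768 bytes at the tail of the window
+  (indices 30000..32767), then wraps and stores the last 2232 bytes starting
+  at index 0, leaving wnext = 2232 and whave = 32768.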
+ */ +function updatewindow(strm, src, end, copy) { + var dist; + var state = strm.state; + + /* if it hasn't been done already, allocate space for the window */ + if (state.window === null) { + state.wsize = 1 << state.wbits; + state.wnext = 0; + state.whave = 0; + + state.window = new utils.Buf8(state.wsize); + } + + /* copy state->wsize or less output bytes into the circular window */ + if (copy >= state.wsize) { + utils.arraySet(state.window, src, end - state.wsize, state.wsize, 0); + state.wnext = 0; + state.whave = state.wsize; + } + else { + dist = state.wsize - state.wnext; + if (dist > copy) { + dist = copy; + } + //zmemcpy(state->window + state->wnext, end - copy, dist); + utils.arraySet(state.window, src, end - copy, dist, state.wnext); + copy -= dist; + if (copy) { + //zmemcpy(state->window, end - copy, copy); + utils.arraySet(state.window, src, end - copy, copy, 0); + state.wnext = copy; + state.whave = state.wsize; + } + else { + state.wnext += dist; + if (state.wnext === state.wsize) { state.wnext = 0; } + if (state.whave < state.wsize) { state.whave += dist; } + } + } + return 0; +} + +function inflate(strm, flush) { + var state; + var input, output; // input/output buffers + var next; /* next input INDEX */ + var put; /* next output INDEX */ + var have, left; /* available input and output */ + var hold; /* bit buffer */ + var bits; /* bits in bit buffer */ + var _in, _out; /* save starting available input and output */ + var copy; /* number of stored or match bytes to copy */ + var from; /* where to copy match bytes from */ + var from_source; + var here = 0; /* current decoding table entry */ + var here_bits, here_op, here_val; // paked "here" denormalized (JS specific) + //var last; /* parent table entry */ + var last_bits, last_op, last_val; // paked "last" denormalized (JS specific) + var len; /* length to copy for repeats, bits to drop */ + var ret; /* return code */ + var hbuf = new utils.Buf8(4); /* buffer for gzip header crc calculation */ + var opts; + + var n; // temporary var for NEED_BITS + + var order = /* permutation of code lengths */ + [ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 ]; + + + if (!strm || !strm.state || !strm.output || + (!strm.input && strm.avail_in !== 0)) { + return Z_STREAM_ERROR; + } + + state = strm.state; + if (state.mode === TYPE) { state.mode = TYPEDO; } /* skip check */ + + + //--- LOAD() --- + put = strm.next_out; + output = strm.output; + left = strm.avail_out; + next = strm.next_in; + input = strm.input; + have = strm.avail_in; + hold = state.hold; + bits = state.bits; + //--- + + _in = have; + _out = left; + ret = Z_OK; + + inf_leave: // goto emulation + for (;;) { + switch (state.mode) { + case HEAD: + if (state.wrap === 0) { + state.mode = TYPEDO; + break; + } + //=== NEEDBITS(16); + while (bits < 16) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + if ((state.wrap & 2) && hold === 0x8b1f) { /* gzip header */ + state.check = 0/*crc32(0L, Z_NULL, 0)*/; + //=== CRC2(state.check, hold); + hbuf[0] = hold & 0xff; + hbuf[1] = (hold >>> 8) & 0xff; + state.check = crc32(state.check, hbuf, 2, 0); + //===// + + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + state.mode = FLAGS; + break; + } + state.flags = 0; /* expect zlib header */ + if (state.head) { + state.head.done = false; + } + if (!(state.wrap & 1) || /* check if zlib header allowed */ + (((hold & 0xff)/*BITS(8)*/ << 8) + (hold >> 8)) % 31) { + strm.msg = 'incorrect header check'; + 
state.mode = BAD; + break; + } + if ((hold & 0x0f)/*BITS(4)*/ !== Z_DEFLATED) { + strm.msg = 'unknown compression method'; + state.mode = BAD; + break; + } + //--- DROPBITS(4) ---// + hold >>>= 4; + bits -= 4; + //---// + len = (hold & 0x0f)/*BITS(4)*/ + 8; + if (state.wbits === 0) { + state.wbits = len; + } + else if (len > state.wbits) { + strm.msg = 'invalid window size'; + state.mode = BAD; + break; + } + state.dmax = 1 << len; + //Tracev((stderr, "inflate: zlib header ok\n")); + strm.adler = state.check = 1/*adler32(0L, Z_NULL, 0)*/; + state.mode = hold & 0x200 ? DICTID : TYPE; + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + break; + case FLAGS: + //=== NEEDBITS(16); */ + while (bits < 16) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + state.flags = hold; + if ((state.flags & 0xff) !== Z_DEFLATED) { + strm.msg = 'unknown compression method'; + state.mode = BAD; + break; + } + if (state.flags & 0xe000) { + strm.msg = 'unknown header flags set'; + state.mode = BAD; + break; + } + if (state.head) { + state.head.text = ((hold >> 8) & 1); + } + if (state.flags & 0x0200) { + //=== CRC2(state.check, hold); + hbuf[0] = hold & 0xff; + hbuf[1] = (hold >>> 8) & 0xff; + state.check = crc32(state.check, hbuf, 2, 0); + //===// + } + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + state.mode = TIME; + /* falls through */ + case TIME: + //=== NEEDBITS(32); */ + while (bits < 32) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + if (state.head) { + state.head.time = hold; + } + if (state.flags & 0x0200) { + //=== CRC4(state.check, hold) + hbuf[0] = hold & 0xff; + hbuf[1] = (hold >>> 8) & 0xff; + hbuf[2] = (hold >>> 16) & 0xff; + hbuf[3] = (hold >>> 24) & 0xff; + state.check = crc32(state.check, hbuf, 4, 0); + //=== + } + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + state.mode = OS; + /* falls through */ + case OS: + //=== NEEDBITS(16); */ + while (bits < 16) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + if (state.head) { + state.head.xflags = (hold & 0xff); + state.head.os = (hold >> 8); + } + if (state.flags & 0x0200) { + //=== CRC2(state.check, hold); + hbuf[0] = hold & 0xff; + hbuf[1] = (hold >>> 8) & 0xff; + state.check = crc32(state.check, hbuf, 2, 0); + //===// + } + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + state.mode = EXLEN; + /* falls through */ + case EXLEN: + if (state.flags & 0x0400) { + //=== NEEDBITS(16); */ + while (bits < 16) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + state.length = hold; + if (state.head) { + state.head.extra_len = hold; + } + if (state.flags & 0x0200) { + //=== CRC2(state.check, hold); + hbuf[0] = hold & 0xff; + hbuf[1] = (hold >>> 8) & 0xff; + state.check = crc32(state.check, hbuf, 2, 0); + //===// + } + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + } + else if (state.head) { + state.head.extra = null/*Z_NULL*/; + } + state.mode = EXTRA; + /* falls through */ + case EXTRA: + if (state.flags & 0x0400) { + copy = state.length; + if (copy > have) { copy = have; } + if (copy) { + if (state.head) { + len = state.head.extra_len - state.length; + if (!state.head.extra) { + // Use untyped array for more convenient processing later + state.head.extra = new Array(state.head.extra_len); + } + utils.arraySet( + state.head.extra, + input, + next, + // extra field is limited 
to 65536 bytes + // - no need for additional size check + copy, + /*len + copy > state.head.extra_max - len ? state.head.extra_max : copy,*/ + len + ); + //zmemcpy(state.head.extra + len, next, + // len + copy > state.head.extra_max ? + // state.head.extra_max - len : copy); + } + if (state.flags & 0x0200) { + state.check = crc32(state.check, input, copy, next); + } + have -= copy; + next += copy; + state.length -= copy; + } + if (state.length) { break inf_leave; } + } + state.length = 0; + state.mode = NAME; + /* falls through */ + case NAME: + if (state.flags & 0x0800) { + if (have === 0) { break inf_leave; } + copy = 0; + do { + // TODO: 2 or 1 bytes? + len = input[next + copy++]; + /* use constant limit because in js we should not preallocate memory */ + if (state.head && len && + (state.length < 65536 /*state.head.name_max*/)) { + state.head.name += String.fromCharCode(len); + } + } while (len && copy < have); + + if (state.flags & 0x0200) { + state.check = crc32(state.check, input, copy, next); + } + have -= copy; + next += copy; + if (len) { break inf_leave; } + } + else if (state.head) { + state.head.name = null; + } + state.length = 0; + state.mode = COMMENT; + /* falls through */ + case COMMENT: + if (state.flags & 0x1000) { + if (have === 0) { break inf_leave; } + copy = 0; + do { + len = input[next + copy++]; + /* use constant limit because in js we should not preallocate memory */ + if (state.head && len && + (state.length < 65536 /*state.head.comm_max*/)) { + state.head.comment += String.fromCharCode(len); + } + } while (len && copy < have); + if (state.flags & 0x0200) { + state.check = crc32(state.check, input, copy, next); + } + have -= copy; + next += copy; + if (len) { break inf_leave; } + } + else if (state.head) { + state.head.comment = null; + } + state.mode = HCRC; + /* falls through */ + case HCRC: + if (state.flags & 0x0200) { + //=== NEEDBITS(16); */ + while (bits < 16) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + if (hold !== (state.check & 0xffff)) { + strm.msg = 'header crc mismatch'; + state.mode = BAD; + break; + } + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + } + if (state.head) { + state.head.hcrc = ((state.flags >> 9) & 1); + state.head.done = true; + } + strm.adler = state.check = 0; + state.mode = TYPE; + break; + case DICTID: + //=== NEEDBITS(32); */ + while (bits < 32) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + strm.adler = state.check = zswap32(hold); + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + state.mode = DICT; + /* falls through */ + case DICT: + if (state.havedict === 0) { + //--- RESTORE() --- + strm.next_out = put; + strm.avail_out = left; + strm.next_in = next; + strm.avail_in = have; + state.hold = hold; + state.bits = bits; + //--- + return Z_NEED_DICT; + } + strm.adler = state.check = 1/*adler32(0L, Z_NULL, 0)*/; + state.mode = TYPE; + /* falls through */ + case TYPE: + if (flush === Z_BLOCK || flush === Z_TREES) { break inf_leave; } + /* falls through */ + case TYPEDO: + if (state.last) { + //--- BYTEBITS() ---// + hold >>>= bits & 7; + bits -= bits & 7; + //---// + state.mode = CHECK; + break; + } + //=== NEEDBITS(3); */ + while (bits < 3) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + state.last = (hold & 0x01)/*BITS(1)*/; + //--- DROPBITS(1) ---// + hold >>>= 1; + bits -= 1; + //---// + + switch ((hold & 
0x03)/*BITS(2)*/) { + case 0: /* stored block */ + //Tracev((stderr, "inflate: stored block%s\n", + // state.last ? " (last)" : "")); + state.mode = STORED; + break; + case 1: /* fixed block */ + fixedtables(state); + //Tracev((stderr, "inflate: fixed codes block%s\n", + // state.last ? " (last)" : "")); + state.mode = LEN_; /* decode codes */ + if (flush === Z_TREES) { + //--- DROPBITS(2) ---// + hold >>>= 2; + bits -= 2; + //---// + break inf_leave; + } + break; + case 2: /* dynamic block */ + //Tracev((stderr, "inflate: dynamic codes block%s\n", + // state.last ? " (last)" : "")); + state.mode = TABLE; + break; + case 3: + strm.msg = 'invalid block type'; + state.mode = BAD; + } + //--- DROPBITS(2) ---// + hold >>>= 2; + bits -= 2; + //---// + break; + case STORED: + //--- BYTEBITS() ---// /* go to byte boundary */ + hold >>>= bits & 7; + bits -= bits & 7; + //---// + //=== NEEDBITS(32); */ + while (bits < 32) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + if ((hold & 0xffff) !== ((hold >>> 16) ^ 0xffff)) { + strm.msg = 'invalid stored block lengths'; + state.mode = BAD; + break; + } + state.length = hold & 0xffff; + //Tracev((stderr, "inflate: stored length %u\n", + // state.length)); + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + state.mode = COPY_; + if (flush === Z_TREES) { break inf_leave; } + /* falls through */ + case COPY_: + state.mode = COPY; + /* falls through */ + case COPY: + copy = state.length; + if (copy) { + if (copy > have) { copy = have; } + if (copy > left) { copy = left; } + if (copy === 0) { break inf_leave; } + //--- zmemcpy(put, next, copy); --- + utils.arraySet(output, input, next, copy, put); + //---// + have -= copy; + next += copy; + left -= copy; + put += copy; + state.length -= copy; + break; + } + //Tracev((stderr, "inflate: stored end\n")); + state.mode = TYPE; + break; + case TABLE: + //=== NEEDBITS(14); */ + while (bits < 14) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + state.nlen = (hold & 0x1f)/*BITS(5)*/ + 257; + //--- DROPBITS(5) ---// + hold >>>= 5; + bits -= 5; + //---// + state.ndist = (hold & 0x1f)/*BITS(5)*/ + 1; + //--- DROPBITS(5) ---// + hold >>>= 5; + bits -= 5; + //---// + state.ncode = (hold & 0x0f)/*BITS(4)*/ + 4; + //--- DROPBITS(4) ---// + hold >>>= 4; + bits -= 4; + //---// +//#ifndef PKZIP_BUG_WORKAROUND + if (state.nlen > 286 || state.ndist > 30) { + strm.msg = 'too many length or distance symbols'; + state.mode = BAD; + break; + } +//#endif + //Tracev((stderr, "inflate: table sizes ok\n")); + state.have = 0; + state.mode = LENLENS; + /* falls through */ + case LENLENS: + while (state.have < state.ncode) { + //=== NEEDBITS(3); + while (bits < 3) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + state.lens[order[state.have++]] = (hold & 0x07);//BITS(3); + //--- DROPBITS(3) ---// + hold >>>= 3; + bits -= 3; + //---// + } + while (state.have < 19) { + state.lens[order[state.have++]] = 0; + } + // We have separate tables & no pointers. 2 commented lines below not needed. 
+ //state.next = state.codes; + //state.lencode = state.next; + // Switch to use dynamic table + state.lencode = state.lendyn; + state.lenbits = 7; + + opts = { bits: state.lenbits }; + ret = inflate_table(CODES, state.lens, 0, 19, state.lencode, 0, state.work, opts); + state.lenbits = opts.bits; + + if (ret) { + strm.msg = 'invalid code lengths set'; + state.mode = BAD; + break; + } + //Tracev((stderr, "inflate: code lengths ok\n")); + state.have = 0; + state.mode = CODELENS; + /* falls through */ + case CODELENS: + while (state.have < state.nlen + state.ndist) { + for (;;) { + here = state.lencode[hold & ((1 << state.lenbits) - 1)];/*BITS(state.lenbits)*/ + here_bits = here >>> 24; + here_op = (here >>> 16) & 0xff; + here_val = here & 0xffff; + + if ((here_bits) <= bits) { break; } + //--- PULLBYTE() ---// + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + //---// + } + if (here_val < 16) { + //--- DROPBITS(here.bits) ---// + hold >>>= here_bits; + bits -= here_bits; + //---// + state.lens[state.have++] = here_val; + } + else { + if (here_val === 16) { + //=== NEEDBITS(here.bits + 2); + n = here_bits + 2; + while (bits < n) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + //--- DROPBITS(here.bits) ---// + hold >>>= here_bits; + bits -= here_bits; + //---// + if (state.have === 0) { + strm.msg = 'invalid bit length repeat'; + state.mode = BAD; + break; + } + len = state.lens[state.have - 1]; + copy = 3 + (hold & 0x03);//BITS(2); + //--- DROPBITS(2) ---// + hold >>>= 2; + bits -= 2; + //---// + } + else if (here_val === 17) { + //=== NEEDBITS(here.bits + 3); + n = here_bits + 3; + while (bits < n) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + //--- DROPBITS(here.bits) ---// + hold >>>= here_bits; + bits -= here_bits; + //---// + len = 0; + copy = 3 + (hold & 0x07);//BITS(3); + //--- DROPBITS(3) ---// + hold >>>= 3; + bits -= 3; + //---// + } + else { + //=== NEEDBITS(here.bits + 7); + n = here_bits + 7; + while (bits < n) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + //--- DROPBITS(here.bits) ---// + hold >>>= here_bits; + bits -= here_bits; + //---// + len = 0; + copy = 11 + (hold & 0x7f);//BITS(7); + //--- DROPBITS(7) ---// + hold >>>= 7; + bits -= 7; + //---// + } + if (state.have + copy > state.nlen + state.ndist) { + strm.msg = 'invalid bit length repeat'; + state.mode = BAD; + break; + } + while (copy--) { + state.lens[state.have++] = len; + } + } + } + + /* handle error breaks in while */ + if (state.mode === BAD) { break; } + + /* check for end-of-block code (better have one) */ + if (state.lens[256] === 0) { + strm.msg = 'invalid code -- missing end-of-block'; + state.mode = BAD; + break; + } + + /* build code tables -- note: do not change the lenbits or distbits + values here (9 and 6) without reading the comments in inftrees.h + concerning the ENOUGH constants, which depend on those values */ + state.lenbits = 9; + + opts = { bits: state.lenbits }; + ret = inflate_table(LENS, state.lens, 0, state.nlen, state.lencode, 0, state.work, opts); + // We have separate tables & no pointers. 2 commented lines below not needed. 
+ // state.next_index = opts.table_index; + state.lenbits = opts.bits; + // state.lencode = state.next; + + if (ret) { + strm.msg = 'invalid literal/lengths set'; + state.mode = BAD; + break; + } + + state.distbits = 6; + //state.distcode.copy(state.codes); + // Switch to use dynamic table + state.distcode = state.distdyn; + opts = { bits: state.distbits }; + ret = inflate_table(DISTS, state.lens, state.nlen, state.ndist, state.distcode, 0, state.work, opts); + // We have separate tables & no pointers. 2 commented lines below not needed. + // state.next_index = opts.table_index; + state.distbits = opts.bits; + // state.distcode = state.next; + + if (ret) { + strm.msg = 'invalid distances set'; + state.mode = BAD; + break; + } + //Tracev((stderr, 'inflate: codes ok\n')); + state.mode = LEN_; + if (flush === Z_TREES) { break inf_leave; } + /* falls through */ + case LEN_: + state.mode = LEN; + /* falls through */ + case LEN: + if (have >= 6 && left >= 258) { + //--- RESTORE() --- + strm.next_out = put; + strm.avail_out = left; + strm.next_in = next; + strm.avail_in = have; + state.hold = hold; + state.bits = bits; + //--- + inflate_fast(strm, _out); + //--- LOAD() --- + put = strm.next_out; + output = strm.output; + left = strm.avail_out; + next = strm.next_in; + input = strm.input; + have = strm.avail_in; + hold = state.hold; + bits = state.bits; + //--- + + if (state.mode === TYPE) { + state.back = -1; + } + break; + } + state.back = 0; + for (;;) { + here = state.lencode[hold & ((1 << state.lenbits) - 1)]; /*BITS(state.lenbits)*/ + here_bits = here >>> 24; + here_op = (here >>> 16) & 0xff; + here_val = here & 0xffff; + + if (here_bits <= bits) { break; } + //--- PULLBYTE() ---// + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + //---// + } + if (here_op && (here_op & 0xf0) === 0) { + last_bits = here_bits; + last_op = here_op; + last_val = here_val; + for (;;) { + here = state.lencode[last_val + + ((hold & ((1 << (last_bits + last_op)) - 1))/*BITS(last.bits + last.op)*/ >> last_bits)]; + here_bits = here >>> 24; + here_op = (here >>> 16) & 0xff; + here_val = here & 0xffff; + + if ((last_bits + here_bits) <= bits) { break; } + //--- PULLBYTE() ---// + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + //---// + } + //--- DROPBITS(last.bits) ---// + hold >>>= last_bits; + bits -= last_bits; + //---// + state.back += last_bits; + } + //--- DROPBITS(here.bits) ---// + hold >>>= here_bits; + bits -= here_bits; + //---// + state.back += here_bits; + state.length = here_val; + if (here_op === 0) { + //Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ? 
+ // "inflate: literal '%c'\n" : + // "inflate: literal 0x%02x\n", here.val)); + state.mode = LIT; + break; + } + if (here_op & 32) { + //Tracevv((stderr, "inflate: end of block\n")); + state.back = -1; + state.mode = TYPE; + break; + } + if (here_op & 64) { + strm.msg = 'invalid literal/length code'; + state.mode = BAD; + break; + } + state.extra = here_op & 15; + state.mode = LENEXT; + /* falls through */ + case LENEXT: + if (state.extra) { + //=== NEEDBITS(state.extra); + n = state.extra; + while (bits < n) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + state.length += hold & ((1 << state.extra) - 1)/*BITS(state.extra)*/; + //--- DROPBITS(state.extra) ---// + hold >>>= state.extra; + bits -= state.extra; + //---// + state.back += state.extra; + } + //Tracevv((stderr, "inflate: length %u\n", state.length)); + state.was = state.length; + state.mode = DIST; + /* falls through */ + case DIST: + for (;;) { + here = state.distcode[hold & ((1 << state.distbits) - 1)];/*BITS(state.distbits)*/ + here_bits = here >>> 24; + here_op = (here >>> 16) & 0xff; + here_val = here & 0xffff; + + if ((here_bits) <= bits) { break; } + //--- PULLBYTE() ---// + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + //---// + } + if ((here_op & 0xf0) === 0) { + last_bits = here_bits; + last_op = here_op; + last_val = here_val; + for (;;) { + here = state.distcode[last_val + + ((hold & ((1 << (last_bits + last_op)) - 1))/*BITS(last.bits + last.op)*/ >> last_bits)]; + here_bits = here >>> 24; + here_op = (here >>> 16) & 0xff; + here_val = here & 0xffff; + + if ((last_bits + here_bits) <= bits) { break; } + //--- PULLBYTE() ---// + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + //---// + } + //--- DROPBITS(last.bits) ---// + hold >>>= last_bits; + bits -= last_bits; + //---// + state.back += last_bits; + } + //--- DROPBITS(here.bits) ---// + hold >>>= here_bits; + bits -= here_bits; + //---// + state.back += here_bits; + if (here_op & 64) { + strm.msg = 'invalid distance code'; + state.mode = BAD; + break; + } + state.offset = here_val; + state.extra = (here_op) & 15; + state.mode = DISTEXT; + /* falls through */ + case DISTEXT: + if (state.extra) { + //=== NEEDBITS(state.extra); + n = state.extra; + while (bits < n) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + state.offset += hold & ((1 << state.extra) - 1)/*BITS(state.extra)*/; + //--- DROPBITS(state.extra) ---// + hold >>>= state.extra; + bits -= state.extra; + //---// + state.back += state.extra; + } +//#ifdef INFLATE_STRICT + if (state.offset > state.dmax) { + strm.msg = 'invalid distance too far back'; + state.mode = BAD; + break; + } +//#endif + //Tracevv((stderr, "inflate: distance %u\n", state.offset)); + state.mode = MATCH; + /* falls through */ + case MATCH: + if (left === 0) { break inf_leave; } + copy = _out - left; + if (state.offset > copy) { /* copy from window */ + copy = state.offset - copy; + if (copy > state.whave) { + if (state.sane) { + strm.msg = 'invalid distance too far back'; + state.mode = BAD; + break; + } +// (!) 
This block is disabled in zlib defaults, +// don't enable it for binary compatibility +//#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR +// Trace((stderr, "inflate.c too far\n")); +// copy -= state.whave; +// if (copy > state.length) { copy = state.length; } +// if (copy > left) { copy = left; } +// left -= copy; +// state.length -= copy; +// do { +// output[put++] = 0; +// } while (--copy); +// if (state.length === 0) { state.mode = LEN; } +// break; +//#endif + } + if (copy > state.wnext) { + copy -= state.wnext; + from = state.wsize - copy; + } + else { + from = state.wnext - copy; + } + if (copy > state.length) { copy = state.length; } + from_source = state.window; + } + else { /* copy from output */ + from_source = output; + from = put - state.offset; + copy = state.length; + } + if (copy > left) { copy = left; } + left -= copy; + state.length -= copy; + do { + output[put++] = from_source[from++]; + } while (--copy); + if (state.length === 0) { state.mode = LEN; } + break; + case LIT: + if (left === 0) { break inf_leave; } + output[put++] = state.length; + left--; + state.mode = LEN; + break; + case CHECK: + if (state.wrap) { + //=== NEEDBITS(32); + while (bits < 32) { + if (have === 0) { break inf_leave; } + have--; + // Use '|' instead of '+' to make sure that result is signed + hold |= input[next++] << bits; + bits += 8; + } + //===// + _out -= left; + strm.total_out += _out; + state.total += _out; + if (_out) { + strm.adler = state.check = + /*UPDATE(state.check, put - _out, _out);*/ + (state.flags ? crc32(state.check, output, _out, put - _out) : adler32(state.check, output, _out, put - _out)); + + } + _out = left; + // NB: crc32 stored as signed 32-bit int, zswap32 returns signed too + if ((state.flags ? hold : zswap32(hold)) !== state.check) { + strm.msg = 'incorrect data check'; + state.mode = BAD; + break; + } + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + //Tracev((stderr, "inflate: check matches trailer\n")); + } + state.mode = LENGTH; + /* falls through */ + case LENGTH: + if (state.wrap && state.flags) { + //=== NEEDBITS(32); + while (bits < 32) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + if (hold !== (state.total & 0xffffffff)) { + strm.msg = 'incorrect length check'; + state.mode = BAD; + break; + } + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + //Tracev((stderr, "inflate: length matches trailer\n")); + } + state.mode = DONE; + /* falls through */ + case DONE: + ret = Z_STREAM_END; + break inf_leave; + case BAD: + ret = Z_DATA_ERROR; + break inf_leave; + case MEM: + return Z_MEM_ERROR; + case SYNC: + /* falls through */ + default: + return Z_STREAM_ERROR; + } + } + + // inf_leave <- here is real place for "goto inf_leave", emulated via "break inf_leave" + + /* + Return from inflate(), updating the total counts and the check value. + If there was no progress during the inflate() call, return a buffer + error. Call updatewindow() to create and/or update the window state. + Note: a memory error from inflate() is non-recoverable. 
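+
+     For orientation only, a typical driver loop over this module's exports
+     (a sketch, not an official API; `chunk` and the 16384 buffer size are
+     illustrative, and 0 is Z_NO_FLUSH, commented out near the top of this
+     module):
+
+       strm.input = chunk; strm.next_in = 0; strm.avail_in = chunk.length;
+       do {
+         strm.output = new utils.Buf8(16384);
+         strm.next_out = 0; strm.avail_out = 16384;
+         ret = inflate(strm, 0);
+         // hand the first strm.next_out bytes of strm.output to the consumer
+       } while (strm.avail_in > 0 && ret === Z_OK);
+
+     A Z_BUF_ERROR returned here only means that no progress was possible
+     with the buffers supplied; it is not fatal, and the call can be repeated
+     once more input or output space is available.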
+ */ + + //--- RESTORE() --- + strm.next_out = put; + strm.avail_out = left; + strm.next_in = next; + strm.avail_in = have; + state.hold = hold; + state.bits = bits; + //--- + + if (state.wsize || (_out !== strm.avail_out && state.mode < BAD && + (state.mode < CHECK || flush !== Z_FINISH))) { + if (updatewindow(strm, strm.output, strm.next_out, _out - strm.avail_out)) { + state.mode = MEM; + return Z_MEM_ERROR; + } + } + _in -= strm.avail_in; + _out -= strm.avail_out; + strm.total_in += _in; + strm.total_out += _out; + state.total += _out; + if (state.wrap && _out) { + strm.adler = state.check = /*UPDATE(state.check, strm.next_out - _out, _out);*/ + (state.flags ? crc32(state.check, output, _out, strm.next_out - _out) : adler32(state.check, output, _out, strm.next_out - _out)); + } + strm.data_type = state.bits + (state.last ? 64 : 0) + + (state.mode === TYPE ? 128 : 0) + + (state.mode === LEN_ || state.mode === COPY_ ? 256 : 0); + if (((_in === 0 && _out === 0) || flush === Z_FINISH) && ret === Z_OK) { + ret = Z_BUF_ERROR; + } + return ret; +} + +function inflateEnd(strm) { + + if (!strm || !strm.state /*|| strm->zfree == (free_func)0*/) { + return Z_STREAM_ERROR; + } + + var state = strm.state; + if (state.window) { + state.window = null; + } + strm.state = null; + return Z_OK; +} + +function inflateGetHeader(strm, head) { + var state; + + /* check state */ + if (!strm || !strm.state) { return Z_STREAM_ERROR; } + state = strm.state; + if ((state.wrap & 2) === 0) { return Z_STREAM_ERROR; } + + /* save header structure */ + state.head = head; + head.done = false; + return Z_OK; +} + +function inflateSetDictionary(strm, dictionary) { + var dictLength = dictionary.length; + + var state; + var dictid; + var ret; + + /* check state */ + if (!strm /* == Z_NULL */ || !strm.state /* == Z_NULL */) { return Z_STREAM_ERROR; } + state = strm.state; + + if (state.wrap !== 0 && state.mode !== DICT) { + return Z_STREAM_ERROR; + } + + /* check for correct dictionary identifier */ + if (state.mode === DICT) { + dictid = 1; /* adler32(0, null, 0)*/ + /* dictid = adler32(dictid, dictionary, dictLength); */ + dictid = adler32(dictid, dictionary, dictLength, 0); + if (dictid !== state.check) { + return Z_DATA_ERROR; + } + } + /* copy dictionary to window using updatewindow(), which will amend the + existing dictionary if appropriate */ + ret = updatewindow(strm, dictionary, dictLength, dictLength); + if (ret) { + state.mode = MEM; + return Z_MEM_ERROR; + } + state.havedict = 1; + // Tracev((stderr, "inflate: dictionary set\n")); + return Z_OK; +} + +exports.inflateReset = inflateReset; +exports.inflateReset2 = inflateReset2; +exports.inflateResetKeep = inflateResetKeep; +exports.inflateInit = inflateInit; +exports.inflateInit2 = inflateInit2; +exports.inflate = inflate; +exports.inflateEnd = inflateEnd; +exports.inflateGetHeader = inflateGetHeader; +exports.inflateSetDictionary = inflateSetDictionary; +exports.inflateInfo = 'pako inflate (from Nodeca project)'; + +/* Not implemented +exports.inflateCopy = inflateCopy; +exports.inflateGetDictionary = inflateGetDictionary; +exports.inflateMark = inflateMark; +exports.inflatePrime = inflatePrime; +exports.inflateSync = inflateSync; +exports.inflateSyncPoint = inflateSyncPoint; +exports.inflateUndermine = inflateUndermine; +*/ + +},{"../utils/common":50,"./adler32":52,"./crc32":54,"./inffast":57,"./inftrees":59}],59:[function(require,module,exports){ +'use strict'; + +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey 
Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. + +var utils = require('../utils/common'); + +var MAXBITS = 15; +var ENOUGH_LENS = 852; +var ENOUGH_DISTS = 592; +//var ENOUGH = (ENOUGH_LENS+ENOUGH_DISTS); + +var CODES = 0; +var LENS = 1; +var DISTS = 2; + +var lbase = [ /* Length codes 257..285 base */ + 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, + 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0 +]; + +var lext = [ /* Length codes 257..285 extra */ + 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18, + 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 72, 78 +]; + +var dbase = [ /* Distance codes 0..29 base */ + 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, + 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, + 8193, 12289, 16385, 24577, 0, 0 +]; + +var dext = [ /* Distance codes 0..29 extra */ + 16, 16, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, + 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, + 28, 28, 29, 29, 64, 64 +]; + +module.exports = function inflate_table(type, lens, lens_index, codes, table, table_index, work, opts) +{ + var bits = opts.bits; + //here = opts.here; /* table entry for duplication */ + + var len = 0; /* a code's length in bits */ + var sym = 0; /* index of code symbols */ + var min = 0, max = 0; /* minimum and maximum code lengths */ + var root = 0; /* number of index bits for root table */ + var curr = 0; /* number of index bits for current table */ + var drop = 0; /* code bits to drop for sub-table */ + var left = 0; /* number of prefix codes available */ + var used = 0; /* code entries in table used */ + var huff = 0; /* Huffman code */ + var incr; /* for incrementing code, index */ + var fill; /* index for replicating entries */ + var low; /* low bits for current root entry */ + var mask; /* mask for low root bits */ + var next; /* next available space in table */ + var base = null; /* base value table to use */ + var base_index = 0; +// var shoextra; /* extra bits table to use */ + var end; /* use base and extra for symbol > end */ + var count = new utils.Buf16(MAXBITS + 1); //[MAXBITS+1]; /* number of codes of each length */ + var offs = new utils.Buf16(MAXBITS + 1); //[MAXBITS+1]; /* offsets in table for each length */ + var extra = null; + var extra_index = 0; + + var here_bits, here_op, here_val; + + /* + Process a set of code lengths to create a canonical Huffman code. The + code lengths are lens[0..codes-1]. Each length corresponds to the + symbols 0..codes-1. The Huffman code is generated by first sorting the + symbols by length from short to long, and retaining the symbol order + for codes with equal lengths. 
Then the code starts with all zero bits + for the first code of the shortest length, and the codes are integer + increments for the same length, and zeros are appended as the length + increases. For the deflate format, these bits are stored backwards + from their more natural integer increment ordering, and so when the + decoding tables are built in the large loop below, the integer codes + are incremented backwards. + + This routine assumes, but does not check, that all of the entries in + lens[] are in the range 0..MAXBITS. The caller must assure this. + 1..MAXBITS is interpreted as that code length. zero means that that + symbol does not occur in this code. + + The codes are sorted by computing a count of codes for each length, + creating from that a table of starting indices for each length in the + sorted table, and then entering the symbols in order in the sorted + table. The sorted table is work[], with that space being provided by + the caller. + + The length counts are used for other purposes as well, i.e. finding + the minimum and maximum length codes, determining if there are any + codes at all, checking for a valid set of lengths, and looking ahead + at length counts to determine sub-table sizes when building the + decoding tables. + */ + + /* accumulate lengths for codes (assumes lens[] all in 0..MAXBITS) */ + for (len = 0; len <= MAXBITS; len++) { + count[len] = 0; + } + for (sym = 0; sym < codes; sym++) { + count[lens[lens_index + sym]]++; + } + + /* bound code lengths, force root to be within code lengths */ + root = bits; + for (max = MAXBITS; max >= 1; max--) { + if (count[max] !== 0) { break; } + } + if (root > max) { + root = max; + } + if (max === 0) { /* no symbols to code at all */ + //table.op[opts.table_index] = 64; //here.op = (var char)64; /* invalid code marker */ + //table.bits[opts.table_index] = 1; //here.bits = (var char)1; + //table.val[opts.table_index++] = 0; //here.val = (var short)0; + table[table_index++] = (1 << 24) | (64 << 16) | 0; + + + //table.op[opts.table_index] = 64; + //table.bits[opts.table_index] = 1; + //table.val[opts.table_index++] = 0; + table[table_index++] = (1 << 24) | (64 << 16) | 0; + + opts.bits = 1; + return 0; /* no symbols, but wait for decoding to report error */ + } + for (min = 1; min < max; min++) { + if (count[min] !== 0) { break; } + } + if (root < min) { + root = min; + } + + /* check for an over-subscribed or incomplete set of lengths */ + left = 1; + for (len = 1; len <= MAXBITS; len++) { + left <<= 1; + left -= count[len]; + if (left < 0) { + return -1; + } /* over-subscribed */ + } + if (left > 0 && (type === CODES || max !== 1)) { + return -1; /* incomplete set */ + } + + /* generate offsets into symbol table for each length for sorting */ + offs[1] = 0; + for (len = 1; len < MAXBITS; len++) { + offs[len + 1] = offs[len] + count[len]; + } + + /* sort symbols by length, by symbol order within each length */ + for (sym = 0; sym < codes; sym++) { + if (lens[lens_index + sym] !== 0) { + work[offs[lens[lens_index + sym]]++] = sym; + } + } + + /* + Create and fill in decoding tables. In this loop, the table being + filled is at next and has curr index bits. The code being used is huff + with length len. That code is converted to an index by dropping drop + bits off of the bottom. For codes where len is less than drop + curr, + those top drop + curr - len bits are incremented through all values to + fill the table with replicated entries. + + root is the number of index bits for the root table. 
When len exceeds + root, sub-tables are created pointed to by the root entry with an index + of the low root bits of huff. This is saved in low to check for when a + new sub-table should be started. drop is zero when the root table is + being filled, and drop is root when sub-tables are being filled. + + When a new sub-table is needed, it is necessary to look ahead in the + code lengths to determine what size sub-table is needed. The length + counts are used for this, and so count[] is decremented as codes are + entered in the tables. + + used keeps track of how many table entries have been allocated from the + provided *table space. It is checked for LENS and DIST tables against + the constants ENOUGH_LENS and ENOUGH_DISTS to guard against changes in + the initial root table size constants. See the comments in inftrees.h + for more information. + + sym increments through all symbols, and the loop terminates when + all codes of length max, i.e. all codes, have been processed. This + routine permits incomplete codes, so another loop after this one fills + in the rest of the decoding tables with invalid code markers. + */ + + /* set up for code type */ + // poor man optimization - use if-else instead of switch, + // to avoid deopts in old v8 + if (type === CODES) { + base = extra = work; /* dummy value--not used */ + end = 19; + + } else if (type === LENS) { + base = lbase; + base_index -= 257; + extra = lext; + extra_index -= 257; + end = 256; + + } else { /* DISTS */ + base = dbase; + extra = dext; + end = -1; + } + + /* initialize opts for loop */ + huff = 0; /* starting code */ + sym = 0; /* starting code symbol */ + len = min; /* starting code length */ + next = table_index; /* current table to fill in */ + curr = root; /* current table index bits */ + drop = 0; /* current bits to drop from code for index */ + low = -1; /* trigger new sub-table when len > root */ + used = 1 << root; /* use root table entries */ + mask = used - 1; /* mask for comparing low */ + + /* check available table space */ + if ((type === LENS && used > ENOUGH_LENS) || + (type === DISTS && used > ENOUGH_DISTS)) { + return 1; + } + + /* process all codes and make table entries */ + for (;;) { + /* create table entry */ + here_bits = len - drop; + if (work[sym] < end) { + here_op = 0; + here_val = work[sym]; + } + else if (work[sym] > end) { + here_op = extra[extra_index + work[sym]]; + here_val = base[base_index + work[sym]]; + } + else { + here_op = 32 + 64; /* end of block */ + here_val = 0; + } + + /* replicate for those indices with low len bits equal to huff */ + incr = 1 << (len - drop); + fill = 1 << curr; + min = fill; /* save offset to next table */ + do { + fill -= incr; + table[next + (huff >> drop) + fill] = (here_bits << 24) | (here_op << 16) | here_val |0; + } while (fill !== 0); + + /* backwards increment the len-bit code huff */ + incr = 1 << (len - 1); + while (huff & incr) { + incr >>= 1; + } + if (incr !== 0) { + huff &= incr - 1; + huff += incr; + } else { + huff = 0; + } + + /* go to next symbol, update count, len */ + sym++; + if (--count[len] === 0) { + if (len === max) { break; } + len = lens[lens_index + work[sym]]; + } + + /* create new sub-table if needed */ + if (len > root && (huff & mask) !== low) { + /* if first time, transition to sub-tables */ + if (drop === 0) { + drop = root; + } + + /* increment past last table */ + next += min; /* here min is 1 << curr */ + + /* determine length of next table */ + curr = len - drop; + left = 1 << curr; + while (curr + drop < max) { + left -= 
count[curr + drop]; + if (left <= 0) { break; } + curr++; + left <<= 1; + } + + /* check for enough space */ + used += 1 << curr; + if ((type === LENS && used > ENOUGH_LENS) || + (type === DISTS && used > ENOUGH_DISTS)) { + return 1; + } + + /* point entry in root table to sub-table */ + low = huff & mask; + /*table.op[low] = curr; + table.bits[low] = root; + table.val[low] = next - opts.table_index;*/ + table[low] = (root << 24) | (curr << 16) | (next - table_index) |0; + } + } + + /* fill in remaining table entry if code is incomplete (guaranteed to have + at most one remaining entry, since if the code is incomplete, the + maximum code length that was allowed to get this far is one bit) */ + if (huff !== 0) { + //table.op[next + huff] = 64; /* invalid code marker */ + //table.bits[next + huff] = len - drop; + //table.val[next + huff] = 0; + table[next + huff] = ((len - drop) << 24) | (64 << 16) |0; + } + + /* set return parameters */ + //opts.table_index += used; + opts.bits = root; + return 0; +}; + +},{"../utils/common":50}],60:[function(require,module,exports){ +'use strict'; + +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. + +module.exports = { + 2: 'need dictionary', /* Z_NEED_DICT 2 */ + 1: 'stream end', /* Z_STREAM_END 1 */ + 0: '', /* Z_OK 0 */ + '-1': 'file error', /* Z_ERRNO (-1) */ + '-2': 'stream error', /* Z_STREAM_ERROR (-2) */ + '-3': 'data error', /* Z_DATA_ERROR (-3) */ + '-4': 'insufficient memory', /* Z_MEM_ERROR (-4) */ + '-5': 'buffer error', /* Z_BUF_ERROR (-5) */ + '-6': 'incompatible version' /* Z_VERSION_ERROR (-6) */ +}; + +},{}],61:[function(require,module,exports){ +'use strict'; + +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. 
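+
+// A minimal, self-contained sketch (not part of pako) of how canonical Huffman
+// codes are assigned from a list of code lengths, as described in the
+// inflate_table() comment above and implemented by gen_codes() below: count the
+// codes of each length, derive the first code value for every length, then hand
+// out consecutive values within a length. Unlike gen_codes(), this sketch omits
+// the bi_reverse() step that stores deflate codes bit-reversed. The name
+// canonicalCodes is illustrative only and is never called by the bundle.
+function canonicalCodes(lens /* e.g. the RFC 1951 example [3,3,3,3,3,2,4,4] */) {
+  var MAXBITS_ = 15;
+  var blCount = new Array(MAXBITS_ + 1);
+  var nextCode = new Array(MAXBITS_ + 1);
+  var codes = new Array(lens.length);
+  var bits, n, code = 0;
+  for (bits = 0; bits <= MAXBITS_; bits++) { blCount[bits] = 0; }
+  for (n = 0; n < lens.length; n++) { blCount[lens[n]]++; }  /* count codes of each length */
+  blCount[0] = 0;                                            /* length 0 means "symbol unused" */
+  for (bits = 1; bits <= MAXBITS_; bits++) {
+    code = (code + blCount[bits - 1]) << 1;                  /* first code of this bit length */
+    nextCode[bits] = code;
+  }
+  for (n = 0; n < lens.length; n++) {                        /* assign codes in symbol order */
+    if (lens[n] !== 0) { codes[n] = nextCode[lens[n]]++; }
+  }
+  return codes; /* for the RFC 1951 example: 2,3,4,5,6,0,14,15 (i.e. 010..1111) */
+}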
+ +/* eslint-disable space-unary-ops */ + +var utils = require('../utils/common'); + +/* Public constants ==========================================================*/ +/* ===========================================================================*/ + + +//var Z_FILTERED = 1; +//var Z_HUFFMAN_ONLY = 2; +//var Z_RLE = 3; +var Z_FIXED = 4; +//var Z_DEFAULT_STRATEGY = 0; + +/* Possible values of the data_type field (though see inflate()) */ +var Z_BINARY = 0; +var Z_TEXT = 1; +//var Z_ASCII = 1; // = Z_TEXT +var Z_UNKNOWN = 2; + +/*============================================================================*/ + + +function zero(buf) { var len = buf.length; while (--len >= 0) { buf[len] = 0; } } + +// From zutil.h + +var STORED_BLOCK = 0; +var STATIC_TREES = 1; +var DYN_TREES = 2; +/* The three kinds of block type */ + +var MIN_MATCH = 3; +var MAX_MATCH = 258; +/* The minimum and maximum match lengths */ + +// From deflate.h +/* =========================================================================== + * Internal compression state. + */ + +var LENGTH_CODES = 29; +/* number of length codes, not counting the special END_BLOCK code */ + +var LITERALS = 256; +/* number of literal bytes 0..255 */ + +var L_CODES = LITERALS + 1 + LENGTH_CODES; +/* number of Literal or Length codes, including the END_BLOCK code */ + +var D_CODES = 30; +/* number of distance codes */ + +var BL_CODES = 19; +/* number of codes used to transfer the bit lengths */ + +var HEAP_SIZE = 2 * L_CODES + 1; +/* maximum heap size */ + +var MAX_BITS = 15; +/* All codes must not exceed MAX_BITS bits */ + +var Buf_size = 16; +/* size of bit buffer in bi_buf */ + + +/* =========================================================================== + * Constants + */ + +var MAX_BL_BITS = 7; +/* Bit length codes must not exceed MAX_BL_BITS bits */ + +var END_BLOCK = 256; +/* end of block literal code */ + +var REP_3_6 = 16; +/* repeat previous bit length 3-6 times (2 bits of repeat count) */ + +var REPZ_3_10 = 17; +/* repeat a zero length 3-10 times (3 bits of repeat count) */ + +var REPZ_11_138 = 18; +/* repeat a zero length 11-138 times (7 bits of repeat count) */ + +/* eslint-disable comma-spacing,array-bracket-spacing */ +var extra_lbits = /* extra bits for each length code */ + [0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0]; + +var extra_dbits = /* extra bits for each distance code */ + [0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13]; + +var extra_blbits = /* extra bits for each bit length code */ + [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7]; + +var bl_order = + [16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15]; +/* eslint-enable comma-spacing,array-bracket-spacing */ + +/* The lengths of the bit length codes are sent in order of decreasing + * probability, to avoid transmitting the lengths for unused bit length codes. + */ + +/* =========================================================================== + * Local data. These are initialized only once. + */ + +// We pre-fill arrays with 0 to avoid uninitialized gaps + +var DIST_CODE_LEN = 512; /* see definition of array dist_code below */ + +// !!!! Use flat array instead of structure, Freq = i*2, Len = i*2+1 +var static_ltree = new Array((L_CODES + 2) * 2); +zero(static_ltree); +/* The static literal tree. Since the bit lengths are imposed, there is no + * need for the L_CODES extra codes used during heap construction. However + * The codes 286 and 287 are needed to build a canonical tree (see _tr_init + * below). 
+ */ + +var static_dtree = new Array(D_CODES * 2); +zero(static_dtree); +/* The static distance tree. (Actually a trivial tree since all codes use + * 5 bits.) + */ + +var _dist_code = new Array(DIST_CODE_LEN); +zero(_dist_code); +/* Distance codes. The first 256 values correspond to the distances + * 3 .. 258, the last 256 values correspond to the top 8 bits of + * the 15 bit distances. + */ + +var _length_code = new Array(MAX_MATCH - MIN_MATCH + 1); +zero(_length_code); +/* length code for each normalized match length (0 == MIN_MATCH) */ + +var base_length = new Array(LENGTH_CODES); +zero(base_length); +/* First normalized length for each code (0 = MIN_MATCH) */ + +var base_dist = new Array(D_CODES); +zero(base_dist); +/* First normalized distance for each code (0 = distance of 1) */ + + +function StaticTreeDesc(static_tree, extra_bits, extra_base, elems, max_length) { + + this.static_tree = static_tree; /* static tree or NULL */ + this.extra_bits = extra_bits; /* extra bits for each code or NULL */ + this.extra_base = extra_base; /* base index for extra_bits */ + this.elems = elems; /* max number of elements in the tree */ + this.max_length = max_length; /* max bit length for the codes */ + + // show if `static_tree` has data or dummy - needed for monomorphic objects + this.has_stree = static_tree && static_tree.length; +} + + +var static_l_desc; +var static_d_desc; +var static_bl_desc; + + +function TreeDesc(dyn_tree, stat_desc) { + this.dyn_tree = dyn_tree; /* the dynamic tree */ + this.max_code = 0; /* largest code with non zero frequency */ + this.stat_desc = stat_desc; /* the corresponding static tree */ +} + + + +function d_code(dist) { + return dist < 256 ? _dist_code[dist] : _dist_code[256 + (dist >>> 7)]; +} + + +/* =========================================================================== + * Output a short LSB first on the stream. + * IN assertion: there is enough room in pendingBuf. + */ +function put_short(s, w) { +// put_byte(s, (uch)((w) & 0xff)); +// put_byte(s, (uch)((ush)(w) >> 8)); + s.pending_buf[s.pending++] = (w) & 0xff; + s.pending_buf[s.pending++] = (w >>> 8) & 0xff; +} + + +/* =========================================================================== + * Send a value on a given number of bits. + * IN assertion: length <= 16 and value fits in length bits. + */ +function send_bits(s, value, length) { + if (s.bi_valid > (Buf_size - length)) { + s.bi_buf |= (value << s.bi_valid) & 0xffff; + put_short(s, s.bi_buf); + s.bi_buf = value >> (Buf_size - s.bi_valid); + s.bi_valid += length - Buf_size; + } else { + s.bi_buf |= (value << s.bi_valid) & 0xffff; + s.bi_valid += length; + } +} + + +function send_code(s, c, tree) { + send_bits(s, tree[c * 2]/*.Code*/, tree[c * 2 + 1]/*.Len*/); +} + + +/* =========================================================================== + * Reverse the first len bits of a code, using straightforward code (a faster + * method would use a table) + * IN assertion: 1 <= len <= 15 + */ +function bi_reverse(code, len) { + var res = 0; + do { + res |= code & 1; + code >>>= 1; + res <<= 1; + } while (--len > 0); + return res >>> 1; +} + + +/* =========================================================================== + * Flush the bit buffer, keeping at most 7 bits in it. 
+ */ +function bi_flush(s) { + if (s.bi_valid === 16) { + put_short(s, s.bi_buf); + s.bi_buf = 0; + s.bi_valid = 0; + + } else if (s.bi_valid >= 8) { + s.pending_buf[s.pending++] = s.bi_buf & 0xff; + s.bi_buf >>= 8; + s.bi_valid -= 8; + } +} + + +/* =========================================================================== + * Compute the optimal bit lengths for a tree and update the total bit length + * for the current block. + * IN assertion: the fields freq and dad are set, heap[heap_max] and + * above are the tree nodes sorted by increasing frequency. + * OUT assertions: the field len is set to the optimal bit length, the + * array bl_count contains the frequencies for each bit length. + * The length opt_len is updated; static_len is also updated if stree is + * not null. + */ +function gen_bitlen(s, desc) +// deflate_state *s; +// tree_desc *desc; /* the tree descriptor */ +{ + var tree = desc.dyn_tree; + var max_code = desc.max_code; + var stree = desc.stat_desc.static_tree; + var has_stree = desc.stat_desc.has_stree; + var extra = desc.stat_desc.extra_bits; + var base = desc.stat_desc.extra_base; + var max_length = desc.stat_desc.max_length; + var h; /* heap index */ + var n, m; /* iterate over the tree elements */ + var bits; /* bit length */ + var xbits; /* extra bits */ + var f; /* frequency */ + var overflow = 0; /* number of elements with bit length too large */ + + for (bits = 0; bits <= MAX_BITS; bits++) { + s.bl_count[bits] = 0; + } + + /* In a first pass, compute the optimal bit lengths (which may + * overflow in the case of the bit length tree). + */ + tree[s.heap[s.heap_max] * 2 + 1]/*.Len*/ = 0; /* root of the heap */ + + for (h = s.heap_max + 1; h < HEAP_SIZE; h++) { + n = s.heap[h]; + bits = tree[tree[n * 2 + 1]/*.Dad*/ * 2 + 1]/*.Len*/ + 1; + if (bits > max_length) { + bits = max_length; + overflow++; + } + tree[n * 2 + 1]/*.Len*/ = bits; + /* We overwrite tree[n].Dad which is no longer needed */ + + if (n > max_code) { continue; } /* not a leaf node */ + + s.bl_count[bits]++; + xbits = 0; + if (n >= base) { + xbits = extra[n - base]; + } + f = tree[n * 2]/*.Freq*/; + s.opt_len += f * (bits + xbits); + if (has_stree) { + s.static_len += f * (stree[n * 2 + 1]/*.Len*/ + xbits); + } + } + if (overflow === 0) { return; } + + // Trace((stderr,"\nbit length overflow\n")); + /* This happens for example on obj2 and pic of the Calgary corpus */ + + /* Find the first bit length which could increase: */ + do { + bits = max_length - 1; + while (s.bl_count[bits] === 0) { bits--; } + s.bl_count[bits]--; /* move one leaf down the tree */ + s.bl_count[bits + 1] += 2; /* move one overflow item as its brother */ + s.bl_count[max_length]--; + /* The brother of the overflow item also moves one step up, + * but this does not affect bl_count[max_length] + */ + overflow -= 2; + } while (overflow > 0); + + /* Now recompute all bit lengths, scanning in increasing frequency. + * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all + * lengths instead of fixing only the wrong ones. This idea is taken + * from 'ar' written by Haruhiko Okumura.) 
+ */ + for (bits = max_length; bits !== 0; bits--) { + n = s.bl_count[bits]; + while (n !== 0) { + m = s.heap[--h]; + if (m > max_code) { continue; } + if (tree[m * 2 + 1]/*.Len*/ !== bits) { + // Trace((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits)); + s.opt_len += (bits - tree[m * 2 + 1]/*.Len*/) * tree[m * 2]/*.Freq*/; + tree[m * 2 + 1]/*.Len*/ = bits; + } + n--; + } + } +} + + +/* =========================================================================== + * Generate the codes for a given tree and bit counts (which need not be + * optimal). + * IN assertion: the array bl_count contains the bit length statistics for + * the given tree and the field len is set for all tree elements. + * OUT assertion: the field code is set for all tree elements of non + * zero code length. + */ +function gen_codes(tree, max_code, bl_count) +// ct_data *tree; /* the tree to decorate */ +// int max_code; /* largest code with non zero frequency */ +// ushf *bl_count; /* number of codes at each bit length */ +{ + var next_code = new Array(MAX_BITS + 1); /* next code value for each bit length */ + var code = 0; /* running code value */ + var bits; /* bit index */ + var n; /* code index */ + + /* The distribution counts are first used to generate the code values + * without bit reversal. + */ + for (bits = 1; bits <= MAX_BITS; bits++) { + next_code[bits] = code = (code + bl_count[bits - 1]) << 1; + } + /* Check that the bit counts in bl_count are consistent. The last code + * must be all ones. + */ + //Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1, + // "inconsistent bit counts"); + //Tracev((stderr,"\ngen_codes: max_code %d ", max_code)); + + for (n = 0; n <= max_code; n++) { + var len = tree[n * 2 + 1]/*.Len*/; + if (len === 0) { continue; } + /* Now reverse the bits */ + tree[n * 2]/*.Code*/ = bi_reverse(next_code[len]++, len); + + //Tracecv(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ", + // n, (isgraph(n) ? n : ' '), len, tree[n].Code, next_code[len]-1)); + } +} + + +/* =========================================================================== + * Initialize the various 'constant' tables. 
+ */ +function tr_static_init() { + var n; /* iterates over tree elements */ + var bits; /* bit counter */ + var length; /* length value */ + var code; /* code value */ + var dist; /* distance index */ + var bl_count = new Array(MAX_BITS + 1); + /* number of codes at each bit length for an optimal tree */ + + // do check in _tr_init() + //if (static_init_done) return; + + /* For some embedded targets, global variables are not initialized: */ +/*#ifdef NO_INIT_GLOBAL_POINTERS + static_l_desc.static_tree = static_ltree; + static_l_desc.extra_bits = extra_lbits; + static_d_desc.static_tree = static_dtree; + static_d_desc.extra_bits = extra_dbits; + static_bl_desc.extra_bits = extra_blbits; +#endif*/ + + /* Initialize the mapping length (0..255) -> length code (0..28) */ + length = 0; + for (code = 0; code < LENGTH_CODES - 1; code++) { + base_length[code] = length; + for (n = 0; n < (1 << extra_lbits[code]); n++) { + _length_code[length++] = code; + } + } + //Assert (length == 256, "tr_static_init: length != 256"); + /* Note that the length 255 (match length 258) can be represented + * in two different ways: code 284 + 5 bits or code 285, so we + * overwrite length_code[255] to use the best encoding: + */ + _length_code[length - 1] = code; + + /* Initialize the mapping dist (0..32K) -> dist code (0..29) */ + dist = 0; + for (code = 0; code < 16; code++) { + base_dist[code] = dist; + for (n = 0; n < (1 << extra_dbits[code]); n++) { + _dist_code[dist++] = code; + } + } + //Assert (dist == 256, "tr_static_init: dist != 256"); + dist >>= 7; /* from now on, all distances are divided by 128 */ + for (; code < D_CODES; code++) { + base_dist[code] = dist << 7; + for (n = 0; n < (1 << (extra_dbits[code] - 7)); n++) { + _dist_code[256 + dist++] = code; + } + } + //Assert (dist == 256, "tr_static_init: 256+dist != 512"); + + /* Construct the codes of the static literal tree */ + for (bits = 0; bits <= MAX_BITS; bits++) { + bl_count[bits] = 0; + } + + n = 0; + while (n <= 143) { + static_ltree[n * 2 + 1]/*.Len*/ = 8; + n++; + bl_count[8]++; + } + while (n <= 255) { + static_ltree[n * 2 + 1]/*.Len*/ = 9; + n++; + bl_count[9]++; + } + while (n <= 279) { + static_ltree[n * 2 + 1]/*.Len*/ = 7; + n++; + bl_count[7]++; + } + while (n <= 287) { + static_ltree[n * 2 + 1]/*.Len*/ = 8; + n++; + bl_count[8]++; + } + /* Codes 286 and 287 do not exist, but we must include them in the + * tree construction to get a canonical Huffman tree (longest code + * all ones) + */ + gen_codes(static_ltree, L_CODES + 1, bl_count); + + /* The static distance tree is trivial: */ + for (n = 0; n < D_CODES; n++) { + static_dtree[n * 2 + 1]/*.Len*/ = 5; + static_dtree[n * 2]/*.Code*/ = bi_reverse(n, 5); + } + + // Now data ready and we can init static trees + static_l_desc = new StaticTreeDesc(static_ltree, extra_lbits, LITERALS + 1, L_CODES, MAX_BITS); + static_d_desc = new StaticTreeDesc(static_dtree, extra_dbits, 0, D_CODES, MAX_BITS); + static_bl_desc = new StaticTreeDesc(new Array(0), extra_blbits, 0, BL_CODES, MAX_BL_BITS); + + //static_init_done = true; +} + + +/* =========================================================================== + * Initialize a new block. + */ +function init_block(s) { + var n; /* iterates over tree elements */ + + /* Initialize the trees. 
*/ + for (n = 0; n < L_CODES; n++) { s.dyn_ltree[n * 2]/*.Freq*/ = 0; } + for (n = 0; n < D_CODES; n++) { s.dyn_dtree[n * 2]/*.Freq*/ = 0; } + for (n = 0; n < BL_CODES; n++) { s.bl_tree[n * 2]/*.Freq*/ = 0; } + + s.dyn_ltree[END_BLOCK * 2]/*.Freq*/ = 1; + s.opt_len = s.static_len = 0; + s.last_lit = s.matches = 0; +} + + +/* =========================================================================== + * Flush the bit buffer and align the output on a byte boundary + */ +function bi_windup(s) +{ + if (s.bi_valid > 8) { + put_short(s, s.bi_buf); + } else if (s.bi_valid > 0) { + //put_byte(s, (Byte)s->bi_buf); + s.pending_buf[s.pending++] = s.bi_buf; + } + s.bi_buf = 0; + s.bi_valid = 0; +} + +/* =========================================================================== + * Copy a stored block, storing first the length and its + * one's complement if requested. + */ +function copy_block(s, buf, len, header) +//DeflateState *s; +//charf *buf; /* the input data */ +//unsigned len; /* its length */ +//int header; /* true if block header must be written */ +{ + bi_windup(s); /* align on byte boundary */ + + if (header) { + put_short(s, len); + put_short(s, ~len); + } +// while (len--) { +// put_byte(s, *buf++); +// } + utils.arraySet(s.pending_buf, s.window, buf, len, s.pending); + s.pending += len; +} + +/* =========================================================================== + * Compares to subtrees, using the tree depth as tie breaker when + * the subtrees have equal frequency. This minimizes the worst case length. + */ +function smaller(tree, n, m, depth) { + var _n2 = n * 2; + var _m2 = m * 2; + return (tree[_n2]/*.Freq*/ < tree[_m2]/*.Freq*/ || + (tree[_n2]/*.Freq*/ === tree[_m2]/*.Freq*/ && depth[n] <= depth[m])); +} + +/* =========================================================================== + * Restore the heap property by moving down the tree starting at node k, + * exchanging a node with the smallest of its two sons if necessary, stopping + * when the heap property is re-established (each father smaller than its + * two sons). 
+ */ +function pqdownheap(s, tree, k) +// deflate_state *s; +// ct_data *tree; /* the tree to restore */ +// int k; /* node to move down */ +{ + var v = s.heap[k]; + var j = k << 1; /* left son of k */ + while (j <= s.heap_len) { + /* Set j to the smallest of the two sons: */ + if (j < s.heap_len && + smaller(tree, s.heap[j + 1], s.heap[j], s.depth)) { + j++; + } + /* Exit if v is smaller than both sons */ + if (smaller(tree, v, s.heap[j], s.depth)) { break; } + + /* Exchange v with the smallest son */ + s.heap[k] = s.heap[j]; + k = j; + + /* And continue down the tree, setting j to the left son of k */ + j <<= 1; + } + s.heap[k] = v; +} + + +// inlined manually +// var SMALLEST = 1; + +/* =========================================================================== + * Send the block data compressed using the given Huffman trees + */ +function compress_block(s, ltree, dtree) +// deflate_state *s; +// const ct_data *ltree; /* literal tree */ +// const ct_data *dtree; /* distance tree */ +{ + var dist; /* distance of matched string */ + var lc; /* match length or unmatched char (if dist == 0) */ + var lx = 0; /* running index in l_buf */ + var code; /* the code to send */ + var extra; /* number of extra bits to send */ + + if (s.last_lit !== 0) { + do { + dist = (s.pending_buf[s.d_buf + lx * 2] << 8) | (s.pending_buf[s.d_buf + lx * 2 + 1]); + lc = s.pending_buf[s.l_buf + lx]; + lx++; + + if (dist === 0) { + send_code(s, lc, ltree); /* send a literal byte */ + //Tracecv(isgraph(lc), (stderr," '%c' ", lc)); + } else { + /* Here, lc is the match length - MIN_MATCH */ + code = _length_code[lc]; + send_code(s, code + LITERALS + 1, ltree); /* send the length code */ + extra = extra_lbits[code]; + if (extra !== 0) { + lc -= base_length[code]; + send_bits(s, lc, extra); /* send the extra length bits */ + } + dist--; /* dist is now the match distance - 1 */ + code = d_code(dist); + //Assert (code < D_CODES, "bad d_code"); + + send_code(s, code, dtree); /* send the distance code */ + extra = extra_dbits[code]; + if (extra !== 0) { + dist -= base_dist[code]; + send_bits(s, dist, extra); /* send the extra distance bits */ + } + } /* literal or match pair ? */ + + /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */ + //Assert((uInt)(s->pending) < s->lit_bufsize + 2*lx, + // "pendingBuf overflow"); + + } while (lx < s.last_lit); + } + + send_code(s, END_BLOCK, ltree); +} + + +/* =========================================================================== + * Construct one Huffman tree and assigns the code bit strings and lengths. + * Update the total bit length for the current block. + * IN assertion: the field freq is set for all tree elements. + * OUT assertions: the fields len and code are set to the optimal bit length + * and corresponding code. The length opt_len is updated; static_len is + * also updated if stree is not null. The field max_code is set. + */ +function build_tree(s, desc) +// deflate_state *s; +// tree_desc *desc; /* the tree descriptor */ +{ + var tree = desc.dyn_tree; + var stree = desc.stat_desc.static_tree; + var has_stree = desc.stat_desc.has_stree; + var elems = desc.stat_desc.elems; + var n, m; /* iterate over heap elements */ + var max_code = -1; /* largest code with non zero frequency */ + var node; /* new node being created */ + + /* Construct the initial heap, with least frequent element in + * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1]. + * heap[0] is not used. 
+ */ + s.heap_len = 0; + s.heap_max = HEAP_SIZE; + + for (n = 0; n < elems; n++) { + if (tree[n * 2]/*.Freq*/ !== 0) { + s.heap[++s.heap_len] = max_code = n; + s.depth[n] = 0; + + } else { + tree[n * 2 + 1]/*.Len*/ = 0; + } + } + + /* The pkzip format requires that at least one distance code exists, + * and that at least one bit should be sent even if there is only one + * possible code. So to avoid special checks later on we force at least + * two codes of non zero frequency. + */ + while (s.heap_len < 2) { + node = s.heap[++s.heap_len] = (max_code < 2 ? ++max_code : 0); + tree[node * 2]/*.Freq*/ = 1; + s.depth[node] = 0; + s.opt_len--; + + if (has_stree) { + s.static_len -= stree[node * 2 + 1]/*.Len*/; + } + /* node is 0 or 1 so it does not have extra bits */ + } + desc.max_code = max_code; + + /* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree, + * establish sub-heaps of increasing lengths: + */ + for (n = (s.heap_len >> 1/*int /2*/); n >= 1; n--) { pqdownheap(s, tree, n); } + + /* Construct the Huffman tree by repeatedly combining the least two + * frequent nodes. + */ + node = elems; /* next internal node of the tree */ + do { + //pqremove(s, tree, n); /* n = node of least frequency */ + /*** pqremove ***/ + n = s.heap[1/*SMALLEST*/]; + s.heap[1/*SMALLEST*/] = s.heap[s.heap_len--]; + pqdownheap(s, tree, 1/*SMALLEST*/); + /***/ + + m = s.heap[1/*SMALLEST*/]; /* m = node of next least frequency */ + + s.heap[--s.heap_max] = n; /* keep the nodes sorted by frequency */ + s.heap[--s.heap_max] = m; + + /* Create a new node father of n and m */ + tree[node * 2]/*.Freq*/ = tree[n * 2]/*.Freq*/ + tree[m * 2]/*.Freq*/; + s.depth[node] = (s.depth[n] >= s.depth[m] ? s.depth[n] : s.depth[m]) + 1; + tree[n * 2 + 1]/*.Dad*/ = tree[m * 2 + 1]/*.Dad*/ = node; + + /* and insert the new node in the heap */ + s.heap[1/*SMALLEST*/] = node++; + pqdownheap(s, tree, 1/*SMALLEST*/); + + } while (s.heap_len >= 2); + + s.heap[--s.heap_max] = s.heap[1/*SMALLEST*/]; + + /* At this point, the fields freq and dad are set. We can now + * generate the bit lengths. + */ + gen_bitlen(s, desc); + + /* The field len is now set, we can generate the bit codes */ + gen_codes(tree, max_code, s.bl_count); +} + + +/* =========================================================================== + * Scan a literal or distance tree to determine the frequencies of the codes + * in the bit length tree. 
+ */ +function scan_tree(s, tree, max_code) +// deflate_state *s; +// ct_data *tree; /* the tree to be scanned */ +// int max_code; /* and its largest code of non zero frequency */ +{ + var n; /* iterates over all tree elements */ + var prevlen = -1; /* last emitted length */ + var curlen; /* length of current code */ + + var nextlen = tree[0 * 2 + 1]/*.Len*/; /* length of next code */ + + var count = 0; /* repeat count of the current code */ + var max_count = 7; /* max repeat count */ + var min_count = 4; /* min repeat count */ + + if (nextlen === 0) { + max_count = 138; + min_count = 3; + } + tree[(max_code + 1) * 2 + 1]/*.Len*/ = 0xffff; /* guard */ + + for (n = 0; n <= max_code; n++) { + curlen = nextlen; + nextlen = tree[(n + 1) * 2 + 1]/*.Len*/; + + if (++count < max_count && curlen === nextlen) { + continue; + + } else if (count < min_count) { + s.bl_tree[curlen * 2]/*.Freq*/ += count; + + } else if (curlen !== 0) { + + if (curlen !== prevlen) { s.bl_tree[curlen * 2]/*.Freq*/++; } + s.bl_tree[REP_3_6 * 2]/*.Freq*/++; + + } else if (count <= 10) { + s.bl_tree[REPZ_3_10 * 2]/*.Freq*/++; + + } else { + s.bl_tree[REPZ_11_138 * 2]/*.Freq*/++; + } + + count = 0; + prevlen = curlen; + + if (nextlen === 0) { + max_count = 138; + min_count = 3; + + } else if (curlen === nextlen) { + max_count = 6; + min_count = 3; + + } else { + max_count = 7; + min_count = 4; + } + } +} + + +/* =========================================================================== + * Send a literal or distance tree in compressed form, using the codes in + * bl_tree. + */ +function send_tree(s, tree, max_code) +// deflate_state *s; +// ct_data *tree; /* the tree to be scanned */ +// int max_code; /* and its largest code of non zero frequency */ +{ + var n; /* iterates over all tree elements */ + var prevlen = -1; /* last emitted length */ + var curlen; /* length of current code */ + + var nextlen = tree[0 * 2 + 1]/*.Len*/; /* length of next code */ + + var count = 0; /* repeat count of the current code */ + var max_count = 7; /* max repeat count */ + var min_count = 4; /* min repeat count */ + + /* tree[max_code+1].Len = -1; */ /* guard already set */ + if (nextlen === 0) { + max_count = 138; + min_count = 3; + } + + for (n = 0; n <= max_code; n++) { + curlen = nextlen; + nextlen = tree[(n + 1) * 2 + 1]/*.Len*/; + + if (++count < max_count && curlen === nextlen) { + continue; + + } else if (count < min_count) { + do { send_code(s, curlen, s.bl_tree); } while (--count !== 0); + + } else if (curlen !== 0) { + if (curlen !== prevlen) { + send_code(s, curlen, s.bl_tree); + count--; + } + //Assert(count >= 3 && count <= 6, " 3_6?"); + send_code(s, REP_3_6, s.bl_tree); + send_bits(s, count - 3, 2); + + } else if (count <= 10) { + send_code(s, REPZ_3_10, s.bl_tree); + send_bits(s, count - 3, 3); + + } else { + send_code(s, REPZ_11_138, s.bl_tree); + send_bits(s, count - 11, 7); + } + + count = 0; + prevlen = curlen; + if (nextlen === 0) { + max_count = 138; + min_count = 3; + + } else if (curlen === nextlen) { + max_count = 6; + min_count = 3; + + } else { + max_count = 7; + min_count = 4; + } + } +} + + +/* =========================================================================== + * Construct the Huffman tree for the bit lengths and return the index in + * bl_order of the last bit length code to send. 
+ */ +function build_bl_tree(s) { + var max_blindex; /* index of last bit length code of non zero freq */ + + /* Determine the bit length frequencies for literal and distance trees */ + scan_tree(s, s.dyn_ltree, s.l_desc.max_code); + scan_tree(s, s.dyn_dtree, s.d_desc.max_code); + + /* Build the bit length tree: */ + build_tree(s, s.bl_desc); + /* opt_len now includes the length of the tree representations, except + * the lengths of the bit lengths codes and the 5+5+4 bits for the counts. + */ + + /* Determine the number of bit length codes to send. The pkzip format + * requires that at least 4 bit length codes be sent. (appnote.txt says + * 3 but the actual value used is 4.) + */ + for (max_blindex = BL_CODES - 1; max_blindex >= 3; max_blindex--) { + if (s.bl_tree[bl_order[max_blindex] * 2 + 1]/*.Len*/ !== 0) { + break; + } + } + /* Update opt_len to include the bit length tree and counts */ + s.opt_len += 3 * (max_blindex + 1) + 5 + 5 + 4; + //Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld", + // s->opt_len, s->static_len)); + + return max_blindex; +} + + +/* =========================================================================== + * Send the header for a block using dynamic Huffman trees: the counts, the + * lengths of the bit length codes, the literal tree and the distance tree. + * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4. + */ +function send_all_trees(s, lcodes, dcodes, blcodes) +// deflate_state *s; +// int lcodes, dcodes, blcodes; /* number of codes for each tree */ +{ + var rank; /* index in bl_order */ + + //Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes"); + //Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES, + // "too many codes"); + //Tracev((stderr, "\nbl counts: ")); + send_bits(s, lcodes - 257, 5); /* not +255 as stated in appnote.txt */ + send_bits(s, dcodes - 1, 5); + send_bits(s, blcodes - 4, 4); /* not -3 as stated in appnote.txt */ + for (rank = 0; rank < blcodes; rank++) { + //Tracev((stderr, "\nbl code %2d ", bl_order[rank])); + send_bits(s, s.bl_tree[bl_order[rank] * 2 + 1]/*.Len*/, 3); + } + //Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent)); + + send_tree(s, s.dyn_ltree, lcodes - 1); /* literal tree */ + //Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent)); + + send_tree(s, s.dyn_dtree, dcodes - 1); /* distance tree */ + //Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent)); +} + + +/* =========================================================================== + * Check if the data type is TEXT or BINARY, using the following algorithm: + * - TEXT if the two conditions below are satisfied: + * a) There are no non-portable control characters belonging to the + * "black list" (0..6, 14..25, 28..31). + * b) There is at least one printable character belonging to the + * "white list" (9 {TAB}, 10 {LF}, 13 {CR}, 32..255). + * - BINARY otherwise. + * - The following partially-portable control characters form a + * "gray list" that is ignored in this detection algorithm: + * (7 {BEL}, 8 {BS}, 11 {VT}, 12 {FF}, 26 {SUB}, 27 {ESC}). + * IN assertion: the fields Freq of dyn_ltree are set. + */ +function detect_data_type(s) { + /* black_mask is the bit mask of black-listed bytes + * set bits 0..6, 14..25, and 28..31 + * 0xf3ffc07f = binary 11110011111111111100000001111111 + */ + var black_mask = 0xf3ffc07f; + var n; + + /* Check for non-textual ("black-listed") bytes. 
*/ + for (n = 0; n <= 31; n++, black_mask >>>= 1) { + if ((black_mask & 1) && (s.dyn_ltree[n * 2]/*.Freq*/ !== 0)) { + return Z_BINARY; + } + } + + /* Check for textual ("white-listed") bytes. */ + if (s.dyn_ltree[9 * 2]/*.Freq*/ !== 0 || s.dyn_ltree[10 * 2]/*.Freq*/ !== 0 || + s.dyn_ltree[13 * 2]/*.Freq*/ !== 0) { + return Z_TEXT; + } + for (n = 32; n < LITERALS; n++) { + if (s.dyn_ltree[n * 2]/*.Freq*/ !== 0) { + return Z_TEXT; + } + } + + /* There are no "black-listed" or "white-listed" bytes: + * this stream either is empty or has tolerated ("gray-listed") bytes only. + */ + return Z_BINARY; +} + + +var static_init_done = false; + +/* =========================================================================== + * Initialize the tree data structures for a new zlib stream. + */ +function _tr_init(s) +{ + + if (!static_init_done) { + tr_static_init(); + static_init_done = true; + } + + s.l_desc = new TreeDesc(s.dyn_ltree, static_l_desc); + s.d_desc = new TreeDesc(s.dyn_dtree, static_d_desc); + s.bl_desc = new TreeDesc(s.bl_tree, static_bl_desc); + + s.bi_buf = 0; + s.bi_valid = 0; + + /* Initialize the first block of the first file: */ + init_block(s); +} + + +/* =========================================================================== + * Send a stored block + */ +function _tr_stored_block(s, buf, stored_len, last) +//DeflateState *s; +//charf *buf; /* input block */ +//ulg stored_len; /* length of input block */ +//int last; /* one if this is the last block for a file */ +{ + send_bits(s, (STORED_BLOCK << 1) + (last ? 1 : 0), 3); /* send block type */ + copy_block(s, buf, stored_len, true); /* with header */ +} + + +/* =========================================================================== + * Send one empty static block to give enough lookahead for inflate. + * This takes 10 bits, of which 7 may remain in the bit buffer. + */ +function _tr_align(s) { + send_bits(s, STATIC_TREES << 1, 3); + send_code(s, END_BLOCK, static_ltree); + bi_flush(s); +} + + +/* =========================================================================== + * Determine the best encoding for the current block: dynamic trees, static + * trees or store, and output the encoded block to the zip file. + */ +function _tr_flush_block(s, buf, stored_len, last) +//DeflateState *s; +//charf *buf; /* input block, or NULL if too old */ +//ulg stored_len; /* length of input block */ +//int last; /* one if this is the last block for a file */ +{ + var opt_lenb, static_lenb; /* opt_len and static_len in bytes */ + var max_blindex = 0; /* index of last bit length code of non zero freq */ + + /* Build the Huffman trees unless a stored block is forced */ + if (s.level > 0) { + + /* Check if the file is binary or text */ + if (s.strm.data_type === Z_UNKNOWN) { + s.strm.data_type = detect_data_type(s); + } + + /* Construct the literal and distance trees */ + build_tree(s, s.l_desc); + // Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len, + // s->static_len)); + + build_tree(s, s.d_desc); + // Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len, + // s->static_len)); + /* At this point, opt_len and static_len are the total bit lengths of + * the compressed block data, excluding the tree representations. + */ + + /* Build the bit length tree for the above two trees, and get the index + * in bl_order of the last bit length code to send. + */ + max_blindex = build_bl_tree(s); + + /* Determine the best encoding. Compute the block lengths in bytes. 
*/ + opt_lenb = (s.opt_len + 3 + 7) >>> 3; + static_lenb = (s.static_len + 3 + 7) >>> 3; + + // Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ", + // opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len, + // s->last_lit)); + + if (static_lenb <= opt_lenb) { opt_lenb = static_lenb; } + + } else { + // Assert(buf != (char*)0, "lost buf"); + opt_lenb = static_lenb = stored_len + 5; /* force a stored block */ + } + + if ((stored_len + 4 <= opt_lenb) && (buf !== -1)) { + /* 4: two words for the lengths */ + + /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE. + * Otherwise we can't have processed more than WSIZE input bytes since + * the last block flush, because compression would have been + * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to + * transform a block into a stored block. + */ + _tr_stored_block(s, buf, stored_len, last); + + } else if (s.strategy === Z_FIXED || static_lenb === opt_lenb) { + + send_bits(s, (STATIC_TREES << 1) + (last ? 1 : 0), 3); + compress_block(s, static_ltree, static_dtree); + + } else { + send_bits(s, (DYN_TREES << 1) + (last ? 1 : 0), 3); + send_all_trees(s, s.l_desc.max_code + 1, s.d_desc.max_code + 1, max_blindex + 1); + compress_block(s, s.dyn_ltree, s.dyn_dtree); + } + // Assert (s->compressed_len == s->bits_sent, "bad compressed size"); + /* The above check is made mod 2^32, for files larger than 512 MB + * and uLong implemented on 32 bits. + */ + init_block(s); + + if (last) { + bi_windup(s); + } + // Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3, + // s->compressed_len-7*last)); +} + +/* =========================================================================== + * Save the match info and tally the frequency counts. Return true if + * the current block must be flushed. + */ +function _tr_tally(s, dist, lc) +// deflate_state *s; +// unsigned dist; /* distance of matched string */ +// unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */ +{ + //var out_length, in_length, dcode; + + s.pending_buf[s.d_buf + s.last_lit * 2] = (dist >>> 8) & 0xff; + s.pending_buf[s.d_buf + s.last_lit * 2 + 1] = dist & 0xff; + + s.pending_buf[s.l_buf + s.last_lit] = lc & 0xff; + s.last_lit++; + + if (dist === 0) { + /* lc is the unmatched char */ + s.dyn_ltree[lc * 2]/*.Freq*/++; + } else { + s.matches++; + /* Here, lc is the match length - MIN_MATCH */ + dist--; /* dist = match distance - 1 */ + //Assert((ush)dist < (ush)MAX_DIST(s) && + // (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) && + // (ush)d_code(dist) < (ush)D_CODES, "_tr_tally: bad match"); + + s.dyn_ltree[(_length_code[lc] + LITERALS + 1) * 2]/*.Freq*/++; + s.dyn_dtree[d_code(dist) * 2]/*.Freq*/++; + } + +// (!) 
This block is disabled in zlib defaults, +// don't enable it for binary compatibility + +//#ifdef TRUNCATE_BLOCK +// /* Try to guess if it is profitable to stop the current block here */ +// if ((s.last_lit & 0x1fff) === 0 && s.level > 2) { +// /* Compute an upper bound for the compressed length */ +// out_length = s.last_lit*8; +// in_length = s.strstart - s.block_start; +// +// for (dcode = 0; dcode < D_CODES; dcode++) { +// out_length += s.dyn_dtree[dcode*2]/*.Freq*/ * (5 + extra_dbits[dcode]); +// } +// out_length >>>= 3; +// //Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ", +// // s->last_lit, in_length, out_length, +// // 100L - out_length*100L/in_length)); +// if (s.matches < (s.last_lit>>1)/*int /2*/ && out_length < (in_length>>1)/*int /2*/) { +// return true; +// } +// } +//#endif + + return (s.last_lit === s.lit_bufsize - 1); + /* We avoid equality with lit_bufsize because of wraparound at 64K + * on 16 bit machines and because stored blocks are restricted to + * 64K-1 bytes. + */ +} + +exports._tr_init = _tr_init; +exports._tr_stored_block = _tr_stored_block; +exports._tr_flush_block = _tr_flush_block; +exports._tr_tally = _tr_tally; +exports._tr_align = _tr_align; + +},{"../utils/common":50}],62:[function(require,module,exports){ +'use strict'; + +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. 
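+
+// Illustrative sketch (not part of pako) of the block-type decision made by
+// _tr_flush_block() in the trees module above: block costs are compared in
+// bytes, a stored block wins when the raw data plus the two length words is no
+// larger than the best compressed estimate, otherwise static trees are
+// preferred over dynamic trees whenever they are not longer. chooseBlockType
+// and its parameters are hypothetical names and are never called by the bundle.
+function chooseBlockType(storedLen, optLenB, staticLenB, bufAvailable, zFixed) {
+  if (staticLenB <= optLenB) { optLenB = staticLenB; } /* static can only shrink the estimate */
+  if (storedLen + 4 <= optLenB && bufAvailable) {      /* 4 = LEN + NLEN header words */
+    return 'stored';
+  } else if (zFixed || staticLenB === optLenB) {
+    return 'static';
+  }
+  return 'dynamic';
+}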
+ +function ZStream() { + /* next input byte */ + this.input = null; // JS specific, because we have no pointers + this.next_in = 0; + /* number of bytes available at input */ + this.avail_in = 0; + /* total number of input bytes read so far */ + this.total_in = 0; + /* next output byte should be put there */ + this.output = null; // JS specific, because we have no pointers + this.next_out = 0; + /* remaining free space at output */ + this.avail_out = 0; + /* total number of bytes output so far */ + this.total_out = 0; + /* last error message, NULL if no error */ + this.msg = ''/*Z_NULL*/; + /* not visible by applications */ + this.state = null; + /* best guess about the data type: binary or text */ + this.data_type = 2/*Z_UNKNOWN*/; + /* adler32 value of the uncompressed data */ + this.adler = 0; +} + +module.exports = ZStream; + +},{}],63:[function(require,module,exports){ +(function (process){ +'use strict'; + +if (typeof process === 'undefined' || + !process.version || + process.version.indexOf('v0.') === 0 || + process.version.indexOf('v1.') === 0 && process.version.indexOf('v1.8.') !== 0) { + module.exports = { nextTick: nextTick }; +} else { + module.exports = process +} + +function nextTick(fn, arg1, arg2, arg3) { + if (typeof fn !== 'function') { + throw new TypeError('"callback" argument must be a function'); + } + var len = arguments.length; + var args, i; + switch (len) { + case 0: + case 1: + return process.nextTick(fn); + case 2: + return process.nextTick(function afterTickOne() { + fn.call(null, arg1); + }); + case 3: + return process.nextTick(function afterTickTwo() { + fn.call(null, arg1, arg2); + }); + case 4: + return process.nextTick(function afterTickThree() { + fn.call(null, arg1, arg2, arg3); + }); + default: + args = new Array(len - 1); + i = 0; + while (i < args.length) { + args[i++] = arguments[i]; + } + return process.nextTick(function afterTick() { + fn.apply(null, args); + }); + } +} + + +}).call(this,require('_process')) +},{"_process":64}],64:[function(require,module,exports){ +// shim for using process in browser +var process = module.exports = {}; + +// cached from whatever global is present so that test runners that stub it +// don't break things. But we need to wrap it in a try catch in case it is +// wrapped in strict mode code which doesn't define any globals. It's inside a +// function because try/catches deoptimize in certain engines. + +var cachedSetTimeout; +var cachedClearTimeout; + +function defaultSetTimout() { + throw new Error('setTimeout has not been defined'); +} +function defaultClearTimeout () { + throw new Error('clearTimeout has not been defined'); +} +(function () { + try { + if (typeof setTimeout === 'function') { + cachedSetTimeout = setTimeout; + } else { + cachedSetTimeout = defaultSetTimout; + } + } catch (e) { + cachedSetTimeout = defaultSetTimout; + } + try { + if (typeof clearTimeout === 'function') { + cachedClearTimeout = clearTimeout; + } else { + cachedClearTimeout = defaultClearTimeout; + } + } catch (e) { + cachedClearTimeout = defaultClearTimeout; + } +} ()) +function runTimeout(fun) { + if (cachedSetTimeout === setTimeout) { + //normal enviroments in sane situations + return setTimeout(fun, 0); + } + // if setTimeout wasn't available but was latter defined + if ((cachedSetTimeout === defaultSetTimout || !cachedSetTimeout) && setTimeout) { + cachedSetTimeout = setTimeout; + return setTimeout(fun, 0); + } + try { + // when when somebody has screwed with setTimeout but no I.E. 
maddness + return cachedSetTimeout(fun, 0); + } catch(e){ + try { + // When we are in I.E. but the script has been evaled so I.E. doesn't trust the global object when called normally + return cachedSetTimeout.call(null, fun, 0); + } catch(e){ + // same as above but when it's a version of I.E. that must have the global object for 'this', hopfully our context correct otherwise it will throw a global error + return cachedSetTimeout.call(this, fun, 0); + } + } + + +} +function runClearTimeout(marker) { + if (cachedClearTimeout === clearTimeout) { + //normal enviroments in sane situations + return clearTimeout(marker); + } + // if clearTimeout wasn't available but was latter defined + if ((cachedClearTimeout === defaultClearTimeout || !cachedClearTimeout) && clearTimeout) { + cachedClearTimeout = clearTimeout; + return clearTimeout(marker); + } + try { + // when when somebody has screwed with setTimeout but no I.E. maddness + return cachedClearTimeout(marker); + } catch (e){ + try { + // When we are in I.E. but the script has been evaled so I.E. doesn't trust the global object when called normally + return cachedClearTimeout.call(null, marker); + } catch (e){ + // same as above but when it's a version of I.E. that must have the global object for 'this', hopfully our context correct otherwise it will throw a global error. + // Some versions of I.E. have different rules for clearTimeout vs setTimeout + return cachedClearTimeout.call(this, marker); + } + } + + + +} +var queue = []; +var draining = false; +var currentQueue; +var queueIndex = -1; + +function cleanUpNextTick() { + if (!draining || !currentQueue) { + return; + } + draining = false; + if (currentQueue.length) { + queue = currentQueue.concat(queue); + } else { + queueIndex = -1; + } + if (queue.length) { + drainQueue(); + } +} + +function drainQueue() { + if (draining) { + return; + } + var timeout = runTimeout(cleanUpNextTick); + draining = true; + + var len = queue.length; + while(len) { + currentQueue = queue; + queue = []; + while (++queueIndex < len) { + if (currentQueue) { + currentQueue[queueIndex].run(); + } + } + queueIndex = -1; + len = queue.length; + } + currentQueue = null; + draining = false; + runClearTimeout(timeout); +} + +process.nextTick = function (fun) { + var args = new Array(arguments.length - 1); + if (arguments.length > 1) { + for (var i = 1; i < arguments.length; i++) { + args[i - 1] = arguments[i]; + } + } + queue.push(new Item(fun, args)); + if (queue.length === 1 && !draining) { + runTimeout(drainQueue); + } +}; + +// v8 likes predictible objects +function Item(fun, array) { + this.fun = fun; + this.array = array; +} +Item.prototype.run = function () { + this.fun.apply(null, this.array); +}; +process.title = 'browser'; +process.browser = true; +process.env = {}; +process.argv = []; +process.version = ''; // empty string to avoid regexp issues +process.versions = {}; + +function noop() {} + +process.on = noop; +process.addListener = noop; +process.once = noop; +process.off = noop; +process.removeListener = noop; +process.removeAllListeners = noop; +process.emit = noop; +process.prependListener = noop; +process.prependOnceListener = noop; + +process.listeners = function (name) { return [] } + +process.binding = function (name) { + throw new Error('process.binding is not supported'); +}; + +process.cwd = function () { return '/' }; +process.chdir = function (dir) { + throw new Error('process.chdir is not supported'); +}; +process.umask = function() { return 0; }; + +},{}],65:[function(require,module,exports){ 
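+
+// The browser `process` shim that closes above defers callbacks by queueing them
+// and draining the queue from a single timer tick (see `drainQueue` above). A
+// minimal, self-contained sketch of that queue-and-drain pattern; the names are
+// illustrative and the function is not used anywhere in this bundle:
+function exampleDeferQueue() {
+  var queue = [];
+  var scheduled = false;
+  function drain() {
+    scheduled = false;
+    var pending = queue;
+    queue = [];
+    for (var i = 0; i < pending.length; i++) pending[i]();
+  }
+  return {
+    defer: function (fn) {
+      queue.push(fn);
+      if (!scheduled) {        // schedule one drain per batch of deferred callbacks
+        scheduled = true;
+        setTimeout(drain, 0);
+      }
+    }
+  };
+}
+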
+module.exports = require('./lib/_stream_duplex.js'); + +},{"./lib/_stream_duplex.js":66}],66:[function(require,module,exports){ +// Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. + +// a duplex stream is just a stream that is both readable and writable. +// Since JS doesn't have multiple prototypal inheritance, this class +// prototypally inherits from Readable, and then parasitically from +// Writable. + +'use strict'; + +/*<replacement>*/ + +var pna = require('process-nextick-args'); +/*</replacement>*/ + +/*<replacement>*/ +var objectKeys = Object.keys || function (obj) { + var keys = []; + for (var key in obj) { + keys.push(key); + }return keys; +}; +/*</replacement>*/ + +module.exports = Duplex; + +/*<replacement>*/ +var util = Object.create(require('core-util-is')); +util.inherits = require('inherits'); +/*</replacement>*/ + +var Readable = require('./_stream_readable'); +var Writable = require('./_stream_writable'); + +util.inherits(Duplex, Readable); + +{ + // avoid scope creep, the keys array can then be collected + var keys = objectKeys(Writable.prototype); + for (var v = 0; v < keys.length; v++) { + var method = keys[v]; + if (!Duplex.prototype[method]) Duplex.prototype[method] = Writable.prototype[method]; + } +} + +function Duplex(options) { + if (!(this instanceof Duplex)) return new Duplex(options); + + Readable.call(this, options); + Writable.call(this, options); + + if (options && options.readable === false) this.readable = false; + + if (options && options.writable === false) this.writable = false; + + this.allowHalfOpen = true; + if (options && options.allowHalfOpen === false) this.allowHalfOpen = false; + + this.once('end', onend); +} + +Object.defineProperty(Duplex.prototype, 'writableHighWaterMark', { + // making it explicit this property is not enumerable + // because otherwise some prototype manipulation in + // userland will fail + enumerable: false, + get: function () { + return this._writableState.highWaterMark; + } +}); + +// the no-half-open enforcer +function onend() { + // if we allow half-open state, or if the writable side ended, + // then we're ok. + if (this.allowHalfOpen || this._writableState.ended) return; + + // no more data can be written. + // But allow more writes to happen in this tick. 
+ pna.nextTick(onEndNT, this); +} + +function onEndNT(self) { + self.end(); +} + +Object.defineProperty(Duplex.prototype, 'destroyed', { + get: function () { + if (this._readableState === undefined || this._writableState === undefined) { + return false; + } + return this._readableState.destroyed && this._writableState.destroyed; + }, + set: function (value) { + // we ignore the value if the stream + // has not been initialized yet + if (this._readableState === undefined || this._writableState === undefined) { + return; + } + + // backward compatibility, the user is explicitly + // managing destroyed + this._readableState.destroyed = value; + this._writableState.destroyed = value; + } +}); + +Duplex.prototype._destroy = function (err, cb) { + this.push(null); + this.end(); + + pna.nextTick(cb, err); +}; +},{"./_stream_readable":68,"./_stream_writable":70,"core-util-is":4,"inherits":8,"process-nextick-args":63}],67:[function(require,module,exports){ +// Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. + +// a passthrough stream. +// basically just the most minimal sort of Transform stream. +// Every written chunk gets output as-is. + +'use strict'; + +module.exports = PassThrough; + +var Transform = require('./_stream_transform'); + +/*<replacement>*/ +var util = Object.create(require('core-util-is')); +util.inherits = require('inherits'); +/*</replacement>*/ + +util.inherits(PassThrough, Transform); + +function PassThrough(options) { + if (!(this instanceof PassThrough)) return new PassThrough(options); + + Transform.call(this, options); +} + +PassThrough.prototype._transform = function (chunk, encoding, cb) { + cb(null, chunk); +}; +},{"./_stream_transform":69,"core-util-is":4,"inherits":8}],68:[function(require,module,exports){ +(function (process,global){ +// Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. + +'use strict'; + +/*<replacement>*/ + +var pna = require('process-nextick-args'); +/*</replacement>*/ + +module.exports = Readable; + +/*<replacement>*/ +var isArray = require('isarray'); +/*</replacement>*/ + +/*<replacement>*/ +var Duplex; +/*</replacement>*/ + +Readable.ReadableState = ReadableState; + +/*<replacement>*/ +var EE = require('events').EventEmitter; + +var EElistenerCount = function (emitter, type) { + return emitter.listeners(type).length; +}; +/*</replacement>*/ + +/*<replacement>*/ +var Stream = require('./internal/streams/stream'); +/*</replacement>*/ + +/*<replacement>*/ + +var Buffer = require('safe-buffer').Buffer; +var OurUint8Array = global.Uint8Array || function () {}; +function _uint8ArrayToBuffer(chunk) { + return Buffer.from(chunk); +} +function _isUint8Array(obj) { + return Buffer.isBuffer(obj) || obj instanceof OurUint8Array; +} + +/*</replacement>*/ + +/*<replacement>*/ +var util = Object.create(require('core-util-is')); +util.inherits = require('inherits'); +/*</replacement>*/ + +/*<replacement>*/ +var debugUtil = require('util'); +var debug = void 0; +if (debugUtil && debugUtil.debuglog) { + debug = debugUtil.debuglog('stream'); +} else { + debug = function () {}; +} +/*</replacement>*/ + +var BufferList = require('./internal/streams/BufferList'); +var destroyImpl = require('./internal/streams/destroy'); +var StringDecoder; + +util.inherits(Readable, Stream); + +var kProxyEvents = ['error', 'close', 'destroy', 'pause', 'resume']; + +function prependListener(emitter, event, fn) { + // Sadly this is not cacheable as some libraries bundle their own + // event emitter implementation with them. + if (typeof emitter.prependListener === 'function') return emitter.prependListener(event, fn); + + // This is a hack to make sure that our error handler is attached before any + // userland ones. NEVER DO THIS. This is here only because this code needs + // to continue to work with older versions of Node.js that do not include + // the prependListener() method. The goal is to eventually remove this hack. + if (!emitter._events || !emitter._events[event]) emitter.on(event, fn);else if (isArray(emitter._events[event])) emitter._events[event].unshift(fn);else emitter._events[event] = [fn, emitter._events[event]]; +} + +function ReadableState(options, stream) { + Duplex = Duplex || require('./_stream_duplex'); + + options = options || {}; + + // Duplex streams are both readable and writable, but share + // the same options object. + // However, some cases require setting options to different + // values for the readable and the writable sides of the duplex stream. + // These options can be provided separately as readableXXX and writableXXX. + var isDuplex = stream instanceof Duplex; + + // object stream flag. 
Used to make read(n) ignore n and to + // make all the buffer merging and length checks go away + this.objectMode = !!options.objectMode; + + if (isDuplex) this.objectMode = this.objectMode || !!options.readableObjectMode; + + // the point at which it stops calling _read() to fill the buffer + // Note: 0 is a valid value, means "don't call _read preemptively ever" + var hwm = options.highWaterMark; + var readableHwm = options.readableHighWaterMark; + var defaultHwm = this.objectMode ? 16 : 16 * 1024; + + if (hwm || hwm === 0) this.highWaterMark = hwm;else if (isDuplex && (readableHwm || readableHwm === 0)) this.highWaterMark = readableHwm;else this.highWaterMark = defaultHwm; + + // cast to ints. + this.highWaterMark = Math.floor(this.highWaterMark); + + // A linked list is used to store data chunks instead of an array because the + // linked list can remove elements from the beginning faster than + // array.shift() + this.buffer = new BufferList(); + this.length = 0; + this.pipes = null; + this.pipesCount = 0; + this.flowing = null; + this.ended = false; + this.endEmitted = false; + this.reading = false; + + // a flag to be able to tell if the event 'readable'/'data' is emitted + // immediately, or on a later tick. We set this to true at first, because + // any actions that shouldn't happen until "later" should generally also + // not happen before the first read call. + this.sync = true; + + // whenever we return null, then we set a flag to say + // that we're awaiting a 'readable' event emission. + this.needReadable = false; + this.emittedReadable = false; + this.readableListening = false; + this.resumeScheduled = false; + + // has it been destroyed + this.destroyed = false; + + // Crypto is kind of old and crusty. Historically, its default string + // encoding is 'binary' so we have to make this configurable. + // Everything else in the universe uses 'utf8', though. + this.defaultEncoding = options.defaultEncoding || 'utf8'; + + // the number of writers that are awaiting a drain event in .pipe()s + this.awaitDrain = 0; + + // if true, a maybeReadMore has been scheduled + this.readingMore = false; + + this.decoder = null; + this.encoding = null; + if (options.encoding) { + if (!StringDecoder) StringDecoder = require('string_decoder/').StringDecoder; + this.decoder = new StringDecoder(options.encoding); + this.encoding = options.encoding; + } +} + +function Readable(options) { + Duplex = Duplex || require('./_stream_duplex'); + + if (!(this instanceof Readable)) return new Readable(options); + + this._readableState = new ReadableState(options, this); + + // legacy + this.readable = true; + + if (options) { + if (typeof options.read === 'function') this._read = options.read; + + if (typeof options.destroy === 'function') this._destroy = options.destroy; + } + + Stream.call(this); +} + +Object.defineProperty(Readable.prototype, 'destroyed', { + get: function () { + if (this._readableState === undefined) { + return false; + } + return this._readableState.destroyed; + }, + set: function (value) { + // we ignore the value if the stream + // has not been initialized yet + if (!this._readableState) { + return; + } + + // backward compatibility, the user is explicitly + // managing destroyed + this._readableState.destroyed = value; + } +}); + +Readable.prototype.destroy = destroyImpl.destroy; +Readable.prototype._undestroy = destroyImpl.undestroy; +Readable.prototype._destroy = function (err, cb) { + this.push(null); + cb(err); +}; + +// Manually shove something into the read() buffer. 
+// This returns true if the highWaterMark has not been hit yet, +// similar to how Writable.write() returns true if you should +// write() some more. +Readable.prototype.push = function (chunk, encoding) { + var state = this._readableState; + var skipChunkCheck; + + if (!state.objectMode) { + if (typeof chunk === 'string') { + encoding = encoding || state.defaultEncoding; + if (encoding !== state.encoding) { + chunk = Buffer.from(chunk, encoding); + encoding = ''; + } + skipChunkCheck = true; + } + } else { + skipChunkCheck = true; + } + + return readableAddChunk(this, chunk, encoding, false, skipChunkCheck); +}; + +// Unshift should *always* be something directly out of read() +Readable.prototype.unshift = function (chunk) { + return readableAddChunk(this, chunk, null, true, false); +}; + +function readableAddChunk(stream, chunk, encoding, addToFront, skipChunkCheck) { + var state = stream._readableState; + if (chunk === null) { + state.reading = false; + onEofChunk(stream, state); + } else { + var er; + if (!skipChunkCheck) er = chunkInvalid(state, chunk); + if (er) { + stream.emit('error', er); + } else if (state.objectMode || chunk && chunk.length > 0) { + if (typeof chunk !== 'string' && !state.objectMode && Object.getPrototypeOf(chunk) !== Buffer.prototype) { + chunk = _uint8ArrayToBuffer(chunk); + } + + if (addToFront) { + if (state.endEmitted) stream.emit('error', new Error('stream.unshift() after end event'));else addChunk(stream, state, chunk, true); + } else if (state.ended) { + stream.emit('error', new Error('stream.push() after EOF')); + } else { + state.reading = false; + if (state.decoder && !encoding) { + chunk = state.decoder.write(chunk); + if (state.objectMode || chunk.length !== 0) addChunk(stream, state, chunk, false);else maybeReadMore(stream, state); + } else { + addChunk(stream, state, chunk, false); + } + } + } else if (!addToFront) { + state.reading = false; + } + } + + return needMoreData(state); +} + +function addChunk(stream, state, chunk, addToFront) { + if (state.flowing && state.length === 0 && !state.sync) { + stream.emit('data', chunk); + stream.read(0); + } else { + // update the buffer info. + state.length += state.objectMode ? 1 : chunk.length; + if (addToFront) state.buffer.unshift(chunk);else state.buffer.push(chunk); + + if (state.needReadable) emitReadable(stream); + } + maybeReadMore(stream, state); +} + +function chunkInvalid(state, chunk) { + var er; + if (!_isUint8Array(chunk) && typeof chunk !== 'string' && chunk !== undefined && !state.objectMode) { + er = new TypeError('Invalid non-string/buffer chunk'); + } + return er; +} + +// if it's past the high water mark, we can push in some more. +// Also, if we have no data yet, we can stand some +// more bytes. This is to work around cases where hwm=0, +// such as the repl. Also, if the push() triggered a +// readable event, and the user called read(largeNumber) such that +// needReadable was set, then we ought to push more, so that another +// 'readable' event will be triggered. +function needMoreData(state) { + return !state.ended && (state.needReadable || state.length < state.highWaterMark || state.length === 0); +} + +Readable.prototype.isPaused = function () { + return this._readableState.flowing === false; +}; + +// backwards compatibility. 
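+
+// `push()` above returns `true` while the internal buffer is still below the
+// highWaterMark and `false` once the producer should pause, mirroring
+// `Writable#write()`. A hedged sketch of a source that honours that signal, using
+// the options-style constructor handled above (`options.read` is wired to `_read`);
+// the factory below is illustrative only and never called by this bundle:
+function exampleCountingSource(limit) {
+  var i = 0;
+  return new Readable({
+    read: function () {
+      // keep pushing until push() asks us to back off or we run out of data
+      while (i < limit) {
+        var keepGoing = this.push('chunk ' + i++ + '\n');
+        if (!keepGoing) return;     // wait for the next _read() call
+      }
+      this.push(null);              // signal end-of-stream
+    }
+  });
+}
+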
+Readable.prototype.setEncoding = function (enc) { + if (!StringDecoder) StringDecoder = require('string_decoder/').StringDecoder; + this._readableState.decoder = new StringDecoder(enc); + this._readableState.encoding = enc; + return this; +}; + +// Don't raise the hwm > 8MB +var MAX_HWM = 0x800000; +function computeNewHighWaterMark(n) { + if (n >= MAX_HWM) { + n = MAX_HWM; + } else { + // Get the next highest power of 2 to prevent increasing hwm excessively in + // tiny amounts + n--; + n |= n >>> 1; + n |= n >>> 2; + n |= n >>> 4; + n |= n >>> 8; + n |= n >>> 16; + n++; + } + return n; +} + +// This function is designed to be inlinable, so please take care when making +// changes to the function body. +function howMuchToRead(n, state) { + if (n <= 0 || state.length === 0 && state.ended) return 0; + if (state.objectMode) return 1; + if (n !== n) { + // Only flow one buffer at a time + if (state.flowing && state.length) return state.buffer.head.data.length;else return state.length; + } + // If we're asking for more than the current hwm, then raise the hwm. + if (n > state.highWaterMark) state.highWaterMark = computeNewHighWaterMark(n); + if (n <= state.length) return n; + // Don't have enough + if (!state.ended) { + state.needReadable = true; + return 0; + } + return state.length; +} + +// you can override either this method, or the async _read(n) below. +Readable.prototype.read = function (n) { + debug('read', n); + n = parseInt(n, 10); + var state = this._readableState; + var nOrig = n; + + if (n !== 0) state.emittedReadable = false; + + // if we're doing read(0) to trigger a readable event, but we + // already have a bunch of data in the buffer, then just trigger + // the 'readable' event and move on. + if (n === 0 && state.needReadable && (state.length >= state.highWaterMark || state.ended)) { + debug('read: emitReadable', state.length, state.ended); + if (state.length === 0 && state.ended) endReadable(this);else emitReadable(this); + return null; + } + + n = howMuchToRead(n, state); + + // if we've ended, and we're now clear, then finish it up. + if (n === 0 && state.ended) { + if (state.length === 0) endReadable(this); + return null; + } + + // All the actual chunk generation logic needs to be + // *below* the call to _read. The reason is that in certain + // synthetic stream cases, such as passthrough streams, _read + // may be a completely synchronous operation which may change + // the state of the read buffer, providing enough data when + // before there was *not* enough. + // + // So, the steps are: + // 1. Figure out what the state of things will be after we do + // a read from the buffer. + // + // 2. If that resulting state will trigger a _read, then call _read. + // Note that this may be asynchronous, or synchronous. Yes, it is + // deeply ugly to write APIs this way, but that still doesn't mean + // that the Readable class should behave improperly, as streams are + // designed to be sync/async agnostic. + // Take note if the _read call is sync or async (ie, if the read call + // has returned yet), so that we know whether or not it's safe to emit + // 'readable' etc. + // + // 3. Actually pull the requested chunks out of the buffer and return. + + // if we need a readable event, then we need to do some reading. 
+ var doRead = state.needReadable; + debug('need readable', doRead); + + // if we currently have less than the highWaterMark, then also read some + if (state.length === 0 || state.length - n < state.highWaterMark) { + doRead = true; + debug('length less than watermark', doRead); + } + + // however, if we've ended, then there's no point, and if we're already + // reading, then it's unnecessary. + if (state.ended || state.reading) { + doRead = false; + debug('reading or ended', doRead); + } else if (doRead) { + debug('do read'); + state.reading = true; + state.sync = true; + // if the length is currently zero, then we *need* a readable event. + if (state.length === 0) state.needReadable = true; + // call internal read method + this._read(state.highWaterMark); + state.sync = false; + // If _read pushed data synchronously, then `reading` will be false, + // and we need to re-evaluate how much data we can return to the user. + if (!state.reading) n = howMuchToRead(nOrig, state); + } + + var ret; + if (n > 0) ret = fromList(n, state);else ret = null; + + if (ret === null) { + state.needReadable = true; + n = 0; + } else { + state.length -= n; + } + + if (state.length === 0) { + // If we have nothing in the buffer, then we want to know + // as soon as we *do* get something into the buffer. + if (!state.ended) state.needReadable = true; + + // If we tried to read() past the EOF, then emit end on the next tick. + if (nOrig !== n && state.ended) endReadable(this); + } + + if (ret !== null) this.emit('data', ret); + + return ret; +}; + +function onEofChunk(stream, state) { + if (state.ended) return; + if (state.decoder) { + var chunk = state.decoder.end(); + if (chunk && chunk.length) { + state.buffer.push(chunk); + state.length += state.objectMode ? 1 : chunk.length; + } + } + state.ended = true; + + // emit 'readable' now to make sure it gets picked up. + emitReadable(stream); +} + +// Don't emit readable right away in sync mode, because this can trigger +// another read() call => stack overflow. This way, it might trigger +// a nextTick recursion warning, but that's not so bad. +function emitReadable(stream) { + var state = stream._readableState; + state.needReadable = false; + if (!state.emittedReadable) { + debug('emitReadable', state.flowing); + state.emittedReadable = true; + if (state.sync) pna.nextTick(emitReadable_, stream);else emitReadable_(stream); + } +} + +function emitReadable_(stream) { + debug('emit readable'); + stream.emit('readable'); + flow(stream); +} + +// at this point, the user has presumably seen the 'readable' event, +// and called read() to consume some data. that may have triggered +// in turn another _read(n) call, in which case reading = true if +// it's in progress. +// However, if we're not ended, or reading, and the length < hwm, +// then go ahead and try to read some more preemptively. +function maybeReadMore(stream, state) { + if (!state.readingMore) { + state.readingMore = true; + pna.nextTick(maybeReadMore_, stream, state); + } +} + +function maybeReadMore_(stream, state) { + var len = state.length; + while (!state.reading && !state.flowing && !state.ended && state.length < state.highWaterMark) { + debug('maybeReadMore read 0'); + stream.read(0); + if (len === state.length) + // didn't get any data, stop spinning. + break;else len = state.length; + } + state.readingMore = false; +} + +// abstract method. to be overridden in specific implementation classes. +// call cb(er, data) where data is <= n in length. 
+// for virtual (non-string, non-buffer) streams, "length" is somewhat +// arbitrary, and perhaps not very meaningful. +Readable.prototype._read = function (n) { + this.emit('error', new Error('_read() is not implemented')); +}; + +Readable.prototype.pipe = function (dest, pipeOpts) { + var src = this; + var state = this._readableState; + + switch (state.pipesCount) { + case 0: + state.pipes = dest; + break; + case 1: + state.pipes = [state.pipes, dest]; + break; + default: + state.pipes.push(dest); + break; + } + state.pipesCount += 1; + debug('pipe count=%d opts=%j', state.pipesCount, pipeOpts); + + var doEnd = (!pipeOpts || pipeOpts.end !== false) && dest !== process.stdout && dest !== process.stderr; + + var endFn = doEnd ? onend : unpipe; + if (state.endEmitted) pna.nextTick(endFn);else src.once('end', endFn); + + dest.on('unpipe', onunpipe); + function onunpipe(readable, unpipeInfo) { + debug('onunpipe'); + if (readable === src) { + if (unpipeInfo && unpipeInfo.hasUnpiped === false) { + unpipeInfo.hasUnpiped = true; + cleanup(); + } + } + } + + function onend() { + debug('onend'); + dest.end(); + } + + // when the dest drains, it reduces the awaitDrain counter + // on the source. This would be more elegant with a .once() + // handler in flow(), but adding and removing repeatedly is + // too slow. + var ondrain = pipeOnDrain(src); + dest.on('drain', ondrain); + + var cleanedUp = false; + function cleanup() { + debug('cleanup'); + // cleanup event handlers once the pipe is broken + dest.removeListener('close', onclose); + dest.removeListener('finish', onfinish); + dest.removeListener('drain', ondrain); + dest.removeListener('error', onerror); + dest.removeListener('unpipe', onunpipe); + src.removeListener('end', onend); + src.removeListener('end', unpipe); + src.removeListener('data', ondata); + + cleanedUp = true; + + // if the reader is waiting for a drain event from this + // specific writer, then it would cause it to never start + // flowing again. + // So, if this is awaiting a drain, then we just call it now. + // If we don't know, then assume that we are waiting for one. + if (state.awaitDrain && (!dest._writableState || dest._writableState.needDrain)) ondrain(); + } + + // If the user pushes more data while we're writing to dest then we'll end up + // in ondata again. However, we only want to increase awaitDrain once because + // dest will only emit one 'drain' event for the multiple writes. + // => Introduce a guard on increasing awaitDrain. + var increasedAwaitDrain = false; + src.on('data', ondata); + function ondata(chunk) { + debug('ondata'); + increasedAwaitDrain = false; + var ret = dest.write(chunk); + if (false === ret && !increasedAwaitDrain) { + // If the user unpiped during `dest.write()`, it is possible + // to get stuck in a permanently paused state if that write + // also returned false. + // => Check whether `dest` is still a piping destination. + if ((state.pipesCount === 1 && state.pipes === dest || state.pipesCount > 1 && indexOf(state.pipes, dest) !== -1) && !cleanedUp) { + debug('false write response, pause', src._readableState.awaitDrain); + src._readableState.awaitDrain++; + increasedAwaitDrain = true; + } + src.pause(); + } + } + + // if the dest has an error, then stop piping into it. + // however, don't suppress the throwing behavior for this. 
+ function onerror(er) { + debug('onerror', er); + unpipe(); + dest.removeListener('error', onerror); + if (EElistenerCount(dest, 'error') === 0) dest.emit('error', er); + } + + // Make sure our error handler is attached before userland ones. + prependListener(dest, 'error', onerror); + + // Both close and finish should trigger unpipe, but only once. + function onclose() { + dest.removeListener('finish', onfinish); + unpipe(); + } + dest.once('close', onclose); + function onfinish() { + debug('onfinish'); + dest.removeListener('close', onclose); + unpipe(); + } + dest.once('finish', onfinish); + + function unpipe() { + debug('unpipe'); + src.unpipe(dest); + } + + // tell the dest that it's being piped to + dest.emit('pipe', src); + + // start the flow if it hasn't been started already. + if (!state.flowing) { + debug('pipe resume'); + src.resume(); + } + + return dest; +}; + +function pipeOnDrain(src) { + return function () { + var state = src._readableState; + debug('pipeOnDrain', state.awaitDrain); + if (state.awaitDrain) state.awaitDrain--; + if (state.awaitDrain === 0 && EElistenerCount(src, 'data')) { + state.flowing = true; + flow(src); + } + }; +} + +Readable.prototype.unpipe = function (dest) { + var state = this._readableState; + var unpipeInfo = { hasUnpiped: false }; + + // if we're not piping anywhere, then do nothing. + if (state.pipesCount === 0) return this; + + // just one destination. most common case. + if (state.pipesCount === 1) { + // passed in one, but it's not the right one. + if (dest && dest !== state.pipes) return this; + + if (!dest) dest = state.pipes; + + // got a match. + state.pipes = null; + state.pipesCount = 0; + state.flowing = false; + if (dest) dest.emit('unpipe', this, unpipeInfo); + return this; + } + + // slow case. multiple pipe destinations. + + if (!dest) { + // remove all. + var dests = state.pipes; + var len = state.pipesCount; + state.pipes = null; + state.pipesCount = 0; + state.flowing = false; + + for (var i = 0; i < len; i++) { + dests[i].emit('unpipe', this, unpipeInfo); + }return this; + } + + // try to find the right one. + var index = indexOf(state.pipes, dest); + if (index === -1) return this; + + state.pipes.splice(index, 1); + state.pipesCount -= 1; + if (state.pipesCount === 1) state.pipes = state.pipes[0]; + + dest.emit('unpipe', this, unpipeInfo); + + return this; +}; + +// set up data events if they are asked for +// Ensure readable listeners eventually get something +Readable.prototype.on = function (ev, fn) { + var res = Stream.prototype.on.call(this, ev, fn); + + if (ev === 'data') { + // Start flowing on next tick if stream isn't explicitly paused + if (this._readableState.flowing !== false) this.resume(); + } else if (ev === 'readable') { + var state = this._readableState; + if (!state.endEmitted && !state.readableListening) { + state.readableListening = state.needReadable = true; + state.emittedReadable = false; + if (!state.reading) { + pna.nextTick(nReadingNextTick, this); + } else if (state.length) { + emitReadable(this); + } + } + } + + return res; +}; +Readable.prototype.addListener = Readable.prototype.on; + +function nReadingNextTick(self) { + debug('readable nexttick read 0'); + self.read(0); +} + +// pause() and resume() are remnants of the legacy readable stream API +// If the user uses them, then switch into old mode. 
+Readable.prototype.resume = function () { + var state = this._readableState; + if (!state.flowing) { + debug('resume'); + state.flowing = true; + resume(this, state); + } + return this; +}; + +function resume(stream, state) { + if (!state.resumeScheduled) { + state.resumeScheduled = true; + pna.nextTick(resume_, stream, state); + } +} + +function resume_(stream, state) { + if (!state.reading) { + debug('resume read 0'); + stream.read(0); + } + + state.resumeScheduled = false; + state.awaitDrain = 0; + stream.emit('resume'); + flow(stream); + if (state.flowing && !state.reading) stream.read(0); +} + +Readable.prototype.pause = function () { + debug('call pause flowing=%j', this._readableState.flowing); + if (false !== this._readableState.flowing) { + debug('pause'); + this._readableState.flowing = false; + this.emit('pause'); + } + return this; +}; + +function flow(stream) { + var state = stream._readableState; + debug('flow', state.flowing); + while (state.flowing && stream.read() !== null) {} +} + +// wrap an old-style stream as the async data source. +// This is *not* part of the readable stream interface. +// It is an ugly unfortunate mess of history. +Readable.prototype.wrap = function (stream) { + var _this = this; + + var state = this._readableState; + var paused = false; + + stream.on('end', function () { + debug('wrapped end'); + if (state.decoder && !state.ended) { + var chunk = state.decoder.end(); + if (chunk && chunk.length) _this.push(chunk); + } + + _this.push(null); + }); + + stream.on('data', function (chunk) { + debug('wrapped data'); + if (state.decoder) chunk = state.decoder.write(chunk); + + // don't skip over falsy values in objectMode + if (state.objectMode && (chunk === null || chunk === undefined)) return;else if (!state.objectMode && (!chunk || !chunk.length)) return; + + var ret = _this.push(chunk); + if (!ret) { + paused = true; + stream.pause(); + } + }); + + // proxy all the other methods. + // important when wrapping filters and duplexes. + for (var i in stream) { + if (this[i] === undefined && typeof stream[i] === 'function') { + this[i] = function (method) { + return function () { + return stream[method].apply(stream, arguments); + }; + }(i); + } + } + + // proxy certain important events. + for (var n = 0; n < kProxyEvents.length; n++) { + stream.on(kProxyEvents[n], this.emit.bind(this, kProxyEvents[n])); + } + + // when we try to consume some more bytes, simply unpause the + // underlying stream. + this._read = function (n) { + debug('wrapped _read', n); + if (paused) { + paused = false; + stream.resume(); + } + }; + + return this; +}; + +Object.defineProperty(Readable.prototype, 'readableHighWaterMark', { + // making it explicit this property is not enumerable + // because otherwise some prototype manipulation in + // userland will fail + enumerable: false, + get: function () { + return this._readableState.highWaterMark; + } +}); + +// exposed for testing purposes only. +Readable._fromList = fromList; + +// Pluck off n bytes from an array of buffers. +// Length is the combined lengths of all the buffers in the list. +// This function is designed to be inlinable, so please take care when making +// changes to the function body. 
+function fromList(n, state) { + // nothing buffered + if (state.length === 0) return null; + + var ret; + if (state.objectMode) ret = state.buffer.shift();else if (!n || n >= state.length) { + // read it all, truncate the list + if (state.decoder) ret = state.buffer.join('');else if (state.buffer.length === 1) ret = state.buffer.head.data;else ret = state.buffer.concat(state.length); + state.buffer.clear(); + } else { + // read part of list + ret = fromListPartial(n, state.buffer, state.decoder); + } + + return ret; +} + +// Extracts only enough buffered data to satisfy the amount requested. +// This function is designed to be inlinable, so please take care when making +// changes to the function body. +function fromListPartial(n, list, hasStrings) { + var ret; + if (n < list.head.data.length) { + // slice is the same for buffers and strings + ret = list.head.data.slice(0, n); + list.head.data = list.head.data.slice(n); + } else if (n === list.head.data.length) { + // first chunk is a perfect match + ret = list.shift(); + } else { + // result spans more than one buffer + ret = hasStrings ? copyFromBufferString(n, list) : copyFromBuffer(n, list); + } + return ret; +} + +// Copies a specified amount of characters from the list of buffered data +// chunks. +// This function is designed to be inlinable, so please take care when making +// changes to the function body. +function copyFromBufferString(n, list) { + var p = list.head; + var c = 1; + var ret = p.data; + n -= ret.length; + while (p = p.next) { + var str = p.data; + var nb = n > str.length ? str.length : n; + if (nb === str.length) ret += str;else ret += str.slice(0, n); + n -= nb; + if (n === 0) { + if (nb === str.length) { + ++c; + if (p.next) list.head = p.next;else list.head = list.tail = null; + } else { + list.head = p; + p.data = str.slice(nb); + } + break; + } + ++c; + } + list.length -= c; + return ret; +} + +// Copies a specified amount of bytes from the list of buffered data chunks. +// This function is designed to be inlinable, so please take care when making +// changes to the function body. +function copyFromBuffer(n, list) { + var ret = Buffer.allocUnsafe(n); + var p = list.head; + var c = 1; + p.data.copy(ret); + n -= p.data.length; + while (p = p.next) { + var buf = p.data; + var nb = n > buf.length ? buf.length : n; + buf.copy(ret, ret.length - n, 0, nb); + n -= nb; + if (n === 0) { + if (nb === buf.length) { + ++c; + if (p.next) list.head = p.next;else list.head = list.tail = null; + } else { + list.head = p; + p.data = buf.slice(nb); + } + break; + } + ++c; + } + list.length -= c; + return ret; +} + +function endReadable(stream) { + var state = stream._readableState; + + // If we get here before consuming all the bytes, then that is a + // bug in node. Should never happen. + if (state.length > 0) throw new Error('"endReadable()" called on non-empty stream'); + + if (!state.endEmitted) { + state.ended = true; + pna.nextTick(endReadableNT, state, stream); + } +} + +function endReadableNT(state, stream) { + // Check that we didn't get one last unshift. + if (!state.endEmitted && state.length === 0) { + state.endEmitted = true; + stream.readable = false; + stream.emit('end'); + } +} + +function indexOf(xs, x) { + for (var i = 0, l = xs.length; i < l; i++) { + if (xs[i] === x) return i; + } + return -1; +} +}).call(this,require('_process'),typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? 
window : {}) +},{"./_stream_duplex":66,"./internal/streams/BufferList":71,"./internal/streams/destroy":72,"./internal/streams/stream":73,"_process":64,"core-util-is":4,"events":5,"inherits":8,"isarray":10,"process-nextick-args":63,"safe-buffer":78,"string_decoder/":81,"util":2}],69:[function(require,module,exports){ +// Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. + +// a transform stream is a readable/writable stream where you do +// something with the data. Sometimes it's called a "filter", +// but that's not a great name for it, since that implies a thing where +// some bits pass through, and others are simply ignored. (That would +// be a valid example of a transform, of course.) +// +// While the output is causally related to the input, it's not a +// necessarily symmetric or synchronous transformation. For example, +// a zlib stream might take multiple plain-text writes(), and then +// emit a single compressed chunk some time in the future. +// +// Here's how this works: +// +// The Transform stream has all the aspects of the readable and writable +// stream classes. When you write(chunk), that calls _write(chunk,cb) +// internally, and returns false if there's a lot of pending writes +// buffered up. When you call read(), that calls _read(n) until +// there's enough pending readable data buffered up. +// +// In a transform stream, the written data is placed in a buffer. When +// _read(n) is called, it transforms the queued up data, calling the +// buffered _write cb's as it consumes chunks. If consuming a single +// written chunk would result in multiple output chunks, then the first +// outputted bit calls the readcb, and subsequent chunks just go into +// the read buffer, and will cause it to emit 'readable' if necessary. +// +// This way, back-pressure is actually determined by the reading side, +// since _read has to be called to start processing a new chunk. However, +// a pathological inflate type of transform can cause excessive buffering +// here. For example, imagine a stream where every byte of input is +// interpreted as an integer from 0-255, and then results in that many +// bytes of output. Writing the 4 bytes {ff,ff,ff,ff} would result in +// 1kb of data being output. In this case, you could write a very small +// amount of input, and end up with a very large amount of output. 
In +// such a pathological inflating mechanism, there'd be no way to tell +// the system to stop doing the transform. A single 4MB write could +// cause the system to run out of memory. +// +// However, even in such a pathological case, only a single written chunk +// would be consumed, and then the rest would wait (un-transformed) until +// the results of the previous transformed chunk were consumed. + +'use strict'; + +module.exports = Transform; + +var Duplex = require('./_stream_duplex'); + +/*<replacement>*/ +var util = Object.create(require('core-util-is')); +util.inherits = require('inherits'); +/*</replacement>*/ + +util.inherits(Transform, Duplex); + +function afterTransform(er, data) { + var ts = this._transformState; + ts.transforming = false; + + var cb = ts.writecb; + + if (!cb) { + return this.emit('error', new Error('write callback called multiple times')); + } + + ts.writechunk = null; + ts.writecb = null; + + if (data != null) // single equals check for both `null` and `undefined` + this.push(data); + + cb(er); + + var rs = this._readableState; + rs.reading = false; + if (rs.needReadable || rs.length < rs.highWaterMark) { + this._read(rs.highWaterMark); + } +} + +function Transform(options) { + if (!(this instanceof Transform)) return new Transform(options); + + Duplex.call(this, options); + + this._transformState = { + afterTransform: afterTransform.bind(this), + needTransform: false, + transforming: false, + writecb: null, + writechunk: null, + writeencoding: null + }; + + // start out asking for a readable event once data is transformed. + this._readableState.needReadable = true; + + // we have implemented the _read method, and done the other things + // that Readable wants before the first _read call, so unset the + // sync guard flag. + this._readableState.sync = false; + + if (options) { + if (typeof options.transform === 'function') this._transform = options.transform; + + if (typeof options.flush === 'function') this._flush = options.flush; + } + + // When the writable side finishes, then flush out anything remaining. + this.on('prefinish', prefinish); +} + +function prefinish() { + var _this = this; + + if (typeof this._flush === 'function') { + this._flush(function (er, data) { + done(_this, er, data); + }); + } else { + done(this, null, null); + } +} + +Transform.prototype.push = function (chunk, encoding) { + this._transformState.needTransform = false; + return Duplex.prototype.push.call(this, chunk, encoding); +}; + +// This is the part where you do stuff! +// override this function in implementation classes. +// 'chunk' is an input chunk. +// +// Call `push(newChunk)` to pass along transformed output +// to the readable side. You may call 'push' zero or more times. +// +// Call `cb(err)` when you are done with this chunk. If you pass +// an error, then that'll put the hurt on the whole operation. If you +// never call cb(), then you'll never get another chunk. +Transform.prototype._transform = function (chunk, encoding, cb) { + throw new Error('_transform() is not implemented'); +}; + +Transform.prototype._write = function (chunk, encoding, cb) { + var ts = this._transformState; + ts.writecb = cb; + ts.writechunk = chunk; + ts.writeencoding = encoding; + if (!ts.transforming) { + var rs = this._readableState; + if (ts.needTransform || rs.needReadable || rs.length < rs.highWaterMark) this._read(rs.highWaterMark); + } +}; + +// Doesn't matter what the args are here. +// _transform does all the work. 
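+
+// A hedged sketch of the contract documented above: override `_transform`,
+// `push()` zero or more output chunks, then call `cb()` exactly once. It uses the
+// options-style constructor handled in `Transform(options)` above; the factory
+// below is illustrative only and nothing in this bundle calls it:
+function exampleUpperCaseTransform() {
+  return new Transform({
+    transform: function (chunk, encoding, cb) {
+      // one written chunk may produce zero or more push() calls before cb()
+      this.push(String(chunk).toUpperCase());
+      cb();
+    }
+  });
+}
+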
+// That we got here means that the readable side wants more data. +Transform.prototype._read = function (n) { + var ts = this._transformState; + + if (ts.writechunk !== null && ts.writecb && !ts.transforming) { + ts.transforming = true; + this._transform(ts.writechunk, ts.writeencoding, ts.afterTransform); + } else { + // mark that we need a transform, so that any data that comes in + // will get processed, now that we've asked for it. + ts.needTransform = true; + } +}; + +Transform.prototype._destroy = function (err, cb) { + var _this2 = this; + + Duplex.prototype._destroy.call(this, err, function (err2) { + cb(err2); + _this2.emit('close'); + }); +}; + +function done(stream, er, data) { + if (er) return stream.emit('error', er); + + if (data != null) // single equals check for both `null` and `undefined` + stream.push(data); + + // if there's nothing in the write buffer, then that means + // that nothing more will ever be provided + if (stream._writableState.length) throw new Error('Calling transform done when ws.length != 0'); + + if (stream._transformState.transforming) throw new Error('Calling transform done when still transforming'); + + return stream.push(null); +} +},{"./_stream_duplex":66,"core-util-is":4,"inherits":8}],70:[function(require,module,exports){ +(function (process,global,setImmediate){ +// Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. + +// A bit simpler than readable streams. +// Implement an async ._write(chunk, encoding, cb), and it'll handle all +// the drain event emission and buffering. + +'use strict'; + +/*<replacement>*/ + +var pna = require('process-nextick-args'); +/*</replacement>*/ + +module.exports = Writable; + +/* <replacement> */ +function WriteReq(chunk, encoding, cb) { + this.chunk = chunk; + this.encoding = encoding; + this.callback = cb; + this.next = null; +} + +// It seems a linked list but it is not +// there will be only 2 of these for each stream +function CorkedRequest(state) { + var _this = this; + + this.next = null; + this.entry = null; + this.finish = function () { + onCorkedFinish(_this, state); + }; +} +/* </replacement> */ + +/*<replacement>*/ +var asyncWrite = !process.browser && ['v0.10', 'v0.9.'].indexOf(process.version.slice(0, 5)) > -1 ? 
setImmediate : pna.nextTick; +/*</replacement>*/ + +/*<replacement>*/ +var Duplex; +/*</replacement>*/ + +Writable.WritableState = WritableState; + +/*<replacement>*/ +var util = Object.create(require('core-util-is')); +util.inherits = require('inherits'); +/*</replacement>*/ + +/*<replacement>*/ +var internalUtil = { + deprecate: require('util-deprecate') +}; +/*</replacement>*/ + +/*<replacement>*/ +var Stream = require('./internal/streams/stream'); +/*</replacement>*/ + +/*<replacement>*/ + +var Buffer = require('safe-buffer').Buffer; +var OurUint8Array = global.Uint8Array || function () {}; +function _uint8ArrayToBuffer(chunk) { + return Buffer.from(chunk); +} +function _isUint8Array(obj) { + return Buffer.isBuffer(obj) || obj instanceof OurUint8Array; +} + +/*</replacement>*/ + +var destroyImpl = require('./internal/streams/destroy'); + +util.inherits(Writable, Stream); + +function nop() {} + +function WritableState(options, stream) { + Duplex = Duplex || require('./_stream_duplex'); + + options = options || {}; + + // Duplex streams are both readable and writable, but share + // the same options object. + // However, some cases require setting options to different + // values for the readable and the writable sides of the duplex stream. + // These options can be provided separately as readableXXX and writableXXX. + var isDuplex = stream instanceof Duplex; + + // object stream flag to indicate whether or not this stream + // contains buffers or objects. + this.objectMode = !!options.objectMode; + + if (isDuplex) this.objectMode = this.objectMode || !!options.writableObjectMode; + + // the point at which write() starts returning false + // Note: 0 is a valid value, means that we always return false if + // the entire buffer is not flushed immediately on write() + var hwm = options.highWaterMark; + var writableHwm = options.writableHighWaterMark; + var defaultHwm = this.objectMode ? 16 : 16 * 1024; + + if (hwm || hwm === 0) this.highWaterMark = hwm;else if (isDuplex && (writableHwm || writableHwm === 0)) this.highWaterMark = writableHwm;else this.highWaterMark = defaultHwm; + + // cast to ints. + this.highWaterMark = Math.floor(this.highWaterMark); + + // if _final has been called + this.finalCalled = false; + + // drain event flag. + this.needDrain = false; + // at the start of calling end() + this.ending = false; + // when end() has been called, and returned + this.ended = false; + // when 'finish' is emitted + this.finished = false; + + // has it been destroyed + this.destroyed = false; + + // should we decode strings into buffers before passing to _write? + // this is here so that some node-core streams can optimize string + // handling at a lower level. + var noDecode = options.decodeStrings === false; + this.decodeStrings = !noDecode; + + // Crypto is kind of old and crusty. Historically, its default string + // encoding is 'binary' so we have to make this configurable. + // Everything else in the universe uses 'utf8', though. + this.defaultEncoding = options.defaultEncoding || 'utf8'; + + // not an actual buffer we keep track of, but a measurement + // of how much we're waiting to get pushed to some underlying + // socket or file. + this.length = 0; + + // a flag to see when we're in the middle of a write. + this.writing = false; + + // when true all writes will be buffered until .uncork() call + this.corked = 0; + + // a flag to be able to tell if the onwrite cb is called immediately, + // or on a later tick. 
We set this to true at first, because any + // actions that shouldn't happen until "later" should generally also + // not happen before the first write call. + this.sync = true; + + // a flag to know if we're processing previously buffered items, which + // may call the _write() callback in the same tick, so that we don't + // end up in an overlapped onwrite situation. + this.bufferProcessing = false; + + // the callback that's passed to _write(chunk,cb) + this.onwrite = function (er) { + onwrite(stream, er); + }; + + // the callback that the user supplies to write(chunk,encoding,cb) + this.writecb = null; + + // the amount that is being written when _write is called. + this.writelen = 0; + + this.bufferedRequest = null; + this.lastBufferedRequest = null; + + // number of pending user-supplied write callbacks + // this must be 0 before 'finish' can be emitted + this.pendingcb = 0; + + // emit prefinish if the only thing we're waiting for is _write cbs + // This is relevant for synchronous Transform streams + this.prefinished = false; + + // True if the error was already emitted and should not be thrown again + this.errorEmitted = false; + + // count buffered requests + this.bufferedRequestCount = 0; + + // allocate the first CorkedRequest, there is always + // one allocated and free to use, and we maintain at most two + this.corkedRequestsFree = new CorkedRequest(this); +} + +WritableState.prototype.getBuffer = function getBuffer() { + var current = this.bufferedRequest; + var out = []; + while (current) { + out.push(current); + current = current.next; + } + return out; +}; + +(function () { + try { + Object.defineProperty(WritableState.prototype, 'buffer', { + get: internalUtil.deprecate(function () { + return this.getBuffer(); + }, '_writableState.buffer is deprecated. Use _writableState.getBuffer ' + 'instead.', 'DEP0003') + }); + } catch (_) {} +})(); + +// Test _writableState for inheritance to account for Duplex streams, +// whose prototype chain only points to Readable. +var realHasInstance; +if (typeof Symbol === 'function' && Symbol.hasInstance && typeof Function.prototype[Symbol.hasInstance] === 'function') { + realHasInstance = Function.prototype[Symbol.hasInstance]; + Object.defineProperty(Writable, Symbol.hasInstance, { + value: function (object) { + if (realHasInstance.call(this, object)) return true; + if (this !== Writable) return false; + + return object && object._writableState instanceof WritableState; + } + }); +} else { + realHasInstance = function (object) { + return object instanceof this; + }; +} + +function Writable(options) { + Duplex = Duplex || require('./_stream_duplex'); + + // Writable ctor is applied to Duplexes, too. + // `realHasInstance` is necessary because using plain `instanceof` + // would return false, as no `_writableState` property is attached. + + // Trying to use the custom `instanceof` for Writable here will also break the + // Node.js LazyTransform implementation, which has a non-trivial getter for + // `_writableState` that would lead to infinite recursion. + if (!realHasInstance.call(Writable, this) && !(this instanceof Duplex)) { + return new Writable(options); + } + + this._writableState = new WritableState(options, this); + + // legacy. 
+ this.writable = true; + + if (options) { + if (typeof options.write === 'function') this._write = options.write; + + if (typeof options.writev === 'function') this._writev = options.writev; + + if (typeof options.destroy === 'function') this._destroy = options.destroy; + + if (typeof options.final === 'function') this._final = options.final; + } + + Stream.call(this); +} + +// Otherwise people can pipe Writable streams, which is just wrong. +Writable.prototype.pipe = function () { + this.emit('error', new Error('Cannot pipe, not readable')); +}; + +function writeAfterEnd(stream, cb) { + var er = new Error('write after end'); + // TODO: defer error events consistently everywhere, not just the cb + stream.emit('error', er); + pna.nextTick(cb, er); +} + +// Checks that a user-supplied chunk is valid, especially for the particular +// mode the stream is in. Currently this means that `null` is never accepted +// and undefined/non-string values are only allowed in object mode. +function validChunk(stream, state, chunk, cb) { + var valid = true; + var er = false; + + if (chunk === null) { + er = new TypeError('May not write null values to stream'); + } else if (typeof chunk !== 'string' && chunk !== undefined && !state.objectMode) { + er = new TypeError('Invalid non-string/buffer chunk'); + } + if (er) { + stream.emit('error', er); + pna.nextTick(cb, er); + valid = false; + } + return valid; +} + +Writable.prototype.write = function (chunk, encoding, cb) { + var state = this._writableState; + var ret = false; + var isBuf = !state.objectMode && _isUint8Array(chunk); + + if (isBuf && !Buffer.isBuffer(chunk)) { + chunk = _uint8ArrayToBuffer(chunk); + } + + if (typeof encoding === 'function') { + cb = encoding; + encoding = null; + } + + if (isBuf) encoding = 'buffer';else if (!encoding) encoding = state.defaultEncoding; + + if (typeof cb !== 'function') cb = nop; + + if (state.ended) writeAfterEnd(this, cb);else if (isBuf || validChunk(this, state, chunk, cb)) { + state.pendingcb++; + ret = writeOrBuffer(this, state, isBuf, chunk, encoding, cb); + } + + return ret; +}; + +Writable.prototype.cork = function () { + var state = this._writableState; + + state.corked++; +}; + +Writable.prototype.uncork = function () { + var state = this._writableState; + + if (state.corked) { + state.corked--; + + if (!state.writing && !state.corked && !state.finished && !state.bufferProcessing && state.bufferedRequest) clearBuffer(this, state); + } +}; + +Writable.prototype.setDefaultEncoding = function setDefaultEncoding(encoding) { + // node::ParseEncoding() requires lower case. + if (typeof encoding === 'string') encoding = encoding.toLowerCase(); + if (!(['hex', 'utf8', 'utf-8', 'ascii', 'binary', 'base64', 'ucs2', 'ucs-2', 'utf16le', 'utf-16le', 'raw'].indexOf((encoding + '').toLowerCase()) > -1)) throw new TypeError('Unknown encoding: ' + encoding); + this._writableState.defaultEncoding = encoding; + return this; +}; + +function decodeChunk(state, chunk, encoding) { + if (!state.objectMode && state.decodeStrings !== false && typeof chunk === 'string') { + chunk = Buffer.from(chunk, encoding); + } + return chunk; +} + +Object.defineProperty(Writable.prototype, 'writableHighWaterMark', { + // making it explicit this property is not enumerable + // because otherwise some prototype manipulation in + // userland will fail + enumerable: false, + get: function () { + return this._writableState.highWaterMark; + } +}); + +// if we're already writing something, then just put this +// in the queue, and wait our turn. 
Otherwise, call _write +// If we return false, then we need a drain event, so set that flag. +function writeOrBuffer(stream, state, isBuf, chunk, encoding, cb) { + if (!isBuf) { + var newChunk = decodeChunk(state, chunk, encoding); + if (chunk !== newChunk) { + isBuf = true; + encoding = 'buffer'; + chunk = newChunk; + } + } + var len = state.objectMode ? 1 : chunk.length; + + state.length += len; + + var ret = state.length < state.highWaterMark; + // we must ensure that previous needDrain will not be reset to false. + if (!ret) state.needDrain = true; + + if (state.writing || state.corked) { + var last = state.lastBufferedRequest; + state.lastBufferedRequest = { + chunk: chunk, + encoding: encoding, + isBuf: isBuf, + callback: cb, + next: null + }; + if (last) { + last.next = state.lastBufferedRequest; + } else { + state.bufferedRequest = state.lastBufferedRequest; + } + state.bufferedRequestCount += 1; + } else { + doWrite(stream, state, false, len, chunk, encoding, cb); + } + + return ret; +} + +function doWrite(stream, state, writev, len, chunk, encoding, cb) { + state.writelen = len; + state.writecb = cb; + state.writing = true; + state.sync = true; + if (writev) stream._writev(chunk, state.onwrite);else stream._write(chunk, encoding, state.onwrite); + state.sync = false; +} + +function onwriteError(stream, state, sync, er, cb) { + --state.pendingcb; + + if (sync) { + // defer the callback if we are being called synchronously + // to avoid piling up things on the stack + pna.nextTick(cb, er); + // this can emit finish, and it will always happen + // after error + pna.nextTick(finishMaybe, stream, state); + stream._writableState.errorEmitted = true; + stream.emit('error', er); + } else { + // the caller expect this to happen before if + // it is async + cb(er); + stream._writableState.errorEmitted = true; + stream.emit('error', er); + // this can emit finish, but finish must + // always follow error + finishMaybe(stream, state); + } +} + +function onwriteStateUpdate(state) { + state.writing = false; + state.writecb = null; + state.length -= state.writelen; + state.writelen = 0; +} + +function onwrite(stream, er) { + var state = stream._writableState; + var sync = state.sync; + var cb = state.writecb; + + onwriteStateUpdate(state); + + if (er) onwriteError(stream, state, sync, er, cb);else { + // Check if we're actually ready to finish, but don't emit yet + var finished = needFinish(state); + + if (!finished && !state.corked && !state.bufferProcessing && state.bufferedRequest) { + clearBuffer(stream, state); + } + + if (sync) { + /*<replacement>*/ + asyncWrite(afterWrite, stream, state, finished, cb); + /*</replacement>*/ + } else { + afterWrite(stream, state, finished, cb); + } + } +} + +function afterWrite(stream, state, finished, cb) { + if (!finished) onwriteDrain(stream, state); + state.pendingcb--; + cb(); + finishMaybe(stream, state); +} + +// Must force callback to be called on nextTick, so that we don't +// emit 'drain' before the write() consumer gets the 'false' return +// value, and has a chance to attach a 'drain' listener. 
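+// Usage sketch, declared here for illustration only and never called: the
+// consumer-side pattern that the deferral described above makes safe. The
+// names exampleRespectBackpressure/chunks/done are invented; onwriteDrain
+// below is what eventually emits the 'drain' event this waits for.
+function exampleRespectBackpressure(writable, chunks, done) {
+  function writeSome() {
+    var ok = true;
+    while (chunks.length && ok) {
+      // write() returns false once buffered bytes reach highWaterMark
+      ok = writable.write(chunks.shift());
+    }
+    if (chunks.length) {
+      // safe even right after a false return, because 'drain' is emitted
+      // on a later tick, never synchronously inside write()
+      writable.once('drain', writeSome);
+    } else {
+      writable.end(done);
+    }
+  }
+  writeSome();
+}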
+function onwriteDrain(stream, state) { + if (state.length === 0 && state.needDrain) { + state.needDrain = false; + stream.emit('drain'); + } +} + +// if there's something in the buffer waiting, then process it +function clearBuffer(stream, state) { + state.bufferProcessing = true; + var entry = state.bufferedRequest; + + if (stream._writev && entry && entry.next) { + // Fast case, write everything using _writev() + var l = state.bufferedRequestCount; + var buffer = new Array(l); + var holder = state.corkedRequestsFree; + holder.entry = entry; + + var count = 0; + var allBuffers = true; + while (entry) { + buffer[count] = entry; + if (!entry.isBuf) allBuffers = false; + entry = entry.next; + count += 1; + } + buffer.allBuffers = allBuffers; + + doWrite(stream, state, true, state.length, buffer, '', holder.finish); + + // doWrite is almost always async, defer these to save a bit of time + // as the hot path ends with doWrite + state.pendingcb++; + state.lastBufferedRequest = null; + if (holder.next) { + state.corkedRequestsFree = holder.next; + holder.next = null; + } else { + state.corkedRequestsFree = new CorkedRequest(state); + } + state.bufferedRequestCount = 0; + } else { + // Slow case, write chunks one-by-one + while (entry) { + var chunk = entry.chunk; + var encoding = entry.encoding; + var cb = entry.callback; + var len = state.objectMode ? 1 : chunk.length; + + doWrite(stream, state, false, len, chunk, encoding, cb); + entry = entry.next; + state.bufferedRequestCount--; + // if we didn't call the onwrite immediately, then + // it means that we need to wait until it does. + // also, that means that the chunk and cb are currently + // being processed, so move the buffer counter past them. + if (state.writing) { + break; + } + } + + if (entry === null) state.lastBufferedRequest = null; + } + + state.bufferedRequest = entry; + state.bufferProcessing = false; +} + +Writable.prototype._write = function (chunk, encoding, cb) { + cb(new Error('_write() is not implemented')); +}; + +Writable.prototype._writev = null; + +Writable.prototype.end = function (chunk, encoding, cb) { + var state = this._writableState; + + if (typeof chunk === 'function') { + cb = chunk; + chunk = null; + encoding = null; + } else if (typeof encoding === 'function') { + cb = encoding; + encoding = null; + } + + if (chunk !== null && chunk !== undefined) this.write(chunk, encoding); + + // .end() fully uncorks + if (state.corked) { + state.corked = 1; + this.uncork(); + } + + // ignore unnecessary end() calls. 
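+  // Illustrative sketch, declared but never called: what the guard just below
+  // means for callers - end() flushes, fires its callback on 'finish', and
+  // repeated end() calls are silently ignored in this version of the stream.
+  function exampleEndSemantics(writable, done) {
+    writable.cork();
+    writable.write('a');
+    writable.write('b');
+    // end() fully uncorks, writes the last chunk, then emits 'finish'
+    writable.end('c', function onFinish() {
+      writable.end(); // no-op: state.ending is already set
+      done();
+    });
+  }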
+ if (!state.ending && !state.finished) endWritable(this, state, cb); +}; + +function needFinish(state) { + return state.ending && state.length === 0 && state.bufferedRequest === null && !state.finished && !state.writing; +} +function callFinal(stream, state) { + stream._final(function (err) { + state.pendingcb--; + if (err) { + stream.emit('error', err); + } + state.prefinished = true; + stream.emit('prefinish'); + finishMaybe(stream, state); + }); +} +function prefinish(stream, state) { + if (!state.prefinished && !state.finalCalled) { + if (typeof stream._final === 'function') { + state.pendingcb++; + state.finalCalled = true; + pna.nextTick(callFinal, stream, state); + } else { + state.prefinished = true; + stream.emit('prefinish'); + } + } +} + +function finishMaybe(stream, state) { + var need = needFinish(state); + if (need) { + prefinish(stream, state); + if (state.pendingcb === 0) { + state.finished = true; + stream.emit('finish'); + } + } + return need; +} + +function endWritable(stream, state, cb) { + state.ending = true; + finishMaybe(stream, state); + if (cb) { + if (state.finished) pna.nextTick(cb);else stream.once('finish', cb); + } + state.ended = true; + stream.writable = false; +} + +function onCorkedFinish(corkReq, state, err) { + var entry = corkReq.entry; + corkReq.entry = null; + while (entry) { + var cb = entry.callback; + state.pendingcb--; + cb(err); + entry = entry.next; + } + if (state.corkedRequestsFree) { + state.corkedRequestsFree.next = corkReq; + } else { + state.corkedRequestsFree = corkReq; + } +} + +Object.defineProperty(Writable.prototype, 'destroyed', { + get: function () { + if (this._writableState === undefined) { + return false; + } + return this._writableState.destroyed; + }, + set: function (value) { + // we ignore the value if the stream + // has not been initialized yet + if (!this._writableState) { + return; + } + + // backward compatibility, the user is explicitly + // managing destroyed + this._writableState.destroyed = value; + } +}); + +Writable.prototype.destroy = destroyImpl.destroy; +Writable.prototype._undestroy = destroyImpl.undestroy; +Writable.prototype._destroy = function (err, cb) { + this.end(); + cb(err); +}; +}).call(this,require('_process'),typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? 
window : {},require("timers").setImmediate) +},{"./_stream_duplex":66,"./internal/streams/destroy":72,"./internal/streams/stream":73,"_process":64,"core-util-is":4,"inherits":8,"process-nextick-args":63,"safe-buffer":78,"timers":82,"util-deprecate":83}],71:[function(require,module,exports){ +'use strict'; + +function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } + +var Buffer = require('safe-buffer').Buffer; +var util = require('util'); + +function copyBuffer(src, target, offset) { + src.copy(target, offset); +} + +module.exports = function () { + function BufferList() { + _classCallCheck(this, BufferList); + + this.head = null; + this.tail = null; + this.length = 0; + } + + BufferList.prototype.push = function push(v) { + var entry = { data: v, next: null }; + if (this.length > 0) this.tail.next = entry;else this.head = entry; + this.tail = entry; + ++this.length; + }; + + BufferList.prototype.unshift = function unshift(v) { + var entry = { data: v, next: this.head }; + if (this.length === 0) this.tail = entry; + this.head = entry; + ++this.length; + }; + + BufferList.prototype.shift = function shift() { + if (this.length === 0) return; + var ret = this.head.data; + if (this.length === 1) this.head = this.tail = null;else this.head = this.head.next; + --this.length; + return ret; + }; + + BufferList.prototype.clear = function clear() { + this.head = this.tail = null; + this.length = 0; + }; + + BufferList.prototype.join = function join(s) { + if (this.length === 0) return ''; + var p = this.head; + var ret = '' + p.data; + while (p = p.next) { + ret += s + p.data; + }return ret; + }; + + BufferList.prototype.concat = function concat(n) { + if (this.length === 0) return Buffer.alloc(0); + if (this.length === 1) return this.head.data; + var ret = Buffer.allocUnsafe(n >>> 0); + var p = this.head; + var i = 0; + while (p) { + copyBuffer(p.data, ret, i); + i += p.data.length; + p = p.next; + } + return ret; + }; + + return BufferList; +}(); + +if (util && util.inspect && util.inspect.custom) { + module.exports.prototype[util.inspect.custom] = function () { + var obj = util.inspect({ length: this.length }); + return this.constructor.name + ' ' + obj; + }; +} +},{"safe-buffer":78,"util":2}],72:[function(require,module,exports){ +'use strict'; + +/*<replacement>*/ + +var pna = require('process-nextick-args'); +/*</replacement>*/ + +// undocumented cb() API, needed for core, not for public API +function destroy(err, cb) { + var _this = this; + + var readableDestroyed = this._readableState && this._readableState.destroyed; + var writableDestroyed = this._writableState && this._writableState.destroyed; + + if (readableDestroyed || writableDestroyed) { + if (cb) { + cb(err); + } else if (err && (!this._writableState || !this._writableState.errorEmitted)) { + pna.nextTick(emitErrorNT, this, err); + } + return this; + } + + // we set destroyed to true before firing error callbacks in order + // to make it re-entrance safe in case destroy() is called within callbacks + + if (this._readableState) { + this._readableState.destroyed = true; + } + + // if this is a duplex stream mark the writable part as destroyed as well + if (this._writableState) { + this._writableState.destroyed = true; + } + + this._destroy(err || null, function (err) { + if (!cb && err) { + pna.nextTick(emitErrorNT, _this, err); + if (_this._writableState) { + _this._writableState.errorEmitted = true; + } + } else if (cb) { + cb(err); + } 
+ }); + + return this; +} + +function undestroy() { + if (this._readableState) { + this._readableState.destroyed = false; + this._readableState.reading = false; + this._readableState.ended = false; + this._readableState.endEmitted = false; + } + + if (this._writableState) { + this._writableState.destroyed = false; + this._writableState.ended = false; + this._writableState.ending = false; + this._writableState.finished = false; + this._writableState.errorEmitted = false; + } +} + +function emitErrorNT(self, err) { + self.emit('error', err); +} + +module.exports = { + destroy: destroy, + undestroy: undestroy +}; +},{"process-nextick-args":63}],73:[function(require,module,exports){ +module.exports = require('events').EventEmitter; + +},{"events":5}],74:[function(require,module,exports){ +module.exports = require('./readable').PassThrough + +},{"./readable":75}],75:[function(require,module,exports){ +exports = module.exports = require('./lib/_stream_readable.js'); +exports.Stream = exports; +exports.Readable = exports; +exports.Writable = require('./lib/_stream_writable.js'); +exports.Duplex = require('./lib/_stream_duplex.js'); +exports.Transform = require('./lib/_stream_transform.js'); +exports.PassThrough = require('./lib/_stream_passthrough.js'); + +},{"./lib/_stream_duplex.js":66,"./lib/_stream_passthrough.js":67,"./lib/_stream_readable.js":68,"./lib/_stream_transform.js":69,"./lib/_stream_writable.js":70}],76:[function(require,module,exports){ +module.exports = require('./readable').Transform + +},{"./readable":75}],77:[function(require,module,exports){ +module.exports = require('./lib/_stream_writable.js'); + +},{"./lib/_stream_writable.js":70}],78:[function(require,module,exports){ +/* eslint-disable node/no-deprecated-api */ +var buffer = require('buffer') +var Buffer = buffer.Buffer + +// alternative to using Object.keys for old browsers +function copyProps (src, dst) { + for (var key in src) { + dst[key] = src[key] + } +} +if (Buffer.from && Buffer.alloc && Buffer.allocUnsafe && Buffer.allocUnsafeSlow) { + module.exports = buffer +} else { + // Copy properties from require('buffer') + copyProps(buffer, exports) + exports.Buffer = SafeBuffer +} + +function SafeBuffer (arg, encodingOrOffset, length) { + return Buffer(arg, encodingOrOffset, length) +} + +// Copy static methods from Buffer +copyProps(Buffer, SafeBuffer) + +SafeBuffer.from = function (arg, encodingOrOffset, length) { + if (typeof arg === 'number') { + throw new TypeError('Argument must not be a number') + } + return Buffer(arg, encodingOrOffset, length) +} + +SafeBuffer.alloc = function (size, fill, encoding) { + if (typeof size !== 'number') { + throw new TypeError('Argument must be a number') + } + var buf = Buffer(size) + if (fill !== undefined) { + if (typeof encoding === 'string') { + buf.fill(fill, encoding) + } else { + buf.fill(fill) + } + } else { + buf.fill(0) + } + return buf +} + +SafeBuffer.allocUnsafe = function (size) { + if (typeof size !== 'number') { + throw new TypeError('Argument must be a number') + } + return Buffer(size) +} + +SafeBuffer.allocUnsafeSlow = function (size) { + if (typeof size !== 'number') { + throw new TypeError('Argument must be a number') + } + return buffer.SlowBuffer(size) +} + +},{"buffer":3}],79:[function(require,module,exports){ +(function (setImmediate){ +'use strict'; +module.exports = typeof setImmediate === 'function' ? 
setImmediate : + function setImmediate() { + var args = [].slice.apply(arguments); + args.splice(1, 0, 0); + setTimeout.apply(null, args); + }; + +}).call(this,require("timers").setImmediate) +},{"timers":82}],80:[function(require,module,exports){ +// Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. + +module.exports = Stream; + +var EE = require('events').EventEmitter; +var inherits = require('inherits'); + +inherits(Stream, EE); +Stream.Readable = require('readable-stream/readable.js'); +Stream.Writable = require('readable-stream/writable.js'); +Stream.Duplex = require('readable-stream/duplex.js'); +Stream.Transform = require('readable-stream/transform.js'); +Stream.PassThrough = require('readable-stream/passthrough.js'); + +// Backwards-compat with node 0.4.x +Stream.Stream = Stream; + + + +// old-style streams. Note that the pipe method (the only relevant +// part of this class) is overridden in the Readable class. + +function Stream() { + EE.call(this); +} + +Stream.prototype.pipe = function(dest, options) { + var source = this; + + function ondata(chunk) { + if (dest.writable) { + if (false === dest.write(chunk) && source.pause) { + source.pause(); + } + } + } + + source.on('data', ondata); + + function ondrain() { + if (source.readable && source.resume) { + source.resume(); + } + } + + dest.on('drain', ondrain); + + // If the 'end' option is not supplied, dest.end() will be called when + // source gets the 'end' or 'close' events. Only dest.end() once. + if (!dest._isStdio && (!options || options.end !== false)) { + source.on('end', onend); + source.on('close', onclose); + } + + var didOnEnd = false; + function onend() { + if (didOnEnd) return; + didOnEnd = true; + + dest.end(); + } + + + function onclose() { + if (didOnEnd) return; + didOnEnd = true; + + if (typeof dest.destroy === 'function') dest.destroy(); + } + + // don't leave dangling pipes when there are errors. + function onerror(er) { + cleanup(); + if (EE.listenerCount(this, 'error') === 0) { + throw er; // Unhandled stream error in pipe. + } + } + + source.on('error', onerror); + dest.on('error', onerror); + + // remove all the event listeners that were added. 
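+  // Illustrative sketch, never called: the chained usage this method enables;
+  // cleanup() just below is what detaches every listener added above. The
+  // parameter names src/through/sink are invented for the example.
+  function examplePipeChain(src, through, sink) {
+    // pipe() returns its destination, so calls chain: src.pipe(b).pipe(c)
+    src.pipe(through).pipe(sink);
+    // errors are not forwarded from one piped stream to the next, so each
+    // stream needs its own 'error' handler
+    src.on('error', function (err) { console.error(err); });
+    sink.on('error', function (err) { console.error(err); });
+  }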
+ function cleanup() { + source.removeListener('data', ondata); + dest.removeListener('drain', ondrain); + + source.removeListener('end', onend); + source.removeListener('close', onclose); + + source.removeListener('error', onerror); + dest.removeListener('error', onerror); + + source.removeListener('end', cleanup); + source.removeListener('close', cleanup); + + dest.removeListener('close', cleanup); + } + + source.on('end', cleanup); + source.on('close', cleanup); + + dest.on('close', cleanup); + + dest.emit('pipe', source); + + // Allow for unix-like usage: A.pipe(B).pipe(C) + return dest; +}; + +},{"events":5,"inherits":8,"readable-stream/duplex.js":65,"readable-stream/passthrough.js":74,"readable-stream/readable.js":75,"readable-stream/transform.js":76,"readable-stream/writable.js":77}],81:[function(require,module,exports){ +// Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. + +'use strict'; + +/*<replacement>*/ + +var Buffer = require('safe-buffer').Buffer; +/*</replacement>*/ + +var isEncoding = Buffer.isEncoding || function (encoding) { + encoding = '' + encoding; + switch (encoding && encoding.toLowerCase()) { + case 'hex':case 'utf8':case 'utf-8':case 'ascii':case 'binary':case 'base64':case 'ucs2':case 'ucs-2':case 'utf16le':case 'utf-16le':case 'raw': + return true; + default: + return false; + } +}; + +function _normalizeEncoding(enc) { + if (!enc) return 'utf8'; + var retried; + while (true) { + switch (enc) { + case 'utf8': + case 'utf-8': + return 'utf8'; + case 'ucs2': + case 'ucs-2': + case 'utf16le': + case 'utf-16le': + return 'utf16le'; + case 'latin1': + case 'binary': + return 'latin1'; + case 'base64': + case 'ascii': + case 'hex': + return enc; + default: + if (retried) return; // undefined + enc = ('' + enc).toLowerCase(); + retried = true; + } + } +}; + +// Do not cache `Buffer.isEncoding` when checking encoding names as some +// modules monkey-patch it to support additional encodings +function normalizeEncoding(enc) { + var nenc = _normalizeEncoding(enc); + if (typeof nenc !== 'string' && (Buffer.isEncoding === isEncoding || !isEncoding(enc))) throw new Error('Unknown encoding: ' + enc); + return nenc || enc; +} + +// StringDecoder provides an interface for efficiently splitting a series of +// buffers into a series of JS strings without breaking apart multi-byte +// characters. 
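+// Usage sketch, defined only for illustration and never called: a decoder
+// holds back the trailing bytes of an incomplete multi-byte character until
+// the next chunk arrives instead of emitting a mangled character.
+function exampleStringDecoder() {
+  var decoder = new StringDecoder('utf8');
+  // '€' is 0xE2 0x82 0xAC in UTF-8; split it across two chunks
+  var out = decoder.write(Buffer.from([0xE2, 0x82])); // '' - character incomplete
+  out += decoder.write(Buffer.from([0xAC]));          // '€' - now completed
+  out += decoder.end();                               // flush anything pending
+  return out;                                         // '€'
+}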
+exports.StringDecoder = StringDecoder; +function StringDecoder(encoding) { + this.encoding = normalizeEncoding(encoding); + var nb; + switch (this.encoding) { + case 'utf16le': + this.text = utf16Text; + this.end = utf16End; + nb = 4; + break; + case 'utf8': + this.fillLast = utf8FillLast; + nb = 4; + break; + case 'base64': + this.text = base64Text; + this.end = base64End; + nb = 3; + break; + default: + this.write = simpleWrite; + this.end = simpleEnd; + return; + } + this.lastNeed = 0; + this.lastTotal = 0; + this.lastChar = Buffer.allocUnsafe(nb); +} + +StringDecoder.prototype.write = function (buf) { + if (buf.length === 0) return ''; + var r; + var i; + if (this.lastNeed) { + r = this.fillLast(buf); + if (r === undefined) return ''; + i = this.lastNeed; + this.lastNeed = 0; + } else { + i = 0; + } + if (i < buf.length) return r ? r + this.text(buf, i) : this.text(buf, i); + return r || ''; +}; + +StringDecoder.prototype.end = utf8End; + +// Returns only complete characters in a Buffer +StringDecoder.prototype.text = utf8Text; + +// Attempts to complete a partial non-UTF-8 character using bytes from a Buffer +StringDecoder.prototype.fillLast = function (buf) { + if (this.lastNeed <= buf.length) { + buf.copy(this.lastChar, this.lastTotal - this.lastNeed, 0, this.lastNeed); + return this.lastChar.toString(this.encoding, 0, this.lastTotal); + } + buf.copy(this.lastChar, this.lastTotal - this.lastNeed, 0, buf.length); + this.lastNeed -= buf.length; +}; + +// Checks the type of a UTF-8 byte, whether it's ASCII, a leading byte, or a +// continuation byte. If an invalid byte is detected, -2 is returned. +function utf8CheckByte(byte) { + if (byte <= 0x7F) return 0;else if (byte >> 5 === 0x06) return 2;else if (byte >> 4 === 0x0E) return 3;else if (byte >> 3 === 0x1E) return 4; + return byte >> 6 === 0x02 ? -1 : -2; +} + +// Checks at most 3 bytes at the end of a Buffer in order to detect an +// incomplete multi-byte UTF-8 character. The total number of bytes (2, 3, or 4) +// needed to complete the UTF-8 character (if applicable) are returned. +function utf8CheckIncomplete(self, buf, i) { + var j = buf.length - 1; + if (j < i) return 0; + var nb = utf8CheckByte(buf[j]); + if (nb >= 0) { + if (nb > 0) self.lastNeed = nb - 1; + return nb; + } + if (--j < i || nb === -2) return 0; + nb = utf8CheckByte(buf[j]); + if (nb >= 0) { + if (nb > 0) self.lastNeed = nb - 2; + return nb; + } + if (--j < i || nb === -2) return 0; + nb = utf8CheckByte(buf[j]); + if (nb >= 0) { + if (nb > 0) { + if (nb === 2) nb = 0;else self.lastNeed = nb - 3; + } + return nb; + } + return 0; +} + +// Validates as many continuation bytes for a multi-byte UTF-8 character as +// needed or are available. If we see a non-continuation byte where we expect +// one, we "replace" the validated continuation bytes we've seen so far with +// a single UTF-8 replacement character ('\ufffd'), to match v8's UTF-8 decoding +// behavior. The continuation byte check is included three times in the case +// where all of the continuation bytes for a character exist in the same buffer. +// It is also done this way as a slight performance increase instead of using a +// loop. 
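+// Minimal sketch of the behaviour described above, never called: when a byte
+// that should have been a continuation byte starts a new character instead,
+// the pending partial character collapses into a single '\ufffd'.
+function exampleReplacementCharacter() {
+  var decoder = new StringDecoder('utf8');
+  decoder.write(Buffer.from([0xE2]));        // leading byte of a 3-byte character
+  // 0x41 ('A') is not a continuation byte, so the partial character is replaced
+  return decoder.write(Buffer.from([0x41])); // '\ufffdA'
+}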
+function utf8CheckExtraBytes(self, buf, p) { + if ((buf[0] & 0xC0) !== 0x80) { + self.lastNeed = 0; + return '\ufffd'; + } + if (self.lastNeed > 1 && buf.length > 1) { + if ((buf[1] & 0xC0) !== 0x80) { + self.lastNeed = 1; + return '\ufffd'; + } + if (self.lastNeed > 2 && buf.length > 2) { + if ((buf[2] & 0xC0) !== 0x80) { + self.lastNeed = 2; + return '\ufffd'; + } + } + } +} + +// Attempts to complete a multi-byte UTF-8 character using bytes from a Buffer. +function utf8FillLast(buf) { + var p = this.lastTotal - this.lastNeed; + var r = utf8CheckExtraBytes(this, buf, p); + if (r !== undefined) return r; + if (this.lastNeed <= buf.length) { + buf.copy(this.lastChar, p, 0, this.lastNeed); + return this.lastChar.toString(this.encoding, 0, this.lastTotal); + } + buf.copy(this.lastChar, p, 0, buf.length); + this.lastNeed -= buf.length; +} + +// Returns all complete UTF-8 characters in a Buffer. If the Buffer ended on a +// partial character, the character's bytes are buffered until the required +// number of bytes are available. +function utf8Text(buf, i) { + var total = utf8CheckIncomplete(this, buf, i); + if (!this.lastNeed) return buf.toString('utf8', i); + this.lastTotal = total; + var end = buf.length - (total - this.lastNeed); + buf.copy(this.lastChar, 0, end); + return buf.toString('utf8', i, end); +} + +// For UTF-8, a replacement character is added when ending on a partial +// character. +function utf8End(buf) { + var r = buf && buf.length ? this.write(buf) : ''; + if (this.lastNeed) return r + '\ufffd'; + return r; +} + +// UTF-16LE typically needs two bytes per character, but even if we have an even +// number of bytes available, we need to check if we end on a leading/high +// surrogate. In that case, we need to wait for the next two bytes in order to +// decode the last character properly. +function utf16Text(buf, i) { + if ((buf.length - i) % 2 === 0) { + var r = buf.toString('utf16le', i); + if (r) { + var c = r.charCodeAt(r.length - 1); + if (c >= 0xD800 && c <= 0xDBFF) { + this.lastNeed = 2; + this.lastTotal = 4; + this.lastChar[0] = buf[buf.length - 2]; + this.lastChar[1] = buf[buf.length - 1]; + return r.slice(0, -1); + } + } + return r; + } + this.lastNeed = 1; + this.lastTotal = 2; + this.lastChar[0] = buf[buf.length - 1]; + return buf.toString('utf16le', i, buf.length - 1); +} + +// For UTF-16LE we do not explicitly append special replacement characters if we +// end on a partial character, we simply let v8 handle that. +function utf16End(buf) { + var r = buf && buf.length ? this.write(buf) : ''; + if (this.lastNeed) { + var end = this.lastTotal - this.lastNeed; + return r + this.lastChar.toString('utf16le', 0, end); + } + return r; +} + +function base64Text(buf, i) { + var n = (buf.length - i) % 3; + if (n === 0) return buf.toString('base64', i); + this.lastNeed = 3 - n; + this.lastTotal = 3; + if (n === 1) { + this.lastChar[0] = buf[buf.length - 1]; + } else { + this.lastChar[0] = buf[buf.length - 2]; + this.lastChar[1] = buf[buf.length - 1]; + } + return buf.toString('base64', i, buf.length - n); +} + +function base64End(buf) { + var r = buf && buf.length ? this.write(buf) : ''; + if (this.lastNeed) return r + this.lastChar.toString('base64', 0, 3 - this.lastNeed); + return r; +} + +// Pass bytes on through for single-byte encodings (e.g. ascii, latin1, hex) +function simpleWrite(buf) { + return buf.toString(this.encoding); +} + +function simpleEnd(buf) { + return buf && buf.length ? 
this.write(buf) : ''; +} +},{"safe-buffer":78}],82:[function(require,module,exports){ +(function (setImmediate,clearImmediate){ +var nextTick = require('process/browser.js').nextTick; +var apply = Function.prototype.apply; +var slice = Array.prototype.slice; +var immediateIds = {}; +var nextImmediateId = 0; + +// DOM APIs, for completeness + +exports.setTimeout = function() { + return new Timeout(apply.call(setTimeout, window, arguments), clearTimeout); +}; +exports.setInterval = function() { + return new Timeout(apply.call(setInterval, window, arguments), clearInterval); +}; +exports.clearTimeout = +exports.clearInterval = function(timeout) { timeout.close(); }; + +function Timeout(id, clearFn) { + this._id = id; + this._clearFn = clearFn; +} +Timeout.prototype.unref = Timeout.prototype.ref = function() {}; +Timeout.prototype.close = function() { + this._clearFn.call(window, this._id); +}; + +// Does not start the time, just sets up the members needed. +exports.enroll = function(item, msecs) { + clearTimeout(item._idleTimeoutId); + item._idleTimeout = msecs; +}; + +exports.unenroll = function(item) { + clearTimeout(item._idleTimeoutId); + item._idleTimeout = -1; +}; + +exports._unrefActive = exports.active = function(item) { + clearTimeout(item._idleTimeoutId); + + var msecs = item._idleTimeout; + if (msecs >= 0) { + item._idleTimeoutId = setTimeout(function onTimeout() { + if (item._onTimeout) + item._onTimeout(); + }, msecs); + } +}; + +// That's not how node.js implements it but the exposed api is the same. +exports.setImmediate = typeof setImmediate === "function" ? setImmediate : function(fn) { + var id = nextImmediateId++; + var args = arguments.length < 2 ? false : slice.call(arguments, 1); + + immediateIds[id] = true; + + nextTick(function onNextTick() { + if (immediateIds[id]) { + // fn.call() is faster so we optimize for the common use-case + // @see http://jsperf.com/call-apply-segu + if (args) { + fn.apply(null, args); + } else { + fn.call(null); + } + // Prevent ids from leaking + exports.clearImmediate(id); + } + }); + + return id; +}; + +exports.clearImmediate = typeof clearImmediate === "function" ? clearImmediate : function(id) { + delete immediateIds[id]; +}; +}).call(this,require("timers").setImmediate,require("timers").clearImmediate) +},{"process/browser.js":64,"timers":82}],83:[function(require,module,exports){ +(function (global){ + +/** + * Module exports. + */ + +module.exports = deprecate; + +/** + * Mark that a method should not be used. + * Returns a modified function which warns once by default. + * + * If `localStorage.noDeprecation = true` is set, then it is a no-op. + * + * If `localStorage.throwDeprecation = true` is set, then deprecated functions + * will throw an Error when invoked. + * + * If `localStorage.traceDeprecation = true` is set, then deprecated functions + * will invoke `console.trace()` instead of `console.error()`. 
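+ *
+ * Illustrative usage (browser context assumed; `someFn` is a placeholder):
+ *
+ *   localStorage.throwDeprecation = 'true';   // make deprecated calls throw
+ *   var wrapped = deprecate(someFn, 'someFn() is deprecated');
+ *   wrapped();                                // throws Error('someFn() is deprecated')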
+ * + * @param {Function} fn - the function to deprecate + * @param {String} msg - the string to print to the console when `fn` is invoked + * @returns {Function} a new "deprecated" version of `fn` + * @api public + */ + +function deprecate (fn, msg) { + if (config('noDeprecation')) { + return fn; + } + + var warned = false; + function deprecated() { + if (!warned) { + if (config('throwDeprecation')) { + throw new Error(msg); + } else if (config('traceDeprecation')) { + console.trace(msg); + } else { + console.warn(msg); + } + warned = true; + } + return fn.apply(this, arguments); + } + + return deprecated; +} + +/** + * Checks `localStorage` for boolean values for the given `name`. + * + * @param {String} name + * @returns {Boolean} + * @api private + */ + +function config (name) { + // accessing global.localStorage can trigger a DOMException in sandboxed iframes + try { + if (!global.localStorage) return false; + } catch (_) { + return false; + } + var val = global.localStorage[name]; + if (null == val) return false; + return String(val).toLowerCase() === 'true'; +} + +}).call(this,typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {}) +},{}],84:[function(require,module,exports){ +"use strict"; + +var __importDefault = void 0 && (void 0).__importDefault || function (mod) { + return mod && mod.__esModule ? mod : { + "default": mod + }; +}; + +Object.defineProperty(exports, "__esModule", { + value: true +}); +exports.HandleZip = void 0; + +var jszip_1 = __importDefault(require("jszip")); + +var method_1 = require("./common/method"); + +var HandleZip = +/** @class */ +function () { + function HandleZip(file) { + // Support nodejs fs to read files + // if(file instanceof File){ + this.uploadFile = file; // } + } + + HandleZip.prototype.unzipFile = function (successFunc, errorFunc) { + // var new_zip:JSZip = new JSZip(); + jszip_1["default"].loadAsync(this.uploadFile) // 1) read the Blob + .then(function (zip) { + var fileList = {}, + lastIndex = Object.keys(zip.files).length, + index = 0; + zip.forEach(function (relativePath, zipEntry) { + var fileName = zipEntry.name; + var fileNameArr = fileName.split("."); + var suffix = fileNameArr[fileNameArr.length - 1].toLowerCase(); + var fileType = "string"; + + if (suffix in { + "png": 1, + "jpeg": 1, + "jpg": 1, + "gif": 1, + "bmp": 1, + "tif": 1, + "webp": 1 + }) { + fileType = "base64"; + } else if (suffix == "emf") { + fileType = "arraybuffer"; + } + + zipEntry.async(fileType).then(function (data) { + if (fileType == "base64") { + data = "data:image/" + suffix + ";base64," + data; + } + + fileList[zipEntry.name] = data; // console.log(lastIndex, index); + + if (lastIndex == index + 1) { + successFunc(fileList); + } + + index++; + }); + }); + }, function (e) { + errorFunc(e); + }); + }; + + HandleZip.prototype.unzipFileByUrl = function (url, successFunc, errorFunc) { + var new_zip = new jszip_1["default"](); + method_1.getBinaryContent(url, function (err, data) { + if (err) { + throw err; // or handle err + } + + jszip_1["default"].loadAsync(data).then(function (zip) { + var fileList = {}, + lastIndex = Object.keys(zip.files).length, + index = 0; + zip.forEach(function (relativePath, zipEntry) { + var fileName = zipEntry.name; + var fileNameArr = fileName.split("."); + var suffix = fileNameArr[fileNameArr.length - 1].toLowerCase(); + var fileType = "string"; + + if (suffix in { + "png": 1, + "jpeg": 1, + "jpg": 1, + "gif": 1, + "bmp": 1, + "tif": 1, + "webp": 1 + }) { + fileType 
= "base64"; + } else if (suffix == "emf") { + fileType = "arraybuffer"; + } + + zipEntry.async(fileType).then(function (data) { + if (fileType == "base64") { + data = "data:image/" + suffix + ";base64," + data; + } + + fileList[zipEntry.name] = data; // console.log(lastIndex, index); + + if (lastIndex == index + 1) { + successFunc(fileList); + } + + index++; + }); + }); + }, function (e) { + errorFunc(e); + }); + }); + }; + + HandleZip.prototype.newZipFile = function () { + var zip = new jszip_1["default"](); + this.workBook = zip; + }; //title:"nested/hello.txt", content:"Hello Worldasdfasfasdfasfasfasfasfasdfas" + + + HandleZip.prototype.addToZipFile = function (title, content) { + if (this.workBook == null) { + var zip = new jszip_1["default"](); + this.workBook = zip; + } + + this.workBook.file(title, content); + }; + + return HandleZip; +}(); + +exports.HandleZip = HandleZip; + +},{"./common/method":93,"jszip":20}],85:[function(require,module,exports){ +"use strict"; + +Object.defineProperty(exports, "__esModule", { + value: true +}); +exports.LuckyImageBase = exports.LuckysheetCalcChain = exports.LuckySheetConfigMerge = exports.LuckySheetborderInfoCellValueStyle = exports.LuckySheetborderInfoCellValue = exports.LuckySheetborderInfoCellForImp = exports.LuckyConfig = exports.LuckyInlineString = exports.LuckySheetCellFormat = exports.LuckySheetCelldataValue = exports.LuckySheetCelldataBase = exports.LuckyFileInfo = exports.LuckySheetBase = exports.LuckyFileBase = void 0; + +var LuckyFileBase = +/** @class */ +function () { + function LuckyFileBase() {} + + return LuckyFileBase; +}(); + +exports.LuckyFileBase = LuckyFileBase; + +var LuckySheetBase = +/** @class */ +function () { + function LuckySheetBase() {} + + return LuckySheetBase; +}(); + +exports.LuckySheetBase = LuckySheetBase; + +var LuckyFileInfo = +/** @class */ +function () { + function LuckyFileInfo() {} + + return LuckyFileInfo; +}(); + +exports.LuckyFileInfo = LuckyFileInfo; + +var LuckySheetCelldataBase = +/** @class */ +function () { + function LuckySheetCelldataBase() {} + + return LuckySheetCelldataBase; +}(); + +exports.LuckySheetCelldataBase = LuckySheetCelldataBase; + +var LuckySheetCelldataValue = +/** @class */ +function () { + function LuckySheetCelldataValue() {} + + return LuckySheetCelldataValue; +}(); + +exports.LuckySheetCelldataValue = LuckySheetCelldataValue; + +var LuckySheetCellFormat = +/** @class */ +function () { + function LuckySheetCellFormat() {} + + return LuckySheetCellFormat; +}(); + +exports.LuckySheetCellFormat = LuckySheetCellFormat; + +var LuckyInlineString = +/** @class */ +function () { + function LuckyInlineString() {} + + return LuckyInlineString; +}(); + +exports.LuckyInlineString = LuckyInlineString; + +var LuckyConfig = +/** @class */ +function () { + function LuckyConfig() {} + + return LuckyConfig; +}(); + +exports.LuckyConfig = LuckyConfig; + +var LuckySheetborderInfoCellForImp = +/** @class */ +function () { + function LuckySheetborderInfoCellForImp() {} + + return LuckySheetborderInfoCellForImp; +}(); + +exports.LuckySheetborderInfoCellForImp = LuckySheetborderInfoCellForImp; + +var LuckySheetborderInfoCellValue = +/** @class */ +function () { + function LuckySheetborderInfoCellValue() {} + + return LuckySheetborderInfoCellValue; +}(); + +exports.LuckySheetborderInfoCellValue = LuckySheetborderInfoCellValue; + +var LuckySheetborderInfoCellValueStyle = +/** @class */ +function () { + function LuckySheetborderInfoCellValueStyle() {} + + return LuckySheetborderInfoCellValueStyle; 
+}(); + +exports.LuckySheetborderInfoCellValueStyle = LuckySheetborderInfoCellValueStyle; + +var LuckySheetConfigMerge = +/** @class */ +function () { + function LuckySheetConfigMerge() {} + + return LuckySheetConfigMerge; +}(); + +exports.LuckySheetConfigMerge = LuckySheetConfigMerge; + +var LuckysheetCalcChain = +/** @class */ +function () { + function LuckysheetCalcChain() {} + + return LuckysheetCalcChain; +}(); + +exports.LuckysheetCalcChain = LuckysheetCalcChain; + +var LuckyImageBase = +/** @class */ +function () { + function LuckyImageBase() {} + + return LuckyImageBase; +}(); + +exports.LuckyImageBase = LuckyImageBase; + +},{}],86:[function(require,module,exports){ +"use strict"; + +var __extends = void 0 && (void 0).__extends || function () { + var _extendStatics = function extendStatics(d, b) { + _extendStatics = Object.setPrototypeOf || { + __proto__: [] + } instanceof Array && function (d, b) { + d.__proto__ = b; + } || function (d, b) { + for (var p in b) { + if (b.hasOwnProperty(p)) d[p] = b[p]; + } + }; + + return _extendStatics(d, b); + }; + + return function (d, b) { + _extendStatics(d, b); + + function __() { + this.constructor = d; + } + + d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __()); + }; +}(); + +Object.defineProperty(exports, "__esModule", { + value: true +}); +exports.LuckySheetCelldata = void 0; + +var ReadXml_1 = require("./ReadXml"); + +var method_1 = require("../common/method"); + +var constant_1 = require("../common/constant"); + +var LuckyBase_1 = require("./LuckyBase"); + +var LuckySheetCelldata = +/** @class */ +function (_super) { + __extends(LuckySheetCelldata, _super); + + function LuckySheetCelldata(cell, styles, sharedStrings, mergeCells, sheetFile, ReadXml) { + var _this = //Private + _super.call(this) || this; + + _this.cell = cell; + _this.sheetFile = sheetFile; + _this.styles = styles; + _this.sharedStrings = sharedStrings; + _this.readXml = ReadXml; + _this.mergeCells = mergeCells; + var attrList = cell.attributeList; + var r = attrList.r, + s = attrList.s, + t = attrList.t; + var range = method_1.getcellrange(r); + _this.r = range.row[0]; + _this.c = range.column[0]; + _this.v = _this.generateValue(s, t); + return _this; + } + /** + * @param s Style index ,start 1 + * @param t Cell type, Optional value is ST_CellType, it's found at constat.ts + */ + + + LuckySheetCelldata.prototype.generateValue = function (s, t) { + var _this = this; + + var v = this.cell.getInnerElements("v"); + var f = this.cell.getInnerElements("f"); + + if (v == null) { + v = this.cell.getInnerElements("t"); + } + + var cellXfs = this.styles["cellXfs"]; + var cellStyleXfs = this.styles["cellStyleXfs"]; + var cellStyles = this.styles["cellStyles"]; + var fonts = this.styles["fonts"]; + var fills = this.styles["fills"]; + var borders = this.styles["borders"]; + var numfmts = this.styles["numfmts"]; + var clrScheme = this.styles["clrScheme"]; + var sharedStrings = this.sharedStrings; + var cellValue = new LuckyBase_1.LuckySheetCelldataValue(); + + if (f != null) { + var formula = f[0], + attrList = formula.attributeList; + var t_1 = attrList.t, + ref = attrList.ref, + si = attrList.si; + var formulaValue = f[0].value; + + if (t_1 == "shared") { + this._fomulaRef = ref; + this._formulaType = t_1; + this._formulaSi = si; + } // console.log(ref, t, si); + + + if (ref != null || formulaValue != null && formulaValue.length > 0) { + formulaValue = method_1.escapeCharacter(formulaValue); + cellValue.f = "=" + formulaValue; + } + } + + var 
familyFont = null; + var quotePrefix; + + if (s != null) { + var sNum = parseInt(s); + var cellXf = cellXfs[sNum]; + var xfId = cellXf.attributeList.xfId; + var numFmtId = void 0, + fontId = void 0, + fillId = void 0, + borderId = void 0; + var horizontal = void 0, + vertical = void 0, + wrapText = void 0, + textRotation = void 0, + shrinkToFit = void 0, + indent = void 0, + applyProtection = void 0; + + if (xfId != null) { + var cellStyleXf = cellStyleXfs[parseInt(xfId)]; + var attrList = cellStyleXf.attributeList; + var applyNumberFormat_1 = attrList.applyNumberFormat; + var applyFont_1 = attrList.applyFont; + var applyFill_1 = attrList.applyFill; + var applyBorder_1 = attrList.applyBorder; + var applyAlignment_1 = attrList.applyAlignment; // let applyProtection = attrList.applyProtection; + + applyProtection = attrList.applyProtection; + quotePrefix = attrList.quotePrefix; + + if (applyNumberFormat_1 != "0" && attrList.numFmtId != null) { + // if(attrList.numFmtId!="0"){ + numFmtId = attrList.numFmtId; // } + } + + if (applyFont_1 != "0" && attrList.fontId != null) { + fontId = attrList.fontId; + } + + if (applyFill_1 != "0" && attrList.fillId != null) { + fillId = attrList.fillId; + } + + if (applyBorder_1 != "0" && attrList.borderId != null) { + borderId = attrList.borderId; + } + + if (applyAlignment_1 != null && applyAlignment_1 != "0") { + var alignment = cellStyleXf.getInnerElements("alignment"); + + if (alignment != null) { + var attrList_1 = alignment[0].attributeList; + + if (attrList_1.horizontal != null) { + horizontal = attrList_1.horizontal; + } + + if (attrList_1.vertical != null) { + vertical = attrList_1.vertical; + } + + if (attrList_1.wrapText != null) { + wrapText = attrList_1.wrapText; + } + + if (attrList_1.textRotation != null) { + textRotation = attrList_1.textRotation; + } + + if (attrList_1.shrinkToFit != null) { + shrinkToFit = attrList_1.shrinkToFit; + } + + if (attrList_1.indent != null) { + indent = attrList_1.indent; + } + } + } + } + + var applyNumberFormat = cellXf.attributeList.applyNumberFormat; + var applyFont = cellXf.attributeList.applyFont; + var applyFill = cellXf.attributeList.applyFill; + var applyBorder = cellXf.attributeList.applyBorder; + var applyAlignment = cellXf.attributeList.applyAlignment; + + if (cellXf.attributeList.applyProtection != null) { + applyProtection = cellXf.attributeList.applyProtection; + } + + if (cellXf.attributeList.quotePrefix != null) { + quotePrefix = cellXf.attributeList.quotePrefix; + } + + if (applyNumberFormat != "0" && cellXf.attributeList.numFmtId != null) { + numFmtId = cellXf.attributeList.numFmtId; + } + + if (applyFont != "0") { + fontId = cellXf.attributeList.fontId; + } + + if (applyFill != "0") { + fillId = cellXf.attributeList.fillId; + } + + if (applyBorder != "0") { + borderId = cellXf.attributeList.borderId; + } + + if (applyAlignment != "0") { + var alignment = cellXf.getInnerElements("alignment"); + + if (alignment != null && alignment.length > 0) { + var attrList = alignment[0].attributeList; + + if (attrList.horizontal != null) { + horizontal = attrList.horizontal; + } + + if (attrList.vertical != null) { + vertical = attrList.vertical; + } + + if (attrList.wrapText != null) { + wrapText = attrList.wrapText; + } + + if (attrList.textRotation != null) { + textRotation = attrList.textRotation; + } + + if (attrList.shrinkToFit != null) { + shrinkToFit = attrList.shrinkToFit; + } + + if (attrList.indent != null) { + indent = attrList.indent; + } + } + } + + if (numFmtId != undefined) { + var numf = 
numfmts[parseInt(numFmtId)]; + var cellFormat = new LuckyBase_1.LuckySheetCellFormat(); + cellFormat.fa = method_1.escapeCharacter(numf); // console.log(numf, numFmtId, this.v); + + cellFormat.t = t; + cellValue.ct = cellFormat; + } + + if (fillId != undefined) { + var fillIdNum = parseInt(fillId); + var fill = fills[fillIdNum]; // console.log(cellValue.v); + + var bg = this.getBackgroundByFill(fill, clrScheme); + + if (bg != null) { + cellValue.bg = bg; + } + } + + if (fontId != undefined) { + var fontIdNum = parseInt(fontId); + var font = fonts[fontIdNum]; + + if (font != null) { + var sz = font.getInnerElements("sz"); //font size + + var colors = font.getInnerElements("color"); //font color + + var family = font.getInnerElements("name"); //font family + + var familyOverrides = font.getInnerElements("family"); //font family will be overrided by name + + var charset = font.getInnerElements("charset"); //font charset + + var bolds = font.getInnerElements("b"); //font bold + + var italics = font.getInnerElements("i"); //font italic + + var strikes = font.getInnerElements("strike"); //font italic + + var underlines = font.getInnerElements("u"); //font italic + + if (sz != null && sz.length > 0) { + var fs = sz[0].attributeList.val; + + if (fs != null) { + cellValue.fs = parseInt(fs); + } + } + + if (colors != null && colors.length > 0) { + var color = colors[0]; + var fc = ReadXml_1.getColor(color, this.styles, "t"); + + if (fc != null) { + cellValue.fc = fc; + } + } + + if (familyOverrides != null && familyOverrides.length > 0) { + var val = familyOverrides[0].attributeList.val; + + if (val != null) { + familyFont = constant_1.fontFamilys[val]; + } + } + + if (family != null && family.length > 0) { + var val = family[0].attributeList.val; + + if (val != null) { + cellValue.ff = val; + } + } + + if (bolds != null && bolds.length > 0) { + var bold = bolds[0].attributeList.val; + + if (bold == "0") { + cellValue.bl = 0; + } else { + cellValue.bl = 1; + } + } + + if (italics != null && italics.length > 0) { + var italic = italics[0].attributeList.val; + + if (italic == "0") { + cellValue.it = 0; + } else { + cellValue.it = 1; + } + } + + if (strikes != null && strikes.length > 0) { + var strike = strikes[0].attributeList.val; + + if (strike == "0") { + cellValue.cl = 0; + } else { + cellValue.cl = 1; + } + } + + if (underlines != null && underlines.length > 0) { + var underline = underlines[0].attributeList.val; + + if (underline == "single") { + cellValue.un = 1; + } else if (underline == "double") { + cellValue.un = 2; + } else if (underline == "singleAccounting") { + cellValue.un = 3; + } else if (underline == "doubleAccounting") { + cellValue.un = 4; + } else { + cellValue.un = 0; + } + } + } + } // vt: number | undefined//Vertical alignment, 0 middle, 1 up, 2 down, alignment + // ht: number | undefined//Horizontal alignment,0 center, 1 left, 2 right, alignment + // tr: number | undefined //Text rotation,0: 0、1: 45 、2: -45、3 Vertical text、4: 90 、5: -90, alignment + // tb: number | undefined //Text wrap,0 truncation, 1 overflow, 2 word wrap, alignment + + + if (horizontal != undefined) { + //Horizontal alignment + if (horizontal == "center") { + cellValue.ht = 0; + } else if (horizontal == "centerContinuous") { + cellValue.ht = 0; //luckysheet unsupport + } else if (horizontal == "left") { + cellValue.ht = 1; + } else if (horizontal == "right") { + cellValue.ht = 2; + } else if (horizontal == "distributed") { + cellValue.ht = 0; //luckysheet unsupport + } else if (horizontal == "fill") { + 
cellValue.ht = 1; //luckysheet unsupport + } else if (horizontal == "general") { + cellValue.ht = 1; //luckysheet unsupport + } else if (horizontal == "justify") { + cellValue.ht = 0; //luckysheet unsupport + } else { + cellValue.ht = 1; + } + } + + if (vertical != undefined) { + //Vertical alignment + if (vertical == "bottom") { + cellValue.vt = 2; + } else if (vertical == "center") { + cellValue.vt = 0; + } else if (vertical == "distributed") { + cellValue.vt = 0; //luckysheet unsupport + } else if (vertical == "justify") { + cellValue.vt = 0; //luckysheet unsupport + } else if (vertical == "top") { + cellValue.vt = 1; + } else { + cellValue.vt = 1; + } + } + + if (wrapText != undefined) { + if (wrapText == "1") { + cellValue.tb = 2; + } else { + cellValue.tb = 1; + } + } else { + cellValue.tb = 1; + } + + if (textRotation != undefined) { + // tr: number | undefined //Text rotation,0: 0、1: 45 、2: -45、3 Vertical text、4: 90 、5: -90, alignment + if (textRotation == "255") { + cellValue.tr = 3; + } // else if(textRotation=="45"){ + // cellValue.tr = 1; + // } + // else if(textRotation=="90"){ + // cellValue.tr = 4; + // } + // else if(textRotation=="135"){ + // cellValue.tr = 2; + // } + // else if(textRotation=="180"){ + // cellValue.tr = 5; + // } + else { + cellValue.tr = 0; + cellValue.rt = parseInt(textRotation); + } + } + + if (shrinkToFit != undefined) {//luckysheet unsupport + } + + if (indent != undefined) {//luckysheet unsupport + } + + if (borderId != undefined) { + var borderIdNum = parseInt(borderId); + var border = borders[borderIdNum]; // this._borderId = borderIdNum; + + var borderObject = new LuckyBase_1.LuckySheetborderInfoCellForImp(); + borderObject.rangeType = "cell"; // borderObject.cells = []; + + var borderCellValue = new LuckyBase_1.LuckySheetborderInfoCellValue(); + borderCellValue.row_index = this.r; + borderCellValue.col_index = this.c; + var lefts = border.getInnerElements("left"); + var rights = border.getInnerElements("right"); + var tops = border.getInnerElements("top"); + var bottoms = border.getInnerElements("bottom"); + var diagonals = border.getInnerElements("diagonal"); + var starts = border.getInnerElements("start"); + var ends = border.getInnerElements("end"); + var left = this.getBorderInfo(lefts); + var right = this.getBorderInfo(rights); + var top_1 = this.getBorderInfo(tops); + var bottom = this.getBorderInfo(bottoms); + var diagonal = this.getBorderInfo(diagonals); + var start = this.getBorderInfo(starts); + var end = this.getBorderInfo(ends); + var isAdd = false; + + if (start != null && start.color != null) { + borderCellValue.l = start; + isAdd = true; + } + + if (end != null && end.color != null) { + borderCellValue.r = end; + isAdd = true; + } + + if (left != null && left.color != null) { + borderCellValue.l = left; + isAdd = true; + } + + if (right != null && right.color != null) { + borderCellValue.r = right; + isAdd = true; + } + + if (top_1 != null && top_1.color != null) { + borderCellValue.t = top_1; + isAdd = true; + } + + if (bottom != null && bottom.color != null) { + borderCellValue.b = bottom; + isAdd = true; + } + + if (isAdd) { + borderObject.value = borderCellValue; // this.config._borderInfo[borderId] = borderObject; + + this._borderObject = borderObject; + } + } + } else { + cellValue.tb = 1; + } + + if (v != null) { + var value = v[0].value; + + if (/&#\d+;/.test(value)) { + value = this.htmlDecode(value); + } + + if (t == constant_1.ST_CellType["SharedString"]) { + var siIndex = parseInt(v[0].value); + var sharedSI = 
sharedStrings[siIndex]; + var rFlag = sharedSI.getInnerElements("r"); + + if (rFlag == null) { + var tFlag = sharedSI.getInnerElements("t"); + + if (tFlag != null) { + var text_1 = ""; + tFlag.forEach(function (t) { + text_1 += t.value; + }); + text_1 = method_1.escapeCharacter(text_1); //isContainMultiType(text) && + + if (familyFont == "Roman" && text_1.length > 0) { + var textArray = text_1.split(""); + var preWordType = null, + wordText = "", + preWholef = null; + var wholef = "Times New Roman"; + + if (cellValue.ff != null) { + wholef = cellValue.ff; + } + + var cellFormat = cellValue.ct; + + if (cellFormat == null) { + cellFormat = new LuckyBase_1.LuckySheetCellFormat(); + } + + if (cellFormat.s == null) { + cellFormat.s = []; + } + + for (var i = 0; i < textArray.length; i++) { + var w = textArray[i]; + var type = null, + ff = wholef; + + if (method_1.isChinese(w)) { + type = "c"; + ff = "宋体"; + } else if (method_1.isJapanese(w)) { + type = "j"; + ff = "Yu Gothic"; + } else if (method_1.isKoera(w)) { + type = "k"; + ff = "Malgun Gothic"; + } else { + type = "e"; + } + + if (type != preWordType && preWordType != null || i == textArray.length - 1) { + var InlineString = new LuckyBase_1.LuckyInlineString(); + InlineString.ff = preWholef; + + if (cellValue.fc != null) { + InlineString.fc = cellValue.fc; + } + + if (cellValue.fs != null) { + InlineString.fs = cellValue.fs; + } + + if (cellValue.cl != null) { + InlineString.cl = cellValue.cl; + } + + if (cellValue.un != null) { + InlineString.un = cellValue.un; + } + + if (cellValue.bl != null) { + InlineString.bl = cellValue.bl; + } + + if (cellValue.it != null) { + InlineString.it = cellValue.it; + } + + if (i == textArray.length - 1) { + if (type == preWordType) { + InlineString.ff = ff; + InlineString.v = wordText + w; + } else { + InlineString.ff = preWholef; + InlineString.v = wordText; + cellFormat.s.push(InlineString); + var InlineStringLast = new LuckyBase_1.LuckyInlineString(); + InlineStringLast.ff = ff; + InlineStringLast.v = w; + + if (cellValue.fc != null) { + InlineStringLast.fc = cellValue.fc; + } + + if (cellValue.fs != null) { + InlineStringLast.fs = cellValue.fs; + } + + if (cellValue.cl != null) { + InlineStringLast.cl = cellValue.cl; + } + + if (cellValue.un != null) { + InlineStringLast.un = cellValue.un; + } + + if (cellValue.bl != null) { + InlineStringLast.bl = cellValue.bl; + } + + if (cellValue.it != null) { + InlineStringLast.it = cellValue.it; + } + + cellFormat.s.push(InlineStringLast); + break; + } + } else { + InlineString.v = wordText; + } + + cellFormat.s.push(InlineString); + wordText = w; + } else { + wordText += w; + } + + preWordType = type; + preWholef = ff; + } + + cellFormat.t = "inlineStr"; // cellFormat.s = [InlineString]; + + cellValue.ct = cellFormat; // console.log(cellValue); + } else { + text_1 = this.replaceSpecialWrap(text_1); + + if (text_1.indexOf("\r\n") > -1 || text_1.indexOf("\n") > -1) { + var InlineString = new LuckyBase_1.LuckyInlineString(); + InlineString.v = text_1; + var cellFormat = cellValue.ct; + + if (cellFormat == null) { + cellFormat = new LuckyBase_1.LuckySheetCellFormat(); + } + + if (cellValue.ff != null) { + InlineString.ff = cellValue.ff; + } + + if (cellValue.fc != null) { + InlineString.fc = cellValue.fc; + } + + if (cellValue.fs != null) { + InlineString.fs = cellValue.fs; + } + + if (cellValue.cl != null) { + InlineString.cl = cellValue.cl; + } + + if (cellValue.un != null) { + InlineString.un = cellValue.un; + } + + if (cellValue.bl != null) { + InlineString.bl 
= cellValue.bl; + } + + if (cellValue.it != null) { + InlineString.it = cellValue.it; + } + + cellFormat.t = "inlineStr"; + cellFormat.s = [InlineString]; + cellValue.ct = cellFormat; + } else { + cellValue.v = text_1; + quotePrefix = "1"; + } + } + } + } else { + var styles_1 = []; + rFlag.forEach(function (r) { + var tFlag = r.getInnerElements("t"); + var rPr = r.getInnerElements("rPr"); + var InlineString = new LuckyBase_1.LuckyInlineString(); + + if (tFlag != null && tFlag.length > 0) { + var text = tFlag[0].value; + text = _this.replaceSpecialWrap(text); + text = method_1.escapeCharacter(text); + InlineString.v = text; + } + + if (rPr != null && rPr.length > 0) { + var frpr = rPr[0]; + var sz = ReadXml_1.getlineStringAttr(frpr, "sz"), + rFont = ReadXml_1.getlineStringAttr(frpr, "rFont"), + family = ReadXml_1.getlineStringAttr(frpr, "family"), + charset = ReadXml_1.getlineStringAttr(frpr, "charset"), + scheme = ReadXml_1.getlineStringAttr(frpr, "scheme"), + b = ReadXml_1.getlineStringAttr(frpr, "b"), + i = ReadXml_1.getlineStringAttr(frpr, "i"), + u = ReadXml_1.getlineStringAttr(frpr, "u"), + strike = ReadXml_1.getlineStringAttr(frpr, "strike"), + vertAlign = ReadXml_1.getlineStringAttr(frpr, "vertAlign"), + color = void 0; + var cEle = frpr.getInnerElements("color"); + + if (cEle != null && cEle.length > 0) { + color = ReadXml_1.getColor(cEle[0], _this.styles, "t"); + } + + var ff = void 0; // if(family!=null){ + // ff = fontFamilys[family]; + // } + + if (rFont != null) { + ff = rFont; + } + + if (ff != null) { + InlineString.ff = ff; + } else if (cellValue.ff != null) { + InlineString.ff = cellValue.ff; + } + + if (color != null) { + InlineString.fc = color; + } else if (cellValue.fc != null) { + InlineString.fc = cellValue.fc; + } + + if (sz != null) { + InlineString.fs = parseInt(sz); + } else if (cellValue.fs != null) { + InlineString.fs = cellValue.fs; + } + + if (strike != null) { + InlineString.cl = parseInt(strike); + } else if (cellValue.cl != null) { + InlineString.cl = cellValue.cl; + } + + if (u != null) { + InlineString.un = parseInt(u); + } else if (cellValue.un != null) { + InlineString.un = cellValue.un; + } + + if (b != null) { + InlineString.bl = parseInt(b); + } else if (cellValue.bl != null) { + InlineString.bl = cellValue.bl; + } + + if (i != null) { + InlineString.it = parseInt(i); + } else if (cellValue.it != null) { + InlineString.it = cellValue.it; + } + + if (vertAlign != null) { + InlineString.va = parseInt(vertAlign); + } // ff:string | undefined //font family + // fc:string | undefined//font color + // fs:number | undefined//font size + // cl:number | undefined//strike + // un:number | undefined//underline + // bl:number | undefined//blod + // it:number | undefined//italic + // v:string | undefined + + } else { + if (InlineString.ff == null && cellValue.ff != null) { + InlineString.ff = cellValue.ff; + } + + if (InlineString.fc == null && cellValue.fc != null) { + InlineString.fc = cellValue.fc; + } + + if (InlineString.fs == null && cellValue.fs != null) { + InlineString.fs = cellValue.fs; + } + + if (InlineString.cl == null && cellValue.cl != null) { + InlineString.cl = cellValue.cl; + } + + if (InlineString.un == null && cellValue.un != null) { + InlineString.un = cellValue.un; + } + + if (InlineString.bl == null && cellValue.bl != null) { + InlineString.bl = cellValue.bl; + } + + if (InlineString.it == null && cellValue.it != null) { + InlineString.it = cellValue.it; + } + } + + styles_1.push(InlineString); + }); + var cellFormat = cellValue.ct; + + 
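+  //Each <r> run of the shared string has been mapped to a LuckyInlineString above, with
+  //font, color, size, strike, underline, bold and italic falling back to the cell-level
+  //style when the run omits them; below, the runs are stored as an inline-string cell
+  //(ct.t = "inlineStr", ct.s = the run list) so per-run formatting is preserved.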
if (cellFormat == null) { + cellFormat = new LuckyBase_1.LuckySheetCellFormat(); + } + + cellFormat.t = "inlineStr"; + cellFormat.s = styles_1; + cellValue.ct = cellFormat; + } + } // else if(t==ST_CellType["InlineString"] && v!=null){ + // } + else { + value = method_1.escapeCharacter(value); + cellValue.v = value; + } + } + + if (quotePrefix != null) { + cellValue.qp = parseInt(quotePrefix); + } + + return cellValue; + }; + + LuckySheetCelldata.prototype.replaceSpecialWrap = function (text) { + text = text.replace(/_x000D_/g, "").replace(/&#13;&#10;/g, "\r\n").replace(/&#13;/g, "\r").replace(/&#10;/g, "\n"); + return text; + }; + + LuckySheetCelldata.prototype.getBackgroundByFill = function (fill, clrScheme) { + var patternFills = fill.getInnerElements("patternFill"); + + if (patternFills != null) { + var patternFill = patternFills[0]; + var fgColors = patternFill.getInnerElements("fgColor"); + var bgColors = patternFill.getInnerElements("bgColor"); + var fg = void 0, + bg = void 0; + + if (fgColors != null) { + var fgColor = fgColors[0]; + fg = ReadXml_1.getColor(fgColor, this.styles); + } + + if (bgColors != null) { + var bgColor = bgColors[0]; + bg = ReadXml_1.getColor(bgColor, this.styles); + } // console.log(fgColors,bgColors,clrScheme); + + + if (fg != null) { + return fg; + } else if (bg != null) { + return bg; + } + } else { + var gradientfills = fill.getInnerElements("gradientFill"); + + if (gradientfills != null) { + //gradient color fill handler + return null; + } + } + }; + + LuckySheetCelldata.prototype.getBorderInfo = function (borders) { + if (borders == null) { + return null; + } + + var border = borders[0], + attrList = border.attributeList; + var clrScheme = this.styles["clrScheme"]; + var style = attrList.style; + + if (style == null || style == "none") { + return null; + } + + var colors = border.getInnerElements("color"); + var colorRet = "#000000"; + + if (colors != null) { + var color = colors[0]; + colorRet = ReadXml_1.getColor(color, this.styles, "b"); + + if (colorRet == null) { + colorRet = "#000000"; + } + } + + var ret = new LuckyBase_1.LuckySheetborderInfoCellValueStyle(); + ret.style = constant_1.borderTypes[style]; + ret.color = colorRet; + return ret; + }; + + LuckySheetCelldata.prototype.htmlDecode = function (str) { + return str.replace(/&#(x)?([^&]{1,5});?/g, function ($, $1, $2) { + return String.fromCharCode(parseInt($2, $1 ? 16 : 10)); + }); + }; + + ; + return LuckySheetCelldata; +}(LuckyBase_1.LuckySheetCelldataBase); + +exports.LuckySheetCelldata = LuckySheetCelldata; + +},{"../common/constant":91,"../common/method":93,"./LuckyBase":85,"./ReadXml":90}],87:[function(require,module,exports){ +"use strict"; + +var __extends = void 0 && (void 0).__extends || function () { + var _extendStatics = function extendStatics(d, b) { + _extendStatics = Object.setPrototypeOf || { + __proto__: [] + } instanceof Array && function (d, b) { + d.__proto__ = b; + } || function (d, b) { + for (var p in b) { + if (b.hasOwnProperty(p)) d[p] = b[p]; + } + }; + + return _extendStatics(d, b); + }; + + return function (d, b) { + _extendStatics(d, b); + + function __() { + this.constructor = d; + } + + d.prototype = b === null ? 
Object.create(b) : (__.prototype = b.prototype, new __()); + }; +}(); + +Object.defineProperty(exports, "__esModule", { + value: true +}); +exports.LuckyFile = void 0; + +var LuckySheet_1 = require("./LuckySheet"); + +var constant_1 = require("../common/constant"); + +var ReadXml_1 = require("./ReadXml"); + +var method_1 = require("../common/method"); + +var LuckyBase_1 = require("./LuckyBase"); + +var LuckyImage_1 = require("./LuckyImage"); + +var LuckyFile = +/** @class */ +function (_super) { + __extends(LuckyFile, _super); + + function LuckyFile(files, fileName) { + var _this = _super.call(this) || this; + + _this.columnWidthSet = []; + _this.rowHeightSet = []; + _this.files = files; + _this.fileName = fileName; + _this.readXml = new ReadXml_1.ReadXml(files); + + _this.getSheetNameList(); + + _this.sharedStrings = _this.readXml.getElementsByTagName("sst/si", constant_1.sharedStringsFile); + _this.calcChain = _this.readXml.getElementsByTagName("calcChain/c", constant_1.calcChainFile); + _this.styles = {}; + _this.styles["cellXfs"] = _this.readXml.getElementsByTagName("cellXfs/xf", constant_1.stylesFile); + _this.styles["cellStyleXfs"] = _this.readXml.getElementsByTagName("cellStyleXfs/xf", constant_1.stylesFile); + _this.styles["cellStyles"] = _this.readXml.getElementsByTagName("cellStyles/cellStyle", constant_1.stylesFile); + _this.styles["fonts"] = _this.readXml.getElementsByTagName("fonts/font", constant_1.stylesFile); + _this.styles["fills"] = _this.readXml.getElementsByTagName("fills/fill", constant_1.stylesFile); + _this.styles["borders"] = _this.readXml.getElementsByTagName("borders/border", constant_1.stylesFile); + _this.styles["clrScheme"] = _this.readXml.getElementsByTagName("a:clrScheme/a:dk1|a:lt1|a:dk2|a:lt2|a:accent1|a:accent2|a:accent3|a:accent4|a:accent5|a:accent6|a:hlink|a:folHlink", constant_1.theme1File); + _this.styles["indexedColors"] = _this.readXml.getElementsByTagName("colors/indexedColors/rgbColor", constant_1.stylesFile); + _this.styles["mruColors"] = _this.readXml.getElementsByTagName("colors/mruColors/color", constant_1.stylesFile); + _this.imageList = new LuckyImage_1.ImageList(files); + + var numfmts = _this.readXml.getElementsByTagName("numFmt/numFmt", constant_1.stylesFile); + + var numFmtDefaultC = JSON.parse(JSON.stringify(constant_1.numFmtDefault)); + + for (var i = 0; i < numfmts.length; i++) { + var attrList = numfmts[i].attributeList; + var numfmtid = method_1.getXmlAttibute(attrList, "numFmtId", "49"); + var formatcode = method_1.getXmlAttibute(attrList, "formatCode", "@"); // console.log(numfmtid, formatcode); + + if (!(numfmtid in constant_1.numFmtDefault)) { + numFmtDefaultC[numfmtid] = formatcode; + } + } // console.log(JSON.stringify(numFmtDefaultC), numfmts); + + + _this.styles["numfmts"] = numFmtDefaultC; + return _this; + } + /** + * @return All sheet name of workbook + */ + + + LuckyFile.prototype.getSheetNameList = function () { + var workbookRelList = this.readXml.getElementsByTagName("Relationships/Relationship", constant_1.workbookRels); + + if (workbookRelList == null) { + return; + } + + var regex = new RegExp("worksheets/[^/]*?.xml"); + var sheetNames = {}; + + for (var i = 0; i < workbookRelList.length; i++) { + var rel = workbookRelList[i], + attrList = rel.attributeList; + var id = attrList["Id"], + target = attrList["Target"]; + + if (regex.test(target)) { + if (target.startsWith('/xl')) { + sheetNames[id] = target.substr(1); + } else { + sheetNames[id] = "xl/" + target; + } + } + } + + this.sheetNameList = sheetNames; + }; + 
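+  //Example (illustrative; the Id and Target values here are hypothetical): a workbook
+  //relationship in xl/_rels/workbook.xml.rels such as
+  //  <Relationship Id="rId1" Target="worksheets/sheet1.xml"/>
+  //is recorded by getSheetNameList above as sheetNames["rId1"] = "xl/worksheets/sheet1.xml",
+  //while an absolute Target like "/xl/worksheets/sheet1.xml" only has its leading "/" stripped.
+  //getSheetFileBysheetId below then resolves a sheet's r:id attribute to that path in the zip.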
/** + * @param sheetName WorkSheet'name + * @return sheet file name and path in zip + */ + + + LuckyFile.prototype.getSheetFileBysheetId = function (sheetId) { + // for(let i=0;i<this.sheetNameList.length;i++){ + // let sheetFileName = this.sheetNameList[i]; + // if(sheetFileName.indexOf("sheet"+sheetId)>-1){ + // return sheetFileName; + // } + // } + return this.sheetNameList[sheetId]; + }; + /** + * @return workBook information + */ + + + LuckyFile.prototype.getWorkBookInfo = function () { + var Company = this.readXml.getElementsByTagName("Company", constant_1.appFile); + var AppVersion = this.readXml.getElementsByTagName("AppVersion", constant_1.appFile); + var creator = this.readXml.getElementsByTagName("dc:creator", constant_1.coreFile); + var lastModifiedBy = this.readXml.getElementsByTagName("cp:lastModifiedBy", constant_1.coreFile); + var created = this.readXml.getElementsByTagName("dcterms:created", constant_1.coreFile); + var modified = this.readXml.getElementsByTagName("dcterms:modified", constant_1.coreFile); + this.info = new LuckyBase_1.LuckyFileInfo(); + this.info.name = this.fileName; + this.info.creator = creator.length > 0 ? creator[0].value : ""; + this.info.lastmodifiedby = lastModifiedBy.length > 0 ? lastModifiedBy[0].value : ""; + this.info.createdTime = created.length > 0 ? created[0].value : ""; + this.info.modifiedTime = modified.length > 0 ? modified[0].value : ""; + this.info.company = Company.length > 0 ? Company[0].value : ""; + this.info.appversion = AppVersion.length > 0 ? AppVersion[0].value : ""; + }; + /** + * @return All sheet , include whole information + */ + + + LuckyFile.prototype.getSheetsFull = function (isInitialCell) { + if (isInitialCell === void 0) { + isInitialCell = true; + } + + var sheets = this.readXml.getElementsByTagName("sheets/sheet", constant_1.workBookFile); + var sheetList = {}; + + for (var key in sheets) { + var sheet = sheets[key]; + sheetList[sheet.attributeList.name] = sheet.attributeList["sheetId"]; + } + + this.sheets = []; + var order = 0; + + for (var key in sheets) { + var sheet = sheets[key]; + var sheetName = sheet.attributeList.name; + var sheetId = sheet.attributeList["sheetId"]; + var rid = sheet.attributeList["r:id"]; + var sheetFile = this.getSheetFileBysheetId(rid); + var drawing = this.readXml.getElementsByTagName("worksheet/drawing", sheetFile), + drawingFile = void 0, + drawingRelsFile = void 0; + + if (drawing != null && drawing.length > 0) { + var attrList = drawing[0].attributeList; + var rid_1 = method_1.getXmlAttibute(attrList, "r:id", null); + + if (rid_1 != null) { + drawingFile = this.getDrawingFile(rid_1, sheetFile); + drawingRelsFile = this.getDrawingRelsFile(drawingFile); + } + } + + if (sheetFile != null) { + var sheet_1 = new LuckySheet_1.LuckySheet(sheetName, sheetId, order, isInitialCell, { + sheetFile: sheetFile, + readXml: this.readXml, + sheetList: sheetList, + styles: this.styles, + sharedStrings: this.sharedStrings, + calcChain: this.calcChain, + imageList: this.imageList, + drawingFile: drawingFile, + drawingRelsFile: drawingRelsFile + }); + this.columnWidthSet = []; + this.rowHeightSet = []; + this.imagePositionCaculation(sheet_1); + this.sheets.push(sheet_1); + order++; + } + } + }; + + LuckyFile.prototype.extendArray = function (index, sets, def, hidden, lens) { + if (index < sets.length) { + return; + } + + var startIndex = sets.length, + endIndex = index; + var allGap = 0; + + if (startIndex > 0) { + allGap = sets[startIndex - 1]; + } // else{ + // sets.push(0); + // } + + + for (var i = 
startIndex; i <= endIndex; i++) { + var gap = def, + istring = i.toString(); + + if (istring in hidden) { + gap = 0; + } else if (istring in lens) { + gap = lens[istring]; + } + + allGap += Math.round(gap + 1); + sets.push(allGap); + } + }; + + LuckyFile.prototype.imagePositionCaculation = function (sheet) { + var images = sheet.images, + defaultColWidth = sheet.defaultColWidth, + defaultRowHeight = sheet.defaultRowHeight; + var colhidden = {}; + + if (sheet.config.colhidden) { + colhidden = sheet.config.colhidden; + } + + var columnlen = {}; + + if (sheet.config.columnlen) { + columnlen = sheet.config.columnlen; + } + + var rowhidden = {}; + + if (sheet.config.rowhidden) { + rowhidden = sheet.config.rowhidden; + } + + var rowlen = {}; + + if (sheet.config.rowlen) { + rowlen = sheet.config.rowlen; + } + + for (var key in images) { + var imageObject = images[key]; //Image, luckyImage + + var fromCol = imageObject.fromCol; + var fromColOff = imageObject.fromColOff; + var fromRow = imageObject.fromRow; + var fromRowOff = imageObject.fromRowOff; + var toCol = imageObject.toCol; + var toColOff = imageObject.toColOff; + var toRow = imageObject.toRow; + var toRowOff = imageObject.toRowOff; + var x_n = 0, + y_n = 0; + var cx_n = 0, + cy_n = 0; + + if (fromCol >= this.columnWidthSet.length) { + this.extendArray(fromCol, this.columnWidthSet, defaultColWidth, colhidden, columnlen); + } + + if (fromCol == 0) { + x_n = 0; + } else { + x_n = this.columnWidthSet[fromCol - 1]; + } + + x_n = x_n + fromColOff; + + if (fromRow >= this.rowHeightSet.length) { + this.extendArray(fromRow, this.rowHeightSet, defaultRowHeight, rowhidden, rowlen); + } + + if (fromRow == 0) { + y_n = 0; + } else { + y_n = this.rowHeightSet[fromRow - 1]; + } + + y_n = y_n + fromRowOff; + + if (toCol >= this.columnWidthSet.length) { + this.extendArray(toCol, this.columnWidthSet, defaultColWidth, colhidden, columnlen); + } + + if (toCol == 0) { + cx_n = 0; + } else { + cx_n = this.columnWidthSet[toCol - 1]; + } + + cx_n = cx_n + toColOff - x_n; + + if (toRow >= this.rowHeightSet.length) { + this.extendArray(toRow, this.rowHeightSet, defaultRowHeight, rowhidden, rowlen); + } + + if (toRow == 0) { + cy_n = 0; + } else { + cy_n = this.rowHeightSet[toRow - 1]; + } + + cy_n = cy_n + toRowOff - y_n; + console.log(defaultColWidth, colhidden, columnlen); + console.log(fromCol, this.columnWidthSet[fromCol], fromColOff); + console.log(toCol, this.columnWidthSet[toCol], toColOff, JSON.stringify(this.columnWidthSet)); + imageObject.originWidth = cx_n; + imageObject.originHeight = cy_n; + imageObject.crop.height = cy_n; + imageObject.crop.width = cx_n; + imageObject["default"].height = cy_n; + imageObject["default"].left = x_n; + imageObject["default"].top = y_n; + imageObject["default"].width = cx_n; + } + + console.log(this.columnWidthSet, this.rowHeightSet); + }; + /** + * @return drawing file string + */ + + + LuckyFile.prototype.getDrawingFile = function (rid, sheetFile) { + var sheetRelsPath = "xl/worksheets/_rels/"; + var sheetFileArr = sheetFile.split("/"); + var sheetRelsName = sheetFileArr[sheetFileArr.length - 1]; + var sheetRelsFile = sheetRelsPath + sheetRelsName + ".rels"; + var drawing = this.readXml.getElementsByTagName("Relationships/Relationship", sheetRelsFile); + + if (drawing.length > 0) { + for (var i = 0; i < drawing.length; i++) { + var relationship = drawing[i]; + var attrList = relationship.attributeList; + var relationshipId = method_1.getXmlAttibute(attrList, "Id", null); + + if (relationshipId == rid) { + var target = 
method_1.getXmlAttibute(attrList, "Target", null); + + if (target != null) { + return target.replace(/\.\.\//g, ""); + } + } + } + } + + return null; + }; + + LuckyFile.prototype.getDrawingRelsFile = function (drawingFile) { + var drawingRelsPath = "xl/drawings/_rels/"; + var drawingFileArr = drawingFile.split("/"); + var drawingRelsName = drawingFileArr[drawingFileArr.length - 1]; + var drawingRelsFile = drawingRelsPath + drawingRelsName + ".rels"; + return drawingRelsFile; + }; + /** + * @return All sheet base information widthout cell and config + */ + + + LuckyFile.prototype.getSheetsWithoutCell = function () { + this.getSheetsFull(false); + }; + /** + * @return LuckySheet file json + */ + + + LuckyFile.prototype.Parse = function () { + // let xml = this.readXml; + // for(let key in this.sheetNameList){ + // let sheetName=this.sheetNameList[key]; + // let sheetColumns = xml.getElementsByTagName("row/c/f", sheetName); + // console.log(sheetColumns); + // } + // return ""; + this.getWorkBookInfo(); + this.getSheetsFull(); // for(let i=0;i<this.sheets.length;i++){ + // let sheet = this.sheets[i]; + // let _borderInfo = sheet.config._borderInfo; + // if(_borderInfo==null){ + // continue; + // } + // let _borderInfoKeys = Object.keys(_borderInfo); + // _borderInfoKeys.sort(); + // for(let a=0;a<_borderInfoKeys.length;a++){ + // let key = parseInt(_borderInfoKeys[a]); + // let b = _borderInfo[key]; + // if(b.cells.length==0){ + // continue; + // } + // if(sheet.config.borderInfo==null){ + // sheet.config.borderInfo = []; + // } + // sheet.config.borderInfo.push(b); + // } + // } + + return this.toJsonString(this); + }; + + LuckyFile.prototype.toJsonString = function (file) { + var LuckyOutPutFile = new LuckyBase_1.LuckyFileBase(); + LuckyOutPutFile.info = file.info; + LuckyOutPutFile.sheets = []; + file.sheets.forEach(function (sheet) { + var sheetout = new LuckyBase_1.LuckySheetBase(); //let attrName = ["name","color","config","index","status","order","row","column","luckysheet_select_save","scrollLeft","scrollTop","zoomRatio","showGridLines","defaultColWidth","defaultRowHeight","celldata","chart","isPivotTable","pivotTable","luckysheet_conditionformat_save","freezen","calcChain"]; + + if (sheet.name != null) { + sheetout.name = sheet.name; + } + + if (sheet.color != null) { + sheetout.color = sheet.color; + } + + if (sheet.config != null) { + sheetout.config = sheet.config; // if(sheetout.config._borderInfo!=null){ + // delete sheetout.config._borderInfo; + // } + } + + if (sheet.index != null) { + sheetout.index = sheet.index; + } + + if (sheet.status != null) { + sheetout.status = sheet.status; + } + + if (sheet.order != null) { + sheetout.order = sheet.order; + } + + if (sheet.row != null) { + sheetout.row = sheet.row; + } + + if (sheet.column != null) { + sheetout.column = sheet.column; + } + + if (sheet.luckysheet_select_save != null) { + sheetout.luckysheet_select_save = sheet.luckysheet_select_save; + } + + if (sheet.scrollLeft != null) { + sheetout.scrollLeft = sheet.scrollLeft; + } + + if (sheet.scrollTop != null) { + sheetout.scrollTop = sheet.scrollTop; + } + + if (sheet.zoomRatio != null) { + sheetout.zoomRatio = sheet.zoomRatio; + } + + if (sheet.showGridLines != null) { + sheetout.showGridLines = sheet.showGridLines; + } + + if (sheet.defaultColWidth != null) { + sheetout.defaultColWidth = sheet.defaultColWidth; + } + + if (sheet.defaultRowHeight != null) { + sheetout.defaultRowHeight = sheet.defaultRowHeight; + } + + if (sheet.celldata != null) { + // sheetout.celldata = 
sheet.celldata; + sheetout.celldata = []; + sheet.celldata.forEach(function (cell) { + var cellout = new LuckyBase_1.LuckySheetCelldataBase(); + cellout.r = cell.r; + cellout.c = cell.c; + cellout.v = cell.v; + sheetout.celldata.push(cellout); + }); + } + + if (sheet.chart != null) { + sheetout.chart = sheet.chart; + } + + if (sheet.isPivotTable != null) { + sheetout.isPivotTable = sheet.isPivotTable; + } + + if (sheet.pivotTable != null) { + sheetout.pivotTable = sheet.pivotTable; + } + + if (sheet.luckysheet_conditionformat_save != null) { + sheetout.luckysheet_conditionformat_save = sheet.luckysheet_conditionformat_save; + } + + if (sheet.freezen != null) { + sheetout.freezen = sheet.freezen; + } + + if (sheet.calcChain != null) { + sheetout.calcChain = sheet.calcChain; + } + + if (sheet.images != null) { + sheetout.images = sheet.images; + } + + LuckyOutPutFile.sheets.push(sheetout); + }); + return JSON.stringify(LuckyOutPutFile); + }; + + return LuckyFile; +}(LuckyBase_1.LuckyFileBase); + +exports.LuckyFile = LuckyFile; + +},{"../common/constant":91,"../common/method":93,"./LuckyBase":85,"./LuckyImage":88,"./LuckySheet":89,"./ReadXml":90}],88:[function(require,module,exports){ +"use strict"; + +var __extends = void 0 && (void 0).__extends || function () { + var _extendStatics = function extendStatics(d, b) { + _extendStatics = Object.setPrototypeOf || { + __proto__: [] + } instanceof Array && function (d, b) { + d.__proto__ = b; + } || function (d, b) { + for (var p in b) { + if (b.hasOwnProperty(p)) d[p] = b[p]; + } + }; + + return _extendStatics(d, b); + }; + + return function (d, b) { + _extendStatics(d, b); + + function __() { + this.constructor = d; + } + + d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __()); + }; +}(); + +Object.defineProperty(exports, "__esModule", { + value: true +}); +exports.ImageList = void 0; + +var LuckyBase_1 = require("./LuckyBase"); + +var emf_1 = require("../common/emf"); + +var ImageList = +/** @class */ +function () { + function ImageList(files) { + if (files == null) { + return; + } + + this.images = {}; + + for (var fileKey in files) { + // let reg = new RegExp("xl/media/image1.png", "g"); + if (fileKey.indexOf("xl/media/") > -1) { + var fileNameArr = fileKey.split("."); + var suffix = fileNameArr[fileNameArr.length - 1].toLowerCase(); + + if (suffix in { + "png": 1, + "jpeg": 1, + "jpg": 1, + "gif": 1, + "bmp": 1, + "tif": 1, + "webp": 1, + "emf": 1 + }) { + if (suffix == "emf") { + var pNum = 0; // number of the page, that you want to render + + var scale = 1; // the scale of the document + + var wrt = new emf_1.ToContext2D(pNum, scale); + var inp, out, stt; + emf_1.FromEMF.K = []; + inp = emf_1.FromEMF.C; + out = emf_1.FromEMF.K; + stt = 4; + + for (var p in inp) { + out[inp[p]] = p.slice(stt); + } + + emf_1.FromEMF.Parse(files[fileKey], wrt); + this.images[fileKey] = wrt.canvas.toDataURL("image/png"); + } else { + this.images[fileKey] = files[fileKey]; + } + } + } + } + } + + ImageList.prototype.getImageByName = function (pathName) { + if (pathName in this.images) { + var base64 = this.images[pathName]; + return new Image(pathName, base64); + } + + return null; + }; + + return ImageList; +}(); + +exports.ImageList = ImageList; + +var Image = +/** @class */ +function (_super) { + __extends(Image, _super); + + function Image(pathName, base64) { + var _this = _super.call(this) || this; + + _this.src = base64; + return _this; + } + + Image.prototype.setDefault = function () {}; + + return Image; 
+}(LuckyBase_1.LuckyImageBase); + +},{"../common/emf":92,"./LuckyBase":85}],89:[function(require,module,exports){ +"use strict"; + +var __extends = void 0 && (void 0).__extends || function () { + var _extendStatics = function extendStatics(d, b) { + _extendStatics = Object.setPrototypeOf || { + __proto__: [] + } instanceof Array && function (d, b) { + d.__proto__ = b; + } || function (d, b) { + for (var p in b) { + if (b.hasOwnProperty(p)) d[p] = b[p]; + } + }; + + return _extendStatics(d, b); + }; + + return function (d, b) { + _extendStatics(d, b); + + function __() { + this.constructor = d; + } + + d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __()); + }; +}(); + +Object.defineProperty(exports, "__esModule", { + value: true +}); +exports.LuckySheet = void 0; + +var LuckyCell_1 = require("./LuckyCell"); + +var method_1 = require("../common/method"); + +var ReadXml_1 = require("./ReadXml"); + +var LuckyBase_1 = require("./LuckyBase"); + +var LuckySheet = +/** @class */ +function (_super) { + __extends(LuckySheet, _super); + + function LuckySheet(sheetName, sheetId, sheetOrder, isInitialCell, allFileOption) { + if (isInitialCell === void 0) { + isInitialCell = false; + } + + var _this = //Private + _super.call(this) || this; + + _this.isInitialCell = isInitialCell; + _this.readXml = allFileOption.readXml; + _this.sheetFile = allFileOption.sheetFile; + _this.styles = allFileOption.styles; + _this.sharedStrings = allFileOption.sharedStrings; + _this.calcChainEles = allFileOption.calcChain; + _this.sheetList = allFileOption.sheetList; + _this.imageList = allFileOption.imageList; //Output + + _this.name = sheetName; + _this.index = sheetId; + _this.order = sheetOrder.toString(); + _this.config = new LuckyBase_1.LuckyConfig(); + _this.celldata = []; + _this.mergeCells = _this.readXml.getElementsByTagName("mergeCells/mergeCell", _this.sheetFile); + var clrScheme = _this.styles["clrScheme"]; + + var sheetView = _this.readXml.getElementsByTagName("sheetViews/sheetView", _this.sheetFile); + + var showGridLines = "1", + tabSelected = "0", + zoomScale = "100", + activeCell = "A1"; + + if (sheetView.length > 0) { + var attrList = sheetView[0].attributeList; + showGridLines = method_1.getXmlAttibute(attrList, "showGridLines", "1"); + tabSelected = method_1.getXmlAttibute(attrList, "tabSelected", "0"); + zoomScale = method_1.getXmlAttibute(attrList, "zoomScale", "100"); // let colorId = getXmlAttibute(attrList, "colorId", "0"); + + var selections = sheetView[0].getInnerElements("selection"); + + if (selections != null && selections.length > 0) { + activeCell = method_1.getXmlAttibute(selections[0].attributeList, "activeCell", "A1"); + var range = method_1.getcellrange(activeCell, _this.sheetList, sheetId); + _this.luckysheet_select_save = []; + + _this.luckysheet_select_save.push(range); + } + } + + _this.showGridLines = showGridLines; + _this.status = tabSelected; + _this.zoomRatio = parseInt(zoomScale) / 100; + + var tabColors = _this.readXml.getElementsByTagName("sheetPr/tabColor", _this.sheetFile); + + if (tabColors != null && tabColors.length > 0) { + var tabColor = tabColors[0], + attrList = tabColor.attributeList; // if(attrList.rgb!=null){ + + var tc = ReadXml_1.getColor(tabColor, _this.styles, "b"); + _this.color = tc; // } + } + + var sheetFormatPr = _this.readXml.getElementsByTagName("sheetFormatPr", _this.sheetFile); + + var defaultColWidth, defaultRowHeight; + + if (sheetFormatPr.length > 0) { + var attrList = sheetFormatPr[0].attributeList; + 
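+  //sheetFormatPr holds the sheet-wide defaults; when its attributes are missing, the
+  //fallbacks used below are 9.21 (column width in character units) and 19 (row height in
+  //points), which getColumnWidthPixel/getRowHeightPixel then convert to pixel values.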
defaultColWidth = method_1.getXmlAttibute(attrList, "defaultColWidth", "9.21"); + defaultRowHeight = method_1.getXmlAttibute(attrList, "defaultRowHeight", "19"); + } + + _this.defaultColWidth = method_1.getColumnWidthPixel(parseFloat(defaultColWidth)); + _this.defaultRowHeight = method_1.getRowHeightPixel(parseFloat(defaultRowHeight)); + + _this.generateConfigColumnLenAndHidden(); + + var cellOtherInfo = _this.generateConfigRowLenAndHiddenAddCell(); + + if (_this.formulaRefList != null) { + for (var key in _this.formulaRefList) { + var funclist = _this.formulaRefList[key]; + var mainFunc = funclist["mainRef"], + mainCellValue = mainFunc.cellValue; + var formulaTxt = mainFunc.fv; + var mainR = mainCellValue.r, + mainC = mainCellValue.c; // let refRange = getcellrange(ref); + + for (var name_1 in funclist) { + if (name_1 == "mainRef") { + continue; + } + + var funcValue = funclist[name_1], + cellValue = funcValue.cellValue; + + if (cellValue == null) { + continue; + } + + var r = cellValue.r, + c = cellValue.c; + var func = formulaTxt; + var offsetRow = r - mainR, + offsetCol = c - mainC; + + if (offsetRow > 0) { + func = "=" + method_1.fromulaRef.functionCopy(func, "down", offsetRow); + } else if (offsetRow < 0) { + func = "=" + method_1.fromulaRef.functionCopy(func, "up", Math.abs(offsetRow)); + } + + if (offsetCol > 0) { + func = "=" + method_1.fromulaRef.functionCopy(func, "right", offsetCol); + } else if (offsetCol < 0) { + func = "=" + method_1.fromulaRef.functionCopy(func, "left", Math.abs(offsetCol)); + } // console.log(offsetRow, offsetCol, func); + + + cellValue.v.f = func; + } + } + } + + if (_this.calcChain == null) { + _this.calcChain = []; + } + + var formulaListExist = {}; + + for (var c = 0; c < _this.calcChainEles.length; c++) { + var calcChainEle = _this.calcChainEles[c], + attrList = calcChainEle.attributeList; + + if (attrList.i != sheetId) { + continue; + } + + var r = attrList.r, + i = attrList.i, + l = attrList.l, + s = attrList.s, + a = attrList.a, + t = attrList.t; + var range = method_1.getcellrange(r); + var chain = new LuckyBase_1.LuckysheetCalcChain(); + chain.r = range.row[0]; + chain.c = range.column[0]; + chain.index = _this.index; + + _this.calcChain.push(chain); + + formulaListExist["r" + r + "c" + c] = null; + } //There may be formulas that do not appear in calcChain + + + for (var key in cellOtherInfo.formulaList) { + if (!(key in formulaListExist)) { + var formulaListItem = cellOtherInfo.formulaList[key]; + var chain = new LuckyBase_1.LuckysheetCalcChain(); + chain.r = formulaListItem.r; + chain.c = formulaListItem.c; + chain.index = _this.index; + + _this.calcChain.push(chain); + } + } + + if (_this.mergeCells != null) { + for (var i = 0; i < _this.mergeCells.length; i++) { + var merge = _this.mergeCells[i], + attrList = merge.attributeList; + var ref = attrList.ref; + + if (ref == null) { + continue; + } + + var range = method_1.getcellrange(ref, _this.sheetList, sheetId); + var mergeValue = new LuckyBase_1.LuckySheetConfigMerge(); + mergeValue.r = range.row[0]; + mergeValue.c = range.column[0]; + mergeValue.rs = range.row[1] - range.row[0] + 1; + mergeValue.cs = range.column[1] - range.column[0] + 1; + + if (_this.config.merge == null) { + _this.config.merge = {}; + } + + _this.config.merge[range.row[0] + "_" + range.column[0]] = mergeValue; + } + } + + var drawingFile = allFileOption.drawingFile, + drawingRelsFile = allFileOption.drawingRelsFile; + + if (drawingFile != null && drawingRelsFile != null) { + var twoCellAnchors = 
_this.readXml.getElementsByTagName("xdr:twoCellAnchor", drawingFile); + + if (twoCellAnchors != null && twoCellAnchors.length > 0) { + for (var i = 0; i < twoCellAnchors.length; i++) { + var twoCellAnchor = twoCellAnchors[i]; + var editAs = method_1.getXmlAttibute(twoCellAnchor.attributeList, "editAs", "twoCell"); + var xdrFroms = twoCellAnchor.getInnerElements("xdr:from"), + xdrTos = twoCellAnchor.getInnerElements("xdr:to"); + var xdr_blipfills = twoCellAnchor.getInnerElements("a:blip"); + + if (xdrFroms != null && xdr_blipfills != null && xdrFroms.length > 0 && xdr_blipfills.length > 0) { + var xdrFrom = xdrFroms[0], + xdrTo = xdrTos[0], + xdr_blipfill = xdr_blipfills[0]; + var rembed = method_1.getXmlAttibute(xdr_blipfill.attributeList, "r:embed", null); + + var imageObject = _this.getBase64ByRid(rembed, drawingRelsFile); // let aoff = xdr_xfrm.getInnerElements("a:off"), aext = xdr_xfrm.getInnerElements("a:ext"); + // if(aoff!=null && aext!=null && aoff.length>0 && aext.length>0){ + // let aoffAttribute = aoff[0].attributeList, aextAttribute = aext[0].attributeList; + // let x = getXmlAttibute(aoffAttribute, "x", null); + // let y = getXmlAttibute(aoffAttribute, "y", null); + // let cx = getXmlAttibute(aextAttribute, "cx", null); + // let cy = getXmlAttibute(aextAttribute, "cy", null); + // if(x!=null && y!=null && cx!=null && cy!=null && imageObject !=null){ + // let x_n = getPxByEMUs(parseInt(x), "c"),y_n = getPxByEMUs(parseInt(y)); + // let cx_n = getPxByEMUs(parseInt(cx), "c"),cy_n = getPxByEMUs(parseInt(cy)); + + + var x_n = 0, + y_n = 0; + var cx_n = 0, + cy_n = 0; + imageObject.fromCol = _this.getXdrValue(xdrFrom.getInnerElements("xdr:col")); + imageObject.fromColOff = method_1.getPxByEMUs(_this.getXdrValue(xdrFrom.getInnerElements("xdr:colOff"))); + imageObject.fromRow = _this.getXdrValue(xdrFrom.getInnerElements("xdr:row")); + imageObject.fromRowOff = method_1.getPxByEMUs(_this.getXdrValue(xdrFrom.getInnerElements("xdr:rowOff"))); + imageObject.toCol = _this.getXdrValue(xdrTo.getInnerElements("xdr:col")); + imageObject.toColOff = method_1.getPxByEMUs(_this.getXdrValue(xdrTo.getInnerElements("xdr:colOff"))); + imageObject.toRow = _this.getXdrValue(xdrTo.getInnerElements("xdr:row")); + imageObject.toRowOff = method_1.getPxByEMUs(_this.getXdrValue(xdrTo.getInnerElements("xdr:rowOff"))); + imageObject.originWidth = cx_n; + imageObject.originHeight = cy_n; + + if (editAs == "absolute") { + imageObject.type = "3"; + } else if (editAs == "oneCell") { + imageObject.type = "2"; + } else { + imageObject.type = "1"; + } + + imageObject.isFixedPos = false; + imageObject.fixedLeft = 0; + imageObject.fixedTop = 0; + var imageBorder = { + color: "#000", + radius: 0, + style: "solid", + width: 0 + }; + imageObject.border = imageBorder; + var imageCrop = { + height: cy_n, + offsetLeft: 0, + offsetTop: 0, + width: cx_n + }; + imageObject.crop = imageCrop; + var imageDefault = { + height: cy_n, + left: x_n, + top: y_n, + width: cx_n + }; + imageObject["default"] = imageDefault; + + if (_this.images == null) { + _this.images = {}; + } + + _this.images[method_1.generateRandomIndex("image")] = imageObject; // } + // } + } + } + } + } + + return _this; + } + + LuckySheet.prototype.getXdrValue = function (ele) { + if (ele == null || ele.length == 0) { + return null; + } + + return parseInt(ele[0].value); + }; + + LuckySheet.prototype.getBase64ByRid = function (rid, drawingRelsFile) { + var Relationships = this.readXml.getElementsByTagName("Relationships/Relationship", drawingRelsFile); + + if 
(Relationships != null && Relationships.length > 0) { + for (var i = 0; i < Relationships.length; i++) { + var Relationship = Relationships[i]; + var attrList = Relationship.attributeList; + var Id = method_1.getXmlAttibute(attrList, "Id", null); + var src = method_1.getXmlAttibute(attrList, "Target", null); + + if (Id == rid) { + src = src.replace(/\.\.\//g, ""); + src = "xl/" + src; + var imgage = this.imageList.getImageByName(src); + return imgage; + } + } + } + + return null; + }; + /** + * @desc This will convert cols/col to luckysheet config of column'width + */ + + + LuckySheet.prototype.generateConfigColumnLenAndHidden = function () { + var cols = this.readXml.getElementsByTagName("cols/col", this.sheetFile); + + for (var i = 0; i < cols.length; i++) { + var col = cols[i], + attrList = col.attributeList; + var min = method_1.getXmlAttibute(attrList, "min", null); + var max = method_1.getXmlAttibute(attrList, "max", null); + var width = method_1.getXmlAttibute(attrList, "width", null); + var hidden = method_1.getXmlAttibute(attrList, "hidden", null); + var customWidth = method_1.getXmlAttibute(attrList, "customWidth", null); + + if (min == null || max == null) { + continue; + } + + var minNum = parseInt(min) - 1, + maxNum = parseInt(max) - 1, + widthNum = parseFloat(width); + + for (var m = minNum; m <= maxNum; m++) { + if (width != null) { + if (this.config.columnlen == null) { + this.config.columnlen = {}; + } + + this.config.columnlen[m] = method_1.getColumnWidthPixel(widthNum); + } + + if (hidden == "1") { + if (this.config.colhidden == null) { + this.config.colhidden = {}; + } + + this.config.colhidden[m] = 0; + + if (this.config.columnlen) { + delete this.config.columnlen[m]; + } + } + + if (customWidth != null) { + if (this.config.customWidth == null) { + this.config.customWidth = {}; + } + + this.config.customWidth[m] = 1; + } + } + } + }; + /** + * @desc This will convert cols/col to luckysheet config of column'width + */ + + + LuckySheet.prototype.generateConfigRowLenAndHiddenAddCell = function () { + var rows = this.readXml.getElementsByTagName("sheetData/row", this.sheetFile); + var cellOtherInfo = {}; + var formulaList = {}; + cellOtherInfo.formulaList = formulaList; + + for (var i = 0; i < rows.length; i++) { + var row = rows[i], + attrList = row.attributeList; + var rowNo = method_1.getXmlAttibute(attrList, "r", null); + var height = method_1.getXmlAttibute(attrList, "ht", null); + var hidden = method_1.getXmlAttibute(attrList, "hidden", null); + var customHeight = method_1.getXmlAttibute(attrList, "customHeight", null); + + if (rowNo == null) { + continue; + } + + var rowNoNum = parseInt(rowNo) - 1; + + if (height != null) { + var heightNum = parseFloat(height); + + if (this.config.rowlen == null) { + this.config.rowlen = {}; + } + + this.config.rowlen[rowNoNum] = method_1.getRowHeightPixel(heightNum); + } + + if (hidden == "1") { + if (this.config.rowhidden == null) { + this.config.rowhidden = {}; + } + + this.config.rowhidden[rowNoNum] = 0; + + if (this.config.rowlen) { + delete this.config.rowlen[rowNoNum]; + } + } + + if (customHeight != null) { + if (this.config.customHeight == null) { + this.config.customHeight = {}; + } + + this.config.customHeight[rowNoNum] = 1; + } + + if (this.isInitialCell) { + var cells = row.getInnerElements("c"); + + for (var key in cells) { + var cell = cells[key]; + var cellValue = new LuckyCell_1.LuckySheetCelldata(cell, this.styles, this.sharedStrings, this.mergeCells, this.sheetFile, this.readXml); + + if (cellValue._borderObject 
!= null) { + if (this.config.borderInfo == null) { + this.config.borderInfo = []; + } + + this.config.borderInfo.push(cellValue._borderObject); + delete cellValue._borderObject; + } // let borderId = cellValue._borderId; + // if(borderId!=null){ + // let borders = this.styles["borders"] as Element[]; + // if(this.config._borderInfo==null){ + // this.config._borderInfo = {}; + // } + // if( borderId in this.config._borderInfo){ + // this.config._borderInfo[borderId].cells.push(cellValue.r + "_" + cellValue.c); + // } + // else{ + // let border = borders[borderId]; + // let borderObject = new LuckySheetborderInfoCellForImp(); + // borderObject.rangeType = "cellGroup"; + // borderObject.cells = []; + // let borderCellValue = new LuckySheetborderInfoCellValue(); + // let lefts = border.getInnerElements("left"); + // let rights = border.getInnerElements("right"); + // let tops = border.getInnerElements("top"); + // let bottoms = border.getInnerElements("bottom"); + // let diagonals = border.getInnerElements("diagonal"); + // let left = this.getBorderInfo(lefts); + // let right = this.getBorderInfo(rights); + // let top = this.getBorderInfo(tops); + // let bottom = this.getBorderInfo(bottoms); + // let diagonal = this.getBorderInfo(diagonals); + // let isAdd = false; + // if(left!=null && left.color!=null){ + // borderCellValue.l = left; + // isAdd = true; + // } + // if(right!=null && right.color!=null){ + // borderCellValue.r = right; + // isAdd = true; + // } + // if(top!=null && top.color!=null){ + // borderCellValue.t = top; + // isAdd = true; + // } + // if(bottom!=null && bottom.color!=null){ + // borderCellValue.b = bottom; + // isAdd = true; + // } + // if(isAdd){ + // borderObject.value = borderCellValue; + // this.config._borderInfo[borderId] = borderObject; + // } + // } + // } + + + if (cellValue._formulaType == "shared") { + if (this.formulaRefList == null) { + this.formulaRefList = {}; + } + + if (this.formulaRefList[cellValue._formulaSi] == null) { + this.formulaRefList[cellValue._formulaSi] = {}; + } + + var fv = void 0; + + if (cellValue.v != null) { + fv = cellValue.v.f; + } + + var refValue = { + t: cellValue._formulaType, + ref: cellValue._fomulaRef, + si: cellValue._formulaSi, + fv: fv, + cellValue: cellValue + }; + + if (cellValue._fomulaRef != null) { + this.formulaRefList[cellValue._formulaSi]["mainRef"] = refValue; + } else { + this.formulaRefList[cellValue._formulaSi][cellValue.r + "_" + cellValue.c] = refValue; + } // console.log(refValue, this.formulaRefList); + + } //There may be formulas that do not appear in calcChain + + + if (cellValue.v != null && cellValue.v.f != null) { + var formulaCell = { + r: cellValue.r, + c: cellValue.c + }; + cellOtherInfo.formulaList["r" + cellValue.r + "c" + cellValue.c] = formulaCell; + } + + this.celldata.push(cellValue); + } + } + } + + return cellOtherInfo; + }; + + return LuckySheet; +}(LuckyBase_1.LuckySheetBase); + +exports.LuckySheet = LuckySheet; + +},{"../common/method":93,"./LuckyBase":85,"./LuckyCell":86,"./ReadXml":90}],90:[function(require,module,exports){ +"use strict"; + +var __extends = void 0 && (void 0).__extends || function () { + var _extendStatics = function extendStatics(d, b) { + _extendStatics = Object.setPrototypeOf || { + __proto__: [] + } instanceof Array && function (d, b) { + d.__proto__ = b; + } || function (d, b) { + for (var p in b) { + if (b.hasOwnProperty(p)) d[p] = b[p]; + } + }; + + return _extendStatics(d, b); + }; + + return function (d, b) { + _extendStatics(d, b); + + function __() { + 
this.constructor = d; + } + + d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __()); + }; +}(); + +Object.defineProperty(exports, "__esModule", { + value: true +}); +exports.getlineStringAttr = exports.getColor = exports.Element = exports.ReadXml = void 0; + +var constant_1 = require("../common/constant"); + +var method_1 = require("../common/method"); + +var xmloperation = +/** @class */ +function () { + function xmloperation() {} + /** + * @param tag Search xml tag name , div,title etc. + * @param file Xml string + * @return Xml element string + */ + + + xmloperation.prototype.getElementsByOneTag = function (tag, file) { + //<a:[^/>: ]+?>.*?</a:[^/>: ]+?> + var readTagReg; + + if (tag.indexOf("|") > -1) { + var tags = tag.split("|"), + tagsRegTxt = ""; + + for (var i = 0; i < tags.length; i++) { + var t = tags[i]; + tagsRegTxt += "|<" + t + " [^>]+?[^/]>[\\s\\S]*?</" + t + ">|<" + t + " [^>]+?/>|<" + t + ">[\\s\\S]*?</" + t + ">|<" + t + "/>"; + } + + tagsRegTxt = tagsRegTxt.substr(1, tagsRegTxt.length); + readTagReg = new RegExp(tagsRegTxt, "g"); + } else { + readTagReg = new RegExp("<" + tag + " [^>]+?[^/]>[\\s\\S]*?</" + tag + ">|<" + tag + " [^>]+?/>|<" + tag + ">[\\s\\S]*?</" + tag + ">|<" + tag + "/>", "g"); + } + + var ret = file.match(readTagReg); + + if (ret == null) { + return []; + } else { + return ret; + } + }; + + return xmloperation; +}(); + +var ReadXml = +/** @class */ +function (_super) { + __extends(ReadXml, _super); + + function ReadXml(files) { + var _this = _super.call(this) || this; + + _this.originFile = files; + return _this; + } + /** + * @param path Search xml tag group , div,title etc. + * @param fileName One of uploadfileList, uploadfileList is file group, {key:value} + * @return Xml element calss + */ + + + ReadXml.prototype.getElementsByTagName = function (path, fileName) { + var file = this.getFileByName(fileName); + var pathArr = path.split("/"), + ret; + + for (var key in pathArr) { + var path_1 = pathArr[key]; + + if (ret == undefined) { + ret = this.getElementsByOneTag(path_1, file); + } else { + if (ret instanceof Array) { + var items = []; + + for (var key_1 in ret) { + var item = ret[key_1]; + items = items.concat(this.getElementsByOneTag(path_1, item)); + } + + ret = items; + } else { + ret = this.getElementsByOneTag(path_1, ret); + } + } + } + + var elements = []; + + for (var i = 0; i < ret.length; i++) { + var ele = new Element(ret[i]); + elements.push(ele); + } + + return elements; + }; + /** + * @param name One of uploadfileList's name, search for file by this parameter + * @retrun Select a file from uploadfileList + */ + + + ReadXml.prototype.getFileByName = function (name) { + for (var fileKey in this.originFile) { + if (fileKey.indexOf(name) > -1) { + return this.originFile[fileKey]; + } + } + + return ""; + }; + + return ReadXml; +}(xmloperation); + +exports.ReadXml = ReadXml; + +var Element = +/** @class */ +function (_super) { + __extends(Element, _super); + + function Element(str) { + var _this = _super.call(this) || this; + + _this.elementString = str; + + _this.setValue(); + + var readAttrReg = new RegExp('[a-zA-Z0-9_:]*?=".*?"', "g"); + + var attrList = _this.container.match(readAttrReg); + + _this.attributeList = {}; + + if (attrList != null) { + for (var key in attrList) { + var attrFull = attrList[key]; // let al= attrFull.split("="); + + if (attrFull.length == 0) { + continue; + } + + var attrKey = attrFull.substr(0, attrFull.indexOf('=')); + var attrValue = attrFull.substr(attrFull.indexOf('=') + 
1); + + if (attrKey == null || attrValue == null || attrKey.length == 0 || attrValue.length == 0) { + continue; + } + + _this.attributeList[attrKey] = attrValue.substr(1, attrValue.length - 2); + } + } + + return _this; + } + /** + * @param name Get attribute by key in element + * @return Single attribute + */ + + + Element.prototype.get = function (name) { + return this.attributeList[name]; + }; + /** + * @param tag Get elements by tag in elementString + * @return Element group + */ + + + Element.prototype.getInnerElements = function (tag) { + var ret = this.getElementsByOneTag(tag, this.elementString); + var elements = []; + + for (var i = 0; i < ret.length; i++) { + var ele = new Element(ret[i]); + elements.push(ele); + } + + if (elements.length == 0) { + return null; + } + + return elements; + }; + /** + * @desc get xml dom value and container, <container>value</container> + */ + + + Element.prototype.setValue = function () { + var str = this.elementString; + + if (str.substr(str.length - 2, 2) == "/>") { + this.value = ""; + this.container = str; + } else { + var firstTag = this.getFirstTag(); + var firstTagReg = new RegExp("(<" + firstTag + " [^>]+?[^/]>)([\\s\\S]*?)</" + firstTag + ">|(<" + firstTag + ">)([\\s\\S]*?)</" + firstTag + ">", "g"); + var result = firstTagReg.exec(str); + + if (result != null) { + if (result[1] != null) { + this.container = result[1]; + this.value = result[2]; + } else { + this.container = result[3]; + this.value = result[4]; + } + } + } + }; + /** + * @desc get xml dom first tag, <a><b></b></a>, get a + */ + + + Element.prototype.getFirstTag = function () { + var str = this.elementString; + var firstTag = str.substr(0, str.indexOf(' ')); + + if (firstTag == "" || firstTag.indexOf(">") > -1) { + firstTag = str.substr(0, str.indexOf('>')); + } + + firstTag = firstTag.substr(1, firstTag.length); + return firstTag; + }; + + return Element; +}(xmloperation); + +exports.Element = Element; + +function combineIndexedColor(indexedColorsInner, indexedColors) { + var ret = {}; + + if (indexedColorsInner == null || indexedColorsInner.length == 0) { + return indexedColors; + } + + for (var key in indexedColors) { + var value = indexedColors[key], + kn = parseInt(key); + var inner = indexedColorsInner[kn]; + + if (inner == null) { + ret[key] = value; + } else { + var rgb = inner.attributeList.rgb; + ret[key] = rgb; + } + } + + return ret; +} //clrScheme:Element[] + + +function getColor(color, styles, type) { + if (type === void 0) { + type = "g"; + } + + var attrList = color.attributeList; + var clrScheme = styles["clrScheme"]; + var indexedColorsInner = styles["indexedColors"]; + var mruColorsInner = styles["mruColors"]; + var indexedColorsList = combineIndexedColor(indexedColorsInner, constant_1.indexedColors); + var indexed = attrList.indexed, + rgb = attrList.rgb, + theme = attrList.theme, + tint = attrList.tint; + var bg; + + if (indexed != null) { + var indexedNum = parseInt(indexed); + bg = indexedColorsList[indexedNum]; + + if (bg != null) { + bg = bg.substring(bg.length - 6, bg.length); + bg = "#" + bg; + } + } else if (rgb != null) { + rgb = rgb.substring(rgb.length - 6, rgb.length); + bg = "#" + rgb; + } else if (theme != null) { + var themeNum = parseInt(theme); + + if (themeNum == 0) { + themeNum = 1; + } else if (themeNum == 1) { + themeNum = 0; + } else if (themeNum == 2) { + themeNum = 3; + } else if (themeNum == 3) { + themeNum = 2; + } + + var clrSchemeElement = clrScheme[themeNum]; + + if (clrSchemeElement != null) { + var clrs = 
clrSchemeElement.getInnerElements("a:sysClr|a:srgbClr"); + + if (clrs != null) { + var clr = clrs[0]; + var clrAttrList = clr.attributeList; // console.log(clr.container, ); + + if (clr.container.indexOf("sysClr") > -1) { + // if(type=="g" && clrAttrList.val=="windowText"){ + // bg = null; + // } + // else if((type=="t" || type=="b") && clrAttrList.val=="window"){ + // bg = null; + // } + // else + if (clrAttrList.lastClr != null) { + bg = "#" + clrAttrList.lastClr; + } else if (clrAttrList.val != null) { + bg = "#" + clrAttrList.val; + } + } else if (clr.container.indexOf("srgbClr") > -1) { + // console.log(clrAttrList.val); + bg = "#" + clrAttrList.val; + } + } + } + } + + if (tint != null) { + var tintNum = parseFloat(tint); + + if (bg != null) { + bg = method_1.LightenDarkenColor(bg, tintNum); + } + } + + return bg; +} + +exports.getColor = getColor; +/** + * @dom xml attribute object + * @attr attribute name + * @d if attribute is null, return default value + * @return attribute value +*/ + +function getlineStringAttr(frpr, attr) { + var attrEle = frpr.getInnerElements(attr), + value; + + if (attrEle != null && attrEle.length > 0) { + if (attr == "b" || attr == "i" || attr == "strike") { + value = "1"; + } else if (attr == "u") { + var v = attrEle[0].attributeList.val; + + if (v == "double") { + value = "2"; + } else if (v == "singleAccounting") { + value = "3"; + } else if (v == "doubleAccounting") { + value = "4"; + } else { + value = "1"; + } + } else if (attr == "vertAlign") { + var v = attrEle[0].attributeList.val; + + if (v == "subscript") { + value = "1"; + } else if (v == "superscript") { + value = "2"; + } + } else { + value = attrEle[0].attributeList.val; + } + } + + return value; +} + +exports.getlineStringAttr = getlineStringAttr; + +},{"../common/constant":91,"../common/method":93}],91:[function(require,module,exports){ +"use strict"; + +Object.defineProperty(exports, "__esModule", { + value: true +}); +exports.fontFamilys = exports.borderTypes = exports.OEM_CHARSET = exports.indexedColors = exports.numFmtDefault = exports.BuiltInCellStyles = exports.ST_CellType = exports.workbookRels = exports.theme1File = exports.worksheetFilePath = exports.sharedStringsFile = exports.stylesFile = exports.calcChainFile = exports.workBookFile = exports.contentTypesFile = exports.appFile = exports.coreFile = exports.columeHeader_word_index = exports.columeHeader_word = void 0; +exports.columeHeader_word = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']; +exports.columeHeader_word_index = { + 'A': 0, + 'B': 1, + 'C': 2, + 'D': 3, + 'E': 4, + 'F': 5, + 'G': 6, + 'H': 7, + 'I': 8, + 'J': 9, + 'K': 10, + 'L': 11, + 'M': 12, + 'N': 13, + 'O': 14, + 'P': 15, + 'Q': 16, + 'R': 17, + 'S': 18, + 'T': 19, + 'U': 20, + 'V': 21, + 'W': 22, + 'X': 23, + 'Y': 24, + 'Z': 25 +}; +exports.coreFile = "docProps/core.xml"; +exports.appFile = "docProps/app.xml"; +exports.contentTypesFile = "[Content_Types].xml"; +exports.workBookFile = "xl/workbook.xml"; +exports.calcChainFile = "xl/calcChain.xml"; +exports.stylesFile = "xl/styles.xml"; +exports.sharedStringsFile = "xl/sharedStrings.xml"; +exports.worksheetFilePath = "xl/worksheets/"; +exports.theme1File = "xl/theme/theme1.xml"; +exports.workbookRels = "xl/_rels/workbook.xml.rels"; //Excel Built-In cell type + +exports.ST_CellType = { + "Boolean": "b", + "Date": "d", + "Error": "e", + "InlineString": "inlineStr", + "Number": "n", + "SharedString": "s", + "String": "str" +}; 
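+//Example (illustrative): a worksheet cell such as <c r="A1" t="s"><v>7</v></c> uses the
+//SharedString type above, so its <v> is an index into xl/sharedStrings.xml rather than the
+//literal text; when t is omitted SpreadsheetML defaults to the Number type ("n"), and
+//"inlineStr" is also the marker LuckyCell writes into ct.t for rich-text shared strings.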
//Excel Built-In cell style + +exports.BuiltInCellStyles = { + "0": "Normal" +}; +exports.numFmtDefault = { + "0": 'General', + "1": '0', + "2": '0.00', + "3": '#,##0', + "4": '#,##0.00', + "9": '0%', + "10": '0.00%', + "11": '0.00E+00', + "12": '# ?/?', + "13": '# ??/??', + "14": 'm/d/yy', + "15": 'd-mmm-yy', + "16": 'd-mmm', + "17": 'mmm-yy', + "18": 'h:mm AM/PM', + "19": 'h:mm:ss AM/PM', + "20": 'h:mm', + "21": 'h:mm:ss', + "22": 'm/d/yy h:mm', + "37": '#,##0 ;(#,##0)', + "38": '#,##0 ;[Red](#,##0)', + "39": '#,##0.00;(#,##0.00)', + "40": '#,##0.00;[Red](#,##0.00)', + "45": 'mm:ss', + "46": '[h]:mm:ss', + "47": 'mmss.0', + "48": '##0.0E+0', + "49": '@' +}; +exports.indexedColors = { + "0": '00000000', + "1": '00FFFFFF', + "2": '00FF0000', + "3": '0000FF00', + "4": '000000FF', + "5": '00FFFF00', + "6": '00FF00FF', + "7": '0000FFFF', + "8": '00000000', + "9": '00FFFFFF', + "10": '00FF0000', + "11": '0000FF00', + "12": '000000FF', + "13": '00FFFF00', + "14": '00FF00FF', + "15": '0000FFFF', + "16": '00800000', + "17": '00008000', + "18": '00000080', + "19": '00808000', + "20": '00800080', + "21": '00008080', + "22": '00C0C0C0', + "23": '00808080', + "24": '009999FF', + "25": '00993366', + "26": '00FFFFCC', + "27": '00CCFFFF', + "28": '00660066', + "29": '00FF8080', + "30": '000066CC', + "31": '00CCCCFF', + "32": '00000080', + "33": '00FF00FF', + "34": '00FFFF00', + "35": '0000FFFF', + "36": '00800080', + "37": '00800000', + "38": '00008080', + "39": '000000FF', + "40": '0000CCFF', + "41": '00CCFFFF', + "42": '00CCFFCC', + "43": '00FFFF99', + "44": '0099CCFF', + "45": '00FF99CC', + "46": '00CC99FF', + "47": '00FFCC99', + "48": '003366FF', + "49": '0033CCCC', + "50": '0099CC00', + "51": '00FFCC00', + "52": '00FF9900', + "53": '00FF6600', + "54": '00666699', + "55": '00969696', + "56": '00003366', + "57": '00339966', + "58": '00003300', + "59": '00333300', + "60": '00993300', + "61": '00993366', + "62": '00333399', + "63": '00333333', + "64": null, + "65": null +}; +exports.OEM_CHARSET = { + "0": "ANSI_CHARSET", + "1": "DEFAULT_CHARSET", + "2": "SYMBOL_CHARSET", + "77": "MAC_CHARSET", + "128": "SHIFTJIS_CHARSET", + "129": "HANGUL_CHARSET", + "130": "JOHAB_CHARSET", + "134": "GB2312_CHARSET", + "136": "CHINESEBIG5_CHARSET", + "161": "GREEK_CHARSET", + "162": "TURKISH_CHARSET", + "163": "VIETNAMESE_CHARSET", + "177": "HEBREW_CHARSET", + "178": "ARABIC_CHARSET", + "186": "BALTIC_CHARSET", + "204": "RUSSIAN_CHARSET", + "222": "THAI_CHARSET", + "238": "EASTEUROPE_CHARSET", + "255": "OEM_CHARSET" +}; +exports.borderTypes = { + "none": 0, + "thin": 1, + "hair": 2, + "dotted": 3, + "dashed": 4, + "dashDot": 5, + "dashDotDot": 6, + "double": 7, + "medium": 8, + "mediumDashed": 9, + "mediumDashDot": 10, + "mediumDashDotDot": 11, + "slantDashDot": 12, + "thick": 13 +}; +exports.fontFamilys = { + "0": "defualt", + "1": "Roman", + "2": "Swiss", + "3": "Modern", + "4": "Script", + "5": "Decorative" +}; + +},{}],92:[function(require,module,exports){ +"use strict"; + +Object.defineProperty(exports, "__esModule", { + value: true +}); +exports.ToContext2D = exports.FromEMF = exports.UDOC = void 0; +exports.UDOC = {}; +exports.UDOC.G = { + concat: function concat(p, r) { + for (var i = 0; i < r.cmds.length; i++) { + p.cmds.push(r.cmds[i]); + } + + for (var i = 0; i < r.crds.length; i++) { + p.crds.push(r.crds[i]); + } + }, + getBB: function getBB(ps) { + var x0 = 1e99, + y0 = 1e99, + x1 = -x0, + y1 = -y0; + + for (var i = 0; i < ps.length; i += 2) { + var x = ps[i], + y = ps[i + 1]; + if (x < x0) x0 = x;else if 
(x > x1) x1 = x; + if (y < y0) y0 = y;else if (y > y1) y1 = y; + } + + return [x0, y0, x1, y1]; + }, + rectToPath: function rectToPath(r) { + return { + cmds: ["M", "L", "L", "L", "Z"], + crds: [r[0], r[1], r[2], r[1], r[2], r[3], r[0], r[3]] + }; + }, + // a inside b + insideBox: function insideBox(a, b) { + return b[0] <= a[0] && b[1] <= a[1] && a[2] <= b[2] && a[3] <= b[3]; + }, + isBox: function isBox(p, bb) { + var sameCrd8 = function sameCrd8(pcrd, crds) { + for (var o = 0; o < 8; o += 2) { + var eq = true; + + for (var j = 0; j < 8; j++) { + if (Math.abs(crds[j] - pcrd[j + o & 7]) >= 2) { + eq = false; + break; + } + } + + if (eq) return true; + } + + return false; + }; + + if (p.cmds.length > 10) return false; + var cmds = p.cmds.join(""), + crds = p.crds; + var sameRect = false; + + if (cmds == "MLLLZ" && crds.length == 8 || cmds == "MLLLLZ" && crds.length == 10) { + if (crds.length == 10) crds = crds.slice(0, 8); + var x0 = bb[0], + y0 = bb[1], + x1 = bb[2], + y1 = bb[3]; + if (!sameRect) sameRect = sameCrd8(crds, [x0, y0, x1, y0, x1, y1, x0, y1]); + if (!sameRect) sameRect = sameCrd8(crds, [x0, y1, x1, y1, x1, y0, x0, y0]); + } + + return sameRect; + }, + boxArea: function boxArea(a) { + var w = a[2] - a[0], + h = a[3] - a[1]; + return w * h; + }, + newPath: function newPath(gst) { + gst.pth = { + cmds: [], + crds: [] + }; + }, + moveTo: function moveTo(gst, x, y) { + var p = exports.UDOC.M.multPoint(gst.ctm, [x, y]); //if(gst.cpos[0]==p[0] && gst.cpos[1]==p[1]) return; + + gst.pth.cmds.push("M"); + gst.pth.crds.push(p[0], p[1]); + gst.cpos = p; + }, + lineTo: function lineTo(gst, x, y) { + var p = exports.UDOC.M.multPoint(gst.ctm, [x, y]); + if (gst.cpos[0] == p[0] && gst.cpos[1] == p[1]) return; + gst.pth.cmds.push("L"); + gst.pth.crds.push(p[0], p[1]); + gst.cpos = p; + }, + curveTo: function curveTo(gst, x1, y1, x2, y2, x3, y3) { + var p; + p = exports.UDOC.M.multPoint(gst.ctm, [x1, y1]); + x1 = p[0]; + y1 = p[1]; + p = exports.UDOC.M.multPoint(gst.ctm, [x2, y2]); + x2 = p[0]; + y2 = p[1]; + p = exports.UDOC.M.multPoint(gst.ctm, [x3, y3]); + x3 = p[0]; + y3 = p[1]; + gst.cpos = p; + gst.pth.cmds.push("C"); + gst.pth.crds.push(x1, y1, x2, y2, x3, y3); + }, + closePath: function closePath(gst) { + gst.pth.cmds.push("Z"); + }, + arc: function arc(gst, x, y, r, a0, a1, neg) { + // circle from a0 counter-clock-wise to a1 + if (neg) while (a1 > a0) { + a1 -= 2 * Math.PI; + } else while (a1 < a0) { + a1 += 2 * Math.PI; + } + var th = (a1 - a0) / 4; + var x0 = Math.cos(th / 2), + y0 = -Math.sin(th / 2); + var x1 = (4 - x0) / 3, + y1 = y0 == 0 ? y0 : (1 - x0) * (3 - x0) / (3 * y0); + var x2 = x1, + y2 = -y1; + var x3 = x0, + y3 = -y0; + var p0 = [x0, y0], + p1 = [x1, y1], + p2 = [x2, y2], + p3 = [x3, y3]; + var pth = { + cmds: [gst.pth.cmds.length == 0 ? 
"M" : "L", "C", "C", "C", "C"], + crds: [x0, y0, x1, y1, x2, y2, x3, y3] + }; + var rot = [1, 0, 0, 1, 0, 0]; + exports.UDOC.M.rotate(rot, -th); + + for (var i = 0; i < 3; i++) { + p1 = exports.UDOC.M.multPoint(rot, p1); + p2 = exports.UDOC.M.multPoint(rot, p2); + p3 = exports.UDOC.M.multPoint(rot, p3); + pth.crds.push(p1[0], p1[1], p2[0], p2[1], p3[0], p3[1]); + } + + var sc = [r, 0, 0, r, x, y]; + exports.UDOC.M.rotate(rot, -a0 + th / 2); + exports.UDOC.M.concat(rot, sc); + exports.UDOC.M.multArray(rot, pth.crds); + exports.UDOC.M.multArray(gst.ctm, pth.crds); + exports.UDOC.G.concat(gst.pth, pth); + var y = pth.crds.pop(); + x = pth.crds.pop(); + gst.cpos = [x, y]; + }, + toPoly: function toPoly(p) { + if (p.cmds[0] != "M" || p.cmds[p.cmds.length - 1] != "Z") return null; + + for (var i = 1; i < p.cmds.length - 1; i++) { + if (p.cmds[i] != "L") return null; + } + + var out = [], + cl = p.crds.length; + if (p.crds[0] == p.crds[cl - 2] && p.crds[1] == p.crds[cl - 1]) cl -= 2; + + for (var i = 0; i < cl; i += 2) { + out.push([p.crds[i], p.crds[i + 1]]); + } + + if (exports.UDOC.G.polyArea(p.crds) < 0) out.reverse(); + return out; + }, + fromPoly: function fromPoly(p) { + var o = { + cmds: [], + crds: [] + }; + + for (var i = 0; i < p.length; i++) { + o.crds.push(p[i][0], p[i][1]); + o.cmds.push(i == 0 ? "M" : "L"); + } + + o.cmds.push("Z"); + return o; + }, + polyArea: function polyArea(p) { + if (p.length < 6) return 0; + var l = p.length - 2; + var sum = (p[0] - p[l]) * (p[l + 1] + p[1]); + + for (var i = 0; i < l; i += 2) { + sum += (p[i + 2] - p[i]) * (p[i + 1] + p[i + 3]); + } + + return -sum * 0.5; + }, + polyClip: function polyClip(p0, p1) { + var cp1, cp2, s, e; + + var inside = function inside(p) { + return (cp2[0] - cp1[0]) * (p[1] - cp1[1]) > (cp2[1] - cp1[1]) * (p[0] - cp1[0]); + }; + + var isc = function isc() { + var dc = [cp1[0] - cp2[0], cp1[1] - cp2[1]], + dp = [s[0] - e[0], s[1] - e[1]], + n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0], + n2 = s[0] * e[1] - s[1] * e[0], + n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0]); + return [(n1 * dp[0] - n2 * dc[0]) * n3, (n1 * dp[1] - n2 * dc[1]) * n3]; + }; + + var out = p0; + cp1 = p1[p1.length - 1]; + + for (var j in p1) { + var cp2 = p1[j]; + var inp = out; + out = []; + s = inp[inp.length - 1]; //last on the input list + + for (var i in inp) { + var e = inp[i]; + + if (inside(e)) { + if (!inside(s)) { + out.push(isc()); + } + + out.push(e); + } else if (inside(s)) { + out.push(isc()); + } + + s = e; + } + + cp1 = cp2; + } + + return out; + } +}; +exports.UDOC.M = { + getScale: function getScale(m) { + return Math.sqrt(Math.abs(m[0] * m[3] - m[1] * m[2])); + }, + translate: function translate(m, x, y) { + exports.UDOC.M.concat(m, [1, 0, 0, 1, x, y]); + }, + rotate: function rotate(m, a) { + exports.UDOC.M.concat(m, [Math.cos(a), -Math.sin(a), Math.sin(a), Math.cos(a), 0, 0]); + }, + scale: function scale(m, x, y) { + exports.UDOC.M.concat(m, [x, 0, 0, y, 0, 0]); + }, + concat: function concat(m, w) { + var a = m[0], + b = m[1], + c = m[2], + d = m[3], + tx = m[4], + ty = m[5]; + m[0] = a * w[0] + b * w[2]; + m[1] = a * w[1] + b * w[3]; + m[2] = c * w[0] + d * w[2]; + m[3] = c * w[1] + d * w[3]; + m[4] = tx * w[0] + ty * w[2] + w[4]; + m[5] = tx * w[1] + ty * w[3] + w[5]; + }, + invert: function invert(m) { + var a = m[0], + b = m[1], + c = m[2], + d = m[3], + tx = m[4], + ty = m[5], + adbc = a * d - b * c; + m[0] = d / adbc; + m[1] = -b / adbc; + m[2] = -c / adbc; + m[3] = a / adbc; + m[4] = (c * ty - d * tx) / adbc; + m[5] = (b * tx - a * 
ty) / adbc; + }, + multPoint: function multPoint(m, p) { + var x = p[0], + y = p[1]; + return [x * m[0] + y * m[2] + m[4], x * m[1] + y * m[3] + m[5]]; + }, + multArray: function multArray(m, a) { + for (var i = 0; i < a.length; i += 2) { + var x = a[i], + y = a[i + 1]; + a[i] = x * m[0] + y * m[2] + m[4]; + a[i + 1] = x * m[1] + y * m[3] + m[5]; + } + } +}; +exports.UDOC.C = { + srgbGamma: function srgbGamma(x) { + return x < 0.0031308 ? 12.92 * x : 1.055 * Math.pow(x, 1.0 / 2.4) - 0.055; + }, + cmykToRgb: function cmykToRgb(clr) { + var c = clr[0], + m = clr[1], + y = clr[2], + k = clr[3]; // return [1-Math.min(1,c+k), 1-Math.min(1, m+k), 1-Math.min(1,y+k)]; + + var r = 255 + c * (-4.387332384609988 * c + 54.48615194189176 * m + 18.82290502165302 * y + 212.25662451639585 * k + -285.2331026137004) + m * (1.7149763477362134 * m - 5.6096736904047315 * y + -17.873870861415444 * k - 5.497006427196366) + y * (-2.5217340131683033 * y - 21.248923337353073 * k + 17.5119270841813) + k * (-21.86122147463605 * k - 189.48180835922747); + var g = 255 + c * (8.841041422036149 * c + 60.118027045597366 * m + 6.871425592049007 * y + 31.159100130055922 * k + -79.2970844816548) + m * (-15.310361306967817 * m + 17.575251261109482 * y + 131.35250912493976 * k - 190.9453302588951) + y * (4.444339102852739 * y + 9.8632861493405 * k - 24.86741582555878) + k * (-20.737325471181034 * k - 187.80453709719578); + var b = 255 + c * (0.8842522430003296 * c + 8.078677503112928 * m + 30.89978309703729 * y - 0.23883238689178934 * k + -14.183576799673286) + m * (10.49593273432072 * m + 63.02378494754052 * y + 50.606957656360734 * k - 112.23884253719248) + y * (0.03296041114873217 * y + 115.60384449646641 * k + -193.58209356861505) + k * (-22.33816807309886 * k - 180.12613974708367); + return [Math.max(0, Math.min(1, r / 255)), Math.max(0, Math.min(1, g / 255)), Math.max(0, Math.min(1, b / 255))]; //var iK = 1-c[3]; + //return [(1-c[0])*iK, (1-c[1])*iK, (1-c[2])*iK]; + }, + labToRgb: function labToRgb(lab) { + var k = 903.3, + e = 0.008856, + L = lab[0], + a = lab[1], + b = lab[2]; + var fy = (L + 16) / 116, + fy3 = fy * fy * fy; + var fz = fy - b / 200, + fz3 = fz * fz * fz; + var fx = a / 500 + fy, + fx3 = fx * fx * fx; + var zr = fz3 > e ? fz3 : (116 * fz - 16) / k; + var yr = fy3 > e ? fy3 : (116 * fy - 16) / k; + var xr = fx3 > e ? fx3 : (116 * fx - 16) / k; + var X = xr * 96.72, + Y = yr * 100, + Z = zr * 81.427, + xyz = [X / 100, Y / 100, Z / 100]; + var x2s = [3.1338561, -1.6168667, -0.4906146, -0.9787684, 1.9161415, 0.0334540, 0.0719453, -0.2289914, 1.4052427]; + var rgb = [x2s[0] * xyz[0] + x2s[1] * xyz[1] + x2s[2] * xyz[2], x2s[3] * xyz[0] + x2s[4] * xyz[1] + x2s[5] * xyz[2], x2s[6] * xyz[0] + x2s[7] * xyz[1] + x2s[8] * xyz[2]]; + + for (var i = 0; i < 3; i++) { + rgb[i] = Math.max(0, Math.min(1, exports.UDOC.C.srgbGamma(rgb[i]))); + } + + return rgb; + } +}; + +exports.UDOC.getState = function (crds) { + return { + font: exports.UDOC.getFont(), + dd: { + flat: 1 + }, + space: "/DeviceGray", + // fill + ca: 1, + colr: [0, 0, 0], + sspace: "/DeviceGray", + // stroke + CA: 1, + COLR: [0, 0, 0], + bmode: "/Normal", + SA: false, + OPM: 0, + AIS: false, + OP: false, + op: false, + SMask: "/None", + lwidth: 1, + lcap: 0, + ljoin: 0, + mlimit: 10, + SM: 0.1, + doff: 0, + dash: [], + ctm: [1, 0, 0, 1, 0, 0], + cpos: [0, 0], + pth: { + cmds: [], + crds: [] + }, + cpth: crds ? 
exports.UDOC.G.rectToPath(crds) : null // clipping path + + }; +}; + +exports.UDOC.getFont = function () { + return { + Tc: 0, + Tw: 0, + Th: 100, + Tl: 0, + Tf: "Helvetica-Bold", + Tfs: 1, + Tmode: 0, + Trise: 0, + Tk: 0, + Tal: 0, + Tun: 0, + Tm: [1, 0, 0, 1, 0, 0], + Tlm: [1, 0, 0, 1, 0, 0], + Trm: [1, 0, 0, 1, 0, 0] + }; +}; + +exports.FromEMF = function () {}; + +exports.FromEMF.Parse = function (buff, genv) { + buff = new Uint8Array(buff); + var off = 0; //console.log(buff.slice(0,32)); + + var prms = { + fill: false, + strk: false, + bb: [0, 0, 1, 1], + wbb: [0, 0, 1, 1], + fnt: { + nam: "Arial", + hgh: 25, + und: false, + orn: 0 + }, + tclr: [0, 0, 0], + talg: 0 + }, + gst, + tab = [], + sts = []; + var rI = exports.FromEMF.B.readShort, + rU = exports.FromEMF.B.readUshort, + rI32 = exports.FromEMF.B.readInt, + rU32 = exports.FromEMF.B.readUint, + rF32 = exports.FromEMF.B.readFloat; + var opn = 0; + + while (true) { + var fnc = rU32(buff, off); + off += 4; + var fnm = exports.FromEMF.K[fnc]; + var siz = rU32(buff, off); + off += 4; //if(gst && isNaN(gst.ctm[0])) throw "e"; + //console.log(fnc,fnm,siz); + + var loff = off; //if(opn++==253) break; + + var obj = null, + oid = 0; //console.log(fnm, siz); + + if (false) {} else if (fnm == "EOF") { + break; + } else if (fnm == "HEADER") { + prms.bb = exports.FromEMF._readBox(buff, loff); + loff += 16; //console.log(fnm, prms.bb); + + genv.StartPage(prms.bb[0], prms.bb[1], prms.bb[2], prms.bb[3]); + gst = exports.UDOC.getState(prms.bb); + } else if (fnm == "SAVEDC") sts.push(JSON.stringify(gst), JSON.stringify(prms));else if (fnm == "RESTOREDC") { + var dif = rI32(buff, loff); + loff += 4; + + while (dif < -1) { + sts.pop(); + sts.pop(); + } + + prms = JSON.parse(sts.pop()); + gst = JSON.parse(sts.pop()); + } else if (fnm == "SELECTCLIPPATH") { + gst.cpth = JSON.parse(JSON.stringify(gst.pth)); + } else if (["SETMAPMODE", "SETPOLYFILLMODE", "SETBKMODE" + /*,"SETVIEWPORTEXTEX"*/ + , "SETICMMODE", "SETROP2", "EXTSELECTCLIPRGN"].indexOf(fnm) != -1) {} //else if(fnm=="INTERSECTCLIPRECT") { var r=prms.crct=FromEMF._readBox(buff, loff); /*var y0=r[1],y1=r[3]; if(y0>y1){r[1]=y1; r[3]=y0;}*/ console.log(prms.crct); } + else if (fnm == "SETMITERLIMIT") gst.mlimit = rU32(buff, loff);else if (fnm == "SETTEXTCOLOR") prms.tclr = [buff[loff] / 255, buff[loff + 1] / 255, buff[loff + 2] / 255];else if (fnm == "SETTEXTALIGN") prms.talg = rU32(buff, loff);else if (fnm == "SETVIEWPORTEXTEX" || fnm == "SETVIEWPORTORGEX") { + if (prms.vbb == null) prms.vbb = []; + var coff = fnm == "SETVIEWPORTORGEX" ? 0 : 2; + prms.vbb[coff] = rI32(buff, loff); + loff += 4; + prms.vbb[coff + 1] = rI32(buff, loff); + loff += 4; //console.log(prms.vbb); + + if (fnm == "SETVIEWPORTEXTEX") exports.FromEMF._updateCtm(prms, gst); + } else if (fnm == "SETWINDOWEXTEX" || fnm == "SETWINDOWORGEX") { + var coff = fnm == "SETWINDOWORGEX" ? 
0 : 2; + prms.wbb[coff] = rI32(buff, loff); + loff += 4; + prms.wbb[coff + 1] = rI32(buff, loff); + loff += 4; + if (fnm == "SETWINDOWEXTEX") exports.FromEMF._updateCtm(prms, gst); + } //else if(fnm=="SETMETARGN") {} + else if (fnm == "COMMENT") { + var ds = rU32(buff, loff); + loff += 4; + } else if (fnm == "SELECTOBJECT") { + var ind = rU32(buff, loff); + loff += 4; //console.log(ind.toString(16), tab, tab[ind]); + + if (ind == 0x80000000) { + prms.fill = true; + gst.colr = [1, 1, 1]; + } // white brush + else if (ind == 0x80000005) { + prms.fill = false; + } // null brush + else if (ind == 0x80000007) { + prms.strk = true; + prms.lwidth = 1; + gst.COLR = [0, 0, 0]; + } // black pen + else if (ind == 0x80000008) { + prms.strk = false; + } // null pen + else if (ind == 0x8000000d) {} // system font + else if (ind == 0x8000000e) {} // device default font + else { + var co = tab[ind]; //console.log(ind, co); + + if (co.t == "b") { + prms.fill = co.stl != 1; + + if (co.stl == 0) {} else if (co.stl == 1) {} else throw co.stl + " e"; + + gst.colr = co.clr; + } else if (co.t == "p") { + prms.strk = co.stl != 5; + gst.lwidth = co.wid; + gst.COLR = co.clr; + } else if (co.t == "f") { + prms.fnt = co; + gst.font.Tf = co.nam; + gst.font.Tfs = Math.abs(co.hgh); + gst.font.Tun = co.und; + } else throw "e"; + } + } else if (fnm == "DELETEOBJECT") { + var ind = rU32(buff, loff); + loff += 4; + if (tab[ind] != null) tab[ind] = null;else throw "e"; + } else if (fnm == "CREATEBRUSHINDIRECT") { + oid = rU32(buff, loff); + loff += 4; + obj = { + t: "b" + }; + obj.stl = rU32(buff, loff); + loff += 4; + obj.clr = [buff[loff] / 255, buff[loff + 1] / 255, buff[loff + 2] / 255]; + loff += 4; + obj.htc = rU32(buff, loff); + loff += 4; //console.log(oid, obj); + } else if (fnm == "CREATEPEN" || fnm == "EXTCREATEPEN") { + oid = rU32(buff, loff); + loff += 4; + obj = { + t: "p" + }; + + if (fnm == "EXTCREATEPEN") { + loff += 16; + obj.stl = rU32(buff, loff); + loff += 4; + obj.wid = rU32(buff, loff); + loff += 4; //obj.stl = rU32(buff, loff); + + loff += 4; + } else { + obj.stl = rU32(buff, loff); + loff += 4; + obj.wid = rU32(buff, loff); + loff += 4; + loff += 4; + } + + obj.clr = [buff[loff] / 255, buff[loff + 1] / 255, buff[loff + 2] / 255]; + loff += 4; + } else if (fnm == "EXTCREATEFONTINDIRECTW") { + oid = rU32(buff, loff); + loff += 4; + obj = { + t: "f", + nam: "" + }; + obj.hgh = rI32(buff, loff); + loff += 4; + loff += 4 * 2; + obj.orn = rI32(buff, loff) / 10; + loff += 4; + var wgh = rU32(buff, loff); + loff += 4; //console.log(fnm, obj.orn, wgh); + //console.log(rU32(buff,loff), rU32(buff,loff+4), buff.slice(loff,loff+8)); + + obj.und = buff[loff + 1]; + obj.stk = buff[loff + 2]; + loff += 4 * 2; + + while (rU(buff, loff) != 0) { + obj.nam += String.fromCharCode(rU(buff, loff)); + loff += 2; + } + + if (wgh > 500) obj.nam += "-Bold"; //console.log(wgh, obj.nam); + } else if (fnm == "EXTTEXTOUTW") { + //console.log(buff.slice(loff-8, loff-8+siz)); + loff += 16; + var mod = rU32(buff, loff); + loff += 4; //console.log(mod); + + var scx = rF32(buff, loff); + loff += 4; + var scy = rF32(buff, loff); + loff += 4; + var rfx = rI32(buff, loff); + loff += 4; + var rfy = rI32(buff, loff); + loff += 4; //console.log(mod, scx, scy,rfx,rfy); + + gst.font.Tm = [1, 0, 0, -1, 0, 0]; + exports.UDOC.M.rotate(gst.font.Tm, prms.fnt.orn * Math.PI / 180); + exports.UDOC.M.translate(gst.font.Tm, rfx, rfy); + var alg = prms.talg; //console.log(alg.toString(2)); + + if ((alg & 6) == 6) gst.font.Tal = 2;else if ((alg & 7) == 0) 
gst.font.Tal = 0;else throw alg + " e"; + + if ((alg & 24) == 24) {} // baseline + else if ((alg & 24) == 0) exports.UDOC.M.translate(gst.font.Tm, 0, gst.font.Tfs);else throw "e"; + + var crs = rU32(buff, loff); + loff += 4; + var ofs = rU32(buff, loff); + loff += 4; + var ops = rU32(buff, loff); + loff += 4; //if(ops!=0) throw "e"; + //console.log(ofs,ops,crs); + + loff += 16; + var ofD = rU32(buff, loff); + loff += 4; //console.log(ops, ofD, loff, ofs+off-8); + + ofs += off - 8; //console.log(crs, ops); + + var str = ""; + + for (var i = 0; i < crs; i++) { + var cc = rU(buff, ofs + i * 2); + str += String.fromCharCode(cc); + } + + ; + var oclr = gst.colr; + gst.colr = prms.tclr; //console.log(str, gst.colr, gst.font.Tm); + //var otfs = gst.font.Tfs; gst.font.Tfs *= 1/gst.ctm[0]; + + genv.PutText(gst, str, str.length * gst.font.Tfs * 0.5); + gst.colr = oclr; //gst.font.Tfs = otfs; + //console.log(rfx, rfy, scx, ops, rcX, rcY, rcW, rcH, offDx, str); + } else if (fnm == "BEGINPATH") { + exports.UDOC.G.newPath(gst); + } else if (fnm == "ENDPATH") {} else if (fnm == "CLOSEFIGURE") exports.UDOC.G.closePath(gst);else if (fnm == "MOVETOEX") { + exports.UDOC.G.moveTo(gst, rI32(buff, loff), rI32(buff, loff + 4)); + } else if (fnm == "LINETO") { + if (gst.pth.cmds.length == 0) { + var im = gst.ctm.slice(0); + exports.UDOC.M.invert(im); + var p = exports.UDOC.M.multPoint(im, gst.cpos); + exports.UDOC.G.moveTo(gst, p[0], p[1]); + } + + exports.UDOC.G.lineTo(gst, rI32(buff, loff), rI32(buff, loff + 4)); + } else if (fnm == "POLYGON" || fnm == "POLYGON16" || fnm == "POLYLINE" || fnm == "POLYLINE16" || fnm == "POLYLINETO" || fnm == "POLYLINETO16") { + loff += 16; + var ndf = fnm.startsWith("POLYGON"), + isTo = fnm.indexOf("TO") != -1; + var cnt = rU32(buff, loff); + loff += 4; + if (!isTo) exports.UDOC.G.newPath(gst); + loff = exports.FromEMF._drawPoly(buff, loff, cnt, gst, fnm.endsWith("16") ? 2 : 4, ndf, isTo); + if (!isTo) exports.FromEMF._draw(genv, gst, prms, ndf); //console.log(prms, gst.lwidth); + //console.log(JSON.parse(JSON.stringify(gst.pth))); + } else if (fnm == "POLYPOLYGON16") { + loff += 16; + var ndf = fnm.startsWith("POLYPOLYGON"), + isTo = fnm.indexOf("TO") != -1; + var nop = rU32(buff, loff); + loff += 4; + loff += 4; + var pi = loff; + loff += nop * 4; + if (!isTo) exports.UDOC.G.newPath(gst); + + for (var i = 0; i < nop; i++) { + var ppp = rU(buff, pi + i * 4); + loff = exports.FromEMF._drawPoly(buff, loff, ppp, gst, fnm.endsWith("16") ? 2 : 4, ndf, isTo); + } + + if (!isTo) exports.FromEMF._draw(genv, gst, prms, ndf); + } else if (fnm == "POLYBEZIER" || fnm == "POLYBEZIER16" || fnm == "POLYBEZIERTO" || fnm == "POLYBEZIERTO16") { + loff += 16; + var is16 = fnm.endsWith("16"), + rC = is16 ? rI : rI32, + nl = is16 ? 
2 : 4; + var cnt = rU32(buff, loff); + loff += 4; + + if (fnm.indexOf("TO") == -1) { + exports.UDOC.G.moveTo(gst, rC(buff, loff), rC(buff, loff + nl)); + loff += 2 * nl; + cnt--; + } + + while (cnt > 0) { + exports.UDOC.G.curveTo(gst, rC(buff, loff), rC(buff, loff + nl), rC(buff, loff + 2 * nl), rC(buff, loff + 3 * nl), rC(buff, loff + 4 * nl), rC(buff, loff + 5 * nl)); + loff += 6 * nl; + cnt -= 3; + } //console.log(JSON.parse(JSON.stringify(gst.pth))); + + } else if (fnm == "RECTANGLE" || fnm == "ELLIPSE") { + exports.UDOC.G.newPath(gst); + + var bx = exports.FromEMF._readBox(buff, loff); + + if (fnm == "RECTANGLE") { + exports.UDOC.G.moveTo(gst, bx[0], bx[1]); + exports.UDOC.G.lineTo(gst, bx[2], bx[1]); + exports.UDOC.G.lineTo(gst, bx[2], bx[3]); + exports.UDOC.G.lineTo(gst, bx[0], bx[3]); + } else { + var x = (bx[0] + bx[2]) / 2, + y = (bx[1] + bx[3]) / 2; + exports.UDOC.G.arc(gst, x, y, (bx[2] - bx[0]) / 2, 0, 2 * Math.PI, false); + } + + exports.UDOC.G.closePath(gst); + + exports.FromEMF._draw(genv, gst, prms, true); //console.log(prms, gst.lwidth); + + } else if (fnm == "FILLPATH") genv.Fill(gst, false);else if (fnm == "STROKEPATH") genv.Stroke(gst);else if (fnm == "STROKEANDFILLPATH") { + genv.Fill(gst, false); + genv.Stroke(gst); + } else if (fnm == "SETWORLDTRANSFORM" || fnm == "MODIFYWORLDTRANSFORM") { + var mat = []; + + for (var i = 0; i < 6; i++) { + mat.push(rF32(buff, loff + i * 4)); + } + + loff += 24; //console.log(fnm, gst.ctm.slice(0), mat); + + if (fnm == "SETWORLDTRANSFORM") gst.ctm = mat;else { + var mod = rU32(buff, loff); + loff += 4; + + if (mod == 2) { + var om = gst.ctm; + gst.ctm = mat; + exports.UDOC.M.concat(gst.ctm, om); + } else throw "e"; + } + } else if (fnm == "SETSTRETCHBLTMODE") { + var sm = rU32(buff, loff); + loff += 4; + } else if (fnm == "STRETCHDIBITS") { + var bx = exports.FromEMF._readBox(buff, loff); + + loff += 16; + var xD = rI32(buff, loff); + loff += 4; + var yD = rI32(buff, loff); + loff += 4; + var xS = rI32(buff, loff); + loff += 4; + var yS = rI32(buff, loff); + loff += 4; + var wS = rI32(buff, loff); + loff += 4; + var hS = rI32(buff, loff); + loff += 4; + var ofH = rU32(buff, loff) + off - 8; + loff += 4; + var szH = rU32(buff, loff); + loff += 4; + var ofB = rU32(buff, loff) + off - 8; + loff += 4; + var szB = rU32(buff, loff); + loff += 4; + var usg = rU32(buff, loff); + loff += 4; + if (usg != 0) throw "e"; + var bop = rU32(buff, loff); + loff += 4; + var wD = rI32(buff, loff); + loff += 4; + var hD = rI32(buff, loff); + loff += 4; //console.log(bop, wD, hD); + //console.log(ofH, szH, ofB, szB, ofH+40); + //console.log(bx, xD,yD,wD,hD); + //console.log(xS,yS,wS,hS); + //console.log(ofH,szH,ofB,szB,usg,bop); + + var hl = rU32(buff, ofH); + ofH += 4; + var w = rU32(buff, ofH); + ofH += 4; + var h = rU32(buff, ofH); + ofH += 4; + if (w != wS || h != hS) throw "e"; + var ps = rU(buff, ofH); + ofH += 2; + var bc = rU(buff, ofH); + ofH += 2; + if (bc != 8 && bc != 24 && bc != 32) throw bc + " e"; + var cpr = rU32(buff, ofH); + ofH += 4; + if (cpr != 0) throw cpr + " e"; + var sz = rU32(buff, ofH); + ofH += 4; + var xpm = rU32(buff, ofH); + ofH += 4; + var ypm = rU32(buff, ofH); + ofH += 4; + var cu = rU32(buff, ofH); + ofH += 4; + var ci = rU32(buff, ofH); + ofH += 4; //console.log(hl, w, h, ps, bc, cpr, sz, xpm, ypm, cu, ci); + //console.log(hl,w,h,",",xS,yS,wS,hS,",",xD,yD,wD,hD,",",xpm,ypm); + + var rl = Math.floor((w * ps * bc + 31 & ~31) / 8); + var img = new Uint8Array(w * h * 4); + + if (bc == 8) { + for (var y = 0; y < h; y++) { 
+ for (var x = 0; x < w; x++) { + var qi = y * w + x << 2, + ind = buff[ofB + (h - 1 - y) * rl + x] << 2; + img[qi] = buff[ofH + ind + 2]; + img[qi + 1] = buff[ofH + ind + 1]; + img[qi + 2] = buff[ofH + ind + 0]; + img[qi + 3] = 255; + } + } + } + + if (bc == 24) { + for (var y = 0; y < h; y++) { + for (var x = 0; x < w; x++) { + var qi = y * w + x << 2, + ti = ofB + (h - 1 - y) * rl + x * 3; + img[qi] = buff[ti + 2]; + img[qi + 1] = buff[ti + 1]; + img[qi + 2] = buff[ti + 0]; + img[qi + 3] = 255; + } + } + } + + if (bc == 32) { + for (var y = 0; y < h; y++) { + for (var x = 0; x < w; x++) { + var qi = y * w + x << 2, + ti = ofB + (h - 1 - y) * rl + x * 4; + img[qi] = buff[ti + 2]; + img[qi + 1] = buff[ti + 1]; + img[qi + 2] = buff[ti + 0]; + img[qi + 3] = buff[ti + 3]; + } + } + } + + var ctm = gst.ctm.slice(0); + gst.ctm = [1, 0, 0, 1, 0, 0]; + exports.UDOC.M.scale(gst.ctm, wD, -hD); + exports.UDOC.M.translate(gst.ctm, xD, yD + hD); + exports.UDOC.M.concat(gst.ctm, ctm); + genv.PutImage(gst, img, w, h); + gst.ctm = ctm; + } else { + console.log(fnm, siz); + } + + if (obj != null) tab[oid] = obj; + off += siz - 8; + } //genv.Stroke(gst); + + + genv.ShowPage(); + genv.Done(); +}; + +exports.FromEMF._readBox = function (buff, off) { + var b = []; + + for (var i = 0; i < 4; i++) { + b[i] = exports.FromEMF.B.readInt(buff, off + i * 4); + } + + return b; +}; + +exports.FromEMF._updateCtm = function (prms, gst) { + var mat = [1, 0, 0, 1, 0, 0]; + var wbb = prms.wbb, + bb = prms.bb, + vbb = prms.vbb && prms.vbb.length == 4 ? prms.vbb : prms.bb; //var y0 = bb[1], y1 = bb[3]; bb[1]=Math.min(y0,y1); bb[3]=Math.max(y0,y1); + + exports.UDOC.M.translate(mat, -wbb[0], -wbb[1]); + exports.UDOC.M.scale(mat, 1 / wbb[2], 1 / wbb[3]); + exports.UDOC.M.scale(mat, vbb[2], vbb[3]); //UDOC.M.scale(mat, vbb[2]/(bb[2]-bb[0]), vbb[3]/(bb[3]-bb[1])); + //UDOC.M.scale(mat, bb[2]-bb[0],bb[3]-bb[1]); + + gst.ctm = mat; +}; + +exports.FromEMF._draw = function (genv, gst, prms, needFill) { + if (prms.fill && needFill) genv.Fill(gst, false); + if (prms.strk && gst.lwidth != 0) genv.Stroke(gst); +}; + +exports.FromEMF._drawPoly = function (buff, off, ppp, gst, nl, clos, justLine) { + var rS = nl == 2 ? 
exports.FromEMF.B.readShort : exports.FromEMF.B.readInt; + + for (var j = 0; j < ppp; j++) { + var px = rS(buff, off); + off += nl; + var py = rS(buff, off); + off += nl; + if (j == 0 && !justLine) exports.UDOC.G.moveTo(gst, px, py);else exports.UDOC.G.lineTo(gst, px, py); + } + + if (clos) exports.UDOC.G.closePath(gst); + return off; +}; + +exports.FromEMF.B = { + uint8: new Uint8Array(4), + readShort: function readShort(buff, p) { + var u8 = exports.FromEMF.B.uint8; + u8[0] = buff[p]; + u8[1] = buff[p + 1]; + return exports.FromEMF.B.int16[0]; + }, + readUshort: function readUshort(buff, p) { + var u8 = exports.FromEMF.B.uint8; + u8[0] = buff[p]; + u8[1] = buff[p + 1]; + return exports.FromEMF.B.uint16[0]; + }, + readInt: function readInt(buff, p) { + var u8 = exports.FromEMF.B.uint8; + u8[0] = buff[p]; + u8[1] = buff[p + 1]; + u8[2] = buff[p + 2]; + u8[3] = buff[p + 3]; + return exports.FromEMF.B.int32[0]; + }, + readUint: function readUint(buff, p) { + var u8 = exports.FromEMF.B.uint8; + u8[0] = buff[p]; + u8[1] = buff[p + 1]; + u8[2] = buff[p + 2]; + u8[3] = buff[p + 3]; + return exports.FromEMF.B.uint32[0]; + }, + readFloat: function readFloat(buff, p) { + var u8 = exports.FromEMF.B.uint8; + u8[0] = buff[p]; + u8[1] = buff[p + 1]; + u8[2] = buff[p + 2]; + u8[3] = buff[p + 3]; + return exports.FromEMF.B.flot32[0]; + }, + readASCII: function readASCII(buff, p, l) { + var s = ""; + + for (var i = 0; i < l; i++) { + s += String.fromCharCode(buff[p + i]); + } + + return s; + } +}; +exports.FromEMF.B.int16 = new Int16Array(exports.FromEMF.B.uint8.buffer); +exports.FromEMF.B.uint16 = new Uint16Array(exports.FromEMF.B.uint8.buffer); +exports.FromEMF.B.int32 = new Int32Array(exports.FromEMF.B.uint8.buffer); +exports.FromEMF.B.uint32 = new Uint32Array(exports.FromEMF.B.uint8.buffer); +exports.FromEMF.B.flot32 = new Float32Array(exports.FromEMF.B.uint8.buffer); +exports.FromEMF.C = { + EMR_HEADER: 0x00000001, + EMR_POLYBEZIER: 0x00000002, + EMR_POLYGON: 0x00000003, + EMR_POLYLINE: 0x00000004, + EMR_POLYBEZIERTO: 0x00000005, + EMR_POLYLINETO: 0x00000006, + EMR_POLYPOLYLINE: 0x00000007, + EMR_POLYPOLYGON: 0x00000008, + EMR_SETWINDOWEXTEX: 0x00000009, + EMR_SETWINDOWORGEX: 0x0000000A, + EMR_SETVIEWPORTEXTEX: 0x0000000B, + EMR_SETVIEWPORTORGEX: 0x0000000C, + EMR_SETBRUSHORGEX: 0x0000000D, + EMR_EOF: 0x0000000E, + EMR_SETPIXELV: 0x0000000F, + EMR_SETMAPPERFLAGS: 0x00000010, + EMR_SETMAPMODE: 0x00000011, + EMR_SETBKMODE: 0x00000012, + EMR_SETPOLYFILLMODE: 0x00000013, + EMR_SETROP2: 0x00000014, + EMR_SETSTRETCHBLTMODE: 0x00000015, + EMR_SETTEXTALIGN: 0x00000016, + EMR_SETCOLORADJUSTMENT: 0x00000017, + EMR_SETTEXTCOLOR: 0x00000018, + EMR_SETBKCOLOR: 0x00000019, + EMR_OFFSETCLIPRGN: 0x0000001A, + EMR_MOVETOEX: 0x0000001B, + EMR_SETMETARGN: 0x0000001C, + EMR_EXCLUDECLIPRECT: 0x0000001D, + EMR_INTERSECTCLIPRECT: 0x0000001E, + EMR_SCALEVIEWPORTEXTEX: 0x0000001F, + EMR_SCALEWINDOWEXTEX: 0x00000020, + EMR_SAVEDC: 0x00000021, + EMR_RESTOREDC: 0x00000022, + EMR_SETWORLDTRANSFORM: 0x00000023, + EMR_MODIFYWORLDTRANSFORM: 0x00000024, + EMR_SELECTOBJECT: 0x00000025, + EMR_CREATEPEN: 0x00000026, + EMR_CREATEBRUSHINDIRECT: 0x00000027, + EMR_DELETEOBJECT: 0x00000028, + EMR_ANGLEARC: 0x00000029, + EMR_ELLIPSE: 0x0000002A, + EMR_RECTANGLE: 0x0000002B, + EMR_ROUNDRECT: 0x0000002C, + EMR_ARC: 0x0000002D, + EMR_CHORD: 0x0000002E, + EMR_PIE: 0x0000002F, + EMR_SELECTPALETTE: 0x00000030, + EMR_CREATEPALETTE: 0x00000031, + EMR_SETPALETTEENTRIES: 0x00000032, + EMR_RESIZEPALETTE: 0x00000033, + EMR_REALIZEPALETTE: 0x00000034, + 
EMR_EXTFLOODFILL: 0x00000035, + EMR_LINETO: 0x00000036, + EMR_ARCTO: 0x00000037, + EMR_POLYDRAW: 0x00000038, + EMR_SETARCDIRECTION: 0x00000039, + EMR_SETMITERLIMIT: 0x0000003A, + EMR_BEGINPATH: 0x0000003B, + EMR_ENDPATH: 0x0000003C, + EMR_CLOSEFIGURE: 0x0000003D, + EMR_FILLPATH: 0x0000003E, + EMR_STROKEANDFILLPATH: 0x0000003F, + EMR_STROKEPATH: 0x00000040, + EMR_FLATTENPATH: 0x00000041, + EMR_WIDENPATH: 0x00000042, + EMR_SELECTCLIPPATH: 0x00000043, + EMR_ABORTPATH: 0x00000044, + EMR_COMMENT: 0x00000046, + EMR_FILLRGN: 0x00000047, + EMR_FRAMERGN: 0x00000048, + EMR_INVERTRGN: 0x00000049, + EMR_PAINTRGN: 0x0000004A, + EMR_EXTSELECTCLIPRGN: 0x0000004B, + EMR_BITBLT: 0x0000004C, + EMR_STRETCHBLT: 0x0000004D, + EMR_MASKBLT: 0x0000004E, + EMR_PLGBLT: 0x0000004F, + EMR_SETDIBITSTODEVICE: 0x00000050, + EMR_STRETCHDIBITS: 0x00000051, + EMR_EXTCREATEFONTINDIRECTW: 0x00000052, + EMR_EXTTEXTOUTA: 0x00000053, + EMR_EXTTEXTOUTW: 0x00000054, + EMR_POLYBEZIER16: 0x00000055, + EMR_POLYGON16: 0x00000056, + EMR_POLYLINE16: 0x00000057, + EMR_POLYBEZIERTO16: 0x00000058, + EMR_POLYLINETO16: 0x00000059, + EMR_POLYPOLYLINE16: 0x0000005A, + EMR_POLYPOLYGON16: 0x0000005B, + EMR_POLYDRAW16: 0x0000005C, + EMR_CREATEMONOBRUSH: 0x0000005D, + EMR_CREATEDIBPATTERNBRUSHPT: 0x0000005E, + EMR_EXTCREATEPEN: 0x0000005F, + EMR_POLYTEXTOUTA: 0x00000060, + EMR_POLYTEXTOUTW: 0x00000061, + EMR_SETICMMODE: 0x00000062, + EMR_CREATECOLORSPACE: 0x00000063, + EMR_SETCOLORSPACE: 0x00000064, + EMR_DELETECOLORSPACE: 0x00000065, + EMR_GLSRECORD: 0x00000066, + EMR_GLSBOUNDEDRECORD: 0x00000067, + EMR_PIXELFORMAT: 0x00000068, + EMR_DRAWESCAPE: 0x00000069, + EMR_EXTESCAPE: 0x0000006A, + EMR_SMALLTEXTOUT: 0x0000006C, + EMR_FORCEUFIMAPPING: 0x0000006D, + EMR_NAMEDESCAPE: 0x0000006E, + EMR_COLORCORRECTPALETTE: 0x0000006F, + EMR_SETICMPROFILEA: 0x00000070, + EMR_SETICMPROFILEW: 0x00000071, + EMR_ALPHABLEND: 0x00000072, + EMR_SETLAYOUT: 0x00000073, + EMR_TRANSPARENTBLT: 0x00000074, + EMR_GRADIENTFILL: 0x00000076, + EMR_SETLINKEDUFIS: 0x00000077, + EMR_SETTEXTJUSTIFICATION: 0x00000078, + EMR_COLORMATCHTOTARGETW: 0x00000079, + EMR_CREATECOLORSPACEW: 0x0000007A +}; +exports.FromEMF.K = []; // (function() { +// var inp, out, stt; +// inp = FromEMF.C; out = FromEMF.K; stt=4; +// for(var p in inp) out[inp[p]] = p.slice(stt); +// } )(); + +exports.ToContext2D = function (needPage, scale) { + this.canvas = document.createElement("canvas"); + this.ctx = this.canvas.getContext("2d"); + this.bb = null; + this.currPage = 0; + this.needPage = needPage; + this.scale = scale; +}; + +exports.ToContext2D.prototype.StartPage = function (x, y, w, h) { + if (this.currPage != this.needPage) return; + this.bb = [x, y, w, h]; + var scl = this.scale, + dpr = window.devicePixelRatio; + var cnv = this.canvas, + ctx = this.ctx; + cnv.width = Math.round(w * scl); + cnv.height = Math.round(h * scl); + ctx.translate(0, h * scl); + ctx.scale(scl, -scl); + cnv.setAttribute("style", "border:1px solid; width:" + cnv.width / dpr + "px; height:" + cnv.height / dpr + "px"); +}; + +exports.ToContext2D.prototype.Fill = function (gst, evenOdd) { + if (this.currPage != this.needPage) return; + var ctx = this.ctx; + ctx.beginPath(); + + this._setStyle(gst, ctx); + + this._draw(gst.pth, ctx); + + ctx.fill(); +}; + +exports.ToContext2D.prototype.Stroke = function (gst) { + if (this.currPage != this.needPage) return; + var ctx = this.ctx; + ctx.beginPath(); + + this._setStyle(gst, ctx); + + this._draw(gst.pth, ctx); + + ctx.stroke(); +}; + +exports.ToContext2D.prototype.PutText = function 
(gst, str, stw) { + if (this.currPage != this.needPage) return; + + var scl = this._scale(gst.ctm); + + var ctx = this.ctx; + + this._setStyle(gst, ctx); + + ctx.save(); + var m = [1, 0, 0, -1, 0, 0]; + + this._concat(m, gst.font.Tm); + + this._concat(m, gst.ctm); //console.log(str, m, gst); throw "e"; + + + ctx.transform(m[0], m[1], m[2], m[3], m[4], m[5]); + ctx.fillText(str, 0, 0); + ctx.restore(); +}; + +exports.ToContext2D.prototype.PutImage = function (gst, buff, w, h, msk) { + if (this.currPage != this.needPage) return; + var ctx = this.ctx; + + if (buff.length == w * h * 4) { + buff = buff.slice(0); + if (msk && msk.length == w * h * 4) for (var i = 0; i < buff.length; i += 4) { + buff[i + 3] = msk[i + 1]; + } + var cnv = document.createElement("canvas"), + cctx = cnv.getContext("2d"); + cnv.width = w; + cnv.height = h; + var imgd = cctx.createImageData(w, h); + + for (var i = 0; i < buff.length; i++) { + imgd.data[i] = buff[i]; + } + + cctx.putImageData(imgd, 0, 0); + ctx.save(); + var m = [1, 0, 0, 1, 0, 0]; + + this._concat(m, [1 / w, 0, 0, -1 / h, 0, 1]); + + this._concat(m, gst.ctm); + + ctx.transform(m[0], m[1], m[2], m[3], m[4], m[5]); + ctx.drawImage(cnv, 0, 0); + ctx.restore(); + } +}; + +exports.ToContext2D.prototype.ShowPage = function () { + this.currPage++; +}; + +exports.ToContext2D.prototype.Done = function () {}; + +function _flt(n) { + return "" + parseFloat(n.toFixed(2)); +} + +exports.ToContext2D.prototype._setStyle = function (gst, ctx) { + var scl = this._scale(gst.ctm); + + ctx.fillStyle = this._getFill(gst.colr, gst.ca, ctx); + ctx.strokeStyle = this._getFill(gst.COLR, gst.CA, ctx); + ctx.lineCap = ["butt", "round", "square"][gst.lcap]; + ctx.lineJoin = ["miter", "round", "bevel"][gst.ljoin]; + ctx.lineWidth = gst.lwidth * scl; + var dsh = gst.dash.slice(0); + + for (var i = 0; i < dsh.length; i++) { + dsh[i] = _flt(dsh[i] * scl); + } + + ctx.setLineDash(dsh); + ctx.miterLimit = gst.mlimit * scl; + var fn = gst.font.Tf, + ln = fn.toLowerCase(); + var p0 = ln.indexOf("bold") != -1 ? "bold " : ""; + var p1 = ln.indexOf("italic") != -1 || ln.indexOf("oblique") != -1 ? 
"italic " : ""; + ctx.font = p0 + p1 + gst.font.Tfs + "px \"" + fn + "\""; +}; + +exports.ToContext2D.prototype._getFill = function (colr, ca, ctx) { + if (colr.typ == null) return this._colr(colr, ca);else { + var grd = colr, + crd = grd.crds, + mat = grd.mat, + scl = this._scale(mat), + gf; + + if (grd.typ == "lin") { + var p0 = this._multPoint(mat, crd.slice(0, 2)), + p1 = this._multPoint(mat, crd.slice(2)); + + gf = ctx.createLinearGradient(p0[0], p0[1], p1[0], p1[1]); + } else if (grd.typ == "rad") { + var p0 = this._multPoint(mat, crd.slice(0, 2)), + p1 = this._multPoint(mat, crd.slice(3)); + + gf = ctx.createRadialGradient(p0[0], p0[1], crd[2] * scl, p1[0], p1[1], crd[5] * scl); + } + + for (var i = 0; i < grd.grad.length; i++) { + gf.addColorStop(grd.grad[i][0], this._colr(grd.grad[i][1], ca)); + } + + return gf; + } +}; + +exports.ToContext2D.prototype._colr = function (c, a) { + return "rgba(" + Math.round(c[0] * 255) + "," + Math.round(c[1] * 255) + "," + Math.round(c[2] * 255) + "," + a + ")"; +}; + +exports.ToContext2D.prototype._scale = function (m) { + return Math.sqrt(Math.abs(m[0] * m[3] - m[1] * m[2])); +}; + +exports.ToContext2D.prototype._concat = function (m, w) { + var a = m[0], + b = m[1], + c = m[2], + d = m[3], + tx = m[4], + ty = m[5]; + m[0] = a * w[0] + b * w[2]; + m[1] = a * w[1] + b * w[3]; + m[2] = c * w[0] + d * w[2]; + m[3] = c * w[1] + d * w[3]; + m[4] = tx * w[0] + ty * w[2] + w[4]; + m[5] = tx * w[1] + ty * w[3] + w[5]; +}; + +exports.ToContext2D.prototype._multPoint = function (m, p) { + var x = p[0], + y = p[1]; + return [x * m[0] + y * m[2] + m[4], x * m[1] + y * m[3] + m[5]]; +}, exports.ToContext2D.prototype._draw = function (path, ctx) { + var c = 0, + crds = path.crds; + + for (var j = 0; j < path.cmds.length; j++) { + var cmd = path.cmds[j]; + + if (cmd == "M") { + ctx.moveTo(crds[c], crds[c + 1]); + c += 2; + } else if (cmd == "L") { + ctx.lineTo(crds[c], crds[c + 1]); + c += 2; + } else if (cmd == "C") { + ctx.bezierCurveTo(crds[c], crds[c + 1], crds[c + 2], crds[c + 3], crds[c + 4], crds[c + 5]); + c += 6; + } else if (cmd == "Q") { + ctx.quadraticCurveTo(crds[c], crds[c + 1], crds[c + 2], crds[c + 3]); + c += 4; + } else if (cmd == "Z") { + ctx.closePath(); + } + } +}; + +},{}],93:[function(require,module,exports){ +"use strict"; + +Object.defineProperty(exports, "__esModule", { + value: true +}); +exports.getBinaryContent = exports.isContainMultiType = exports.isKoera = exports.isJapanese = exports.isChinese = exports.fromulaRef = exports.escapeCharacter = exports.generateRandomIndex = exports.LightenDarkenColor = exports.getRowHeightPixel = exports.getColumnWidthPixel = exports.getXmlAttibute = exports.getPxByEMUs = exports.getptToPxRatioByDPI = exports.getcellrange = exports.getRangetxt = void 0; + +var constant_1 = require("./constant"); + +function getRangetxt(range, sheettxt) { + var row0 = range["row"][0], + row1 = range["row"][1]; + var column0 = range["column"][0], + column1 = range["column"][1]; + + if (row0 == null && row1 == null) { + return sheettxt + chatatABC(column0) + ":" + chatatABC(column1); + } else if (column0 == null && column1 == null) { + return sheettxt + (row0 + 1) + ":" + (row1 + 1); + } else { + if (column0 == column1 && row0 == row1) { + return sheettxt + chatatABC(column0) + (row0 + 1); + } else { + return sheettxt + chatatABC(column0) + (row0 + 1) + ":" + chatatABC(column1) + (row1 + 1); + } + } +} + +exports.getRangetxt = getRangetxt; + +function getcellrange(txt, sheets, sheetId) { + if (sheets === void 0) { + 
sheets = {}; + } + + if (sheetId === void 0) { + sheetId = "1"; + } + + var val = txt.split("!"); + var sheettxt = "", + rangetxt = "", + sheetIndex = -1; + + if (val.length > 1) { + sheettxt = val[0]; + rangetxt = val[1]; + var si = sheets[sheettxt]; + + if (si == null) { + sheetIndex = parseInt(sheetId); + } else { + sheetIndex = parseInt(si); + } + } else { + sheetIndex = parseInt(sheetId); + rangetxt = val[0]; + } + + if (rangetxt.indexOf(":") == -1) { + var row = parseInt(rangetxt.replace(/[^0-9]/g, "")) - 1; + var col = ABCatNum(rangetxt.replace(/[^A-Za-z]/g, "")); + + if (!isNaN(row) && !isNaN(col)) { + return { + "row": [row, row], + "column": [col, col], + "sheetIndex": sheetIndex + }; + } else { + return null; + } + } else { + var rangetxtArray = rangetxt.split(":"); + var row = [], + col = []; + row[0] = parseInt(rangetxtArray[0].replace(/[^0-9]/g, "")) - 1; + row[1] = parseInt(rangetxtArray[1].replace(/[^0-9]/g, "")) - 1; // if (isNaN(row[0])) { + // row[0] = 0; + // } + // if (isNaN(row[1])) { + // row[1] = sheetdata.length - 1; + // } + + if (row[0] > row[1]) { + return null; + } + + col[0] = ABCatNum(rangetxtArray[0].replace(/[^A-Za-z]/g, "")); + col[1] = ABCatNum(rangetxtArray[1].replace(/[^A-Za-z]/g, "")); // if (isNaN(col[0])) { + // col[0] = 0; + // } + // if (isNaN(col[1])) { + // col[1] = sheetdata[0].length - 1; + // } + + if (col[0] > col[1]) { + return null; + } + + return { + "row": row, + "column": col, + "sheetIndex": sheetIndex + }; + } +} + +exports.getcellrange = getcellrange; //列下标 字母转数字 + +function ABCatNum(abc) { + abc = abc.toUpperCase(); + var abc_len = abc.length; + + if (abc_len == 0) { + return NaN; + } + + var abc_array = abc.split(""); + var wordlen = constant_1.columeHeader_word.length; + var ret = 0; + + for (var i = abc_len - 1; i >= 0; i--) { + if (i == abc_len - 1) { + ret += constant_1.columeHeader_word_index[abc_array[i]]; + } else { + ret += Math.pow(wordlen, abc_len - i - 1) * (constant_1.columeHeader_word_index[abc_array[i]] + 1); + } + } + + return ret; +} //列下标 数字转字母 + + +function chatatABC(index) { + var wordlen = constant_1.columeHeader_word.length; + + if (index < wordlen) { + return constant_1.columeHeader_word[index]; + } else { + var last = 0, + pre = 0, + ret = ""; + var i = 1, + n = 0; + + while (index >= wordlen / (wordlen - 1) * (Math.pow(wordlen, i++) - 1)) { + n = i; + } + + var index_ab = index - wordlen / (wordlen - 1) * (Math.pow(wordlen, n - 1) - 1); //970 + + last = index_ab + 1; + + for (var x = n; x > 0; x--) { + var last1 = last, + x1 = x; //-702=268, 3 + + if (x == 1) { + last1 = last1 % wordlen; + + if (last1 == 0) { + last1 = 26; + } + + return ret + constant_1.columeHeader_word[last1 - 1]; + } + + last1 = Math.ceil(last1 / Math.pow(wordlen, x - 1)); //last1 = last1 % wordlen; + + ret += constant_1.columeHeader_word[last1 - 1]; + + if (x > 1) { + last = last - (last1 - 1) * wordlen; + } + } + } +} +/** + * @return ratio, default 0.75 1in = 2.54cm = 25.4mm = 72pt = 6pc, pt = 1/72 In, px = 1/dpi In +*/ + + +function getptToPxRatioByDPI() { + return 72 / 96; +} + +exports.getptToPxRatioByDPI = getptToPxRatioByDPI; +/** + * @emus EMUs, Excel drawing unit + * @return pixel +*/ + +function getPxByEMUs(emus) { + if (emus == null) { + return 0; + } + + var inch = emus / 914400; + var pt = inch * 72; + var px = pt / getptToPxRatioByDPI(); + return px; +} + +exports.getPxByEMUs = getPxByEMUs; +/** + * @dom xml attribute object + * @attr attribute name + * @d if attribute is null, return default value + * @return attribute 
value +*/ + +function getXmlAttibute(dom, attr, d) { + var value = dom[attr]; + value = value == null ? d : value; + return value; +} + +exports.getXmlAttibute = getXmlAttibute; +/** + * @columnWidth Excel column width + * @return pixel column width +*/ + +function getColumnWidthPixel(columnWidth) { + var pix = Math.round((columnWidth - 0.83) * 8 + 5); + return pix; +} + +exports.getColumnWidthPixel = getColumnWidthPixel; +/** + * @rowHeight Excel row height + * @return pixel row height +*/ + +function getRowHeightPixel(rowHeight) { + var pix = Math.round(rowHeight / getptToPxRatioByDPI()); + return pix; +} + +exports.getRowHeightPixel = getRowHeightPixel; + +function LightenDarkenColor(sixColor, tint) { + var hex = sixColor.substring(sixColor.length - 6, sixColor.length); + var rgbArray = hexToRgbArray("#" + hex); + var hslArray = rgbToHsl(rgbArray[0], rgbArray[1], rgbArray[2]); + + if (tint > 0) { + hslArray[2] = hslArray[2] * (1.0 - tint) + tint; + } else if (tint < 0) { + hslArray[2] = hslArray[2] * (1.0 + tint); + } else { + return "#" + hex; + } + + var newRgbArray = hslToRgb(hslArray[0], hslArray[1], hslArray[2]); + return rgbToHex("RGB(" + newRgbArray.join(",") + ")"); +} + +exports.LightenDarkenColor = LightenDarkenColor; + +function rgbToHex(rgb) { + //十六进制颜色值的正则表达式 + var reg = /^#([0-9a-fA-f]{3}|[0-9a-fA-f]{6})$/; // 如果是rgb颜色表示 + + if (/^(rgb|RGB)/.test(rgb)) { + var aColor = rgb.replace(/(?:\(|\)|rgb|RGB)*/g, "").split(","); + var strHex = "#"; + + for (var i = 0; i < aColor.length; i++) { + var hex = Number(aColor[i]).toString(16); + + if (hex.length < 2) { + hex = '0' + hex; + } + + strHex += hex; + } + + if (strHex.length !== 7) { + strHex = rgb; + } + + return strHex; + } else if (reg.test(rgb)) { + var aNum = rgb.replace(/#/, "").split(""); + + if (aNum.length === 6) { + return rgb; + } else if (aNum.length === 3) { + var numHex = "#"; + + for (var i = 0; i < aNum.length; i += 1) { + numHex += aNum[i] + aNum[i]; + } + + return numHex; + } + } + + return rgb; +} + +function hexToRgb(hex) { + var sColor = hex.toLowerCase(); //十六进制颜色值的正则表达式 + + var reg = /^#([0-9a-fA-f]{3}|[0-9a-fA-f]{6})$/; // 如果是16进制颜色 + + if (sColor && reg.test(sColor)) { + if (sColor.length === 4) { + var sColorNew = "#"; + + for (var i = 1; i < 4; i += 1) { + sColorNew += sColor.slice(i, i + 1).concat(sColor.slice(i, i + 1)); + } + + sColor = sColorNew; + } //处理六位的颜色值 + + + var sColorChange = []; + + for (var i = 1; i < 7; i += 2) { + sColorChange.push(parseInt("0x" + sColor.slice(i, i + 2))); + } + + return "RGB(" + sColorChange.join(",") + ")"; + } + + return sColor; +} + +function hexToRgbArray(hex) { + var sColor = hex.toLowerCase(); //十六进制颜色值的正则表达式 + + var reg = /^#([0-9a-fA-f]{3}|[0-9a-fA-f]{6})$/; // 如果是16进制颜色 + + if (sColor && reg.test(sColor)) { + if (sColor.length === 4) { + var sColorNew = "#"; + + for (var i = 1; i < 4; i += 1) { + sColorNew += sColor.slice(i, i + 1).concat(sColor.slice(i, i + 1)); + } + + sColor = sColorNew; + } //处理六位的颜色值 + + + var sColorChange = []; + + for (var i = 1; i < 7; i += 2) { + sColorChange.push(parseInt("0x" + sColor.slice(i, i + 2))); + } + + return sColorChange; + } + + return null; +} +/** + * HSL颜色值转换为RGB. + * 换算公式改编自 http://en.wikipedia.org/wiki/HSL_color_space. 
+ * h, s, 和 l 设定在 [0, 1] 之间 + * 返回的 r, g, 和 b 在 [0, 255]之间 + * + * @param Number h 色相 + * @param Number s 饱和度 + * @param Number l 亮度 + * @return Array RGB色值数值 + */ + + +function hslToRgb(h, s, l) { + var r, g, b; + + if (s == 0) { + r = g = b = l; // achromatic + } else { + var hue2rgb = function hue2rgb(p, q, t) { + if (t < 0) t += 1; + if (t > 1) t -= 1; + if (t < 1 / 6) return p + (q - p) * 6 * t; + if (t < 1 / 2) return q; + if (t < 2 / 3) return p + (q - p) * (2 / 3 - t) * 6; + return p; + }; + + var q = l < 0.5 ? l * (1 + s) : l + s - l * s; + var p = 2 * l - q; + r = hue2rgb(p, q, h + 1 / 3); + g = hue2rgb(p, q, h); + b = hue2rgb(p, q, h - 1 / 3); + } + + return [Math.round(r * 255), Math.round(g * 255), Math.round(b * 255)]; +} +/** + * RGB 颜色值转换为 HSL. + * 转换公式参考自 http://en.wikipedia.org/wiki/HSL_color_space. + * r, g, 和 b 需要在 [0, 255] 范围内 + * 返回的 h, s, 和 l 在 [0, 1] 之间 + * + * @param Number r 红色色值 + * @param Number g 绿色色值 + * @param Number b 蓝色色值 + * @return Array HSL各值数组 + */ + + +function rgbToHsl(r, g, b) { + r /= 255, g /= 255, b /= 255; + var max = Math.max(r, g, b), + min = Math.min(r, g, b); + var h, + s, + l = (max + min) / 2; + + if (max == min) { + h = s = 0; // achromatic + } else { + var d = max - min; + s = l > 0.5 ? d / (2 - max - min) : d / (max + min); + + switch (max) { + case r: + h = (g - b) / d + (g < b ? 6 : 0); + break; + + case g: + h = (b - r) / d + 2; + break; + + case b: + h = (r - g) / d + 4; + break; + } + + h /= 6; + } + + return [h, s, l]; +} + +function generateRandomIndex(prefix) { + if (prefix == null) { + prefix = "Sheet"; + } + + var userAgent = window.navigator.userAgent.replace(/[^a-zA-Z0-9]/g, "").split(""); + var mid = ""; + + for (var i = 0; i < 5; i++) { + mid += userAgent[Math.round(Math.random() * (userAgent.length - 1))]; + } + + var time = new Date().getTime(); + return prefix + "_" + mid + "_" + time; +} + +exports.generateRandomIndex = generateRandomIndex; + +function escapeCharacter(str) { + if (str == null || str.length == 0) { + return str; + } + + return str.replace(/&/g, "&").replace(/"/g, '"').replace(/</g, '<').replace(/>/g, '>').replace(/ /g, ' ').replace(/'/g, "'").replace(/¡/g, "¡").replace(/¢/g, "¢").replace(/£/g, "£").replace(/¤/g, "¤").replace(/¥/g, "¥").replace(/¦/g, "¦").replace(/§/g, "§").replace(/¨/g, "¨").replace(/©/g, "©").replace(/ª/g, "ª").replace(/«/g, "«").replace(/¬/g, "¬").replace(/­/g, "").replace(/®/g, "®").replace(/¯/g, "¯").replace(/°/g, "°").replace(/±/g, "±").replace(/²/g, "²").replace(/³/g, "³").replace(/´/g, "´").replace(/µ/g, "µ").replace(/¶/g, "¶").replace(/·/g, "·").replace(/¸/g, "¸").replace(/¹/g, "¹").replace(/º/g, "º").replace(/»/g, "»").replace(/¼/g, "¼").replace(/½/g, "½").replace(/¾/g, "¾").replace(/¿/g, "¿").replace(/×/g, "×").replace(/÷/g, "÷").replace(/À/g, "À").replace(/Á/g, "Á").replace(/Â/g, "Â").replace(/Ã/g, "Ã").replace(/Ä/g, "Ä").replace(/Å/g, "Å").replace(/Æ/g, "Æ").replace(/Ç/g, "Ç").replace(/È/g, "È").replace(/É/g, "É").replace(/Ê/g, "Ê").replace(/Ë/g, "Ë").replace(/Ì/g, "Ì").replace(/Í/g, "Í").replace(/Î/g, "Î").replace(/Ï/g, "Ï").replace(/Ð/g, "Ð").replace(/Ñ/g, "Ñ").replace(/Ò/g, "Ò").replace(/Ó/g, "Ó").replace(/Ô/g, "Ô").replace(/Õ/g, "Õ").replace(/Ö/g, "Ö").replace(/Ø/g, "Ø").replace(/Ù/g, "Ù").replace(/Ú/g, "Ú").replace(/Û/g, "Û").replace(/Ü/g, "Ü").replace(/Ý/g, "Ý").replace(/Þ/g, "Þ").replace(/ß/g, "ß").replace(/à/g, "à").replace(/á/g, "á").replace(/â/g, "â").replace(/ã/g, "ã").replace(/ä/g, "ä").replace(/å/g, "å").replace(/æ/g, "æ").replace(/ç/g, "ç").replace(/è/g, 
"è").replace(/é/g, "é").replace(/ê/g, "ê").replace(/ë/g, "ë").replace(/ì/g, "ì").replace(/í/g, "í").replace(/î/g, "î").replace(/ï/g, "ï").replace(/ð/g, "ð").replace(/ñ/g, "ñ").replace(/ò/g, "ò").replace(/ó/g, "ó").replace(/ô/g, "ô").replace(/õ/g, "õ").replace(/ö/g, "ö").replace(/ø/g, "ø").replace(/ù/g, "ù").replace(/ú/g, "ú").replace(/û/g, "û").replace(/ü/g, "ü").replace(/ý/g, "ý").replace(/þ/g, "þ").replace(/ÿ/g, "ÿ"); +} + +exports.escapeCharacter = escapeCharacter; + +var fromulaRef = +/** @class */ +function () { + function fromulaRef() {} + + fromulaRef.trim = function (str) { + if (str == null) { + str = ""; + } + + return str.replace(/(^\s*)|(\s*$)/g, ""); + }; + + fromulaRef.functionCopy = function (txt, mode, step) { + var _this = this; + + if (_this.operatorjson == null) { + var arr = _this.operator.split("|"), + op = {}; + + for (var i_1 = 0; i_1 < arr.length; i_1++) { + op[arr[i_1].toString()] = 1; + } + + _this.operatorjson = op; + } + + if (mode == null) { + mode = "down"; + } + + if (step == null) { + step = 1; + } + + if (txt.substr(0, 1) == "=") { + txt = txt.substr(1); + } + + var funcstack = txt.split(""); + var i = 0, + str = "", + function_str = "", + ispassby = true; + var matchConfig = { + "bracket": 0, + "comma": 0, + "squote": 0, + "dquote": 0 + }; + + while (i < funcstack.length) { + var s = funcstack[i]; + + if (s == "(" && matchConfig.dquote == 0) { + matchConfig.bracket += 1; + + if (str.length > 0) { + function_str += str + "("; + } else { + function_str += "("; + } + + str = ""; + } else if (s == ")" && matchConfig.dquote == 0) { + matchConfig.bracket -= 1; + function_str += _this.functionCopy(str, mode, step) + ")"; + str = ""; + } else if (s == '"' && matchConfig.squote == 0) { + if (matchConfig.dquote > 0) { + function_str += str + '"'; + matchConfig.dquote -= 1; + str = ""; + } else { + matchConfig.dquote += 1; + str += '"'; + } + } else if (s == ',' && matchConfig.dquote == 0) { + function_str += _this.functionCopy(str, mode, step) + ','; + str = ""; + } else if (s == '&' && matchConfig.dquote == 0) { + if (str.length > 0) { + function_str += _this.functionCopy(str, mode, step) + "&"; + str = ""; + } else { + function_str += "&"; + } + } else if (s in _this.operatorjson && matchConfig.dquote == 0) { + var s_next = ""; + + if (i + 1 < funcstack.length) { + s_next = funcstack[i + 1]; + } + + var p = i - 1, + s_pre = null; + + if (p >= 0) { + do { + s_pre = funcstack[p--]; + } while (p >= 0 && s_pre == " "); + } + + if (s + s_next in _this.operatorjson) { + if (str.length > 0) { + function_str += _this.functionCopy(str, mode, step) + s + s_next; + str = ""; + } else { + function_str += s + s_next; + } + + i++; + } else if (!/[^0-9]/.test(s_next) && s == "-" && (s_pre == "(" || s_pre == null || s_pre == "," || s_pre == " " || s_pre in _this.operatorjson)) { + str += s; + } else { + if (str.length > 0) { + function_str += _this.functionCopy(str, mode, step) + s; + str = ""; + } else { + function_str += s; + } + } + } else { + str += s; + } + + if (i == funcstack.length - 1) { + if (_this.iscelldata(_this.trim(str))) { + if (mode == "down") { + function_str += _this.downparam(_this.trim(str), step); + } else if (mode == "up") { + function_str += _this.upparam(_this.trim(str), step); + } else if (mode == "left") { + function_str += _this.leftparam(_this.trim(str), step); + } else if (mode == "right") { + function_str += _this.rightparam(_this.trim(str), step); + } + } else { + function_str += _this.trim(str); + } + } + + i++; + } + + return function_str; + }; + 
+ fromulaRef.downparam = function (txt, step) { + return this.updateparam("d", txt, step); + }; + + fromulaRef.upparam = function (txt, step) { + return this.updateparam("u", txt, step); + }; + + fromulaRef.leftparam = function (txt, step) { + return this.updateparam("l", txt, step); + }; + + fromulaRef.rightparam = function (txt, step) { + return this.updateparam("r", txt, step); + }; + + fromulaRef.updateparam = function (orient, txt, step) { + var _this = this; + + var val = txt.split("!"), + rangetxt, + prefix = ""; + + if (val.length > 1) { + rangetxt = val[1]; + prefix = val[0] + "!"; + } else { + rangetxt = val[0]; + } + + if (rangetxt.indexOf(":") == -1) { + var row = parseInt(rangetxt.replace(/[^0-9]/g, "")); + var col = ABCatNum(rangetxt.replace(/[^A-Za-z]/g, "")); + + var freezonFuc = _this.isfreezonFuc(rangetxt); + + var $row = freezonFuc[0] ? "$" : "", + $col = freezonFuc[1] ? "$" : ""; + + if (orient == "u" && !freezonFuc[0]) { + row -= step; + } else if (orient == "r" && !freezonFuc[1]) { + col += step; + } else if (orient == "l" && !freezonFuc[1]) { + col -= step; + } else if (!freezonFuc[0]) { + row += step; + } + + if (row < 0 || col < 0) { + return _this.error.r; + } + + if (!isNaN(row) && !isNaN(col)) { + return prefix + $col + chatatABC(col) + $row + row; + } else if (!isNaN(row)) { + return prefix + $row + row; + } else if (!isNaN(col)) { + return prefix + $col + chatatABC(col); + } else { + return txt; + } + } else { + rangetxt = rangetxt.split(":"); + var row = [], + col = []; + row[0] = parseInt(rangetxt[0].replace(/[^0-9]/g, "")); + row[1] = parseInt(rangetxt[1].replace(/[^0-9]/g, "")); + + if (row[0] > row[1]) { + return txt; + } + + col[0] = ABCatNum(rangetxt[0].replace(/[^A-Za-z]/g, "")); + col[1] = ABCatNum(rangetxt[1].replace(/[^A-Za-z]/g, "")); + + if (col[0] > col[1]) { + return txt; + } + + var freezonFuc0 = _this.isfreezonFuc(rangetxt[0]); + + var freezonFuc1 = _this.isfreezonFuc(rangetxt[1]); + + var $row0 = freezonFuc0[0] ? "$" : "", + $col0 = freezonFuc0[1] ? "$" : ""; + var $row1 = freezonFuc1[0] ? "$" : "", + $col1 = freezonFuc1[1] ? 
"$" : ""; + + if (orient == "u") { + if (!freezonFuc0[0]) { + row[0] -= step; + } + + if (!freezonFuc1[0]) { + row[1] -= step; + } + } else if (orient == "r") { + if (!freezonFuc0[1]) { + col[0] += step; + } + + if (!freezonFuc1[1]) { + col[1] += step; + } + } else if (orient == "l") { + if (!freezonFuc0[1]) { + col[0] -= step; + } + + if (!freezonFuc1[1]) { + col[1] -= step; + } + } else { + if (!freezonFuc0[0]) { + row[0] += step; + } + + if (!freezonFuc1[0]) { + row[1] += step; + } + } + + if (row[0] < 0 || col[0] < 0) { + return _this.error.r; + } + + if (isNaN(col[0]) && isNaN(col[1])) { + return prefix + $row0 + row[0] + ":" + $row1 + row[1]; + } else if (isNaN(row[0]) && isNaN(row[1])) { + return prefix + $col0 + chatatABC(col[0]) + ":" + $col1 + chatatABC(col[1]); + } else { + return prefix + $col0 + chatatABC(col[0]) + $row0 + row[0] + ":" + $col1 + chatatABC(col[1]) + $row1 + row[1]; + } + } + }; + + fromulaRef.iscelldata = function (txt) { + var val = txt.split("!"), + rangetxt; + + if (val.length > 1) { + rangetxt = val[1]; + } else { + rangetxt = val[0]; + } + + var reg_cell = /^(([a-zA-Z]+)|([$][a-zA-Z]+))(([0-9]+)|([$][0-9]+))$/g; //增加正则判断单元格为字母+数字的格式:如 A1:B3 + + var reg_cellRange = /^(((([a-zA-Z]+)|([$][a-zA-Z]+))(([0-9]+)|([$][0-9]+)))|((([a-zA-Z]+)|([$][a-zA-Z]+))))$/g; //增加正则判断单元格为字母+数字或字母的格式:如 A1:B3,A:A + + if (rangetxt.indexOf(":") == -1) { + var row = parseInt(rangetxt.replace(/[^0-9]/g, "")) - 1; + var col = ABCatNum(rangetxt.replace(/[^A-Za-z]/g, "")); + + if (!isNaN(row) && !isNaN(col) && rangetxt.toString().match(reg_cell)) { + return true; + } else if (!isNaN(row)) { + return false; + } else if (!isNaN(col)) { + return false; + } else { + return false; + } + } else { + reg_cellRange = /^(((([a-zA-Z]+)|([$][a-zA-Z]+))(([0-9]+)|([$][0-9]+)))|((([a-zA-Z]+)|([$][a-zA-Z]+)))|((([0-9]+)|([$][0-9]+s))))$/g; + rangetxt = rangetxt.split(":"); + var row = [], + col = []; + row[0] = parseInt(rangetxt[0].replace(/[^0-9]/g, "")) - 1; + row[1] = parseInt(rangetxt[1].replace(/[^0-9]/g, "")) - 1; + + if (row[0] > row[1]) { + return false; + } + + col[0] = ABCatNum(rangetxt[0].replace(/[^A-Za-z]/g, "")); + col[1] = ABCatNum(rangetxt[1].replace(/[^A-Za-z]/g, "")); + + if (col[0] > col[1]) { + return false; + } + + if (rangetxt[0].toString().match(reg_cellRange) && rangetxt[1].toString().match(reg_cellRange)) { + return true; + } else { + return false; + } + } + }; + + fromulaRef.isfreezonFuc = function (txt) { + var row = txt.replace(/[^0-9]/g, ""); + var col = txt.replace(/[^A-Za-z]/g, ""); + var row$ = txt.substr(txt.indexOf(row) - 1, 1); + var col$ = txt.substr(txt.indexOf(col) - 1, 1); + var ret = [false, false]; + + if (row$ == "$") { + ret[0] = true; + } + + if (col$ == "$") { + ret[1] = true; + } + + return ret; + }; + + fromulaRef.operator = '==|!=|<>|<=|>=|=|+|-|>|<|/|*|%|&|^'; + fromulaRef.error = { + v: "#VALUE!", + n: "#NAME?", + na: "#N/A", + r: "#REF!", + d: "#DIV/0!", + nm: "#NUM!", + nl: "#NULL!", + sp: "#SPILL!" 
//数组范围有其它值 + + }; + fromulaRef.operatorjson = null; + return fromulaRef; +}(); + +exports.fromulaRef = fromulaRef; + +function isChinese(temp) { + var re = /[^\u4e00-\u9fa5]/; + var reg = /[\u3002|\uff1f|\uff01|\uff0c|\u3001|\uff1b|\uff1a|\u201c|\u201d|\u2018|\u2019|\uff08|\uff09|\u300a|\u300b|\u3008|\u3009|\u3010|\u3011|\u300e|\u300f|\u300c|\u300d|\ufe43|\ufe44|\u3014|\u3015|\u2026|\u2014|\uff5e|\ufe4f|\uffe5]/; + if (reg.test(temp)) return true; + if (re.test(temp)) return false; + return true; +} + +exports.isChinese = isChinese; + +function isJapanese(temp) { + var re = /[^\u0800-\u4e00]/; + if (re.test(temp)) return false; + return true; +} + +exports.isJapanese = isJapanese; + +function isKoera(chr) { + if (chr > 0x3130 && chr < 0x318F || chr >= 0xAC00 && chr <= 0xD7A3) { + return true; + } + + return false; +} + +exports.isKoera = isKoera; + +function isContainMultiType(str) { + var isUnicode = false; + + if (escape(str).indexOf("%u") > -1) { + isUnicode = true; + } + + var isNot = false; + var reg = /[0-9a-z]/gi; + + if (reg.test(str)) { + isNot = true; + } + + var reEnSign = /[\x00-\xff]+/g; + + if (reEnSign.test(str)) { + isNot = true; + } + + if (isUnicode && isNot) { + return true; + } + + return false; +} + +exports.isContainMultiType = isContainMultiType; + +function getBinaryContent(path, options) { + var promise, resolve, reject; + var callback; + + if (!options) { + options = {}; + } // taken from jQuery + + + var createStandardXHR = function createStandardXHR() { + try { + return new window.XMLHttpRequest(); + } catch (e) {} + }; + + var createActiveXHR = function createActiveXHR() { + try { + return new window.ActiveXObject("Microsoft.XMLHTTP"); + } catch (e) {} + }; // Create the request object + + + var createXHR = typeof window !== "undefined" && window.ActiveXObject ? + /* Microsoft failed to properly + * implement the XMLHttpRequest in IE7 (can't request local files), + * so we use the ActiveXObject when it is available + * Additionally XMLHttpRequest can be disabled in IE7/IE8 so + * we need a fallback. + */ + function () { + return createStandardXHR() || createActiveXHR(); + } : // For all other browsers, use the standard XMLHttpRequest object + createStandardXHR; // backward compatible callback + + if (typeof options === "function") { + callback = options; + options = {}; + } else if (typeof options.callback === 'function') { + // callback inside options object + callback = options.callback; + } + + resolve = function resolve(data) { + callback(null, data); + }; + + reject = function reject(err) { + callback(err, null); + }; + + try { + var xhr = createXHR(); + xhr.open('GET', path, true); // recent browsers + + if ("responseType" in xhr) { + xhr.responseType = "arraybuffer"; + } // older browser + + + if (xhr.overrideMimeType) { + xhr.overrideMimeType("text/plain; charset=x-user-defined"); + } + + xhr.onreadystatechange = function (event) { + // use `xhr` and not `this`... 
thanks IE + if (xhr.readyState === 4) { + if (xhr.status === 200 || xhr.status === 0) { + try { + resolve(function (xhr) { + // for xhr.responseText, the 0xFF mask is applied by JSZip + return xhr.response || xhr.responseText; + }(xhr)); + } catch (err) { + reject(new Error(err)); + } + } else { + reject(new Error("Ajax error for " + path + " : " + this.status + " " + this.statusText)); + } + } + }; + + if (options.progress) { + xhr.onprogress = function (e) { + options.progress({ + path: path, + originalEvent: e, + percent: e.loaded / e.total * 100, + loaded: e.loaded, + total: e.total + }); + }; + } + + xhr.send(); + } catch (e) { + reject(new Error(e), null); + } // returns a promise or undefined depending on whether a callback was + // provided + + + return promise; +} + +exports.getBinaryContent = getBinaryContent; + +},{"./constant":91}],94:[function(require,module,exports){ +"use strict"; + +Object.defineProperty(exports, "__esModule", { + value: true +}); +exports.LuckyExcel = void 0; + +var LuckyFile_1 = require("./ToLuckySheet/LuckyFile"); // import {SecurityDoor,Car} from './content'; + + +var HandleZip_1 = require("./HandleZip"); // //demo +// function demoHandler(){ +// let upload = document.getElementById("Luckyexcel-demo-file"); +// let selectADemo = document.getElementById("Luckyexcel-select-demo"); +// let downlodDemo = document.getElementById("Luckyexcel-downlod-file"); +// let mask = document.getElementById("lucky-mask-demo"); +// if(upload){ +// window.onload = () => { +// upload.addEventListener("change", function(evt){ +// var files:FileList = (evt.target as any).files; +// if(files==null || files.length==0){ +// alert("No files wait for import"); +// return; +// } +// let name = files[0].name; +// let suffixArr = name.split("."), suffix = suffixArr[suffixArr.length-1]; +// if(suffix!="xlsx"){ +// alert("Currently only supports the import of xlsx files"); +// return; +// } +// LuckyExcel.transformExcelToLucky(files[0], function(exportJson:any, luckysheetfile:string){ +// if(exportJson.sheets==null || exportJson.sheets.length==0){ +// alert("Failed to read the content of the excel file, currently does not support xls files!"); +// return; +// } +// console.log(exportJson, luckysheetfile); +// window.luckysheet.destroy(); +// window.luckysheet.create({ +// container: 'luckysheet', //luckysheet is the container id +// showinfobar:false, +// data:exportJson.sheets, +// title:exportJson.info.name, +// userInfo:exportJson.info.name.creator +// }); +// }); +// }); +// selectADemo.addEventListener("change", function(evt){ +// var obj:any = selectADemo; +// var index = obj.selectedIndex; +// var value = obj.options[index].value; +// var name = obj.options[index].innerHTML; +// if(value==""){ +// return; +// } +// mask.style.display = "flex"; +// LuckyExcel.transformExcelToLuckyByUrl(value, name, function(exportJson:any, luckysheetfile:string){ +// if(exportJson.sheets==null || exportJson.sheets.length==0){ +// alert("Failed to read the content of the excel file, currently does not support xls files!"); +// return; +// } +// console.log(exportJson, luckysheetfile); +// mask.style.display = "none"; +// window.luckysheet.destroy(); +// window.luckysheet.create({ +// container: 'luckysheet', //luckysheet is the container id +// showinfobar:false, +// data:exportJson.sheets, +// title:exportJson.info.name, +// userInfo:exportJson.info.name.creator +// }); +// }); +// }); +// downlodDemo.addEventListener("click", function(evt){ +// var obj:any = selectADemo; +// var index = 
obj.selectedIndex; +// var value = obj.options[index].value; +// if(value.length==0){ +// alert("Please select a demo file"); +// return; +// } +// var elemIF:any = document.getElementById("Lucky-download-frame"); +// if(elemIF==null){ +// elemIF = document.createElement("iframe"); +// elemIF.style.display = "none"; +// elemIF.id = "Lucky-download-frame"; +// document.body.appendChild(elemIF); +// } +// elemIF.src = value; +// // elemIF.parentNode.removeChild(elemIF); +// }); +// } +// } +// } +// demoHandler(); +// api + + +var LuckyExcel = +/** @class */ +function () { + function LuckyExcel() {} + + LuckyExcel.transformExcelToLucky = function (excelFile, callBack) { + var handleZip = new HandleZip_1.HandleZip(excelFile); + handleZip.unzipFile(function (files) { + var luckyFile = new LuckyFile_1.LuckyFile(files, excelFile.name); + var luckysheetfile = luckyFile.Parse(); + var exportJson = JSON.parse(luckysheetfile); + + if (callBack != undefined) { + callBack(exportJson, luckysheetfile); + } + }, function (err) { + console.error(err); + }); + }; + + LuckyExcel.transformExcelToLuckyByUrl = function (url, name, callBack) { + var handleZip = new HandleZip_1.HandleZip(); + handleZip.unzipFileByUrl(url, function (files) { + var luckyFile = new LuckyFile_1.LuckyFile(files, name); + var luckysheetfile = luckyFile.Parse(); + var exportJson = JSON.parse(luckysheetfile); + + if (callBack != undefined) { + callBack(exportJson, luckysheetfile); + } + }, function (err) { + console.error(err); + }); + }; + + LuckyExcel.transformLuckyToExcel = function (LuckyFile, callBack) {}; + + return LuckyExcel; +}(); + +exports.LuckyExcel = LuckyExcel; + +},{"./HandleZip":84,"./ToLuckySheet/LuckyFile":87}],95:[function(require,module,exports){ +"use strict"; + +var main_1 = require("./main"); + +module.exports = main_1.LuckyExcel; + +},{"./main":94}]},{},[95])(95) +});
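+// Illustrative usage sketch, not part of the generated bundle. Assuming the UMD
+// build is loaded in a browser (exposed as the LuckyExcel global) or required as a
+// module, an .xlsx File can be converted to Luckysheet-compatible JSON like this:
+//
+//   LuckyExcel.transformExcelToLucky(xlsxFile, function (exportJson, luckysheetfile) {
+//     // exportJson.sheets / exportJson.info hold the parsed workbook;
+//     // luckysheetfile is the same result as a JSON string.
+//     console.log(exportJson.sheets);
+//   });
+//
+//   // Or fetch and convert a remote workbook by URL (hypothetical URL shown):
+//   LuckyExcel.transformExcelToLuckyByUrl("/files/demo.xlsx", "demo.xlsx",
+//     function (exportJson, luckysheetfile) { /* same callback shape */ });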