author      Narayan Kamath <narayan@google.com>  2015-01-27 16:25:36 +0000
committer   Narayan Kamath <narayan@google.com>  2015-02-27 11:25:51 +0000
commit      12f5c69e074d6ef012706068416f0a61b70b4e52 (patch)
tree        3174fa65e1815131e5e6a0a2b5c3d0cca685f6b5
parent      a378356955bcaf7be9434e8babed1b106f741a00 (diff)
download    libcore-12f5c69e074d6ef012706068416f0a61b70b4e52.zip (also .tar.gz, .tar.bz2)
Implement zip64 support for ZipFile/ZipInputStream/ZipOutputStream.
There are several open questions that I hope to resolve in future changes:

- Our heuristics for detecting whether a zip output stream should switch
  over to zip64 or not are imprecise.
- Also, given that we now officially support zip64, we have to assume that
  new entries whose size is unknown need the zip64 header / footer. This
  will make output files slightly larger and less compatible with older
  tools. If we don't do this, we'll have to go back and rewrite & compact
  parts of the stream we'd already flushed, which isn't always possible.
  The other option is to assume zip32 for streams of unknown length and
  throw if more than 4G of data is written to them.

Change-Id: Ibb4a97b5f83fd3ab850d7c407ecfda663968a6b9
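As context for the behaviour described above, here is a minimal standalone sketch of how the new zip64 path can be exercised: write one entry larger than 4 GiB and read it back, along the lines of the ZipFileTest cases added below. The class name Zip64Demo, the temp-file handling and the entry size are illustrative only, and (as the new knownfailures entry notes) actually running this needs several GB of disk space and considerable time.

    import java.io.BufferedOutputStream;
    import java.io.File;
    import java.io.FileOutputStream;
    import java.util.Enumeration;
    import java.util.zip.ZipEntry;
    import java.util.zip.ZipFile;
    import java.util.zip.ZipOutputStream;

    public class Zip64Demo {
        public static void main(String[] args) throws Exception {
            File f = File.createTempFile("zip64-demo", ".zip");
            f.deleteOnExit();

            // Declaring a size above 0xFFFFFFFF makes checkAndSetZip64Requirements()
            // mark both this entry and the archive as needing zip64 records.
            long entrySize = 4294967410L; // same size as testZip64Support_hugeEntry
            byte[] buffer = new byte[64 * 1024];

            ZipOutputStream out =
                    new ZipOutputStream(new BufferedOutputStream(new FileOutputStream(f)));
            try {
                ZipEntry ze = new ZipEntry("huge");
                ze.setSize(entrySize); // no longer throws for sizes > 4 GiB
                out.putNextEntry(ze);
                for (long written = 0; written < entrySize; written += buffer.length) {
                    int count = (int) Math.min(buffer.length, entrySize - written);
                    out.write(buffer, 0, count);
                }
                out.closeEntry();
            } finally {
                out.close();
            }

            // ZipFile finds the zip64 EOCD locator/record and the zip64 extended info
            // in the central directory entry, so the full 64-bit size is reported.
            ZipFile zf = new ZipFile(f);
            try {
                Enumeration<? extends ZipEntry> entries = zf.entries();
                ZipEntry ze = entries.nextElement();
                System.out.println(ze.getName() + " size=" + ze.getSize());
            } finally {
                zf.close();
            }
        }
    }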
-rw-r--r--  expectations/knownfailures.txt                                 9
-rw-r--r--  luni/src/main/java/java/util/zip/Zip64.java                  401
-rw-r--r--  luni/src/main/java/java/util/zip/ZipEntry.java                17
-rw-r--r--  luni/src/main/java/java/util/zip/ZipFile.java                 95
-rw-r--r--  luni/src/main/java/java/util/zip/ZipInputStream.java          33
-rw-r--r--  luni/src/main/java/java/util/zip/ZipOutputStream.java        279
-rw-r--r--  luni/src/main/java/libcore/util/CountingOutputStream.java     59
-rw-r--r--  luni/src/test/java/libcore/java/util/zip/ZipEntryTest.java    35
-rw-r--r--  luni/src/test/java/libcore/java/util/zip/ZipFileTest.java     60
9 files changed, 879 insertions, 109 deletions
diff --git a/expectations/knownfailures.txt b/expectations/knownfailures.txt
index c6d3dd7..77d301e 100644
--- a/expectations/knownfailures.txt
+++ b/expectations/knownfailures.txt
@@ -1492,5 +1492,14 @@
names: [
"org.apache.harmony.tests.java.nio.charset.CharsetTest#testForName_withProviderWithRecursiveCall"
]
+},
+{
+ description: "Zip64 tests take too long to execute and need more than 5GB of space to run.",
+ result: EXEC_FAILED,
+ names: [
+ "libcore.java.util.zip.ZipFileTest#testZip64Support_largeNumberOfEntries",
+ "libcore.java.util.zip.ZipFileTest#testZip64Support_totalLargerThan4G",
+ "libcore.java.util.zip.ZipFileTest#testZip64Support_hugeEntry"
+ ]
}
]
diff --git a/luni/src/main/java/java/util/zip/Zip64.java b/luni/src/main/java/java/util/zip/Zip64.java
new file mode 100644
index 0000000..9be3d1c
--- /dev/null
+++ b/luni/src/main/java/java/util/zip/Zip64.java
@@ -0,0 +1,401 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License
+ */
+
+package java.util.zip;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.BufferUnderflowException;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+import static java.util.zip.ZipOutputStream.writeIntAsUint16;
+import static java.util.zip.ZipOutputStream.writeLongAsUint32;
+import static java.util.zip.ZipOutputStream.writeLongAsUint64;
+
+/**
+ * @hide
+ */
+public class Zip64 {
+
+ /* Non instantiable */
+ private Zip64() {}
+
+ /**
+ * The maximum supported entry / archive size for standard (non zip64) entries and archives.
+ */
+ static final long MAX_ZIP_ENTRY_AND_ARCHIVE_SIZE = 0x00000000ffffffffL;
+
+ /**
+ * The header ID of the zip64 extended info header. This value is used to identify
+ * zip64 data in the "extra" field in the file headers.
+ */
+ private static final short ZIP64_EXTENDED_INFO_HEADER_ID = 0x0001;
+
+ /**
+ * The minimum size of the zip64 extended info header. This excludes the 2 byte header ID
+ * and the 2 byte size.
+ */
+ private static final int ZIP64_EXTENDED_INFO_MIN_SIZE = 28;
+
+ /*
+ * Size (in bytes) of the zip64 end of central directory locator. This will be located
+ * immediately before the end of central directory record if a given zipfile is in the
+ * zip64 format.
+ */
+ private static final int ZIP64_LOCATOR_SIZE = 20;
+
+ /**
+ * The zip64 end of central directory locator signature (4 bytes wide).
+ */
+ private static final int ZIP64_LOCATOR_SIGNATURE = 0x07064b50;
+
+ /**
+ * The zip64 end of central directory record signature (4 bytes wide).
+ */
+ private static final int ZIP64_EOCD_RECORD_SIGNATURE = 0x06064b50;
+
+ /**
+ * The "effective" size of the zip64 eocd record. This excludes the fields that
+ * are proprietary, signature, or fields we aren't interested in. We include the
+ * following (contiguous) fields in this calculation :
+ * - disk number (4 bytes)
+ * - disk with start of central directory (4 bytes)
+ * - number of central directory entries on this disk (8 bytes)
+ * - total number of central directory entries (8 bytes)
+ * - size of the central directory (8 bytes)
+ * - offset of the start of the central directory (8 bytes)
+ */
+ private static final int ZIP64_EOCD_RECORD_EFFECTIVE_SIZE = 40;
+
+ /**
+ * Parses the zip64 end of central directory record locator. The locator
+ * must be placed immediately before the end of central directory (eocd) record
+ * starting at {@code eocdOffset}.
+ *
+ * The position of the file cursor for {@code raf} after a call to this method
+ * is undefined and callers must reposition it after each call to this method.
+ */
+ public static long parseZip64EocdRecordLocator(RandomAccessFile raf, long eocdOffset)
+ throws IOException {
+ // The spec stays curiously silent about whether a zip file with an EOCD record,
+ // a zip64 locator and a zip64 eocd record is considered "empty". In our implementation,
+ // we parse all records and read the counts from them instead of inferring anything
+ // from their sizes or layout.
+ if (eocdOffset > ZIP64_LOCATOR_SIZE) {
+ raf.seek(eocdOffset - ZIP64_LOCATOR_SIZE);
+ if (Integer.reverseBytes(raf.readInt()) == ZIP64_LOCATOR_SIGNATURE) {
+ byte[] zip64EocdLocator = new byte[ZIP64_LOCATOR_SIZE - 4];
+ raf.readFully(zip64EocdLocator);
+ ByteBuffer buf = ByteBuffer.wrap(zip64EocdLocator).order(ByteOrder.LITTLE_ENDIAN);
+
+ final int diskWithCentralDir = buf.getInt();
+ final long zip64EocdRecordOffset = buf.getLong();
+ final int numDisks = buf.getInt();
+
+ if (numDisks != 1 || diskWithCentralDir != 0) {
+ throw new ZipException("Spanned archives not supported");
+ }
+
+ return zip64EocdRecordOffset;
+ }
+ }
+
+ return -1;
+ }
+
+ public static ZipFile.EocdRecord parseZip64EocdRecord(RandomAccessFile raf,
+ long eocdRecordOffset, int commentLength) throws IOException {
+ raf.seek(eocdRecordOffset);
+ final int signature = Integer.reverseBytes(raf.readInt());
+ if (signature != ZIP64_EOCD_RECORD_SIGNATURE) {
+ throw new ZipException("Invalid zip64 eocd record offset, sig="
+ + Integer.toHexString(signature) + " offset=" + eocdRecordOffset);
+ }
+
+ // The zip64 eocd record specifies its own size as an 8 byte integral type. It is variable
+ // length because of the "zip64 extensible data sector" but that field is reserved for
+ // pkware's proprietary use. We therefore disregard it altogether and treat the end of
+ // central directory structure as fixed length.
+ //
+ // We also skip "version made by" (2 bytes) and "version needed to extract" (2 bytes)
+ // fields. We perform additional validation at the ZipEntry level, where applicable.
+ //
+ // That's a total of 12 bytes to skip
+ raf.skipBytes(12);
+
+ byte[] zip64Eocd = new byte[ZIP64_EOCD_RECORD_EFFECTIVE_SIZE];
+ raf.readFully(zip64Eocd);
+
+ ByteBuffer buf = ByteBuffer.wrap(zip64Eocd).order(ByteOrder.LITTLE_ENDIAN);
+ try {
+ int diskNumber = buf.getInt();
+ int diskWithCentralDirStart = buf.getInt();
+ long numEntries = buf.getLong();
+ long totalNumEntries = buf.getLong();
+ buf.getLong(); // Ignore the size of the central directory
+ long centralDirOffset = buf.getLong();
+
+ if (numEntries != totalNumEntries || diskNumber != 0 || diskWithCentralDirStart != 0) {
+ throw new ZipException("Spanned archives not supported :" +
+ " numEntries=" + numEntries + ", totalNumEntries=" + totalNumEntries +
+ ", diskNumber=" + diskNumber + ", diskWithCentralDirStart=" +
+ diskWithCentralDirStart);
+ }
+
+ return new ZipFile.EocdRecord(numEntries, centralDirOffset, commentLength);
+ } catch (BufferUnderflowException bue) {
+ ZipException zipException = new ZipException("Error parsing zip64 eocd record.");
+ zipException.initCause(bue);
+ throw zipException;
+ }
+ }
+
+ /**
+ * Parse the zip64 extended info record from the extras present in {@code ze}.
+ *
+ * If {@code fromCentralDirectory} is true, we assume we're parsing a central directory
+ * record. We assume a local file header otherwise. The difference between the two is that
+ * a central directory entry is required to be complete, whereas a local file header isn't.
+ * This is due to the presence of an optional data descriptor after the file content.
+ *
+ * @return {@code true} iff a zip64 extended info record was found.
+ */
+ public static boolean parseZip64ExtendedInfo(ZipEntry ze, boolean fromCentralDirectory)
+ throws ZipException {
+ int extendedInfoSize = -1;
+ int extendedInfoStart = -1;
+ // If this file contains a zip64 central directory locator, entries might
+ // optionally contain a zip64 extended information extra entry.
+ if (ze.extra != null && ze.extra.length > 0) {
+ // Extensible data fields are of the form header1+data1 + header2+data2 and so
+ // on, where each header consists of a 2 byte header ID followed by a 2 byte size.
+ // We need to iterate through the entire list of headers to find the header ID
+ // for the zip64 extended information extra field (0x0001).
+ final ByteBuffer buf = ByteBuffer.wrap(ze.extra).order(ByteOrder.LITTLE_ENDIAN);
+ extendedInfoSize = getZip64ExtendedInfoSize(buf);
+ if (extendedInfoSize != -1) {
+ extendedInfoStart = buf.position();
+ try {
+ if (extendedInfoSize < ZIP64_EXTENDED_INFO_MIN_SIZE) {
+ throw new ZipException("Invalid zip64 extended info size: " + extendedInfoSize);
+ }
+
+ // The size & compressed size only make sense in the central directory *or* if
+ // we know them beforehand. If we don't know them beforehand, they're stored in
+ // the data descriptor and should be read from there.
+ if (fromCentralDirectory || (ze.getMethod() == ZipEntry.STORED)) {
+ final long zip64Size = buf.getLong();
+ if (ze.size == MAX_ZIP_ENTRY_AND_ARCHIVE_SIZE) {
+ ze.size = zip64Size;
+ }
+
+ final long zip64CompressedSize = buf.getLong();
+ if (ze.compressedSize == MAX_ZIP_ENTRY_AND_ARCHIVE_SIZE) {
+ ze.compressedSize = zip64CompressedSize;
+ }
+ }
+
+ // The local header offset is significant only in the central directory. It makes no
+ // sense within the local header itself.
+ if (fromCentralDirectory) {
+ final long zip64LocalHeaderRelOffset = buf.getLong();
+ if (ze.localHeaderRelOffset == MAX_ZIP_ENTRY_AND_ARCHIVE_SIZE) {
+ ze.localHeaderRelOffset = zip64LocalHeaderRelOffset;
+ }
+ }
+ } catch (BufferUnderflowException bue) {
+ ZipException zipException = new ZipException("Error parsing extendend info ");
+ zipException.initCause(bue);
+ throw zipException;
+ }
+ }
+ }
+
+ // This entry doesn't contain a zip64 extended information data entry header.
+ // We have to check that the compressedSize / size / localHeaderRelOffset values
+ // are valid and don't require the presence of the extended header.
+ if (extendedInfoSize == -1) {
+ if (ze.compressedSize == MAX_ZIP_ENTRY_AND_ARCHIVE_SIZE ||
+ ze.size == MAX_ZIP_ENTRY_AND_ARCHIVE_SIZE ||
+ ze.localHeaderRelOffset == MAX_ZIP_ENTRY_AND_ARCHIVE_SIZE) {
+ throw new ZipException("File contains no zip64 extended information: "
+ + "name=" + ze.name + "compressedSize=" + ze.compressedSize + ", size="
+ + ze.size + ", localHeader=" + ze.localHeaderRelOffset);
+ }
+
+ return false;
+ } else {
+ // If we've parsed the zip64 extended info header, we remove it from the extras
+ // so that applications that set their own extras will see the data they set.
+
+ // This is an unfortunate workaround needed due to a gap in the spec. The spec demands
+ // that extras are present in the "extensible" format, which means that each extra field
+ // must be prefixed with a header ID and a length. However, earlier versions of the spec
+ // made no mention of this, nor did any existing API enforce it. This means users could
+ // set "free form" extras without caring very much whether the implementation wanted to
+ // extend or add to them.
+
+ // The start of the extended info header.
+ final int extendedInfoHeaderStart = extendedInfoStart - 4;
+ // The total size of the extended info, including the header.
+ final int extendedInfoTotalSize = extendedInfoSize + 4;
+
+ final int extrasLen = ze.extra.length - extendedInfoTotalSize;
+ byte[] extrasWithoutZip64 = new byte[extrasLen];
+
+ System.arraycopy(ze.extra, 0, extrasWithoutZip64, 0, extendedInfoHeaderStart);
+ System.arraycopy(ze.extra, extendedInfoHeaderStart + extendedInfoTotalSize,
+ extrasWithoutZip64, extendedInfoHeaderStart, (extrasLen - extendedInfoHeaderStart));
+
+ ze.extra = extrasWithoutZip64;
+ return true;
+ }
+ }
+
+ /**
+ * Inserts a zip64 extended info record into the extras contained in {@code ze}. If {@code ze}
+ * contains no extras, a new extras array is created.
+ */
+ public static void insertZip64ExtendedInfoToExtras(ZipEntry ze) throws ZipException {
+ final byte[] output;
+ // We add 4 to ZIP64_EXTENDED_INFO_MIN_SIZE to account for the 2 byte header ID and the 2 byte length.
+ final int extendedInfoSize = ZIP64_EXTENDED_INFO_MIN_SIZE + 4;
+ if (ze.extra == null) {
+ output = new byte[extendedInfoSize];
+ } else {
+ // If the existing extras are already too big, we have no choice but to throw
+ // an error.
+ if (ze.extra.length + extendedInfoSize > 65535) {
+ throw new ZipException("No space in extras for zip64 extended entry info");
+ }
+
+ // We copy existing extras over and put the zip64 extended info at the beginning. This
+ // is to avoid breakages in the presence of "old style" extras which don't contain
+ // headers and lengths. The spec is again silent about these inconsistencies.
+ //
+ // This means that for ZipOutputStream users, the value of ZipEntry.getExtra()
+ // after an entry is written will be different from before. This shouldn't be an issue
+ // in practice.
+ output = new byte[ze.extra.length + ZIP64_EXTENDED_INFO_MIN_SIZE + 4];
+ System.arraycopy(ze.extra, 0, output, ZIP64_EXTENDED_INFO_MIN_SIZE + 4, ze.extra.length);
+ }
+
+ ByteBuffer bb = ByteBuffer.wrap(output).order(ByteOrder.LITTLE_ENDIAN);
+ bb.putShort(ZIP64_EXTENDED_INFO_HEADER_ID);
+ bb.putShort((short) ZIP64_EXTENDED_INFO_MIN_SIZE);
+
+ if (ze.getMethod() == ZipEntry.STORED) {
+ bb.putLong(ze.size);
+ bb.putLong(ze.compressedSize);
+ } else {
+ // Store these fields in the data descriptor instead.
+ bb.putLong(0); // size.
+ bb.putLong(0); // compressed size.
+ }
+
+ // The offset is only relevant in the central directory entry, but we write it out here
+ // anyway, since we know what it is.
+ bb.putLong(ze.localHeaderRelOffset);
+ bb.putInt(0); // disk number
+
+ ze.extra = output;
+ }
+
+ /**
+ * Returns the size of the extended info record if {@code extras} contains a zip64 extended info
+ * record, {@code -1} otherwise. If a record is found, the buffer will be positioned at the start
+ * of its data; otherwise its final position is unspecified.
+ */
+ private static int getZip64ExtendedInfoSize(ByteBuffer extras) {
+ try {
+ while (extras.hasRemaining()) {
+ final int headerId = extras.getShort() & 0xffff;
+ final int length = extras.getShort() & 0xffff;
+ if (headerId == ZIP64_EXTENDED_INFO_HEADER_ID) {
+ if (extras.remaining() >= length) {
+ return length;
+ } else {
+ return -1;
+ }
+ } else {
+ extras.position(extras.position() + length);
+ }
+ }
+
+ return -1;
+ } catch (BufferUnderflowException bue) {
+ // We'll underflow if we have an incomplete header in our extras.
+ return -1;
+ } catch (IllegalArgumentException iae) {
+ // ByteBuffer.position() will throw if we have a truncated extra or
+ // an invalid length in the header.
+ return -1;
+ }
+ }
+
+ /**
+ * Copy the size, compressed size and local header offset fields from {@code ze}
+ * into {@code ze}'s zip64 extended info record. This additional step is necessary when
+ * the correct sizes could only be calculated after writing out the entry. In this case,
+ * the local file header would not contain real sizes, and they would be present in the
+ * data descriptor and the central directory only.
+ */
+ public static void refreshZip64ExtendedInfo(ZipEntry ze) {
+ if (ze.extra == null || ze.extra.length < ZIP64_EXTENDED_INFO_MIN_SIZE) {
+ throw new IllegalStateException("Zip64 entry has no available extras: " + ze);
+ }
+
+
+ ByteBuffer buf = ByteBuffer.wrap(ze.extra).order(ByteOrder.LITTLE_ENDIAN);
+ if (getZip64ExtendedInfoSize(buf) == -1) {
+ throw new IllegalStateException(
+ "Zip64 entry extras has no zip64 extended info record: " + ze);
+ }
+
+ buf.putLong(ze.size);
+ buf.putLong(ze.compressedSize);
+ buf.putLong(ze.localHeaderRelOffset);
+ buf.putInt(0); // disk number.
+ }
+
+ public static void writeZip64EocdRecordAndLocator(ByteArrayOutputStream baos,
+ long numEntries, long offset, long cDirSize) throws IOException {
+ // Step 1: Write out the zip64 EOCD record.
+ writeLongAsUint32(baos, ZIP64_EOCD_RECORD_SIGNATURE);
+ // The size of the zip64 eocd record. This is the effective size + the
+ // size of the "version made by" (2 bytes) and the "version needed to extract" (2 bytes)
+ // fields.
+ writeLongAsUint64(baos, ZIP64_EOCD_RECORD_EFFECTIVE_SIZE + 4);
+ // TODO: What values should we put here? The pre-zip64 values we've chosen don't
+ // seem to make much sense either.
+ writeIntAsUint16(baos, 20);
+ writeIntAsUint16(baos, 20);
+ writeLongAsUint32(baos, 0L); // number of disk
+ writeLongAsUint32(baos, 0L); // number of disk with start of central dir.
+ writeLongAsUint64(baos, numEntries); // number of entries in this disk.
+ writeLongAsUint64(baos, numEntries); // number of entries in total.
+ writeLongAsUint64(baos, cDirSize); // size of the central directory.
+ writeLongAsUint64(baos, offset); // offset of the central directory wrt. this file.
+
+ // Step 2: Write out the zip64 EOCD record locator.
+ writeLongAsUint32(baos, ZIP64_LOCATOR_SIGNATURE);
+ writeLongAsUint32(baos, 0); // number of disk with start of central dir.
+ writeLongAsUint64(baos, offset + cDirSize); // offset of the eocd record wrt. this file.
+ writeLongAsUint32(baos, 1); // total number of disks.
+ }
+}
diff --git a/luni/src/main/java/java/util/zip/ZipEntry.java b/luni/src/main/java/java/util/zip/ZipEntry.java
index 771af83..d6781b8 100644
--- a/luni/src/main/java/java/util/zip/ZipEntry.java
+++ b/luni/src/main/java/java/util/zip/ZipEntry.java
@@ -268,17 +268,10 @@ public class ZipEntry implements ZipConstants, Cloneable {
/**
* Sets the uncompressed size of this {@code ZipEntry}.
*
- * @param value
- * the uncompressed size for this entry.
- * @throws IllegalArgumentException
- * if {@code value} < 0 or {@code value} > 0xFFFFFFFFL.
+ * @param value the uncompressed size for this entry.
*/
public void setSize(long value) {
- if (value >= 0 && value <= 0xFFFFFFFFL) {
- size = value;
- } else {
- throw new IllegalArgumentException("Bad size: " + value);
- }
+ size = value;
}
/**
@@ -380,7 +373,7 @@ public class ZipEntry implements ZipConstants, Cloneable {
* On exit, "in" will be positioned at the start of the next entry
* in the Central Directory.
*/
- ZipEntry(byte[] cdeHdrBuf, InputStream cdStream, Charset defaultCharset) throws IOException {
+ ZipEntry(byte[] cdeHdrBuf, InputStream cdStream, Charset defaultCharset, boolean isZip64) throws IOException {
Streams.readFully(cdStream, cdeHdrBuf, 0, cdeHdrBuf.length);
BufferIterator it = HeapBufferIterator.iterator(cdeHdrBuf, 0, cdeHdrBuf.length,
@@ -439,6 +432,10 @@ public class ZipEntry implements ZipConstants, Cloneable {
Streams.readFully(cdStream, commentBytes, 0, commentByteCount);
comment = new String(commentBytes, 0, commentBytes.length, charset);
}
+
+ if (isZip64) {
+ Zip64.parseZip64ExtendedInfo(this, true /* from central directory */);
+ }
}
private static boolean containsNulByte(byte[] bytes) {
diff --git a/luni/src/main/java/java/util/zip/ZipFile.java b/luni/src/main/java/java/util/zip/ZipFile.java
index b44156e..307e7fe 100644
--- a/luni/src/main/java/java/util/zip/ZipFile.java
+++ b/luni/src/main/java/java/util/zip/ZipFile.java
@@ -107,6 +107,18 @@ public class ZipFile implements Closeable, ZipConstants {
private final CloseGuard guard = CloseGuard.get();
+ static class EocdRecord {
+ final long numEntries;
+ final long centralDirOffset;
+ final int commentLength;
+
+ EocdRecord(long numEntries, long centralDirOffset, int commentLength) {
+ this.numEntries = numEntries;
+ this.centralDirOffset = centralDirOffset;
+ this.commentLength = commentLength;
+ }
+ }
+
/**
* Constructs a new {@code ZipFile} allowing read access to the contents of the given file.
*
@@ -390,9 +402,11 @@ public class ZipFile implements Closeable, ZipConstants {
stopOffset = 0;
}
+ long eocdOffset;
while (true) {
raf.seek(scanOffset);
if (Integer.reverseBytes(raf.readInt()) == ENDSIG) {
+ eocdOffset = scanOffset;
break;
}
@@ -402,41 +416,35 @@ public class ZipFile implements Closeable, ZipConstants {
}
}
- // Read the End Of Central Directory. ENDHDR includes the signature bytes,
- // which we've already read.
- byte[] eocd = new byte[ENDHDR - 4];
- raf.readFully(eocd);
+ final long zip64EocdRecordOffset = Zip64.parseZip64EocdRecordLocator(raf, eocdOffset);
- // Pull out the information we need.
- BufferIterator it = HeapBufferIterator.iterator(eocd, 0, eocd.length, ByteOrder.LITTLE_ENDIAN);
- int diskNumber = it.readShort() & 0xffff;
- int diskWithCentralDir = it.readShort() & 0xffff;
- int numEntries = it.readShort() & 0xffff;
- int totalNumEntries = it.readShort() & 0xffff;
- it.skip(4); // Ignore centralDirSize.
- long centralDirOffset = ((long) it.readInt()) & 0xffffffffL;
- int commentLength = it.readShort() & 0xffff;
-
- if (numEntries != totalNumEntries || diskNumber != 0 || diskWithCentralDir != 0) {
- throw new ZipException("Spanned archives not supported");
- }
-
- if (commentLength > 0) {
- byte[] commentBytes = new byte[commentLength];
+ // Seek back past the eocd signature so that we can continue with our search.
+ // Note that we add 4 bytes to the offset to skip past the signature.
+ EocdRecord record = parseEocdRecord(raf, eocdOffset + 4, (zip64EocdRecordOffset != -1) /* isZip64 */);
+ // Read the comment now to avoid an additional seek. We also know the commentLength
+ // won't change because that information isn't present in the zip64 eocd record.
+ if (record.commentLength > 0) {
+ byte[] commentBytes = new byte[record.commentLength];
raf.readFully(commentBytes);
comment = new String(commentBytes, 0, commentBytes.length, StandardCharsets.UTF_8);
}
+ // We have a zip64 eocd record: use that for getting the information we need.
+ if (zip64EocdRecordOffset != -1) {
+ record = Zip64.parseZip64EocdRecord(raf, zip64EocdRecordOffset, record.commentLength);
+ }
+
// Seek to the first CDE and read all entries.
// We have to do this now (from the constructor) rather than lazily because the
// public API doesn't allow us to throw IOException except from the constructor
// or from getInputStream.
- RAFStream rafStream = new RAFStream(raf, centralDirOffset);
+ RAFStream rafStream = new RAFStream(raf, record.centralDirOffset);
BufferedInputStream bufferedStream = new BufferedInputStream(rafStream, 4096);
byte[] hdrBuf = new byte[CENHDR]; // Reuse the same buffer for each entry.
- for (int i = 0; i < numEntries; ++i) {
- ZipEntry newEntry = new ZipEntry(hdrBuf, bufferedStream, StandardCharsets.UTF_8);
- if (newEntry.localHeaderRelOffset >= centralDirOffset) {
+ for (long i = 0; i < record.numEntries; ++i) {
+ ZipEntry newEntry = new ZipEntry(hdrBuf, bufferedStream, StandardCharsets.UTF_8,
+ (zip64EocdRecordOffset != -1) /* isZip64 */);
+ if (newEntry.localHeaderRelOffset >= record.centralDirOffset) {
throw new ZipException("Local file header offset is after central directory");
}
String entryName = newEntry.getName();
@@ -446,6 +454,45 @@ public class ZipFile implements Closeable, ZipConstants {
}
}
+ private static EocdRecord parseEocdRecord(RandomAccessFile raf, long offset, boolean isZip64) throws IOException {
+ raf.seek(offset);
+
+ // Read the End Of Central Directory. ENDHDR includes the signature bytes,
+ // which we've already read.
+ byte[] eocd = new byte[ENDHDR - 4];
+ raf.readFully(eocd);
+
+ BufferIterator it = HeapBufferIterator.iterator(eocd, 0, eocd.length, ByteOrder.LITTLE_ENDIAN);
+ final long numEntries;
+ final long centralDirOffset;
+ if (isZip64) {
+ numEntries = -1;
+ centralDirOffset = -1;
+
+ // If we have a zip64 end of central directory record, we skip through the regular
+ // end of central directory record and use the information from the zip64 eocd record.
+ // We're still forced to read the comment length (below) since it isn't present in the
+ // zip64 eocd record.
+ it.skip(16);
+ } else {
+ // If we don't have a zip64 eocd record, we read values from the "regular"
+ // eocd record.
+ int diskNumber = it.readShort() & 0xffff;
+ int diskWithCentralDir = it.readShort() & 0xffff;
+ numEntries = it.readShort() & 0xffff;
+ int totalNumEntries = it.readShort() & 0xffff;
+ it.skip(4); // Ignore centralDirSize.
+
+ centralDirOffset = ((long) it.readInt()) & 0xffffffffL;
+ if (numEntries != totalNumEntries || diskNumber != 0 || diskWithCentralDir != 0) {
+ throw new ZipException("Spanned archives not supported");
+ }
+ }
+
+ final int commentLength = it.readShort() & 0xffff;
+ return new EocdRecord(numEntries, centralDirOffset, commentLength);
+ }
+
static void throwZipException(String msg, int magic) throws ZipException {
final String hexString = IntegralToString.intToHexString(magic, true, 8);
throw new ZipException(msg + " signature not found; was " + hexString);
diff --git a/luni/src/main/java/java/util/zip/ZipInputStream.java b/luni/src/main/java/java/util/zip/ZipInputStream.java
index 4c0034e..f3ca74e 100644
--- a/luni/src/main/java/java/util/zip/ZipInputStream.java
+++ b/luni/src/main/java/java/util/zip/ZipInputStream.java
@@ -81,7 +81,9 @@ public class ZipInputStream extends InflaterInputStream implements ZipConstants
private ZipEntry currentEntry;
- private final byte[] hdrBuf = new byte[LOCHDR - LOCVER];
+ private boolean currentEntryIsZip64;
+
+ private final byte[] hdrBuf = new byte[LOCHDR - LOCVER + 8];
private final CRC32 crc = new CRC32();
@@ -159,7 +161,7 @@ public class ZipInputStream extends InflaterInputStream implements ZipConstants
}
try {
- readAndVerifyDataDescriptor(inB, out);
+ readAndVerifyDataDescriptor(inB, out, currentEntryIsZip64);
} catch (Exception e) {
if (failure == null) { // otherwise we're already going to throw
failure = e;
@@ -183,16 +185,31 @@ public class ZipInputStream extends InflaterInputStream implements ZipConstants
}
}
- private void readAndVerifyDataDescriptor(int inB, int out) throws IOException {
+ private void readAndVerifyDataDescriptor(long inB, long out, boolean isZip64) throws IOException {
if (hasDD) {
- Streams.readFully(in, hdrBuf, 0, EXTHDR);
+ if (isZip64) {
+ // 8 additional bytes since the compressed / uncompressed size fields
+ // in the extended header are 8 bytes each, instead of 4 bytes each.
+ Streams.readFully(in, hdrBuf, 0, EXTHDR + 8);
+ } else {
+ Streams.readFully(in, hdrBuf, 0, EXTHDR);
+ }
+
int sig = Memory.peekInt(hdrBuf, 0, ByteOrder.LITTLE_ENDIAN);
if (sig != (int) EXTSIG) {
throw new ZipException(String.format("unknown format (EXTSIG=%x)", sig));
}
currentEntry.crc = ((long) Memory.peekInt(hdrBuf, EXTCRC, ByteOrder.LITTLE_ENDIAN)) & 0xffffffffL;
- currentEntry.compressedSize = ((long) Memory.peekInt(hdrBuf, EXTSIZ, ByteOrder.LITTLE_ENDIAN)) & 0xffffffffL;
- currentEntry.size = ((long) Memory.peekInt(hdrBuf, EXTLEN, ByteOrder.LITTLE_ENDIAN)) & 0xffffffffL;
+
+ if (isZip64) {
+ currentEntry.compressedSize = Memory.peekLong(hdrBuf, EXTSIZ, ByteOrder.LITTLE_ENDIAN);
+ // Note that we apply an adjustment of 4 bytes to the offset of EXTLEN to account
+ // for the 8 byte size for zip64.
+ currentEntry.size = Memory.peekLong(hdrBuf, EXTLEN + 4, ByteOrder.LITTLE_ENDIAN);
+ } else {
+ currentEntry.compressedSize = ((long) Memory.peekInt(hdrBuf, EXTSIZ, ByteOrder.LITTLE_ENDIAN)) & 0xffffffffL;
+ currentEntry.size = ((long) Memory.peekInt(hdrBuf, EXTLEN, ByteOrder.LITTLE_ENDIAN)) & 0xffffffffL;
+ }
}
if (currentEntry.crc != crc.getValue()) {
throw new ZipException("CRC mismatch");
@@ -266,7 +283,11 @@ public class ZipInputStream extends InflaterInputStream implements ZipConstants
byte[] extraData = new byte[extraLength];
Streams.readFully(in, extraData, 0, extraLength);
currentEntry.setExtra(extraData);
+ currentEntryIsZip64 = Zip64.parseZip64ExtendedInfo(currentEntry, false /* from central directory */);
+ } else {
+ currentEntryIsZip64 = false;
}
+
return currentEntry;
}
diff --git a/luni/src/main/java/java/util/zip/ZipOutputStream.java b/luni/src/main/java/java/util/zip/ZipOutputStream.java
index 8278355..4995f5b 100644
--- a/luni/src/main/java/java/util/zip/ZipOutputStream.java
+++ b/luni/src/main/java/java/util/zip/ZipOutputStream.java
@@ -23,6 +23,8 @@ import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.HashSet;
+
+import libcore.util.CountingOutputStream;
import libcore.util.EmptyArray;
/**
@@ -85,7 +87,7 @@ public class ZipOutputStream extends DeflaterOutputStream implements ZipConstant
private final CRC32 crc = new CRC32();
- private int offset = 0, curOffset = 0;
+ private long offset = 0;
/** The charset-encoded name for the current entry. */
private byte[] nameBytes;
@@ -93,6 +95,31 @@ public class ZipOutputStream extends DeflaterOutputStream implements ZipConstant
/** The charset-encoded comment for the current entry. */
private byte[] entryCommentBytes;
+ private static final byte[] ZIP64_PLACEHOLDER_BYTES =
+ new byte[] { (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff };
+
+ /**
+ * Whether this zip file needs a Zip64 EOCD record / zip64 EOCD record locator. This
+ * will be true if we wrote an entry whose size or compressed size was too large for
+ * the standard zip format or if we exceeded the maximum number of entries allowed
+ * in the standard format.
+ */
+ private boolean archiveNeedsZip64EocdRecord;
+
+ /**
+ * Whether the current entry being processed needs a zip64 extended info record. This
+ * will be true if the entry is too large for the standard zip format or if the offset
+ * to the start of the current entry header is greater than 0xFFFFFFFF.
+ */
+ private boolean currentEntryNeedsZip64;
+
+ /**
+ * Whether we force all entries in this archive to have a zip64 extended info record.
+ * This of course implies that {@code currentEntryNeedsZip64} and
+ * {@code archiveNeedsZip64EocdRecord} are always {@code true}.
+ */
+ private final boolean forceZip64;
+
/**
* Constructs a new {@code ZipOutputStream} that writes a zip file to the given
* {@code OutputStream}.
@@ -100,7 +127,15 @@ public class ZipOutputStream extends DeflaterOutputStream implements ZipConstant
* <p>UTF-8 will be used to encode the file comment, entry names and comments.
*/
public ZipOutputStream(OutputStream os) {
- super(os, new Deflater(Deflater.DEFAULT_COMPRESSION, true));
+ this(os, false /* forceZip64 */);
+ }
+
+ /**
+ * @hide for testing only.
+ */
+ public ZipOutputStream(OutputStream os, boolean forceZip64) {
+ super(new CountingOutputStream(os), new Deflater(Deflater.DEFAULT_COMPRESSION, true));
+ this.forceZip64 = forceZip64;
}
/**
@@ -146,15 +181,30 @@ public class ZipOutputStream extends DeflaterOutputStream implements ZipConstant
throw new ZipException("Size mismatch");
}
}
- curOffset = LOCHDR;
+
+ long curOffset = LOCHDR;
// Write the DataDescriptor
if (currentEntry.getMethod() != STORED) {
curOffset += EXTHDR;
- writeLong(out, EXTSIG);
- writeLong(out, currentEntry.crc = crc.getValue());
- writeLong(out, currentEntry.compressedSize = def.getTotalOut());
- writeLong(out, currentEntry.size = def.getTotalIn());
+
+ // Data descriptor signature and CRC are 4 bytes each for both zip and zip64.
+ writeLongAsUint32(out, EXTSIG);
+ writeLongAsUint32(out, currentEntry.crc = crc.getValue());
+
+ currentEntry.compressedSize = def.getBytesWritten();
+ currentEntry.size = def.getBytesRead();
+
+ if (currentEntryNeedsZip64) {
+ // We need an additional 8 bytes to store 8 byte compressed / uncompressed
+ // sizes.
+ curOffset += 8;
+ writeLongAsUint64(out, currentEntry.compressedSize);
+ writeLongAsUint64(out, currentEntry.size);
+ } else {
+ writeLongAsUint32(out, currentEntry.compressedSize);
+ writeLongAsUint32(out, currentEntry.size);
+ }
}
// Update the CentralDirectory
// http://www.pkware.com/documents/casestudies/APPNOTE.TXT
@@ -163,33 +213,54 @@ public class ZipOutputStream extends DeflaterOutputStream implements ZipConstant
// Some tools insist that the central directory have the UTF-8 flag.
// http://code.google.com/p/android/issues/detail?id=20214
flags |= ZipFile.GPBF_UTF8_FLAG;
- writeLong(cDir, CENSIG);
- writeShort(cDir, ZIP_VERSION_2_0); // Version this file was made by.
- writeShort(cDir, ZIP_VERSION_2_0); // Minimum version needed to extract.
- writeShort(cDir, flags);
- writeShort(cDir, currentEntry.getMethod());
- writeShort(cDir, currentEntry.time);
- writeShort(cDir, currentEntry.modDate);
- writeLong(cDir, crc.getValue());
+ writeLongAsUint32(cDir, CENSIG);
+ writeIntAsUint16(cDir, ZIP_VERSION_2_0); // Version this file was made by.
+ writeIntAsUint16(cDir, ZIP_VERSION_2_0); // Minimum version needed to extract.
+ writeIntAsUint16(cDir, flags);
+ writeIntAsUint16(cDir, currentEntry.getMethod());
+ writeIntAsUint16(cDir, currentEntry.time);
+ writeIntAsUint16(cDir, currentEntry.modDate);
+ writeLongAsUint32(cDir, crc.getValue());
+
if (currentEntry.getMethod() == DEFLATED) {
- curOffset += writeLong(cDir, def.getTotalOut());
- writeLong(cDir, def.getTotalIn());
+ currentEntry.setCompressedSize(def.getBytesWritten());
+ currentEntry.setSize(def.getBytesRead());
+ curOffset += currentEntry.getCompressedSize();
} else {
- curOffset += writeLong(cDir, crc.tbytes);
- writeLong(cDir, crc.tbytes);
+ currentEntry.setCompressedSize(crc.tbytes);
+ currentEntry.setSize(crc.tbytes);
+ curOffset += currentEntry.getSize();
}
- curOffset += writeShort(cDir, nameBytes.length);
+
+ if (currentEntryNeedsZip64) {
+ // Refresh the extended info with the compressed size / size before
+ // writing it to the central directory.
+ Zip64.refreshZip64ExtendedInfo(currentEntry);
+
+ // NOTE: We would've written out the zip64 extended info record to the entry
+ // extras while constructing the local file header. There's no need to do it again
+ // here. If we do, there will be a size mismatch since we're calculating offsets
+ // based on the *current* size of the extra data and not based on the size
+ // at the point of writing the LFH.
+ writeLongAsUint32(cDir, Zip64.MAX_ZIP_ENTRY_AND_ARCHIVE_SIZE);
+ writeLongAsUint32(cDir, Zip64.MAX_ZIP_ENTRY_AND_ARCHIVE_SIZE);
+ } else {
+ writeLongAsUint32(cDir, currentEntry.getCompressedSize());
+ writeLongAsUint32(cDir, currentEntry.getSize());
+ }
+
+ curOffset += writeIntAsUint16(cDir, nameBytes.length);
if (currentEntry.extra != null) {
- curOffset += writeShort(cDir, currentEntry.extra.length);
+ curOffset += writeIntAsUint16(cDir, currentEntry.extra.length);
} else {
- writeShort(cDir, 0);
+ writeIntAsUint16(cDir, 0);
}
- writeShort(cDir, entryCommentBytes.length); // Comment length.
- writeShort(cDir, 0); // Disk Start
- writeShort(cDir, 0); // Internal File Attributes
- writeLong(cDir, 0); // External File Attributes
- writeLong(cDir, offset);
+ writeIntAsUint16(cDir, entryCommentBytes.length); // Comment length.
+ writeIntAsUint16(cDir, 0); // Disk Start
+ writeIntAsUint16(cDir, 0); // Internal File Attributes
+ writeLongAsUint32(cDir, 0); // External File Attributes
+ writeLongAsUint32(cDir, offset);
cDir.write(nameBytes);
nameBytes = null;
if (currentEntry.extra != null) {
@@ -228,16 +299,32 @@ public class ZipOutputStream extends DeflaterOutputStream implements ZipConstant
if (currentEntry != null) {
closeEntry();
}
- int cdirSize = cDir.size();
+
+ int cdirEntriesSize = cDir.size();
+ if (archiveNeedsZip64EocdRecord) {
+ Zip64.writeZip64EocdRecordAndLocator(cDir, entries.size(), offset, cdirEntriesSize);
+ }
+
// Write Central Dir End
- writeLong(cDir, ENDSIG);
- writeShort(cDir, 0); // Disk Number
- writeShort(cDir, 0); // Start Disk
- writeShort(cDir, entries.size()); // Number of entries
- writeShort(cDir, entries.size()); // Number of entries
- writeLong(cDir, cdirSize); // Size of central dir
- writeLong(cDir, offset); // Offset of central dir
- writeShort(cDir, commentBytes.length);
+ writeLongAsUint32(cDir, ENDSIG);
+ writeIntAsUint16(cDir, 0); // Disk Number
+ writeIntAsUint16(cDir, 0); // Start Disk
+
+ // Instead of trying to figure out *why* this archive needed a zip64 eocd record,
+ // just delegate all these values to the zip64 eocd record.
+ if (archiveNeedsZip64EocdRecord) {
+ writeIntAsUint16(cDir, 0xFFFF); // Number of entries
+ writeIntAsUint16(cDir, 0xFFFF); // Number of entries
+ writeLongAsUint32(cDir, 0xFFFFFFFF); // Size of central dir
+ writeLongAsUint32(cDir, 0xFFFFFFFF); // Offset of central dir;
+ } else {
+ writeIntAsUint16(cDir, entries.size()); // Number of entries
+ writeIntAsUint16(cDir, entries.size()); // Number of entries
+ writeLongAsUint32(cDir, cdirEntriesSize); // Size of central dir
+ writeLongAsUint32(cDir, offset); // Offset of central dir
+ }
+
+ writeIntAsUint16(cDir, commentBytes.length);
if (commentBytes.length > 0) {
cDir.write(commentBytes);
}
@@ -288,14 +375,8 @@ public class ZipOutputStream extends DeflaterOutputStream implements ZipConstant
}
checkOpen();
+ checkAndSetZip64Requirements(ze);
- if (entries.contains(ze.name)) {
- throw new ZipException("Entry already exists: " + ze.name);
- }
- if (entries.size() == 64*1024-1) {
- // TODO: support Zip64.
- throw new ZipException("Too many entries for the zip file format's 16-bit entry count");
- }
nameBytes = ze.name.getBytes(StandardCharsets.UTF_8);
checkSizeIsWithinShort("Name", nameBytes);
entryCommentBytes = EmptyArray.BYTE;
@@ -310,6 +391,7 @@ public class ZipOutputStream extends DeflaterOutputStream implements ZipConstant
ze.setMethod(method);
currentEntry = ze;
+ currentEntry.localHeaderRelOffset = offset;
entries.add(currentEntry.name);
// Local file header.
@@ -318,30 +400,48 @@ public class ZipOutputStream extends DeflaterOutputStream implements ZipConstant
// Java always outputs UTF-8 filenames. (Before Java 7, the RI didn't set this flag and used
// modified UTF-8. From Java 7, when using UTF_8 it sets this flag and uses normal UTF-8.)
flags |= ZipFile.GPBF_UTF8_FLAG;
- writeLong(out, LOCSIG); // Entry header
- writeShort(out, ZIP_VERSION_2_0); // Minimum version needed to extract.
- writeShort(out, flags);
- writeShort(out, method);
+ writeLongAsUint32(out, LOCSIG); // Entry header
+ writeIntAsUint16(out, ZIP_VERSION_2_0); // Minimum version needed to extract.
+ writeIntAsUint16(out, flags);
+ writeIntAsUint16(out, method);
if (currentEntry.getTime() == -1) {
currentEntry.setTime(System.currentTimeMillis());
}
- writeShort(out, currentEntry.time);
- writeShort(out, currentEntry.modDate);
+ writeIntAsUint16(out, currentEntry.time);
+ writeIntAsUint16(out, currentEntry.modDate);
if (method == STORED) {
- writeLong(out, currentEntry.crc);
- writeLong(out, currentEntry.size);
- writeLong(out, currentEntry.size);
+ writeLongAsUint32(out, currentEntry.crc);
+
+ if (currentEntryNeedsZip64) {
+ // NOTE: According to the spec, we're allowed to use these fields under zip64
+ // as long as the sizes are <= 4G (and omit writing the zip64 extended information header).
+ //
+ // For simplicity, we write the zip64 extended info here even if we only need it
+ // in the central directory (i.e., the case where we're turning on zip64 because the
+ // offset to this entry's LFH is > 0xFFFFFFFF).
+ out.write(ZIP64_PLACEHOLDER_BYTES); // compressed size
+ out.write(ZIP64_PLACEHOLDER_BYTES); // uncompressed size
+ } else {
+ writeLongAsUint32(out, currentEntry.size);
+ writeLongAsUint32(out, currentEntry.size);
+ }
} else {
- writeLong(out, 0);
- writeLong(out, 0);
- writeLong(out, 0);
+ writeLongAsUint32(out, 0);
+ writeLongAsUint32(out, 0);
+ writeLongAsUint32(out, 0);
}
- writeShort(out, nameBytes.length);
+
+ writeIntAsUint16(out, nameBytes.length);
+
+ if (currentEntryNeedsZip64) {
+ Zip64.insertZip64ExtendedInfoToExtras(currentEntry);
+ }
+
if (currentEntry.extra != null) {
- writeShort(out, currentEntry.extra.length);
+ writeIntAsUint16(out, currentEntry.extra.length);
} else {
- writeShort(out, 0);
+ writeIntAsUint16(out, 0);
}
out.write(nameBytes);
if (currentEntry.extra != null) {
@@ -349,6 +449,42 @@ public class ZipOutputStream extends DeflaterOutputStream implements ZipConstant
}
}
+ private void checkAndSetZip64Requirements(ZipEntry entry) {
+ final long totalBytesWritten = getBytesWritten();
+ final long entriesWritten = entries.size();
+
+ currentEntryNeedsZip64 = false;
+ if (forceZip64) {
+ currentEntryNeedsZip64 = true;
+ archiveNeedsZip64EocdRecord = true;
+ return;
+ }
+
+ // In this particular case, we'll write a zip64 eocd record locator and a zip64 eocd
+ // record but we won't actually need zip64 extended info records for any of the individual
+ // entries (unless they trigger the checks below).
+ if (entriesWritten == 64*1024-1) {
+ archiveNeedsZip64EocdRecord = true;
+ }
+
+ // Check whether we'll need to write out a zip64 extended info record in both the local file header
+ // and the central directory. In addition, we will need a zip64 eocd record locator
+ // and record to mark this archive as zip64.
+ //
+ // TODO: This is an imprecise check. When method != STORED it's possible that the compressed
+ // size will be (slightly) larger than the actual size. How can we improve this?
+ //
+ // TODO: Will we regret forcing zip64 for archive entries with unknown entry sizes? This is
+ // standard "zip" behaviour on Linux but I'm not sure if we'll end up breaking somebody as a
+ // result.
+ if (totalBytesWritten > Zip64.MAX_ZIP_ENTRY_AND_ARCHIVE_SIZE ||
+ (entry.getSize() > Zip64.MAX_ZIP_ENTRY_AND_ARCHIVE_SIZE) ||
+ (entry.getSize() == -1)) {
+ currentEntryNeedsZip64 = true;
+ archiveNeedsZip64EocdRecord = true;
+ }
+ }
+
/**
* Sets the comment associated with the file being written. See {@link ZipFile#getComment}.
* @throws IllegalArgumentException if the comment is >= 64 Ki encoded bytes.
@@ -386,7 +522,7 @@ public class ZipOutputStream extends DeflaterOutputStream implements ZipConstant
defaultCompressionMethod = method;
}
- private long writeLong(OutputStream os, long i) throws IOException {
+ static long writeLongAsUint32(OutputStream os, long i) throws IOException {
// Write out the long value as an unsigned int
os.write((int) (i & 0xFF));
os.write((int) (i >> 8) & 0xFF);
@@ -395,7 +531,23 @@ public class ZipOutputStream extends DeflaterOutputStream implements ZipConstant
return i;
}
- private int writeShort(OutputStream os, int i) throws IOException {
+ static long writeLongAsUint64(OutputStream os, long i) throws IOException {
+ int i1 = (int) i;
+ os.write(i1 & 0xFF);
+ os.write((i1 >> 8) & 0xFF);
+ os.write((i1 >> 16) & 0xFF);
+ os.write((i1 >> 24) & 0xFF);
+
+ int i2 = (int) (i >> 32);
+ os.write(i2 & 0xFF);
+ os.write((i2 >> 8) & 0xFF);
+ os.write((i2 >> 16) & 0xFF);
+ os.write((i2 >> 24) & 0xFF);
+
+ return i;
+ }
+
+ static int writeIntAsUint16(OutputStream os, int i) throws IOException {
os.write(i & 0xFF);
os.write((i >> 8) & 0xFF);
return i;
@@ -434,4 +586,11 @@ public class ZipOutputStream extends DeflaterOutputStream implements ZipConstant
" bytes");
}
}
+
+ private long getBytesWritten() {
+ // This cast is somewhat messy but less error prone than keeping a
+ // CountingOutputStream reference around in addition to the FilterOutputStream's
+ // out.
+ return ((CountingOutputStream) out).getCount();
+ }
}
diff --git a/luni/src/main/java/libcore/util/CountingOutputStream.java b/luni/src/main/java/libcore/util/CountingOutputStream.java
new file mode 100644
index 0000000..cc0e1f2
--- /dev/null
+++ b/luni/src/main/java/libcore/util/CountingOutputStream.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License
+ */
+
+package libcore.util;
+
+import java.io.FilterOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+
+/**
+ * An output stream that keeps count of the number of bytes written to it.
+ *
+ * Useful when we need to make decisions based on the size of the output, such
+ * as deciding what sort of metadata to write to zip files.
+ */
+public class CountingOutputStream extends FilterOutputStream {
+
+ private long count;
+
+ /**
+ * Constructs a new {@code CountingOutputStream} with {@code out} as its
+ * target stream.
+ *
+ * @param out the target stream that this stream writes to.
+ */
+ public CountingOutputStream(OutputStream out) {
+ super(out);
+ count = 0;
+ }
+
+ @Override
+ public void write(byte[] buffer, int offset, int length) throws IOException {
+ out.write(buffer, offset, length);
+ count += length;
+ }
+
+ @Override
+ public void write(int oneByte) throws IOException {
+ out.write(oneByte);
+ count++;
+ }
+
+ public long getCount() {
+ return count;
+ }
+}
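A quick note on how this helper is consumed: the ZipOutputStream change above wraps its target stream in a CountingOutputStream and reads getCount() back as the current archive offset. A minimal usage sketch follows; it assumes libcore.util is visible to the caller (it is an internal, non-SDK package), and the class name CountingOutputStreamDemo is illustrative only.

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;

    import libcore.util.CountingOutputStream;

    public class CountingOutputStreamDemo {
        public static void main(String[] args) throws IOException {
            // The wrapper forwards every write to the target stream and tallies the bytes.
            CountingOutputStream counter =
                    new CountingOutputStream(new ByteArrayOutputStream());
            counter.write(new byte[8192], 0, 8192);
            counter.write(0x42);
            System.out.println(counter.getCount()); // prints 8193
        }
    }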
diff --git a/luni/src/test/java/libcore/java/util/zip/ZipEntryTest.java b/luni/src/test/java/libcore/java/util/zip/ZipEntryTest.java
index 550ddfb..9c3e870 100644
--- a/luni/src/test/java/libcore/java/util/zip/ZipEntryTest.java
+++ b/luni/src/test/java/libcore/java/util/zip/ZipEntryTest.java
@@ -25,6 +25,7 @@ import java.util.Arrays;
import java.util.List;
import java.util.jar.JarEntry;
import java.util.zip.ZipEntry;
+import java.util.zip.ZipException;
import java.util.zip.ZipFile;
import java.util.zip.ZipInputStream;
import java.util.zip.ZipOutputStream;
@@ -132,6 +133,7 @@ public class ZipEntryTest extends junit.framework.TestCase {
File f = createTemporaryZipFile();
ZipOutputStream out = createZipOutputStream(f);
ZipEntry ze = new ZipEntry("x");
+ ze.setSize(0);
ze.setExtra(maxLengthExtra);
out.putNextEntry(ze);
out.closeEntry();
@@ -143,7 +145,25 @@ public class ZipEntryTest extends junit.framework.TestCase {
zipFile.close();
}
- public void testTooLongComment() throws Exception {
+ public void testMaxLengthExtra_zip64() throws Exception {
+ // Not quite the max length (65535), but large enough that there's no space
+ // for the zip64 extended info header.
+ byte[] maxLengthExtra = new byte[65530];
+
+ File f = createTemporaryZipFile();
+ ZipOutputStream out = createZipOutputStream(f);
+ ZipEntry ze = new ZipEntry("x");
+
+ ze.setExtra(maxLengthExtra);
+ try {
+ out.putNextEntry(ze);
+ fail();
+ } catch (ZipException expected) {
+ }
+ }
+
+
+ public void testTooLongComment() throws Exception {
String tooLongComment = makeString(65536, "z");
ZipEntry ze = new ZipEntry("x");
try {
@@ -176,7 +196,17 @@ public class ZipEntryTest extends junit.framework.TestCase {
File f = createTemporaryZipFile();
ZipOutputStream out = createZipOutputStream(f);
+
+ // Regular (non zip64) format.
ZipEntry ze = new ZipEntry("x");
+ ze.setSize(0);
+ ze.setExtra(extra);
+ ze.setComment(comment);
+ out.putNextEntry(ze);
+ out.closeEntry();
+
+ // An entry without a length is assumed to be zip64.
+ ze = new ZipEntry("y");
ze.setExtra(extra);
ze.setComment(comment);
out.putNextEntry(ze);
@@ -188,6 +218,9 @@ public class ZipEntryTest extends junit.framework.TestCase {
try {
assertEquals(comment, zipFile.getEntry("x").getComment());
assertTrue(Arrays.equals(extra, zipFile.getEntry("x").getExtra()));
+
+ assertEquals(comment, zipFile.getEntry("y").getComment());
+ assertTrue(Arrays.equals(extra, zipFile.getEntry("y").getExtra()));
} finally {
zipFile.close();
}
diff --git a/luni/src/test/java/libcore/java/util/zip/ZipFileTest.java b/luni/src/test/java/libcore/java/util/zip/ZipFileTest.java
index 1826140..bf18978 100644
--- a/luni/src/test/java/libcore/java/util/zip/ZipFileTest.java
+++ b/luni/src/test/java/libcore/java/util/zip/ZipFileTest.java
@@ -196,7 +196,7 @@ public final class ZipFileTest extends TestCase {
ZipFile zipFile = new ZipFile(f);
int entryCount = 0;
for (Enumeration<? extends ZipEntry> e = zipFile.entries(); e.hasMoreElements(); ) {
- ZipEntry zipEntry = e.nextElement();
+ e.nextElement();
++entryCount;
}
assertEquals(1024, entryCount);
@@ -204,18 +204,62 @@ public final class ZipFileTest extends TestCase {
}
}
- public void testZip64Support() throws IOException {
+ public void testZip64Support_largeNumberOfEntries() throws IOException {
+ File file = createZipFile(65550, 2);
+ ZipFile zf = null;
try {
- createZipFile(64*1024, 0);
- fail(); // Make this test more like testHugeZipFile when we have Zip64 support.
- } catch (ZipException expected) {
+ zf = new ZipFile(file);
+ assertEquals(65550, zf.size());
+
+ Enumeration<? extends ZipEntry> entries = zf.entries();
+ assertTrue(entries.hasMoreElements());
+ ZipEntry ze = entries.nextElement();
+ assertEquals(2, ze.getSize());
+ } finally {
+ if (zf != null) {
+ zf.close();
+ }
+ }
+ }
+
+ public void testZip64Support_totalLargerThan4G() throws IOException {
+ final File file = createZipFile(5, 1073741824L);
+ ZipFile zf = null;
+ try {
+ zf = new ZipFile(file);
+ assertEquals(5, zf.size());
+ Enumeration<? extends ZipEntry> entries = zf.entries();
+ assertTrue(entries.hasMoreElements());
+ ZipEntry ze = entries.nextElement();
+ assertEquals(1073741824L, ze.getSize());
+ } finally {
+ if (zf != null) {
+ zf.close();
+ }
+ }
+ }
+
+ public void testZip64Support_hugeEntry() throws IOException {
+ final File file = createZipFile(1, 4294967410L);
+ ZipFile zf = null;
+ try {
+ zf = new ZipFile(file);
+ assertEquals(1, zf.size());
+ Enumeration<? extends ZipEntry> entries = zf.entries();
+ assertTrue(entries.hasMoreElements());
+ ZipEntry ze = entries.nextElement();
+ assertEquals(4294967410L, ze.getSize());
+ } finally {
+ if (zf != null) {
+ zf.close();
+ }
}
}
/**
* Compresses the given number of files, each of the given size, into a .zip archive.
*/
- private static File createZipFile(int entryCount, int entrySize) throws IOException {
+ private static File createZipFile(int entryCount, long entrySize) throws IOException {
File result = createTemporaryZipFile();
byte[] writeBuffer = new byte[8192];
@@ -227,9 +271,9 @@ public final class ZipFileTest extends TestCase {
ZipEntry ze = new ZipEntry(Integer.toHexString(entry));
out.putNextEntry(ze);
- for (int i = 0; i < entrySize; i += writeBuffer.length) {
+ for (long i = 0; i < entrySize; i += writeBuffer.length) {
random.nextBytes(writeBuffer);
- int byteCount = Math.min(writeBuffer.length, entrySize - i);
+ int byteCount = (int) Math.min(writeBuffer.length, entrySize - i);
out.write(writeBuffer, 0, byteCount);
}