/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.commons.compress.archivers.zip;

import static org.apache.commons.compress.archivers.zip.ZipConstants.DWORD;
import static org.apache.commons.compress.archivers.zip.ZipConstants.SHORT;
import static org.apache.commons.compress.archivers.zip.ZipConstants.WORD;
import static org.apache.commons.compress.archivers.zip.ZipConstants.ZIP64_MAGIC;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.EOFException;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.PushbackInputStream;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Objects;
import java.util.zip.CRC32;
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;
import java.util.zip.ZipEntry;
import java.util.zip.ZipException;

import org.apache.commons.compress.archivers.ArchiveEntry;
import org.apache.commons.compress.archivers.ArchiveInputStream;
import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream;
import org.apache.commons.compress.compressors.deflate64.Deflate64CompressorInputStream;
import org.apache.commons.compress.utils.ArchiveUtils;
import org.apache.commons.compress.utils.CharsetNames;
import org.apache.commons.compress.utils.IOUtils;
import org.apache.commons.compress.utils.InputStreamStatistics;

/**
 * Implements an input stream that can read Zip archives.
 *
 * <p>This stream transparently supports Zip64 extensions and thus
 * individual entries and archives larger than 4 GB or with more than
 * 65536 entries.</p>
 *
 * <p>The {@link ZipFile} class is preferred when reading from files
 * as {@link ZipArchiveInputStream} is limited by not being able to
 * read the central directory header before returning entries. In
 * particular {@link ZipArchiveInputStream}</p>
 *
 * <ul>
 *
 *  <li>may return entries that are not part of the central directory
 *  at all and shouldn't be considered part of the archive.</li>
 *
 *  <li>may return several entries with the same name.</li>
 *
 *  <li>will not return internal or external attributes.</li>
 *
 *  <li>may return incomplete extra field data.</li>
 *
 *  <li>may return unknown sizes and CRC values for entries until the
 *  next entry has been reached if the archive uses the data
 *  descriptor feature.</li>
 *
 * </ul>
 *
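 * <p>A minimal usage sketch, reading every entry sequentially from a
 * stream named {@code rawInputStream} (an illustrative name) with
 * error handling omitted:</p>
 *
 * <pre>{@code
 * try (ZipArchiveInputStream zin = new ZipArchiveInputStream(rawInputStream)) {
 *     ZipArchiveEntry entry;
 *     while ((entry = zin.getNextEntry()) != null) {
 *         final byte[] buffer = new byte[8192];
 *         int n;
 *         while ((n = zin.read(buffer)) != -1) {
 *             // process the n bytes just read for the current entry
 *         }
 *     }
 * }
 * }</pre>
 *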
 * @see ZipFile
 * @NotThreadSafe
 */
public class ZipArchiveInputStream extends ArchiveInputStream<ZipArchiveEntry> implements InputStreamStatistics {

    /**
     * Bounded input stream adapted from commons-io
     */
    private final class BoundedInputStream extends FilterInputStream {

        /** the max length to provide */
        private final long max;

        /** the number of bytes already returned */
        private long pos;

        /**
         * Creates a new {@code BoundedInputStream} that wraps the given input
         * stream and limits it to a certain size.
         *
         * @param in The wrapped input stream
         * @param size The maximum number of bytes to return
         */
        public BoundedInputStream(final InputStream in, final long size) {
            super(in);
            this.max = size;
        }

        @Override
        public int available() throws IOException {
            if (max >= 0 && pos >= max) {
                return 0;
            }
            return in.available();
        }

        @Override
        public int read() throws IOException {
            if (max >= 0 && pos >= max) {
                return -1;
            }
            final int result = in.read();
            pos++;
            count(1);
            current.bytesReadFromStream++;
            return result;
        }

        @Override
        public int read(final byte[] b) throws IOException {
            return this.read(b, 0, b.length);
        }

        @Override
        public int read(final byte[] b, final int off, final int len) throws IOException {
            if (len == 0) {
                return 0;
            }
            if (max >= 0 && pos >= max) {
                return -1;
            }
            final long maxRead = max >= 0 ? Math.min(len, max - pos) : len;
            final int bytesRead = in.read(b, off, (int) maxRead);

            if (bytesRead == -1) {
                return -1;
            }

            pos += bytesRead;
            count(bytesRead);
            current.bytesReadFromStream += bytesRead;
            return bytesRead;
        }

        @Override
        public long skip(final long n) throws IOException {
            final long toSkip = max >= 0 ? Math.min(n, max - pos) : n;
            final long skippedBytes = IOUtils.skip(in, toSkip);
            pos += skippedBytes;
            return skippedBytes;
        }
    }

    /**
     * Structure collecting information for the entry that is
     * currently being read.
     */
    private static final class CurrentEntry {

        /**
         * Current ZIP entry.
         */
        private final ZipArchiveEntry entry = new ZipArchiveEntry();

        /**
         * Does the entry use a data descriptor?
         */
        private boolean hasDataDescriptor;

        /**
         * Does the entry have a ZIP64 extended information extra field?
         */
        private boolean usesZip64;

        /**
         * Number of bytes of entry content read by the client if the
         * entry is STORED.
         */
        private long bytesRead;

        /**
         * Number of bytes of entry content read from the stream.
         *
         * <p>This may be more than the actual entry's length as some
         * stuff gets buffered up and needs to be pushed back when the
         * end of the entry has been reached.</p>
         */
        private long bytesReadFromStream;

        /**
         * The checksum calculated as the current entry is read.
         */
        private final CRC32 crc = new CRC32();

        /**
         * The input stream decompressing the data for shrunk and imploded entries.
         */
        private InputStream inputStream;

        @SuppressWarnings("unchecked") // Caller beware
        private <T extends InputStream> T checkInputStream() {
            return (T) Objects.requireNonNull(inputStream, "inputStream");
        }
    }

    private static final int LFH_LEN = 30;
    /*
       local file header signature     WORD
       version needed to extract       SHORT
       general purpose bit flag        SHORT
       compression method              SHORT
       last mod file time              SHORT
       last mod file date              SHORT
       crc-32                          WORD
       compressed size                 WORD
       uncompressed size               WORD
       file name length                SHORT
       extra field length              SHORT
    */

    private static final int CFH_LEN = 46;
    /*
       central file header signature   WORD
       version made by                 SHORT
       version needed to extract       SHORT
       general purpose bit flag        SHORT
       compression method              SHORT
       last mod file time              SHORT
       last mod file date              SHORT
       crc-32                          WORD
       compressed size                 WORD
       uncompressed size               WORD
       file name length                SHORT
       extra field length              SHORT
       file comment length             SHORT
       disk number start               SHORT
       internal file attributes        SHORT
       external file attributes        WORD
       relative offset of local header WORD
    */

    private static final long TWO_EXP_32 = ZIP64_MAGIC + 1;

    private static final String USE_ZIPFILE_INSTEAD_OF_STREAM_DISCLAIMER =
        " while reading a stored entry using data descriptor. Either the archive is broken"
        + " or it can not be read using ZipArchiveInputStream and you must use ZipFile."
        + " A common cause for this is a ZIP archive containing a ZIP archive."
        + " See http://commons.apache.org/proper/commons-compress/zip.html#ZipArchiveInputStream_vs_ZipFile";

    private static final byte[] LFH = ZipLong.LFH_SIG.getBytes();

    private static final byte[] CFH = ZipLong.CFH_SIG.getBytes();

    private static final byte[] DD = ZipLong.DD_SIG.getBytes();

    private static final byte[] APK_SIGNING_BLOCK_MAGIC = {
        'A', 'P', 'K', ' ', 'S', 'i', 'g', ' ', 'B', 'l', 'o', 'c', 'k', ' ', '4', '2',
    };

    private static final BigInteger LONG_MAX = BigInteger.valueOf(Long.MAX_VALUE);

    private static boolean checksig(final byte[] signature, final byte[] expected) {
        for (int i = 0; i < expected.length; i++) {
            if (signature[i] != expected[i]) {
                return false;
            }
        }
        return true;
    }

    /**
     * Checks if the signature matches what is expected for a ZIP file.
     * Does not currently handle self-extracting ZIPs which may have arbitrary
     * leading content.
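     *
     * <p>A minimal probe sketch; {@code candidateStream} is an illustrative
     * name and the caller is assumed to keep or push back the bytes it reads:</p>
     *
     * <pre>{@code
     * byte[] head = new byte[4];
     * int n = candidateStream.read(head);
     * boolean looksLikeZip = ZipArchiveInputStream.matches(head, n);
     * }</pre>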
     *
     * @param signature the bytes to check
     * @param length the number of bytes to check
     * @return true, if this stream is a ZIP archive stream, false otherwise
     */
    public static boolean matches(final byte[] signature, final int length) {
        if (length < ZipArchiveOutputStream.LFH_SIG.length) {
            return false;
        }

        return checksig(signature, ZipArchiveOutputStream.LFH_SIG) // normal file
            || checksig(signature, ZipArchiveOutputStream.EOCD_SIG) // empty zip
            || checksig(signature, ZipArchiveOutputStream.DD_SIG) // split zip
            || checksig(signature, ZipLong.SINGLE_SEGMENT_SPLIT_MARKER.getBytes());
    }

    /** The ZIP encoding to use for file names and the file comment. */
    private final ZipEncoding zipEncoding;

    // the provided encoding (for unit tests)
    final String encoding;

    /** Whether to look for and use Unicode extra fields. */
    private final boolean useUnicodeExtraFields;

    /** Wrapped stream, will always be a PushbackInputStream. */
    private final InputStream inputStream;
    /** Inflater used for all deflated entries. */
    private final Inflater inf = new Inflater(true);
    /** Buffer used to read from the wrapped stream. */
    private final ByteBuffer buf = ByteBuffer.allocate(ZipArchiveOutputStream.BUFFER_SIZE);
    /** The entry that is currently being read. */
    private CurrentEntry current;
    /** Whether the stream has been closed. */
    private boolean closed;

    /** Whether the stream has reached the central directory - and thus found all entries. */
    private boolean hitCentralDirectory;

    /**
     * When reading a stored entry that uses the data descriptor this
     * stream has to read the full entry and caches it. This is the
     * cache.
     */
    private ByteArrayInputStream lastStoredEntry;

    /**
     * Whether the stream will try to read STORED entries that use a data descriptor.
     * Setting it to true means we will not stop reading an entry with the compressed
     * size, instead we will stop reading an entry when a data descriptor is met (by
     * finding the Data Descriptor Signature). This will completely break down in some
     * cases - like JARs in WARs.
     * <p>
     * See also:
     * https://issues.apache.org/jira/projects/COMPRESS/issues/COMPRESS-555
     * https://github.com/apache/commons-compress/pull/137#issuecomment-690835644
     */
    private final boolean allowStoredEntriesWithDataDescriptor;

    /** Count decompressed bytes for current entry */
    private long uncompressedCount;

    /** Whether the stream will try to skip the ZIP split signature(08074B50) at the beginning **/
    private final boolean skipSplitSig;

    // cached buffers - must only be used locally in the class (COMPRESS-172 - reduce garbage collection)
    private final byte[] lfhBuf = new byte[LFH_LEN];

    private final byte[] skipBuf = new byte[1024];

    private final byte[] shortBuf = new byte[SHORT];

    private final byte[] wordBuf = new byte[WORD];

    private final byte[] twoDwordBuf = new byte[2 * DWORD];

    private int entriesRead;

    /**
     * Create an instance using UTF-8 encoding
     * @param inputStream the stream to wrap
     */
    public ZipArchiveInputStream(final InputStream inputStream) {
        this(inputStream, CharsetNames.UTF_8);
    }

    /**
     * Create an instance using the specified encoding
     * @param inputStream the stream to wrap
     * @param encoding the encoding to use for file names, use null
     *            for the platform's default encoding
     * @since 1.5
     */
    public ZipArchiveInputStream(final InputStream inputStream, final String encoding) {
        this(inputStream, encoding, true);
    }

    /**
     * Create an instance using the specified encoding
     * @param inputStream the stream to wrap
     * @param encoding the encoding to use for file names, use null
     *            for the platform's default encoding
     * @param useUnicodeExtraFields whether to use InfoZIP Unicode
     *            Extra Fields (if present) to set the file names.
     */
    public ZipArchiveInputStream(final InputStream inputStream, final String encoding, final boolean useUnicodeExtraFields) {
        this(inputStream, encoding, useUnicodeExtraFields, false);
    }

    /**
     * Create an instance using the specified encoding
     * @param inputStream the stream to wrap
     * @param encoding the encoding to use for file names, use null
     *            for the platform's default encoding
     * @param useUnicodeExtraFields whether to use InfoZIP Unicode
     *            Extra Fields (if present) to set the file names.
     * @param allowStoredEntriesWithDataDescriptor whether the stream
     *            will try to read STORED entries that use a data descriptor
     * @since 1.1
     */
    public ZipArchiveInputStream(final InputStream inputStream,
                                 final String encoding,
                                 final boolean useUnicodeExtraFields,
                                 final boolean allowStoredEntriesWithDataDescriptor) {
        this(inputStream, encoding, useUnicodeExtraFields, allowStoredEntriesWithDataDescriptor, false);
    }

    /**
     * Create an instance using the specified encoding
     * @param inputStream the stream to wrap
     * @param encoding the encoding to use for file names, use null
     *            for the platform's default encoding
     * @param useUnicodeExtraFields whether to use InfoZIP Unicode
     *            Extra Fields (if present) to set the file names.
     * @param allowStoredEntriesWithDataDescriptor whether the stream
     *            will try to read STORED entries that use a data descriptor
     * @param skipSplitSig Whether the stream will try to skip the zip
     *            split signature(08074B50) at the beginning. You will need to
     *            set this to true if you want to read a split archive.
     * @since 1.20
     */
    public ZipArchiveInputStream(final InputStream inputStream,
                                 final String encoding,
                                 final boolean useUnicodeExtraFields,
                                 final boolean allowStoredEntriesWithDataDescriptor,
                                 final boolean skipSplitSig) {
        this.encoding = encoding;
        zipEncoding = ZipEncodingHelper.getZipEncoding(encoding);
        this.useUnicodeExtraFields = useUnicodeExtraFields;
        this.inputStream = new PushbackInputStream(inputStream, buf.capacity());
        this.allowStoredEntriesWithDataDescriptor = allowStoredEntriesWithDataDescriptor;
        this.skipSplitSig = skipSplitSig;
        // haven't read anything so far
        buf.limit(0);
    }

    /**
     * Checks whether the current buffer contains the signature of a
     * "data descriptor", "local file header" or
     * "central directory entry".
     *
     * <p>If it contains such a signature, reads the data descriptor
     * and positions the stream right after the data descriptor.</p>
     */
    private boolean bufferContainsSignature(final ByteArrayOutputStream bos, final int offset, final int lastRead, final int expectedDDLen)
        throws IOException {

        boolean done = false;
        for (int i = 0; !done && i < offset + lastRead - 4; i++) {
            if (buf.array()[i] == LFH[0] && buf.array()[i + 1] == LFH[1]) {
                int expectDDPos = i;
                if (i >= expectedDDLen &&
                        buf.array()[i + 2] == LFH[2] && buf.array()[i + 3] == LFH[3]
                    || buf.array()[i + 2] == CFH[2] && buf.array()[i + 3] == CFH[3]) {
                    // found an LFH or CFH:
                    expectDDPos = i - expectedDDLen;
                    done = true;
                }
                else if (buf.array()[i + 2] == DD[2] && buf.array()[i + 3] == DD[3]) {
                    // found DD:
                    done = true;
                }
                if (done) {
                    // * push back bytes read in excess as well as the data
                    //   descriptor
                    // * copy the remaining bytes to cache
                    // * read data descriptor
                    pushback(buf.array(), expectDDPos, offset + lastRead - expectDDPos);
                    bos.write(buf.array(), 0, expectDDPos);
                    readDataDescriptor();
                }
            }
        }
        return done;
    }

    /**
     * If the last read bytes could hold a data descriptor and an
     * incomplete signature then save the last bytes to the front of
     * the buffer and cache everything in front of the potential data
     * descriptor into the given ByteArrayOutputStream.
     *
     * <p>Data descriptor plus incomplete signature (3 bytes in the
     * worst case) can be 20 bytes max.</p>
     */
    private int cacheBytesRead(final ByteArrayOutputStream bos, int offset, final int lastRead, final int expectedDDLen) {
        final int cacheable = offset + lastRead - expectedDDLen - 3;
        if (cacheable > 0) {
            bos.write(buf.array(), 0, cacheable);
            System.arraycopy(buf.array(), cacheable, buf.array(), 0, expectedDDLen + 3);
            offset = expectedDDLen + 3;
        } else {
            offset += lastRead;
        }
        return offset;
    }

    /**
     * Whether this class is able to read the given entry.
     *
     * <p>May return false if it is set up to use encryption or a
     * compression method that hasn't been implemented yet.</p>
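     *
     * <p>A minimal sketch of the usual check before consuming an entry's data
     * (variable names are illustrative):</p>
     *
     * <pre>{@code
     * ZipArchiveEntry entry = zin.getNextEntry();
     * if (entry != null && zin.canReadEntryData(entry)) {
     *     // only now is it safe to read this entry's data from zin
     * }
     * }</pre>
     *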
     * @since 1.1
     */
    @Override
    public boolean canReadEntryData(final ArchiveEntry ae) {
        if (ae instanceof ZipArchiveEntry) {
            final ZipArchiveEntry ze = (ZipArchiveEntry) ae;
            return ZipUtil.canHandleEntryData(ze)
                && supportsDataDescriptorFor(ze)
                && supportsCompressedSizeFor(ze);
        }
        return false;
    }

    @Override
    public void close() throws IOException {
        if (!closed) {
            closed = true;
            try {
                inputStream.close();
            } finally {
                inf.end();
            }
        }
    }

    /**
     * Closes the current ZIP archive entry and positions the underlying
     * stream to the beginning of the next entry. All per-entry variables
     * and data structures are cleared.
     * <p>
     * If the compressed size of this entry is included in the entry header,
     * then any outstanding bytes are simply skipped from the underlying
     * stream without uncompressing them. This allows an entry to be safely
     * closed even if the compression method is unsupported.
     * <p>
     * In case we don't know the compressed size of this entry or have
     * already buffered too much data from the underlying stream to support
     * uncompression, then the uncompression process is completed and the
     * end position of the stream is adjusted based on the result of that
     * process.
     *
     * @throws IOException if an error occurs
     */
    private void closeEntry() throws IOException {
        if (closed) {
            throw new IOException("The stream is closed");
        }
        if (current == null) {
            return;
        }

        // Ensure all entry bytes are read
        if (currentEntryHasOutstandingBytes()) {
            drainCurrentEntryData();
        } else {
            // this is guaranteed to exhaust the stream
            skip(Long.MAX_VALUE); //NOSONAR

            final long inB = current.entry.getMethod() == ZipArchiveOutputStream.DEFLATED
                ? getBytesInflated() : current.bytesRead;

            // this is at most a single read() operation and can't
            // exceed the range of int
            final int diff = (int) (current.bytesReadFromStream - inB);

            // Pushback any required bytes
            if (diff > 0) {
                pushback(buf.array(), buf.limit() - diff, diff);
                current.bytesReadFromStream -= diff;
            }

            // Drain remainder of entry if not all data bytes were required
            if (currentEntryHasOutstandingBytes()) {
                drainCurrentEntryData();
            }
        }

        if (lastStoredEntry == null && current.hasDataDescriptor) {
            readDataDescriptor();
        }

        inf.reset();
        buf.clear().flip();
        current = null;
        lastStoredEntry = null;
    }

    /**
     * If the compressed size of the current entry is included in the entry header
     * and there are any outstanding bytes in the underlying stream, then
     * this returns true.
     *
     * @return true, if current entry is determined to have outstanding bytes, false otherwise
     */
    private boolean currentEntryHasOutstandingBytes() {
        return current.bytesReadFromStream <= current.entry.getCompressedSize()
            && !current.hasDataDescriptor;
    }

    /**
     * Read all data of the current entry from the underlying stream
     * that hasn't been read, yet.
     */
    private void drainCurrentEntryData() throws IOException {
        long remaining = current.entry.getCompressedSize() - current.bytesReadFromStream;
        while (remaining > 0) {
            final long n = inputStream.read(buf.array(), 0, (int) Math.min(buf.capacity(), remaining));
            if (n < 0) {
                throw new EOFException("Truncated ZIP entry: "
                    + ArchiveUtils.sanitize(current.entry.getName()));
            }
            count(n);
            remaining -= n;
        }
    }

    private int fill() throws IOException {
        if (closed) {
            throw new IOException("The stream is closed");
        }
        final int length = inputStream.read(buf.array());
        if (length > 0) {
            buf.limit(length);
            count(buf.limit());
            inf.setInput(buf.array(), 0, buf.limit());
        }
        return length;
    }

    /**
     * Reads forward until the signature of the "End of central
     * directory" record is found.
     */
    private boolean findEocdRecord() throws IOException {
        int currentByte = -1;
        boolean skipReadCall = false;
        while (skipReadCall || (currentByte = readOneByte()) > -1) {
            skipReadCall = false;
            if (!isFirstByteOfEocdSig(currentByte)) {
                continue;
            }
            currentByte = readOneByte();
            if (currentByte != ZipArchiveOutputStream.EOCD_SIG[1]) {
                if (currentByte == -1) {
                    break;
                }
                skipReadCall = isFirstByteOfEocdSig(currentByte);
                continue;
            }
            currentByte = readOneByte();
            if (currentByte != ZipArchiveOutputStream.EOCD_SIG[2]) {
                if (currentByte == -1) {
                    break;
                }
                skipReadCall = isFirstByteOfEocdSig(currentByte);
                continue;
            }
            currentByte = readOneByte();
            if (currentByte == -1) {
                break;
            }
            if (currentByte == ZipArchiveOutputStream.EOCD_SIG[3]) {
                return true;
            }
            skipReadCall = isFirstByteOfEocdSig(currentByte);
        }
        return false;
    }

    /**
     * Gets the number of bytes Inflater has actually processed.
     *
     * <p>For Java versions before Java 7 the getBytes* methods in
     * Inflater/Deflater seem to return unsigned ints that wrap around
     * to 0 at 2^32 rather than full longs.</p>
     *
     * <p>The stream knows how many bytes it has read, but not how
     * many the Inflater actually consumed - it should be between the
     * total number of bytes read for the entry and the total number
     * minus the last read operation. Here we just try to make the
     * value close enough to the bytes we've read by assuming the
     * number of bytes consumed must be smaller than (or equal to) the
     * number of bytes read but not smaller by more than 2^32.</p>
     */
    private long getBytesInflated() {
        long inB = inf.getBytesRead();
        if (current.bytesReadFromStream >= TWO_EXP_32) {
            while (inB + TWO_EXP_32 <= current.bytesReadFromStream) {
                inB += TWO_EXP_32;
            }
        }
        return inB;
    }

    /**
     * @since 1.17
     */
    @SuppressWarnings("resource") // checkInputStream() does not allocate.
    @Override
    public long getCompressedCount() {
        final int method = current.entry.getMethod();
        if (method == ZipArchiveOutputStream.STORED) {
            return current.bytesRead;
        }
        if (method == ZipArchiveOutputStream.DEFLATED) {
            return getBytesInflated();
        }
        if (method == ZipMethod.UNSHRINKING.getCode()
            || method == ZipMethod.IMPLODING.getCode()
            || method == ZipMethod.ENHANCED_DEFLATED.getCode()
            || method == ZipMethod.BZIP2.getCode()) {
            return ((InputStreamStatistics) current.checkInputStream()).getCompressedCount();
        }
        return -1;
    }

    @Override
    public ZipArchiveEntry getNextEntry() throws IOException {
        return getNextZipEntry();
    }

    /**
     * Gets the next entry.
     *
     * @return the next entry.
     * @throws IOException if an I/O error occurs.
     * @deprecated Use {@link #getNextEntry()}.
     */
    @Deprecated
    public ZipArchiveEntry getNextZipEntry() throws IOException {
        uncompressedCount = 0;

        boolean firstEntry = true;
        if (closed || hitCentralDirectory) {
            return null;
        }
        if (current != null) {
            closeEntry();
            firstEntry = false;
        }

        final long currentHeaderOffset = getBytesRead();
        try {
            if (firstEntry) {
                // split archives have a special signature before the
                // first local file header - look for it and fail with
                // the appropriate error message if this is a split
                // archive.
                readFirstLocalFileHeader();
            } else {
                readFully(lfhBuf);
            }
        } catch (final EOFException e) { //NOSONAR
            return null;
        }

        final ZipLong sig = new ZipLong(lfhBuf);
        if (!sig.equals(ZipLong.LFH_SIG)) {
            if (sig.equals(ZipLong.CFH_SIG) || sig.equals(ZipLong.AED_SIG) || isApkSigningBlock(lfhBuf)) {
                hitCentralDirectory = true;
                skipRemainderOfArchive();
                return null;
            }
            throw new ZipException(String.format("Unexpected record signature: 0x%x", sig.getValue()));
        }

        int off = WORD;
        current = new CurrentEntry();

        final int versionMadeBy = ZipShort.getValue(lfhBuf, off);
        off += SHORT;
        current.entry.setPlatform(versionMadeBy >> ZipFile.BYTE_SHIFT & ZipFile.NIBLET_MASK);

        final GeneralPurposeBit gpFlag = GeneralPurposeBit.parse(lfhBuf, off);
        final boolean hasUTF8Flag = gpFlag.usesUTF8ForNames();
        final ZipEncoding entryEncoding = hasUTF8Flag ?
            ZipEncodingHelper.ZIP_ENCODING_UTF_8 : zipEncoding;
        current.hasDataDescriptor = gpFlag.usesDataDescriptor();
        current.entry.setGeneralPurposeBit(gpFlag);

        off += SHORT;

        current.entry.setMethod(ZipShort.getValue(lfhBuf, off));
        off += SHORT;

        final long time = ZipUtil.dosToJavaTime(ZipLong.getValue(lfhBuf, off));
        current.entry.setTime(time);
        off += WORD;

        ZipLong size = null, cSize = null;
        if (!current.hasDataDescriptor) {
            current.entry.setCrc(ZipLong.getValue(lfhBuf, off));
            off += WORD;

            cSize = new ZipLong(lfhBuf, off);
            off += WORD;

            size = new ZipLong(lfhBuf, off);
            off += WORD;
        } else {
            off += 3 * WORD;
        }

        final int fileNameLen = ZipShort.getValue(lfhBuf, off);

        off += SHORT;

        final int extraLen = ZipShort.getValue(lfhBuf, off);
        off += SHORT; // NOSONAR - assignment as documentation

        final byte[] fileName = readRange(fileNameLen);
        current.entry.setName(entryEncoding.decode(fileName), fileName);
        if (hasUTF8Flag) {
            current.entry.setNameSource(ZipArchiveEntry.NameSource.NAME_WITH_EFS_FLAG);
        }

        final byte[] extraData = readRange(extraLen);
        try {
            current.entry.setExtra(extraData);
        } catch (final RuntimeException ex) {
            final ZipException z = new ZipException("Invalid extra data in entry " + current.entry.getName());
            z.initCause(ex);
            throw z;
        }

        if (!hasUTF8Flag && useUnicodeExtraFields) {
            ZipUtil.setNameAndCommentFromExtraFields(current.entry, fileName, null);
        }

        processZip64Extra(size, cSize);

        current.entry.setLocalHeaderOffset(currentHeaderOffset);
        current.entry.setDataOffset(getBytesRead());
        current.entry.setStreamContiguous(true);

        final ZipMethod m = ZipMethod.getMethodByCode(current.entry.getMethod());
        if (current.entry.getCompressedSize() != ArchiveEntry.SIZE_UNKNOWN) {
            if (ZipUtil.canHandleEntryData(current.entry) && m != ZipMethod.STORED && m != ZipMethod.DEFLATED) {
                final InputStream bis = new BoundedInputStream(inputStream, current.entry.getCompressedSize());
                switch (m) {
                case UNSHRINKING:
                    current.inputStream = new UnshrinkingInputStream(bis);
                    break;
                case IMPLODING:
                    try {
                        current.inputStream = new ExplodingInputStream(
                            current.entry.getGeneralPurposeBit().getSlidingDictionarySize(),
                            current.entry.getGeneralPurposeBit().getNumberOfShannonFanoTrees(),
                            bis);
                    } catch (final IllegalArgumentException ex) {
                        throw new IOException("bad IMPLODE data", ex);
                    }
                    break;
                case BZIP2:
                    current.inputStream = new BZip2CompressorInputStream(bis);
                    break;
                case ENHANCED_DEFLATED:
                    current.inputStream = new Deflate64CompressorInputStream(bis);
                    break;
                default:
                    // we should never get here as all supported methods have been covered
                    // will cause an error when read is invoked, don't throw an exception here so people can
                    // skip unsupported entries
                    break;
                }
            }
        } else if (m == ZipMethod.ENHANCED_DEFLATED) {
            current.inputStream = new Deflate64CompressorInputStream(inputStream);
        }

        entriesRead++;
        return current.entry;
    }

    /**
     * @since 1.17
     */
    @Override
    public long getUncompressedCount() {
        return uncompressedCount;
    }

    /**
     * Checks whether this might be an APK Signing Block.
     *
     * <p>Unfortunately the APK signing block does not start with some kind of signature, it rather ends with one. It
     * starts with a length, so what we do is parse the suspect length, skip ahead far enough, look for the signature
     * and if we've found it, return true.</p>
     *
     * @param suspectLocalFileHeader the bytes read from the underlying stream in the expectation that they would hold
     *            the local file header of the next entry.
     *
     * @return true if this looks like an APK signing block
     *
     * @see <a href="https://source.android.com/security/apksigning/v2">https://source.android.com/security/apksigning/v2</a>
     */
    private boolean isApkSigningBlock(final byte[] suspectLocalFileHeader) throws IOException {
        // length of block excluding the size field itself
        final BigInteger len = ZipEightByteInteger.getValue(suspectLocalFileHeader);
        // LFH has already been read and all but the first eight bytes contain (part of) the APK signing block,
        // also subtract 16 bytes in order to position us at the magic string
        BigInteger toSkip = len.add(BigInteger.valueOf(DWORD - suspectLocalFileHeader.length
            - (long) APK_SIGNING_BLOCK_MAGIC.length));
        final byte[] magic = new byte[APK_SIGNING_BLOCK_MAGIC.length];

        try {
            if (toSkip.signum() < 0) {
                // suspectLocalFileHeader contains the start of suspect magic string
                final int off = suspectLocalFileHeader.length + toSkip.intValue();
                // length was shorter than magic length
                if (off < DWORD) {
                    return false;
                }
                final int bytesInBuffer = Math.abs(toSkip.intValue());
                System.arraycopy(suspectLocalFileHeader, off, magic, 0, Math.min(bytesInBuffer, magic.length));
                if (bytesInBuffer < magic.length) {
                    readFully(magic, bytesInBuffer);
                }
            } else {
                while (toSkip.compareTo(LONG_MAX) > 0) {
                    realSkip(Long.MAX_VALUE);
                    toSkip = toSkip.add(LONG_MAX.negate());
                }
                realSkip(toSkip.longValue());
                readFully(magic);
            }
        } catch (final EOFException ex) { //NOSONAR
            // length was invalid
            return false;
        }
        return Arrays.equals(magic, APK_SIGNING_BLOCK_MAGIC);
    }

    private boolean isFirstByteOfEocdSig(final int b) {
        return b == ZipArchiveOutputStream.EOCD_SIG[0];
    }

    /**
     * Records whether a Zip64 extra is present and sets the size
     * information from it if sizes are 0xFFFFFFFF and the entry
     * doesn't use a data descriptor.
     */
    private void processZip64Extra(final ZipLong size, final ZipLong cSize) throws ZipException {
        final ZipExtraField extra =
            current.entry.getExtraField(Zip64ExtendedInformationExtraField.HEADER_ID);
        if (extra != null && !(extra instanceof Zip64ExtendedInformationExtraField)) {
            throw new ZipException("archive contains unparseable zip64 extra field");
        }
        final Zip64ExtendedInformationExtraField z64 =
            (Zip64ExtendedInformationExtraField) extra;
        current.usesZip64 = z64 != null;
        if (!current.hasDataDescriptor) {
            if (z64 != null // same as current.usesZip64 but avoids NPE warning
                && (ZipLong.ZIP64_MAGIC.equals(cSize) || ZipLong.ZIP64_MAGIC.equals(size))) {
                if (z64.getCompressedSize() == null || z64.getSize() == null) {
                    // avoid NPE if it's a corrupted ZIP archive
                    throw new ZipException("archive contains corrupted zip64 extra field");
                }
                long s = z64.getCompressedSize().getLongValue();
                if (s < 0) {
                    throw new ZipException("broken archive, entry with negative compressed size");
                }
                current.entry.setCompressedSize(s);
                s = z64.getSize().getLongValue();
                if (s < 0) {
                    throw new ZipException("broken archive, entry with negative size");
                }
                current.entry.setSize(s);
            } else if (cSize != null && size != null) {
                if (cSize.getValue() < 0) {
                    throw new ZipException("broken archive, entry with negative compressed size");
                }
                current.entry.setCompressedSize(cSize.getValue());
                if (size.getValue() < 0) {
                    throw new ZipException("broken archive, entry with negative size");
                }
                current.entry.setSize(size.getValue());
            }
        }
    }

    private void pushback(final byte[] buf, final int offset, final int length) throws IOException {
        if (offset < 0) {
            // Instead of ArrayIndexOutOfBoundsException
            throw new IOException(String.format("Negative offset %,d into buffer", offset));
        }
        ((PushbackInputStream) inputStream).unread(buf, offset, length);
        pushedBackBytes(length);
    }

    @Override
    public int read(final byte[] buffer, final int offset, final int length) throws IOException {
        if (length == 0) {
            return 0;
        }
        if (closed) {
            throw new IOException("The stream is closed");
        }

        if (current == null) {
            return -1;
        }

        // avoid int overflow, check null buffer
        if (offset > buffer.length || length < 0 || offset < 0 || buffer.length - offset < length) {
            throw new ArrayIndexOutOfBoundsException();
        }

        ZipUtil.checkRequestedFeatures(current.entry);
        if (!supportsDataDescriptorFor(current.entry)) {
            throw new UnsupportedZipFeatureException(UnsupportedZipFeatureException.Feature.DATA_DESCRIPTOR,
                current.entry);
        }
        if (!supportsCompressedSizeFor(current.entry)) {
            throw new UnsupportedZipFeatureException(UnsupportedZipFeatureException.Feature.UNKNOWN_COMPRESSED_SIZE,
                current.entry);
        }

        final int read;
        if (current.entry.getMethod() == ZipArchiveOutputStream.STORED) {
            read = readStored(buffer, offset, length);
        } else if (current.entry.getMethod() == ZipArchiveOutputStream.DEFLATED) {
            read = readDeflated(buffer, offset, length);
        } else if (current.entry.getMethod() == ZipMethod.UNSHRINKING.getCode()
            || current.entry.getMethod() == ZipMethod.IMPLODING.getCode()
            || current.entry.getMethod() == ZipMethod.ENHANCED_DEFLATED.getCode()
            || current.entry.getMethod() == ZipMethod.BZIP2.getCode()) {
            read =
                current.inputStream.read(buffer, offset, length);
        } else {
            throw new UnsupportedZipFeatureException(ZipMethod.getMethodByCode(current.entry.getMethod()),
                current.entry);
        }

        if (read >= 0) {
            current.crc.update(buffer, offset, read);
            uncompressedCount += read;
        }

        return read;
    }

    private void readDataDescriptor() throws IOException {
        readFully(wordBuf);
        ZipLong val = new ZipLong(wordBuf);
        if (ZipLong.DD_SIG.equals(val)) {
            // data descriptor with signature, skip sig
            readFully(wordBuf);
            val = new ZipLong(wordBuf);
        }
        current.entry.setCrc(val.getValue());

        // if there is a ZIP64 extra field, sizes are eight bytes
        // each, otherwise four bytes each. Unfortunately some
        // implementations - namely Java7 - use eight bytes without
        // using a ZIP64 extra field -
        // https://bugs.sun.com/bugdatabase/view_bug.do?bug_id=7073588

        // just read 16 bytes and check whether bytes nine to twelve
        // look like one of the signatures of what could follow a data
        // descriptor (ignoring archive decryption headers for now).
        // If so, push back eight bytes and assume sizes are four
        // bytes, otherwise sizes are eight bytes each.
        readFully(twoDwordBuf);
        final ZipLong potentialSig = new ZipLong(twoDwordBuf, DWORD);
        if (potentialSig.equals(ZipLong.CFH_SIG) || potentialSig.equals(ZipLong.LFH_SIG)) {
            pushback(twoDwordBuf, DWORD, DWORD);
            long size = ZipLong.getValue(twoDwordBuf);
            if (size < 0) {
                throw new ZipException("broken archive, entry with negative compressed size");
            }
            current.entry.setCompressedSize(size);
            size = ZipLong.getValue(twoDwordBuf, WORD);
            if (size < 0) {
                throw new ZipException("broken archive, entry with negative size");
            }
            current.entry.setSize(size);
        } else {
            long size = ZipEightByteInteger.getLongValue(twoDwordBuf);
            if (size < 0) {
                throw new ZipException("broken archive, entry with negative compressed size");
            }
            current.entry.setCompressedSize(size);
            size = ZipEightByteInteger.getLongValue(twoDwordBuf, DWORD);
            if (size < 0) {
                throw new ZipException("broken archive, entry with negative size");
            }
            current.entry.setSize(size);
        }
    }

    /**
     * Implementation of read for DEFLATED entries.
     */
    private int readDeflated(final byte[] buffer, final int offset, final int length) throws IOException {
        final int read = readFromInflater(buffer, offset, length);
        if (read <= 0) {
            if (inf.finished()) {
                return -1;
            }
            if (inf.needsDictionary()) {
                throw new ZipException("This archive needs a preset dictionary"
                    + " which is not supported by Commons"
                    + " Compress.");
            }
            if (read == -1) {
                throw new IOException("Truncated ZIP file");
            }
        }
        return read;
    }

    /**
     * Fills the given array with the first local file header and
     * deals with splitting/spanning markers that may prefix the first
     * LFH.
     */
    private void readFirstLocalFileHeader() throws IOException {
        readFully(lfhBuf);
        final ZipLong sig = new ZipLong(lfhBuf);

        if (!skipSplitSig && sig.equals(ZipLong.DD_SIG)) {
            throw new UnsupportedZipFeatureException(UnsupportedZipFeatureException.Feature.SPLITTING);
        }

        // the split ZIP signature(08074B50) should only be skipped when the skipSplitSig is set
        if (sig.equals(ZipLong.SINGLE_SEGMENT_SPLIT_MARKER) || sig.equals(ZipLong.DD_SIG)) {
            // Just skip over the marker.
            final byte[] missedLfhBytes = new byte[4];
            readFully(missedLfhBytes);
            System.arraycopy(lfhBuf, 4, lfhBuf, 0, LFH_LEN - 4);
            System.arraycopy(missedLfhBytes, 0, lfhBuf, LFH_LEN - 4, 4);
        }
    }

    /**
     * Potentially reads more bytes to fill the inflater's buffer and
     * reads from it.
     */
    private int readFromInflater(final byte[] buffer, final int offset, final int length) throws IOException {
        int read = 0;
        do {
            if (inf.needsInput()) {
                final int l = fill();
                if (l > 0) {
                    current.bytesReadFromStream += buf.limit();
                } else if (l == -1) {
                    return -1;
                } else {
                    break;
                }
            }
            try {
                read = inf.inflate(buffer, offset, length);
            } catch (final DataFormatException e) {
                throw (IOException) new ZipException(e.getMessage()).initCause(e);
            }
        } while (read == 0 && inf.needsInput());
        return read;
    }

    private void readFully(final byte[] b) throws IOException {
        readFully(b, 0);
    }

    // End of Central Directory Record
    //   end of central dir signature    WORD
    //   number of this disk             SHORT
    //   number of the disk with the
    //   start of the central directory  SHORT
    //   total number of entries in the
    //   central directory on this disk  SHORT
    //   total number of entries in
    //   the central directory           SHORT
    //   size of the central directory   WORD
    //   offset of start of central
    //   directory with respect to
    //   the starting disk number        WORD
    //   .ZIP file comment length        SHORT
    //   .ZIP file comment up to 64KB
    //

    private void readFully(final byte[] b, final int off) throws IOException {
        final int len = b.length - off;
        final int count = IOUtils.readFully(inputStream, b, off, len);
        count(count);
        if (count < len) {
            throw new EOFException();
        }
    }

    /**
     * Reads bytes by reading from the underlying stream rather than
     * the (potentially inflating) archive stream - which {@link #read} would do.
     *
     * Also updates bytes-read counter.
     */
    private int readOneByte() throws IOException {
        final int b = inputStream.read();
        if (b != -1) {
            count(1);
        }
        return b;
    }

    private byte[] readRange(final int len) throws IOException {
        final byte[] ret = IOUtils.readRange(inputStream, len);
        count(ret.length);
        if (ret.length < len) {
            throw new EOFException();
        }
        return ret;
    }

    /**
     * Implementation of read for STORED entries.
     */
    private int readStored(final byte[] buffer, final int offset, final int length) throws IOException {

        if (current.hasDataDescriptor) {
            if (lastStoredEntry == null) {
                readStoredEntry();
            }
            return lastStoredEntry.read(buffer, offset, length);
        }

        final long csize = current.entry.getSize();
        if (current.bytesRead >= csize) {
            return -1;
        }

        if (buf.position() >= buf.limit()) {
            buf.position(0);
            final int l = inputStream.read(buf.array());
            if (l == -1) {
                buf.limit(0);
                throw new IOException("Truncated ZIP file");
            }
            buf.limit(l);

            count(l);
            current.bytesReadFromStream += l;
        }

        int toRead = Math.min(buf.remaining(), length);
        if (csize - current.bytesRead < toRead) {
            // if it is smaller than toRead then it fits into an int
            toRead = (int) (csize - current.bytesRead);
        }
        buf.get(buffer, offset, toRead);
        current.bytesRead += toRead;
        return toRead;
    }

    /**
     * Caches a stored entry that uses the data descriptor.
     *
     * <ul>
     * <li>Reads a stored entry until the signature of a local file
     * header, central directory header or data descriptor has been
     * found.</li>
     * <li>Stores all entry data in lastStoredEntry.</li>
     * <li>Rewinds the stream to position at the data
     * descriptor.</li>
     * <li>reads the data descriptor</li>
     * </ul>
     *
     * <p>After calling this method the entry should know its size,
     * the entry's data is cached and the stream is positioned at the
     * next local file or central directory header.</p>
     */
    private void readStoredEntry() throws IOException {
        final ByteArrayOutputStream bos = new ByteArrayOutputStream();
        int off = 0;
        boolean done = false;

        // length of DD without signature
        final int ddLen = current.usesZip64 ? WORD + 2 * DWORD : 3 * WORD;

        while (!done) {
            final int r = inputStream.read(buf.array(), off, ZipArchiveOutputStream.BUFFER_SIZE - off);
            if (r <= 0) {
                // read the whole archive without ever finding a
                // central directory
                throw new IOException("Truncated ZIP file");
            }
            if (r + off < 4) {
                // buffer too small to check for a signature, loop
                off += r;
                continue;
            }

            done = bufferContainsSignature(bos, off, r, ddLen);
            if (!done) {
                off = cacheBytesRead(bos, off, r, ddLen);
            }
        }
        if (current.entry.getCompressedSize() != current.entry.getSize()) {
            throw new ZipException("compressed and uncompressed size don't match"
                + USE_ZIPFILE_INSTEAD_OF_STREAM_DISCLAIMER);
        }
        final byte[] b = bos.toByteArray();
        if (b.length != current.entry.getSize()) {
            throw new ZipException("actual and claimed size don't match"
                + USE_ZIPFILE_INSTEAD_OF_STREAM_DISCLAIMER);
        }
        lastStoredEntry = new ByteArrayInputStream(b);
    }

    /**
     * Skips bytes by reading from the underlying stream rather than
     * the (potentially inflating) archive stream - which {@link
     * #skip} would do.
     *
     * Also updates bytes-read counter.
     */
    private void realSkip(final long value) throws IOException {
        if (value >= 0) {
            long skipped = 0;
            while (skipped < value) {
                final long rem = value - skipped;
                final int x = inputStream.read(skipBuf, 0, (int) (skipBuf.length > rem ?
                    rem : skipBuf.length));
                if (x == -1) {
                    return;
                }
                count(x);
                skipped += x;
            }
            return;
        }
        throw new IllegalArgumentException();
    }

    /**
     * Skips over and discards value bytes of data from this input
     * stream.
     *
     * <p>This implementation may end up skipping over some smaller
     * number of bytes, possibly 0, if and only if it reaches the end
     * of the underlying stream.</p>
     *
     * <p>The actual number of bytes skipped is returned.</p>
     *
     * @param value the number of bytes to be skipped.
     * @return the actual number of bytes skipped.
     * @throws IOException if an I/O error occurs.
     * @throws IllegalArgumentException if value is negative.
     */
    @Override
    public long skip(final long value) throws IOException {
        if (value >= 0) {
            long skipped = 0;
            while (skipped < value) {
                final long rem = value - skipped;
                final int x = read(skipBuf, 0, (int) (skipBuf.length > rem ? rem : skipBuf.length));
                if (x == -1) {
                    return skipped;
                }
                skipped += x;
            }
            return skipped;
        }
        throw new IllegalArgumentException();
    }

    /**
     * Reads the stream until it finds the "End of central directory
     * record" and consumes it as well.
     */
    private void skipRemainderOfArchive() throws IOException {
        // skip over central directory. One LFH has been read too much
        // already. The calculation discounts file names and extra
        // data, so it will be too short.
        if (entriesRead > 0) {
            realSkip((long) entriesRead * CFH_LEN - LFH_LEN);
            final boolean foundEocd = findEocdRecord();
            if (foundEocd) {
                realSkip((long) ZipFile.MIN_EOCD_SIZE - WORD /* signature */ - SHORT /* comment len */);
                readFully(shortBuf);
                // file comment
                final int commentLen = ZipShort.getValue(shortBuf);
                if (commentLen >= 0) {
                    realSkip(commentLen);
                    return;
                }
            }
        }
        throw new IOException("Truncated ZIP file");
    }

    /**
     * Whether the compressed size for the entry is either known or
     * not required by the compression method being used.
     */
    private boolean supportsCompressedSizeFor(final ZipArchiveEntry entry) {
        return entry.getCompressedSize() != ArchiveEntry.SIZE_UNKNOWN
            || entry.getMethod() == ZipEntry.DEFLATED
            || entry.getMethod() == ZipMethod.ENHANCED_DEFLATED.getCode()
            || entry.getGeneralPurposeBit().usesDataDescriptor()
                && allowStoredEntriesWithDataDescriptor
                && entry.getMethod() == ZipEntry.STORED;
    }

    /**
     * Whether this entry requires a data descriptor this library can work with.
     *
     * @return true if allowStoredEntriesWithDataDescriptor is true,
     * the entry doesn't require any data descriptor or the method is
     * DEFLATED or ENHANCED_DEFLATED.
     */
    private boolean supportsDataDescriptorFor(final ZipArchiveEntry entry) {
        return !entry.getGeneralPurposeBit().usesDataDescriptor()
            || allowStoredEntriesWithDataDescriptor && entry.getMethod() == ZipEntry.STORED
            || entry.getMethod() == ZipEntry.DEFLATED
            || entry.getMethod() == ZipMethod.ENHANCED_DEFLATED.getCode();
    }
}