001/* 002 * Licensed to the Apache Software Foundation (ASF) under one or more 003 * contributor license agreements. See the NOTICE file distributed with 004 * this work for additional information regarding copyright ownership. 005 * The ASF licenses this file to You under the Apache License, Version 2.0 006 * (the "License"); you may not use this file except in compliance with 007 * the License. You may obtain a copy of the License at 008 * 009 * http://www.apache.org/licenses/LICENSE-2.0 010 * 011 * Unless required by applicable law or agreed to in writing, software 012 * distributed under the License is distributed on an "AS IS" BASIS, 013 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 014 * See the License for the specific language governing permissions and 015 * limitations under the License. 016 */ 017package org.apache.commons.compress.archivers.zip; 018 019import java.io.ByteArrayOutputStream; 020import java.io.File; 021import java.io.IOException; 022import java.io.InputStream; 023import java.io.OutputStream; 024import java.nio.ByteBuffer; 025import java.nio.channels.SeekableByteChannel; 026import java.nio.file.Files; 027import java.nio.file.LinkOption; 028import java.nio.file.OpenOption; 029import java.nio.file.Path; 030import java.nio.file.StandardOpenOption; 031import java.util.EnumSet; 032import java.util.HashMap; 033import java.util.LinkedList; 034import java.util.List; 035import java.util.Map; 036import java.util.zip.Deflater; 037import java.util.zip.ZipException; 038 039import org.apache.commons.compress.archivers.ArchiveEntry; 040import org.apache.commons.compress.archivers.ArchiveOutputStream; 041import org.apache.commons.compress.utils.ByteUtils; 042import org.apache.commons.compress.utils.CharsetNames; 043import org.apache.commons.compress.utils.IOUtils; 044 045/** 046 * Reimplementation of {@link java.util.zip.ZipOutputStream 047 * java.util.zip.ZipOutputStream} to handle the extended 048 * functionality of this package, especially internal/external file 049 * attributes and extra fields with different layouts for local file 050 * data and central directory entries. 051 * 052 * <p>This class will try to use {@link 053 * java.nio.channels.SeekableByteChannel} when it knows that the 054 * output is going to go to a file and no split archive shall be 055 * created.</p> 056 * 057 * <p>If SeekableByteChannel cannot be used, this implementation will use 058 * a Data Descriptor to store size and CRC information for {@link 059 * #DEFLATED DEFLATED} entries, you don't need to 060 * calculate them yourself. Unfortunately, this is not possible for 061 * the {@link #STORED STORED} method, where setting the CRC and 062 * uncompressed size information is required before {@link 063 * #putArchiveEntry(ZipArchiveEntry)} can be called.</p> 064 * 065 * <p>As of Apache Commons Compress 1.3, the class transparently supports Zip64 066 * extensions and thus individual entries and archives larger than 4 067 * GB or with more than 65536 entries in most cases but explicit 068 * control is provided via {@link #setUseZip64}. If the stream can not 069 * use SeekableByteChannel and you try to write a ZipArchiveEntry of 070 * unknown size, then Zip64 extensions will be disabled by default.</p> 071 * 072 * @NotThreadSafe 073 */ 074public class ZipArchiveOutputStream extends ArchiveOutputStream<ZipArchiveEntry> { 075 076 /** 077 * Structure collecting information for the entry that is 078 * currently being written. 
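 *
 * <p>The lifecycle tracked here is the usual put/write/close sequence on the
 * enclosing stream; a minimal sketch (file and entry names are illustrative
 * only):</p>
 * <pre>{@code
 * try (ZipArchiveOutputStream zos = new ZipArchiveOutputStream(new File("example.zip"))) {
 *     ZipArchiveEntry e = new ZipArchiveEntry("hello.txt");
 *     zos.putArchiveEntry(e);
 *     zos.write("Hello, world".getBytes(java.nio.charset.StandardCharsets.UTF_8));
 *     zos.closeArchiveEntry();
 * }
 * }</pre>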
079 */ 080 private static final class CurrentEntry { 081 082 /** 083 * Current ZIP entry. 084 */ 085 private final ZipArchiveEntry entry; 086 087 /** 088 * Offset for CRC entry in the local file header data for the 089 * current entry starts here. 090 */ 091 private long localDataStart; 092 093 /** 094 * Data for local header data 095 */ 096 private long dataStart; 097 098 /** 099 * Number of bytes read for the current entry (can't rely on 100 * Deflater#getBytesRead) when using DEFLATED. 101 */ 102 private long bytesRead; 103 104 /** 105 * Whether current entry was the first one using ZIP64 features. 106 */ 107 private boolean causedUseOfZip64; 108 109 /** 110 * Whether write() has been called at all. 111 * 112 * <p>In order to create a valid archive {@link 113 * #closeArchiveEntry closeArchiveEntry} will write an empty 114 * array to get the CRC right if nothing has been written to 115 * the stream at all.</p> 116 */ 117 private boolean hasWritten; 118 119 private CurrentEntry(final ZipArchiveEntry entry) { 120 this.entry = entry; 121 } 122 } 123 124 private static final class EntryMetaData { 125 private final long offset; 126 private final boolean usesDataDescriptor; 127 private EntryMetaData(final long offset, final boolean usesDataDescriptor) { 128 this.offset = offset; 129 this.usesDataDescriptor = usesDataDescriptor; 130 } 131 } 132 133 /** 134 * enum that represents the possible policies for creating Unicode 135 * extra fields. 136 */ 137 public static final class UnicodeExtraFieldPolicy { 138 139 /** 140 * Always create Unicode extra fields. 141 */ 142 public static final UnicodeExtraFieldPolicy ALWAYS = new UnicodeExtraFieldPolicy("always"); 143 144 /** 145 * Never create Unicode extra fields. 146 */ 147 public static final UnicodeExtraFieldPolicy NEVER = new UnicodeExtraFieldPolicy("never"); 148 149 /** 150 * Create Unicode extra fields for file names that cannot be 151 * encoded using the specified encoding. 
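 *
 * <p>A policy is chosen on the stream before entries are added, via
 * {@link ZipArchiveOutputStream#setCreateUnicodeExtraFields}; a minimal
 * sketch (the archive name is illustrative only):</p>
 * <pre>{@code
 * ZipArchiveOutputStream zos = new ZipArchiveOutputStream(new File("out.zip"));
 * zos.setCreateUnicodeExtraFields(UnicodeExtraFieldPolicy.NOT_ENCODEABLE);
 * }</pre>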
152 */ 153 public static final UnicodeExtraFieldPolicy NOT_ENCODEABLE = new UnicodeExtraFieldPolicy("not encodeable"); 154 155 private final String name; 156 private UnicodeExtraFieldPolicy(final String n) { 157 name = n; 158 } 159 160 @Override 161 public String toString() { 162 return name; 163 } 164 } 165 166 static final int BUFFER_SIZE = 512; 167 private static final int LFH_SIG_OFFSET = 0; 168 private static final int LFH_VERSION_NEEDED_OFFSET = 4; 169 private static final int LFH_GPB_OFFSET = 6; 170 private static final int LFH_METHOD_OFFSET = 8; 171 private static final int LFH_TIME_OFFSET = 10; 172 private static final int LFH_CRC_OFFSET = 14; 173 private static final int LFH_COMPRESSED_SIZE_OFFSET = 18; 174 private static final int LFH_ORIGINAL_SIZE_OFFSET = 22; 175 private static final int LFH_FILENAME_LENGTH_OFFSET = 26; 176 private static final int LFH_EXTRA_LENGTH_OFFSET = 28; 177 private static final int LFH_FILENAME_OFFSET = 30; 178 private static final int CFH_SIG_OFFSET = 0; 179 private static final int CFH_VERSION_MADE_BY_OFFSET = 4; 180 private static final int CFH_VERSION_NEEDED_OFFSET = 6; 181 private static final int CFH_GPB_OFFSET = 8; 182 private static final int CFH_METHOD_OFFSET = 10; 183 private static final int CFH_TIME_OFFSET = 12; 184 private static final int CFH_CRC_OFFSET = 16; 185 private static final int CFH_COMPRESSED_SIZE_OFFSET = 20; 186 private static final int CFH_ORIGINAL_SIZE_OFFSET = 24; 187 private static final int CFH_FILENAME_LENGTH_OFFSET = 28; 188 private static final int CFH_EXTRA_LENGTH_OFFSET = 30; 189 private static final int CFH_COMMENT_LENGTH_OFFSET = 32; 190 private static final int CFH_DISK_NUMBER_OFFSET = 34; 191 private static final int CFH_INTERNAL_ATTRIBUTES_OFFSET = 36; 192 193 private static final int CFH_EXTERNAL_ATTRIBUTES_OFFSET = 38; 194 195 private static final int CFH_LFH_OFFSET = 42; 196 197 private static final int CFH_FILENAME_OFFSET = 46; 198 199 /** 200 * Compression method for deflated entries. 201 */ 202 public static final int DEFLATED = java.util.zip.ZipEntry.DEFLATED; 203 204 /** 205 * Default compression level for deflated entries. 206 */ 207 public static final int DEFAULT_COMPRESSION = Deflater.DEFAULT_COMPRESSION; 208 209 /** 210 * Compression method for stored entries. 211 */ 212 public static final int STORED = java.util.zip.ZipEntry.STORED; 213 214 /** 215 * default encoding for file names and comment. 216 */ 217 static final String DEFAULT_ENCODING = CharsetNames.UTF_8; 218 219 /** 220 * General purpose flag, which indicates that file names are 221 * written in UTF-8. 222 * @deprecated use {@link GeneralPurposeBit#UFT8_NAMES_FLAG} instead 223 */ 224 @Deprecated 225 public static final int EFS_FLAG = GeneralPurposeBit.UFT8_NAMES_FLAG; 226 227 /** 228 * Helper, a 0 as ZipShort. 229 */ 230 private static final byte[] ZERO = {0, 0}; 231 232 /** 233 * Helper, a 0 as ZipLong. 
234 */ 235 private static final byte[] LZERO = {0, 0, 0, 0}; 236 237 private static final byte[] ONE = ZipLong.getBytes(1L); 238 239 /* 240 * Various ZIP constants shared between this class, ZipArchiveInputStream and ZipFile 241 */ 242 /** 243 * local file header signature 244 */ 245 static final byte[] LFH_SIG = ZipLong.LFH_SIG.getBytes(); //NOSONAR 246 247 /** 248 * data descriptor signature 249 */ 250 static final byte[] DD_SIG = ZipLong.DD_SIG.getBytes(); //NOSONAR 251 252 /** 253 * central file header signature 254 */ 255 static final byte[] CFH_SIG = ZipLong.CFH_SIG.getBytes(); //NOSONAR 256 257 /** 258 * end of central dir signature 259 */ 260 static final byte[] EOCD_SIG = ZipLong.getBytes(0X06054B50L); //NOSONAR 261 262 /** 263 * ZIP64 end of central dir signature 264 */ 265 static final byte[] ZIP64_EOCD_SIG = ZipLong.getBytes(0X06064B50L); //NOSONAR 266 267 /** 268 * ZIP64 end of central dir locator signature 269 */ 270 static final byte[] ZIP64_EOCD_LOC_SIG = ZipLong.getBytes(0X07064B50L); //NOSONAR 271 272 /** indicates if this archive is finished. protected for use in Jar implementation */ 273 protected boolean finished; 274 275 /** 276 * Current entry. 277 */ 278 private CurrentEntry entry; 279 280 /** 281 * The file comment. 282 */ 283 private String comment = ""; 284 285 /** 286 * Compression level for next entry. 287 */ 288 private int level = DEFAULT_COMPRESSION; 289 290 /** 291 * Has the compression level changed when compared to the last 292 * entry? 293 */ 294 private boolean hasCompressionLevelChanged; 295 296 /** 297 * Default compression method for next entry. 298 */ 299 private int method = java.util.zip.ZipEntry.DEFLATED; 300 301 /** 302 * List of ZipArchiveEntries written so far. 303 */ 304 private final List<ZipArchiveEntry> entries = new LinkedList<>(); 305 306 private final StreamCompressor streamCompressor; 307 308 /** 309 * Start of central directory. 310 */ 311 private long cdOffset; 312 313 /** 314 * Length of central directory. 315 */ 316 private long cdLength; 317 318 /** 319 * Disk number start of central directory. 320 */ 321 private long cdDiskNumberStart; 322 323 /** 324 * Length of end of central directory 325 */ 326 private long eocdLength; 327 328 /** 329 * Holds some book-keeping data for each entry. 330 */ 331 private final Map<ZipArchiveEntry, EntryMetaData> metaData = new HashMap<>(); 332 333 /** 334 * The encoding to use for file names and the file comment. 335 * 336 * <p>For a list of possible values see <a 337 * href="https://docs.oracle.com/javase/8/docs/technotes/guides/intl/encoding.doc.html">Supported Encodings</a>. 338 * Defaults to UTF-8.</p> 339 */ 340 private String encoding = DEFAULT_ENCODING; 341 342 /** 343 * The ZIP encoding to use for file names and the file comment. 344 * 345 * This field is of internal use and will be set in {@link 346 * #setEncoding(String)}. 347 */ 348 private ZipEncoding zipEncoding = ZipEncodingHelper.getZipEncoding(DEFAULT_ENCODING); 349 350 /** 351 * This Deflater object is used for output. 352 * 353 */ 354 protected final Deflater def; 355 356 /** 357 * Optional random access output. 358 */ 359 private final SeekableByteChannel channel; 360 361 private final OutputStream outputStream; 362 363 /** 364 * whether to use the general purpose bit flag when writing UTF-8 365 * file names or not. 366 */ 367 private boolean useUTF8Flag = true; 368 369 /** 370 * Whether to encode non-encodable file names as UTF-8. 
371 */ 372 private boolean fallbackToUTF8; 373 374 /** 375 * whether to create UnicodePathExtraField-s for each entry. 376 */ 377 private UnicodeExtraFieldPolicy createUnicodeExtraFields = UnicodeExtraFieldPolicy.NEVER; 378 379 /** 380 * Whether anything inside this archive has used a ZIP64 feature. 381 * 382 * @since 1.3 383 */ 384 private boolean hasUsedZip64; 385 386 private Zip64Mode zip64Mode = Zip64Mode.AsNeeded; 387 388 private final byte[] copyBuffer = new byte[32768]; 389 390 /** 391 * Whether we are creating a split zip 392 */ 393 private final boolean isSplitZip; 394 395 /** 396 * Holds the number of Central Directories on each disk, this is used 397 * when writing Zip64 End Of Central Directory and End Of Central Directory 398 */ 399 private final Map<Integer, Integer> numberOfCDInDiskData = new HashMap<>(); 400 401 /** 402 * Creates a new ZIP OutputStream writing to a File. Will use 403 * random access if possible. 404 * @param file the file to ZIP to 405 * @throws IOException on error 406 */ 407 public ZipArchiveOutputStream(final File file) throws IOException { 408 this(file.toPath()); 409 } 410 411 /** 412 * Creates a split ZIP Archive. 413 * 414 * <p>The files making up the archive will use Z01, Z02, 415 * ... extensions and the last part of it will be the given {@code 416 * file}.</p> 417 * 418 * <p>Even though the stream writes to a file this stream will 419 * behave as if no random access was possible. This means the 420 * sizes of stored entries need to be known before the actual 421 * entry data is written.</p> 422 * 423 * @param file the file that will become the last part of the split archive 424 * @param zipSplitSize maximum size of a single part of the split 425 * archive created by this stream. Must be between 64kB and about 426 * 4GB. 427 * 428 * @throws IOException on error 429 * @throws IllegalArgumentException if zipSplitSize is not in the required range 430 * @since 1.20 431 */ 432 public ZipArchiveOutputStream(final File file, final long zipSplitSize) throws IOException { 433 this(file.toPath(), zipSplitSize); 434 } 435 436 /** 437 * Creates a new ZIP OutputStream filtering the underlying stream. 438 * @param out the outputstream to zip 439 */ 440 public ZipArchiveOutputStream(final OutputStream out) { 441 this.outputStream = out; 442 this.channel = null; 443 def = new Deflater(level, true); 444 streamCompressor = StreamCompressor.create(out, def); 445 isSplitZip = false; 446 } 447 448 /** 449 * Creates a split ZIP Archive. 450 * <p>The files making up the archive will use Z01, Z02, 451 * ... extensions and the last part of it will be the given {@code 452 * file}.</p> 453 * <p>Even though the stream writes to a file this stream will 454 * behave as if no random access was possible. This means the 455 * sizes of stored entries need to be known before the actual 456 * entry data is written.</p> 457 * @param path the path to the file that will become the last part of the split archive 458 * @param zipSplitSize maximum size of a single part of the split 459 * archive created by this stream. Must be between 64kB and about 4GB. 
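 * A sketch of requesting parts of roughly 100 MB each (the path and size
 * here are illustrative only):
 * <pre>{@code
 * ZipArchiveOutputStream split = new ZipArchiveOutputStream(Paths.get("archive.zip"), 100L * 1024 * 1024);
 * }</pre>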
460 * @throws IOException on error 461 * @throws IllegalArgumentException if zipSplitSize is not in the required range 462 * @since 1.22 463 */ 464 public ZipArchiveOutputStream(final Path path, final long zipSplitSize) throws IOException { 465 def = new Deflater(level, true); 466 this.outputStream = new ZipSplitOutputStream(path, zipSplitSize); 467 streamCompressor = StreamCompressor.create(this.outputStream, def); 468 channel = null; 469 isSplitZip = true; 470 } 471 472 /** 473 * Creates a new ZIP OutputStream writing to a Path. Will use 474 * random access if possible. 475 * @param file the file to ZIP to 476 * @param options options specifying how the file is opened. 477 * @throws IOException on error 478 * @since 1.21 479 */ 480 public ZipArchiveOutputStream(final Path file, final OpenOption... options) throws IOException { 481 def = new Deflater(level, true); 482 OutputStream outputStream = null; 483 SeekableByteChannel channel = null; 484 StreamCompressor streamCompressor; 485 try { 486 channel = Files.newByteChannel(file, 487 EnumSet.of(StandardOpenOption.CREATE, StandardOpenOption.WRITE, 488 StandardOpenOption.READ, 489 StandardOpenOption.TRUNCATE_EXISTING)); 490 // will never get opened properly when an exception is thrown so doesn't need to get closed 491 streamCompressor = StreamCompressor.create(channel, def); //NOSONAR 492 } catch (final IOException e) { // NOSONAR 493 IOUtils.closeQuietly(channel); 494 channel = null; 495 outputStream = Files.newOutputStream(file, options); 496 streamCompressor = StreamCompressor.create(outputStream, def); 497 } 498 this.outputStream = outputStream; 499 this.channel = channel; 500 this.streamCompressor = streamCompressor; 501 this.isSplitZip = false; 502 } 503 504 /** 505 * Creates a new ZIP OutputStream writing to a SeekableByteChannel. 506 * 507 * <p>{@link 508 * org.apache.commons.compress.utils.SeekableInMemoryByteChannel} 509 * allows you to write to an in-memory archive using random 510 * access.</p> 511 * 512 * @param channel the channel to ZIP to 513 * @since 1.13 514 */ 515 public ZipArchiveOutputStream(final SeekableByteChannel channel) { 516 this.channel = channel; 517 def = new Deflater(level, true); 518 streamCompressor = StreamCompressor.create(channel, def); 519 outputStream = null; 520 isSplitZip = false; 521 } 522 523 /** 524 * Adds an archive entry with a raw input stream. 525 * 526 * If crc, size and compressed size are supplied on the entry, these values will be used as-is. 527 * Zip64 status is re-established based on the settings in this stream, and the supplied value 528 * is ignored. 529 * 530 * The entry is put and closed immediately. 531 * 532 * @param entry The archive entry to add 533 * @param rawStream The raw input stream of a different entry. May be compressed/encrypted. 534 * @throws IOException If copying fails 535 */ 536 public void addRawArchiveEntry(final ZipArchiveEntry entry, final InputStream rawStream) 537 throws IOException { 538 final ZipArchiveEntry ae = new ZipArchiveEntry(entry); 539 if (hasZip64Extra(ae)) { 540 // Will be re-added as required. this may make the file generated with this method 541 // somewhat smaller than standard mode, 542 // since standard mode is unable to remove the ZIP 64 header. 
543 ae.removeExtraField(Zip64ExtendedInformationExtraField.HEADER_ID); 544 } 545 final boolean is2PhaseSource = ae.getCrc() != ZipArchiveEntry.CRC_UNKNOWN 546 && ae.getSize() != ArchiveEntry.SIZE_UNKNOWN 547 && ae.getCompressedSize() != ArchiveEntry.SIZE_UNKNOWN; 548 putArchiveEntry(ae, is2PhaseSource); 549 copyFromZipInputStream(rawStream); 550 closeCopiedEntry(is2PhaseSource); 551 } 552 553 /** 554 * Adds UnicodeExtra fields for name and file comment if mode is 555 * ALWAYS or the data cannot be encoded using the configured 556 * encoding. 557 */ 558 private void addUnicodeExtraFields(final ZipArchiveEntry ze, final boolean encodable, 559 final ByteBuffer name) 560 throws IOException { 561 if (createUnicodeExtraFields == UnicodeExtraFieldPolicy.ALWAYS 562 || !encodable) { 563 ze.addExtraField(new UnicodePathExtraField(ze.getName(), 564 name.array(), 565 name.arrayOffset(), 566 name.limit() 567 - name.position())); 568 } 569 570 final String comm = ze.getComment(); 571 if (comm != null && !comm.isEmpty()) { 572 573 final boolean commentEncodable = zipEncoding.canEncode(comm); 574 575 if (createUnicodeExtraFields == UnicodeExtraFieldPolicy.ALWAYS 576 || !commentEncodable) { 577 final ByteBuffer commentB = getEntryEncoding(ze).encode(comm); 578 ze.addExtraField(new UnicodeCommentExtraField(comm, 579 commentB.array(), 580 commentB.arrayOffset(), 581 commentB.limit() 582 - commentB.position()) 583 ); 584 } 585 } 586 } 587 588 /** 589 * Whether this stream is able to write the given entry. 590 * 591 * <p>May return false if it is set up to use encryption or a 592 * compression method that hasn't been implemented yet.</p> 593 * @since 1.1 594 */ 595 @Override 596 public boolean canWriteEntryData(final ArchiveEntry ae) { 597 if (ae instanceof ZipArchiveEntry) { 598 final ZipArchiveEntry zae = (ZipArchiveEntry) ae; 599 return zae.getMethod() != ZipMethod.IMPLODING.getCode() 600 && zae.getMethod() != ZipMethod.UNSHRINKING.getCode() 601 && ZipUtil.canHandleEntryData(zae); 602 } 603 return false; 604 } 605 606 /** 607 * Verifies the sizes aren't too big in the Zip64Mode.Never case 608 * and returns whether the entry would require a Zip64 extra 609 * field. 610 */ 611 private boolean checkIfNeedsZip64(final Zip64Mode effectiveMode) 612 throws ZipException { 613 final boolean actuallyNeedsZip64 = isZip64Required(entry.entry, effectiveMode); 614 if (actuallyNeedsZip64 && effectiveMode == Zip64Mode.Never) { 615 throw new Zip64RequiredException(Zip64RequiredException.getEntryTooBigMessage(entry.entry)); 616 } 617 return actuallyNeedsZip64; 618 } 619 620 /** 621 * Closes this output stream and releases any system resources 622 * associated with the stream. 623 * 624 * @throws IOException if an I/O error occurs. 625 * @throws Zip64RequiredException if the archive's size exceeds 4 626 * GByte or there are more than 65535 entries inside the archive 627 * and {@link #setUseZip64} is {@link Zip64Mode#Never}. 628 */ 629 @Override 630 public void close() throws IOException { 631 try { 632 if (!finished) { 633 finish(); 634 } 635 } finally { 636 destroy(); 637 } 638 } 639 640 /** 641 * Writes all necessary data for this entry. 642 * @throws IOException on error 643 * @throws Zip64RequiredException if the entry's uncompressed or 644 * compressed size exceeds 4 GByte and {@link #setUseZip64} 645 * is {@link Zip64Mode#Never}. 
646 */ 647 @Override 648 public void closeArchiveEntry() throws IOException { 649 preClose(); 650 651 flushDeflater(); 652 653 final long bytesWritten = streamCompressor.getTotalBytesWritten() - entry.dataStart; 654 final long realCrc = streamCompressor.getCrc32(); 655 entry.bytesRead = streamCompressor.getBytesRead(); 656 final Zip64Mode effectiveMode = getEffectiveZip64Mode(entry.entry); 657 final boolean actuallyNeedsZip64 = handleSizesAndCrc(bytesWritten, realCrc, effectiveMode); 658 closeEntry(actuallyNeedsZip64, false); 659 streamCompressor.reset(); 660 } 661 662 /** 663 * Writes all necessary data for this entry. 664 * 665 * @param phased This entry is second phase of a 2-phase ZIP creation, size, compressed size and crc 666 * are known in ZipArchiveEntry 667 * @throws IOException on error 668 * @throws Zip64RequiredException if the entry's uncompressed or 669 * compressed size exceeds 4 GByte and {@link #setUseZip64} 670 * is {@link Zip64Mode#Never}. 671 */ 672 private void closeCopiedEntry(final boolean phased) throws IOException { 673 preClose(); 674 entry.bytesRead = entry.entry.getSize(); 675 final Zip64Mode effectiveMode = getEffectiveZip64Mode(entry.entry); 676 final boolean actuallyNeedsZip64 = checkIfNeedsZip64(effectiveMode); 677 closeEntry(actuallyNeedsZip64, phased); 678 } 679 680 private void closeEntry(final boolean actuallyNeedsZip64, final boolean phased) throws IOException { 681 if (!phased && channel != null) { 682 rewriteSizesAndCrc(actuallyNeedsZip64); 683 } 684 685 if (!phased) { 686 writeDataDescriptor(entry.entry); 687 } 688 entry = null; 689 } 690 691 private void copyFromZipInputStream(final InputStream src) throws IOException { 692 if (entry == null) { 693 throw new IllegalStateException("No current entry"); 694 } 695 ZipUtil.checkRequestedFeatures(entry.entry); 696 entry.hasWritten = true; 697 int length; 698 while ((length = src.read(copyBuffer)) >= 0 ) 699 { 700 streamCompressor.writeCounted(copyBuffer, 0, length); 701 count( length ); 702 } 703 } 704 705 /** 706 * Creates a new ZIP entry taking some information from the given 707 * file and using the provided name. 708 * 709 * <p>The name will be adjusted to end with a forward slash "/" if 710 * the file is a directory. If the file is not a directory a 711 * potential trailing forward slash will be stripped from the 712 * entry name.</p> 713 * 714 * <p>Must not be used if the stream has already been closed.</p> 715 */ 716 @Override 717 public ZipArchiveEntry createArchiveEntry(final File inputFile, final String entryName) 718 throws IOException { 719 if (finished) { 720 throw new IOException("Stream has already been finished"); 721 } 722 return new ZipArchiveEntry(inputFile, entryName); 723 } 724 725 /** 726 * Creates a new ZIP entry taking some information from the given 727 * file and using the provided name. 728 * 729 * <p>The name will be adjusted to end with a forward slash "/" if 730 * the file is a directory. If the file is not a directory a 731 * potential trailing forward slash will be stripped from the 732 * entry name.</p> 733 * 734 * <p>Must not be used if the stream has already been closed.</p> 735 * @param inputPath path to create the entry from. 736 * @param entryName name of the entry. 737 * @param options options indicating how symbolic links are handled. 738 * @return a new instance. 739 * @throws IOException if an I/O error occurs. 740 * @since 1.21 741 */ 742 @Override 743 public ZipArchiveEntry createArchiveEntry(final Path inputPath, final String entryName, final LinkOption... 
options) 744 throws IOException { 745 if (finished) { 746 throw new IOException("Stream has already been finished"); 747 } 748 return new ZipArchiveEntry(inputPath, entryName); 749 } 750 751 private byte[] createCentralFileHeader(final ZipArchiveEntry ze) throws IOException { 752 753 final EntryMetaData entryMetaData = metaData.get(ze); 754 final boolean needsZip64Extra = hasZip64Extra(ze) 755 || ze.getCompressedSize() >= ZipConstants.ZIP64_MAGIC 756 || ze.getSize() >= ZipConstants.ZIP64_MAGIC 757 || entryMetaData.offset >= ZipConstants.ZIP64_MAGIC 758 || ze.getDiskNumberStart() >= ZipConstants.ZIP64_MAGIC_SHORT 759 || zip64Mode == Zip64Mode.Always 760 || zip64Mode == Zip64Mode.AlwaysWithCompatibility; 761 762 if (needsZip64Extra && zip64Mode == Zip64Mode.Never) { 763 // must be the offset that is too big, otherwise an 764 // exception would have been throw in putArchiveEntry or 765 // closeArchiveEntry 766 throw new Zip64RequiredException(Zip64RequiredException 767 .ARCHIVE_TOO_BIG_MESSAGE); 768 } 769 770 771 handleZip64Extra(ze, entryMetaData.offset, needsZip64Extra); 772 773 return createCentralFileHeader(ze, getName(ze), entryMetaData, needsZip64Extra); 774 } 775 776 /** 777 * Writes the central file header entry. 778 * @param ze the entry to write 779 * @param name The encoded name 780 * @param entryMetaData meta data for this file 781 * @throws IOException on error 782 */ 783 private byte[] createCentralFileHeader(final ZipArchiveEntry ze, final ByteBuffer name, 784 final EntryMetaData entryMetaData, 785 final boolean needsZip64Extra) throws IOException { 786 if (isSplitZip) { 787 // calculate the disk number for every central file header, 788 // this will be used in writing End Of Central Directory and Zip64 End Of Central Directory 789 final int currentSplitSegment = ((ZipSplitOutputStream) this.outputStream).getCurrentSplitSegmentIndex(); 790 if (numberOfCDInDiskData.get(currentSplitSegment) == null) { 791 numberOfCDInDiskData.put(currentSplitSegment, 1); 792 } else { 793 final int originalNumberOfCD = numberOfCDInDiskData.get(currentSplitSegment); 794 numberOfCDInDiskData.put(currentSplitSegment, originalNumberOfCD + 1); 795 } 796 } 797 798 final byte[] extra = ze.getCentralDirectoryExtra(); 799 final int extraLength = extra.length; 800 801 // file comment length 802 String comm = ze.getComment(); 803 if (comm == null) { 804 comm = ""; 805 } 806 807 final ByteBuffer commentB = getEntryEncoding(ze).encode(comm); 808 final int nameLen = name.limit() - name.position(); 809 final int commentLen = commentB.limit() - commentB.position(); 810 final int len= CFH_FILENAME_OFFSET + nameLen + extraLength + commentLen; 811 final byte[] buf = new byte[len]; 812 813 System.arraycopy(CFH_SIG, 0, buf, CFH_SIG_OFFSET, ZipConstants.WORD); 814 815 // version made by 816 // CheckStyle:MagicNumber OFF 817 ZipShort.putShort(ze.getPlatform() << 8 | (!hasUsedZip64 ? ZipConstants.DATA_DESCRIPTOR_MIN_VERSION : ZipConstants.ZIP64_MIN_VERSION), 818 buf, CFH_VERSION_MADE_BY_OFFSET); 819 820 final int zipMethod = ze.getMethod(); 821 final boolean encodable = zipEncoding.canEncode(ze.getName()); 822 ZipShort.putShort(versionNeededToExtract(zipMethod, needsZip64Extra, entryMetaData.usesDataDescriptor), 823 buf, CFH_VERSION_NEEDED_OFFSET); 824 getGeneralPurposeBits(!encodable && fallbackToUTF8, entryMetaData.usesDataDescriptor).encode(buf, CFH_GPB_OFFSET); 825 826 // compression method 827 ZipShort.putShort(zipMethod, buf, CFH_METHOD_OFFSET); 828 829 830 // last mod. 
time and date 831 ZipUtil.toDosTime(ze.getTime(), buf, CFH_TIME_OFFSET); 832 833 // CRC 834 // compressed length 835 // uncompressed length 836 ZipLong.putLong(ze.getCrc(), buf, CFH_CRC_OFFSET); 837 if (ze.getCompressedSize() >= ZipConstants.ZIP64_MAGIC 838 || ze.getSize() >= ZipConstants.ZIP64_MAGIC 839 || zip64Mode == Zip64Mode.Always 840 || zip64Mode == Zip64Mode.AlwaysWithCompatibility) { 841 ZipLong.ZIP64_MAGIC.putLong(buf, CFH_COMPRESSED_SIZE_OFFSET); 842 ZipLong.ZIP64_MAGIC.putLong(buf, CFH_ORIGINAL_SIZE_OFFSET); 843 } else { 844 ZipLong.putLong(ze.getCompressedSize(), buf, CFH_COMPRESSED_SIZE_OFFSET); 845 ZipLong.putLong(ze.getSize(), buf, CFH_ORIGINAL_SIZE_OFFSET); 846 } 847 848 ZipShort.putShort(nameLen, buf, CFH_FILENAME_LENGTH_OFFSET); 849 850 // extra field length 851 ZipShort.putShort(extraLength, buf, CFH_EXTRA_LENGTH_OFFSET); 852 853 ZipShort.putShort(commentLen, buf, CFH_COMMENT_LENGTH_OFFSET); 854 855 // disk number start 856 if (isSplitZip) { 857 if (ze.getDiskNumberStart() >= ZipConstants.ZIP64_MAGIC_SHORT || zip64Mode == Zip64Mode.Always) { 858 ZipShort.putShort(ZipConstants.ZIP64_MAGIC_SHORT, buf, CFH_DISK_NUMBER_OFFSET); 859 } else { 860 ZipShort.putShort((int) ze.getDiskNumberStart(), buf, CFH_DISK_NUMBER_OFFSET); 861 } 862 } else { 863 System.arraycopy(ZERO, 0, buf, CFH_DISK_NUMBER_OFFSET, ZipConstants.SHORT); 864 } 865 866 // internal file attributes 867 ZipShort.putShort(ze.getInternalAttributes(), buf, CFH_INTERNAL_ATTRIBUTES_OFFSET); 868 869 // external file attributes 870 ZipLong.putLong(ze.getExternalAttributes(), buf, CFH_EXTERNAL_ATTRIBUTES_OFFSET); 871 872 // relative offset of LFH 873 if (entryMetaData.offset >= ZipConstants.ZIP64_MAGIC || zip64Mode == Zip64Mode.Always) { 874 ZipLong.putLong(ZipConstants.ZIP64_MAGIC, buf, CFH_LFH_OFFSET); 875 } else { 876 ZipLong.putLong(Math.min(entryMetaData.offset, ZipConstants.ZIP64_MAGIC), buf, CFH_LFH_OFFSET); 877 } 878 879 // file name 880 System.arraycopy(name.array(), name.arrayOffset(), buf, CFH_FILENAME_OFFSET, nameLen); 881 882 final int extraStart = CFH_FILENAME_OFFSET + nameLen; 883 System.arraycopy(extra, 0, buf, extraStart, extraLength); 884 885 final int commentStart = extraStart + extraLength; 886 887 // file comment 888 System.arraycopy(commentB.array(), commentB.arrayOffset(), buf, commentStart, commentLen); 889 return buf; 890 } 891 892 private byte[] createLocalFileHeader(final ZipArchiveEntry ze, final ByteBuffer name, final boolean encodable, 893 final boolean phased, final long archiveOffset) { 894 final ZipExtraField oldEx = ze.getExtraField(ResourceAlignmentExtraField.ID); 895 if (oldEx != null) { 896 ze.removeExtraField(ResourceAlignmentExtraField.ID); 897 } 898 final ResourceAlignmentExtraField oldAlignmentEx = 899 oldEx instanceof ResourceAlignmentExtraField ? 
(ResourceAlignmentExtraField) oldEx : null; 900 901 int alignment = ze.getAlignment(); 902 if (alignment <= 0 && oldAlignmentEx != null) { 903 alignment = oldAlignmentEx.getAlignment(); 904 } 905 906 if (alignment > 1 || oldAlignmentEx != null && !oldAlignmentEx.allowMethodChange()) { 907 final int oldLength = LFH_FILENAME_OFFSET + 908 name.limit() - name.position() + 909 ze.getLocalFileDataExtra().length; 910 911 final int padding = (int) (-archiveOffset - oldLength - ZipExtraField.EXTRAFIELD_HEADER_SIZE 912 - ResourceAlignmentExtraField.BASE_SIZE & 913 alignment - 1); 914 ze.addExtraField(new ResourceAlignmentExtraField(alignment, 915 oldAlignmentEx != null && oldAlignmentEx.allowMethodChange(), padding)); 916 } 917 918 final byte[] extra = ze.getLocalFileDataExtra(); 919 final int nameLen = name.limit() - name.position(); 920 final int len = LFH_FILENAME_OFFSET + nameLen + extra.length; 921 final byte[] buf = new byte[len]; 922 923 System.arraycopy(LFH_SIG, 0, buf, LFH_SIG_OFFSET, ZipConstants.WORD); 924 925 //store method in local variable to prevent multiple method calls 926 final int zipMethod = ze.getMethod(); 927 final boolean dataDescriptor = usesDataDescriptor(zipMethod, phased); 928 929 ZipShort.putShort(versionNeededToExtract(zipMethod, hasZip64Extra(ze), dataDescriptor), buf, LFH_VERSION_NEEDED_OFFSET); 930 931 final GeneralPurposeBit generalPurposeBit = getGeneralPurposeBits(!encodable && fallbackToUTF8, dataDescriptor); 932 generalPurposeBit.encode(buf, LFH_GPB_OFFSET); 933 934 // compression method 935 ZipShort.putShort(zipMethod, buf, LFH_METHOD_OFFSET); 936 937 ZipUtil.toDosTime(ze.getTime(), buf, LFH_TIME_OFFSET); 938 939 // CRC 940 if (phased || !(zipMethod == DEFLATED || channel != null)){ 941 ZipLong.putLong(ze.getCrc(), buf, LFH_CRC_OFFSET); 942 } else { 943 System.arraycopy(LZERO, 0, buf, LFH_CRC_OFFSET, ZipConstants.WORD); 944 } 945 946 // compressed length 947 // uncompressed length 948 if (hasZip64Extra(entry.entry)){ 949 // point to ZIP64 extended information extra field for 950 // sizes, may get rewritten once sizes are known if 951 // stream is seekable 952 ZipLong.ZIP64_MAGIC.putLong(buf, LFH_COMPRESSED_SIZE_OFFSET); 953 ZipLong.ZIP64_MAGIC.putLong(buf, LFH_ORIGINAL_SIZE_OFFSET); 954 } else if (phased) { 955 ZipLong.putLong(ze.getCompressedSize(), buf, LFH_COMPRESSED_SIZE_OFFSET); 956 ZipLong.putLong(ze.getSize(), buf, LFH_ORIGINAL_SIZE_OFFSET); 957 } else if (zipMethod == DEFLATED || channel != null) { 958 System.arraycopy(LZERO, 0, buf, LFH_COMPRESSED_SIZE_OFFSET, ZipConstants.WORD); 959 System.arraycopy(LZERO, 0, buf, LFH_ORIGINAL_SIZE_OFFSET, ZipConstants.WORD); 960 } else { // Stored 961 ZipLong.putLong(ze.getSize(), buf, LFH_COMPRESSED_SIZE_OFFSET); 962 ZipLong.putLong(ze.getSize(), buf, LFH_ORIGINAL_SIZE_OFFSET); 963 } 964 // file name length 965 ZipShort.putShort(nameLen, buf, LFH_FILENAME_LENGTH_OFFSET); 966 967 // extra field length 968 ZipShort.putShort(extra.length, buf, LFH_EXTRA_LENGTH_OFFSET); 969 970 // file name 971 System.arraycopy( name.array(), name.arrayOffset(), buf, LFH_FILENAME_OFFSET, nameLen); 972 973 // extra fields 974 System.arraycopy(extra, 0, buf, LFH_FILENAME_OFFSET + nameLen, extra.length); 975 976 return buf; 977 } 978 979 /** 980 * Writes next block of compressed data to the output stream. 
981 * @throws IOException on error 982 */ 983 protected final void deflate() throws IOException { 984 streamCompressor.deflate(); 985 } 986 987 /** 988 * Closes the underlying stream/file without finishing the 989 * archive, the result will likely be a corrupt archive. 990 * 991 * <p>This method only exists to support tests that generate 992 * corrupt archives so they can clean up any temporary files.</p> 993 */ 994 void destroy() throws IOException { 995 try { 996 if (channel != null) { 997 channel.close(); 998 } 999 } finally { 1000 if (outputStream != null) { 1001 outputStream.close(); 1002 } 1003 } 1004 } 1005 1006 /** 1007 * {@inheritDoc} 1008 * @throws Zip64RequiredException if the archive's size exceeds 4 1009 * GByte or there are more than 65535 entries inside the archive 1010 * and {@link #setUseZip64} is {@link Zip64Mode#Never}. 1011 */ 1012 @Override 1013 public void finish() throws IOException { 1014 if (finished) { 1015 throw new IOException("This archive has already been finished"); 1016 } 1017 1018 if (entry != null) { 1019 throw new IOException("This archive contains unclosed entries."); 1020 } 1021 1022 final long cdOverallOffset = streamCompressor.getTotalBytesWritten(); 1023 cdOffset = cdOverallOffset; 1024 if (isSplitZip) { 1025 // when creating a split zip, the offset should be 1026 // the offset to the corresponding segment disk 1027 final ZipSplitOutputStream zipSplitOutputStream = (ZipSplitOutputStream) this.outputStream; 1028 cdOffset = zipSplitOutputStream.getCurrentSplitSegmentBytesWritten(); 1029 cdDiskNumberStart = zipSplitOutputStream.getCurrentSplitSegmentIndex(); 1030 } 1031 writeCentralDirectoryInChunks(); 1032 1033 cdLength = streamCompressor.getTotalBytesWritten() - cdOverallOffset; 1034 1035 // calculate the length of end of central directory, as it may be used in writeZip64CentralDirectory 1036 final ByteBuffer commentData = this.zipEncoding.encode(comment); 1037 final long commentLength = (long) commentData.limit() - commentData.position(); 1038 eocdLength = ZipConstants.WORD /* length of EOCD_SIG */ 1039 + ZipConstants.SHORT /* number of this disk */ 1040 + ZipConstants.SHORT /* disk number of start of central directory */ 1041 + ZipConstants.SHORT /* total number of entries on this disk */ 1042 + ZipConstants.SHORT /* total number of entries */ 1043 + ZipConstants.WORD /* size of central directory */ 1044 + ZipConstants.WORD /* offset of start of central directory */ 1045 + ZipConstants.SHORT /* ZIP comment length */ 1046 + commentLength /* ZIP comment */; 1047 1048 writeZip64CentralDirectory(); 1049 writeCentralDirectoryEnd(); 1050 metaData.clear(); 1051 entries.clear(); 1052 streamCompressor.close(); 1053 if (isSplitZip) { 1054 // trigger the ZipSplitOutputStream to write the final split segment 1055 outputStream.close(); 1056 } 1057 finished = true; 1058 } 1059 1060 /** 1061 * Flushes this output stream and forces any buffered output bytes 1062 * to be written out to the stream. 1063 * 1064 * @throws IOException if an I/O error occurs. 1065 */ 1066 @Override 1067 public void flush() throws IOException { 1068 if (outputStream != null) { 1069 outputStream.flush(); 1070 } 1071 } 1072 1073 /** 1074 * Ensures all bytes sent to the deflater are written to the stream. 1075 */ 1076 private void flushDeflater() throws IOException { 1077 if (entry.entry.getMethod() == DEFLATED) { 1078 streamCompressor.flushDeflater(); 1079 } 1080 } 1081 1082 /** 1083 * Returns the total number of bytes written to this stream. 
1084 * @return the number of written bytes 1085 * @since 1.22 1086 */ 1087 @Override 1088 public long getBytesWritten() { 1089 return streamCompressor.getTotalBytesWritten(); 1090 } 1091 1092 /** 1093 * If the mode is AsNeeded and the entry is a compressed entry of 1094 * unknown size that gets written to a non-seekable stream then 1095 * change the default to Never. 1096 * 1097 * @since 1.3 1098 */ 1099 private Zip64Mode getEffectiveZip64Mode(final ZipArchiveEntry ze) { 1100 if (zip64Mode != Zip64Mode.AsNeeded 1101 || channel != null 1102 || ze.getMethod() != DEFLATED 1103 || ze.getSize() != ArchiveEntry.SIZE_UNKNOWN) { 1104 return zip64Mode; 1105 } 1106 return Zip64Mode.Never; 1107 } 1108 1109 /** 1110 * The encoding to use for file names and the file comment. 1111 * 1112 * @return null if using the platform's default character encoding. 1113 */ 1114 public String getEncoding() { 1115 return encoding; 1116 } 1117 1118 private ZipEncoding getEntryEncoding(final ZipArchiveEntry ze) { 1119 final boolean encodable = zipEncoding.canEncode(ze.getName()); 1120 return !encodable && fallbackToUTF8 1121 ? ZipEncodingHelper.ZIP_ENCODING_UTF_8 : zipEncoding; 1122 } 1123 1124 private GeneralPurposeBit getGeneralPurposeBits(final boolean utfFallback, final boolean usesDataDescriptor) { 1125 final GeneralPurposeBit b = new GeneralPurposeBit(); 1126 b.useUTF8ForNames(useUTF8Flag || utfFallback); 1127 if (usesDataDescriptor) { 1128 b.useDataDescriptor(true); 1129 } 1130 return b; 1131 } 1132 1133 private ByteBuffer getName(final ZipArchiveEntry ze) throws IOException { 1134 return getEntryEncoding(ze).encode(ze.getName()); 1135 } 1136 1137 /** 1138 * Gets the existing ZIP64 extended information extra field or 1139 * create a new one and add it to the entry. 1140 * 1141 * @since 1.3 1142 */ 1143 private Zip64ExtendedInformationExtraField 1144 getZip64Extra(final ZipArchiveEntry ze) { 1145 if (entry != null) { 1146 entry.causedUseOfZip64 = !hasUsedZip64; 1147 } 1148 hasUsedZip64 = true; 1149 final ZipExtraField extra = ze.getExtraField(Zip64ExtendedInformationExtraField.HEADER_ID); 1150 Zip64ExtendedInformationExtraField z64 = extra instanceof Zip64ExtendedInformationExtraField 1151 ? (Zip64ExtendedInformationExtraField) extra : null; 1152 if (z64 == null) { 1153 /* 1154 System.err.println("Adding z64 for " + ze.getName() 1155 + ", method: " + ze.getMethod() 1156 + " (" + (ze.getMethod() == STORED) + ")" 1157 + ", channel: " + (channel != null)); 1158 */ 1159 z64 = new Zip64ExtendedInformationExtraField(); 1160 } 1161 1162 // even if the field is there already, make sure it is the first one 1163 ze.addAsFirstExtraField(z64); 1164 1165 return z64; 1166 } 1167 1168 /** 1169 * Ensures the current entry's size and CRC information is set to 1170 * the values just written, verifies it isn't too big in the 1171 * Zip64Mode.Never case and returns whether the entry would 1172 * require a Zip64 extra field. 
1173 */ 1174 private boolean handleSizesAndCrc(final long bytesWritten, final long crc, 1175 final Zip64Mode effectiveMode) 1176 throws ZipException { 1177 if (entry.entry.getMethod() == DEFLATED) { 1178 /* It turns out def.getBytesRead() returns wrong values if 1179 * the size exceeds 4 GB on Java < Java7 1180 entry.entry.setSize(def.getBytesRead()); 1181 */ 1182 entry.entry.setSize(entry.bytesRead); 1183 entry.entry.setCompressedSize(bytesWritten); 1184 entry.entry.setCrc(crc); 1185 1186 } else if (channel == null) { 1187 if (entry.entry.getCrc() != crc) { 1188 throw new ZipException("Bad CRC checksum for entry " 1189 + entry.entry.getName() + ": " 1190 + Long.toHexString(entry.entry.getCrc()) 1191 + " instead of " 1192 + Long.toHexString(crc)); 1193 } 1194 1195 if (entry.entry.getSize() != bytesWritten) { 1196 throw new ZipException("Bad size for entry " 1197 + entry.entry.getName() + ": " 1198 + entry.entry.getSize() 1199 + " instead of " 1200 + bytesWritten); 1201 } 1202 } else { /* method is STORED and we used SeekableByteChannel */ 1203 entry.entry.setSize(bytesWritten); 1204 entry.entry.setCompressedSize(bytesWritten); 1205 entry.entry.setCrc(crc); 1206 } 1207 1208 return checkIfNeedsZip64(effectiveMode); 1209 } 1210 1211 /** 1212 * If the entry needs Zip64 extra information inside the central 1213 * directory then configure its data. 1214 */ 1215 private void handleZip64Extra(final ZipArchiveEntry ze, final long lfhOffset, 1216 final boolean needsZip64Extra) { 1217 if (needsZip64Extra) { 1218 final Zip64ExtendedInformationExtraField z64 = getZip64Extra(ze); 1219 if (ze.getCompressedSize() >= ZipConstants.ZIP64_MAGIC 1220 || ze.getSize() >= ZipConstants.ZIP64_MAGIC 1221 || zip64Mode == Zip64Mode.Always 1222 || zip64Mode == Zip64Mode.AlwaysWithCompatibility) { 1223 z64.setCompressedSize(new ZipEightByteInteger(ze.getCompressedSize())); 1224 z64.setSize(new ZipEightByteInteger(ze.getSize())); 1225 } else { 1226 // reset value that may have been set for LFH 1227 z64.setCompressedSize(null); 1228 z64.setSize(null); 1229 } 1230 1231 final boolean needsToEncodeLfhOffset = 1232 lfhOffset >= ZipConstants.ZIP64_MAGIC || zip64Mode == Zip64Mode.Always; 1233 final boolean needsToEncodeDiskNumberStart = 1234 ze.getDiskNumberStart() >= ZipConstants.ZIP64_MAGIC_SHORT || zip64Mode == Zip64Mode.Always; 1235 1236 if (needsToEncodeLfhOffset || needsToEncodeDiskNumberStart) { 1237 z64.setRelativeHeaderOffset(new ZipEightByteInteger(lfhOffset)); 1238 } 1239 if (needsToEncodeDiskNumberStart) { 1240 z64.setDiskStartNumber(new ZipLong(ze.getDiskNumberStart())); 1241 } 1242 ze.setExtra(); 1243 } 1244 } 1245 1246 /** 1247 * Is there a ZIP64 extended information extra field for the 1248 * entry? 1249 * 1250 * @since 1.3 1251 */ 1252 private boolean hasZip64Extra(final ZipArchiveEntry ze) { 1253 return ze.getExtraField(Zip64ExtendedInformationExtraField 1254 .HEADER_ID) 1255 instanceof Zip64ExtendedInformationExtraField; 1256 } 1257 /** 1258 * This method indicates whether this archive is writing to a 1259 * seekable stream (i.e., to a random access file). 1260 * 1261 * <p>For seekable streams, you don't need to calculate the CRC or 1262 * uncompressed size for {@link #STORED} entries before 1263 * invoking {@link #putArchiveEntry(ZipArchiveEntry)}. 
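 *
 * <p>When this returns false, {@link #STORED} entries need size and CRC set
 * up front; a minimal sketch (the names {@code data} and {@code zos} are
 * assumed here, not part of this class):</p>
 * <pre>{@code
 * ZipArchiveEntry e = new ZipArchiveEntry("data.bin");
 * e.setMethod(ZipArchiveOutputStream.STORED);
 * e.setSize(data.length);
 * java.util.zip.CRC32 crc = new java.util.zip.CRC32();
 * crc.update(data);
 * e.setCrc(crc.getValue());
 * zos.putArchiveEntry(e);
 * }</pre>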
1264 * @return true if seekable 1265 */ 1266 public boolean isSeekable() { 1267 return channel != null; 1268 } 1269 1270 private boolean isTooLargeForZip32(final ZipArchiveEntry zipArchiveEntry) { 1271 return zipArchiveEntry.getSize() >= ZipConstants.ZIP64_MAGIC || zipArchiveEntry.getCompressedSize() >= ZipConstants.ZIP64_MAGIC; 1272 } 1273 1274 private boolean isZip64Required(final ZipArchiveEntry entry1, final Zip64Mode requestedMode) { 1275 return requestedMode == Zip64Mode.Always || requestedMode == Zip64Mode.AlwaysWithCompatibility 1276 || isTooLargeForZip32(entry1); 1277 } 1278 1279 private void preClose() throws IOException { 1280 if (finished) { 1281 throw new IOException("Stream has already been finished"); 1282 } 1283 1284 if (entry == null) { 1285 throw new IOException("No current entry to close"); 1286 } 1287 1288 if (!entry.hasWritten) { 1289 write(ByteUtils.EMPTY_BYTE_ARRAY, 0, 0); 1290 } 1291 } 1292 /** 1293 * {@inheritDoc} 1294 * @throws ClassCastException if entry is not an instance of ZipArchiveEntry 1295 * @throws Zip64RequiredException if the entry's uncompressed or 1296 * compressed size is known to exceed 4 GByte and {@link #setUseZip64} 1297 * is {@link Zip64Mode#Never}. 1298 */ 1299 @Override 1300 public void putArchiveEntry(final ZipArchiveEntry archiveEntry) throws IOException { 1301 putArchiveEntry(archiveEntry, false); 1302 } 1303 1304 /** 1305 * Writes the headers for an archive entry to the output stream. 1306 * The caller must then write the content to the stream and call 1307 * {@link #closeArchiveEntry()} to complete the process. 1308 1309 * @param archiveEntry The archiveEntry 1310 * @param phased If true size, compressedSize and crc required to be known up-front in the archiveEntry 1311 * @throws ClassCastException if entry is not an instance of ZipArchiveEntry 1312 * @throws Zip64RequiredException if the entry's uncompressed or 1313 * compressed size is known to exceed 4 GByte and {@link #setUseZip64} 1314 * is {@link Zip64Mode#Never}. 
1315 */ 1316 private void putArchiveEntry(final ZipArchiveEntry archiveEntry, final boolean phased) throws IOException { 1317 if (finished) { 1318 throw new IOException("Stream has already been finished"); 1319 } 1320 1321 if (entry != null) { 1322 closeArchiveEntry(); 1323 } 1324 1325 entry = new CurrentEntry(archiveEntry); 1326 entries.add(entry.entry); 1327 1328 setDefaults(entry.entry); 1329 1330 final Zip64Mode effectiveMode = getEffectiveZip64Mode(entry.entry); 1331 validateSizeInformation(effectiveMode); 1332 1333 if (shouldAddZip64Extra(entry.entry, effectiveMode)) { 1334 1335 final Zip64ExtendedInformationExtraField z64 = getZip64Extra(entry.entry); 1336 1337 final ZipEightByteInteger size; 1338 final ZipEightByteInteger compressedSize; 1339 if (phased) { 1340 // sizes are already known 1341 size = new ZipEightByteInteger(entry.entry.getSize()); 1342 compressedSize = new ZipEightByteInteger(entry.entry.getCompressedSize()); 1343 } else if (entry.entry.getMethod() == STORED 1344 && entry.entry.getSize() != ArchiveEntry.SIZE_UNKNOWN) { 1345 // actually, we already know the sizes 1346 compressedSize = size = new ZipEightByteInteger(entry.entry.getSize()); 1347 } else { 1348 // just a placeholder, real data will be in data 1349 // descriptor or inserted later via SeekableByteChannel 1350 compressedSize = size = ZipEightByteInteger.ZERO; 1351 } 1352 z64.setSize(size); 1353 z64.setCompressedSize(compressedSize); 1354 entry.entry.setExtra(); 1355 } 1356 1357 if (entry.entry.getMethod() == DEFLATED && hasCompressionLevelChanged) { 1358 def.setLevel(level); 1359 hasCompressionLevelChanged = false; 1360 } 1361 writeLocalFileHeader(archiveEntry, phased); 1362 } 1363 1364 /** 1365 * When using random access output, write the local file header 1366 * and potentially the ZIP64 extra containing the correct CRC and 1367 * compressed/uncompressed sizes. 
1368 */ 1369 private void rewriteSizesAndCrc(final boolean actuallyNeedsZip64) 1370 throws IOException { 1371 final long save = channel.position(); 1372 1373 channel.position(entry.localDataStart); 1374 writeOut(ZipLong.getBytes(entry.entry.getCrc())); 1375 if (!hasZip64Extra(entry.entry) || !actuallyNeedsZip64) { 1376 writeOut(ZipLong.getBytes(entry.entry.getCompressedSize())); 1377 writeOut(ZipLong.getBytes(entry.entry.getSize())); 1378 } else { 1379 writeOut(ZipLong.ZIP64_MAGIC.getBytes()); 1380 writeOut(ZipLong.ZIP64_MAGIC.getBytes()); 1381 } 1382 1383 if (hasZip64Extra(entry.entry)) { 1384 final ByteBuffer name = getName(entry.entry); 1385 final int nameLen = name.limit() - name.position(); 1386 // seek to ZIP64 extra, skip header and size information 1387 channel.position(entry.localDataStart + 3 * ZipConstants.WORD + 2 * ZipConstants.SHORT + nameLen + 2 * ZipConstants.SHORT); 1388 // inside the ZIP64 extra uncompressed size comes 1389 // first, unlike the LFH, CD or data descriptor 1390 writeOut(ZipEightByteInteger.getBytes(entry.entry.getSize())); 1391 writeOut(ZipEightByteInteger.getBytes(entry.entry.getCompressedSize())); 1392 1393 if (!actuallyNeedsZip64) { 1394 // do some cleanup: 1395 // * rewrite version needed to extract 1396 channel.position(entry.localDataStart - 5 * ZipConstants.SHORT); 1397 writeOut(ZipShort.getBytes(versionNeededToExtract(entry.entry.getMethod(), false, false))); 1398 1399 // * remove ZIP64 extra, so it doesn't get written 1400 // to the central directory 1401 entry.entry.removeExtraField(Zip64ExtendedInformationExtraField 1402 .HEADER_ID); 1403 entry.entry.setExtra(); 1404 1405 // * reset hasUsedZip64 if it has been set because 1406 // of this entry 1407 if (entry.causedUseOfZip64) { 1408 hasUsedZip64 = false; 1409 } 1410 } 1411 } 1412 channel.position(save); 1413 } 1414 1415 /** 1416 * Sets the file comment. 1417 * @param comment the comment 1418 */ 1419 public void setComment(final String comment) { 1420 this.comment = comment; 1421 } 1422 1423 1424 /** 1425 * Whether to create Unicode Extra Fields. 1426 * 1427 * <p>Defaults to NEVER.</p> 1428 * 1429 * @param b whether to create Unicode Extra Fields. 1430 */ 1431 public void setCreateUnicodeExtraFields(final UnicodeExtraFieldPolicy b) { 1432 createUnicodeExtraFields = b; 1433 } 1434 1435 1436 /** 1437 * Provides default values for compression method and last 1438 * modification time. 1439 */ 1440 private void setDefaults(final ZipArchiveEntry entry) { 1441 if (entry.getMethod() == -1) { // not specified 1442 entry.setMethod(method); 1443 } 1444 1445 if (entry.getTime() == -1) { // not specified 1446 entry.setTime(System.currentTimeMillis()); 1447 } 1448 } 1449 1450 /** 1451 * The encoding to use for file names and the file comment. 1452 * 1453 * <p>For a list of possible values see <a 1454 * href="https://docs.oracle.com/javase/8/docs/technotes/guides/intl/encoding.doc.html">Supported Encodings</a>. 1455 * Defaults to UTF-8.</p> 1456 * @param encoding the encoding to use for file names, use null 1457 * for the platform's default encoding 1458 */ 1459 public void setEncoding(final String encoding) { 1460 this.encoding = encoding; 1461 this.zipEncoding = ZipEncodingHelper.getZipEncoding(encoding); 1462 if (useUTF8Flag && !ZipEncodingHelper.isUTF8(encoding)) { 1463 useUTF8Flag = false; 1464 } 1465 } 1466 1467 /** 1468 * Whether to fall back to UTF and the language encoding flag if 1469 * the file name cannot be encoded using the specified encoding. 
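 *
 * <p>Typically used together with a non-UTF-8 encoding; a minimal sketch
 * (the stream {@code zos} and the choice of Cp437 are illustrative only):</p>
 * <pre>{@code
 * zos.setEncoding("Cp437");
 * zos.setFallbackToUTF8(true);
 * }</pre>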
1470 * 1471 * <p>Defaults to false.</p> 1472 * 1473 * @param b whether to fall back to UTF and the language encoding 1474 * flag if the file name cannot be encoded using the specified 1475 * encoding. 1476 */ 1477 public void setFallbackToUTF8(final boolean b) { 1478 fallbackToUTF8 = b; 1479 } 1480 1481 /** 1482 * Sets the compression level for subsequent entries. 1483 * 1484 * <p>Default is Deflater.DEFAULT_COMPRESSION.</p> 1485 * @param level the compression level. 1486 * @throws IllegalArgumentException if an invalid compression 1487 * level is specified. 1488 */ 1489 public void setLevel(final int level) { 1490 if (level < Deflater.DEFAULT_COMPRESSION 1491 || level > Deflater.BEST_COMPRESSION) { 1492 throw new IllegalArgumentException("Invalid compression level: " 1493 + level); 1494 } 1495 if (this.level == level) { 1496 return; 1497 } 1498 hasCompressionLevelChanged = true; 1499 this.level = level; 1500 } 1501 1502 /** 1503 * Sets the default compression method for subsequent entries. 1504 * 1505 * <p>Default is DEFLATED.</p> 1506 * @param method an {@code int} from java.util.zip.ZipEntry 1507 */ 1508 public void setMethod(final int method) { 1509 this.method = method; 1510 } 1511 1512 /** 1513 * Whether to set the language encoding flag if the file name 1514 * encoding is UTF-8. 1515 * 1516 * <p>Defaults to true.</p> 1517 * 1518 * @param b whether to set the language encoding flag if the file 1519 * name encoding is UTF-8 1520 */ 1521 public void setUseLanguageEncodingFlag(final boolean b) { 1522 useUTF8Flag = b && ZipEncodingHelper.isUTF8(encoding); 1523 } 1524 1525 /** 1526 * Whether Zip64 extensions will be used. 1527 * 1528 * <p>When setting the mode to {@link Zip64Mode#Never Never}, 1529 * {@link #putArchiveEntry}, {@link #closeArchiveEntry}, {@link 1530 * #finish} or {@link #close} may throw a {@link 1531 * Zip64RequiredException} if the entry's size or the total size 1532 * of the archive exceeds 4GB or there are more than 65536 entries 1533 * inside the archive. Any archive created in this mode will be 1534 * readable by implementations that don't support Zip64.</p> 1535 * 1536 * <p>When setting the mode to {@link Zip64Mode#Always Always}, 1537 * Zip64 extensions will be used for all entries. Any archive 1538 * created in this mode may be unreadable by implementations that 1539 * don't support Zip64 even if all its contents would be.</p> 1540 * 1541 * <p>When setting the mode to {@link Zip64Mode#AsNeeded 1542 * AsNeeded}, Zip64 extensions will transparently be used for 1543 * those entries that require them. This mode can only be used if 1544 * the uncompressed size of the {@link ZipArchiveEntry} is known 1545 * when calling {@link #putArchiveEntry} or the archive is written 1546 * to a seekable output (i.e. you have used the {@link 1547 * #ZipArchiveOutputStream(java.io.File) File-arg constructor}) - 1548 * this mode is not valid when the output stream is not seekable 1549 * and the uncompressed size is unknown when {@link 1550 * #putArchiveEntry} is called.</p> 1551 * 1552 * <p>If no entry inside the resulting archive requires Zip64 1553 * extensions then {@link Zip64Mode#Never Never} will create the 1554 * smallest archive. {@link Zip64Mode#AsNeeded AsNeeded} will 1555 * create a slightly bigger archive if the uncompressed size of 1556 * any entry has initially been unknown and create an archive 1557 * identical to {@link Zip64Mode#Never Never} otherwise. 
{@link 1558 * Zip64Mode#Always Always} will create an archive that is at 1559 * least 24 bytes per entry bigger than the one {@link 1560 * Zip64Mode#Never Never} would create.</p> 1561 * 1562 * <p>Defaults to {@link Zip64Mode#AsNeeded AsNeeded} unless 1563 * {@link #putArchiveEntry} is called with an entry of unknown 1564 * size and data is written to a non-seekable stream - in this 1565 * case the default is {@link Zip64Mode#Never Never}.</p> 1566 * 1567 * @since 1.3 1568 * @param mode Whether Zip64 extensions will be used. 1569 */ 1570 public void setUseZip64(final Zip64Mode mode) { 1571 zip64Mode = mode; 1572 } 1573 1574 /** 1575 * Whether to add a Zip64 extended information extra field to the 1576 * local file header. 1577 * 1578 * <p>Returns true if</p> 1579 * 1580 * <ul> 1581 * <li>mode is Always</li> 1582 * <li>or we already know it is going to be needed</li> 1583 * <li>or the size is unknown and we can ensure it won't hurt 1584 * other implementations if we add it (i.e. we can erase its 1585 * usage</li> 1586 * </ul> 1587 */ 1588 private boolean shouldAddZip64Extra(final ZipArchiveEntry entry, final Zip64Mode mode) { 1589 return mode == Zip64Mode.Always 1590 || mode == Zip64Mode.AlwaysWithCompatibility 1591 || entry.getSize() >= ZipConstants.ZIP64_MAGIC 1592 || entry.getCompressedSize() >= ZipConstants.ZIP64_MAGIC 1593 || entry.getSize() == ArchiveEntry.SIZE_UNKNOWN 1594 && channel != null && mode != Zip64Mode.Never; 1595 } 1596 1597 /** 1598 * 4.4.1.4 If one of the fields in the end of central directory 1599 * record is too small to hold required data, the field SHOULD be 1600 * set to -1 (0xFFFF or 0xFFFFFFFF) and the ZIP64 format record 1601 * SHOULD be created. 1602 * @return true if zip64 End Of Central Directory is needed 1603 */ 1604 private boolean shouldUseZip64EOCD() { 1605 int numberOfThisDisk = 0; 1606 if (isSplitZip) { 1607 numberOfThisDisk = ((ZipSplitOutputStream) this.outputStream).getCurrentSplitSegmentIndex(); 1608 } 1609 final int numOfEntriesOnThisDisk = numberOfCDInDiskData.getOrDefault(numberOfThisDisk, 0); 1610 return numberOfThisDisk >= ZipConstants.ZIP64_MAGIC_SHORT /* number of this disk */ 1611 || cdDiskNumberStart >= ZipConstants.ZIP64_MAGIC_SHORT /* number of the disk with the start of the central directory */ 1612 || numOfEntriesOnThisDisk >= ZipConstants.ZIP64_MAGIC_SHORT /* total number of entries in the central directory on this disk */ 1613 || entries.size() >= ZipConstants.ZIP64_MAGIC_SHORT /* total number of entries in the central directory */ 1614 || cdLength >= ZipConstants.ZIP64_MAGIC /* size of the central directory */ 1615 || cdOffset >= ZipConstants.ZIP64_MAGIC; /* offset of start of central directory with respect to 1616 the starting disk number */ 1617 } 1618 1619 private boolean usesDataDescriptor(final int zipMethod, final boolean phased) { 1620 return !phased && zipMethod == DEFLATED && channel == null; 1621 } 1622 1623 /** 1624 * If the Zip64 mode is set to never, then all the data in End Of Central Directory 1625 * should not exceed their limits. 
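 *
 * <p>This check only matters when the caller has opted out of Zip64; a
 * minimal sketch of such a stream (the file name is illustrative only):</p>
 * <pre>{@code
 * ZipArchiveOutputStream zos = new ZipArchiveOutputStream(new File("small.zip"));
 * zos.setUseZip64(Zip64Mode.Never); // finish() will throw Zip64RequiredException if limits are exceeded
 * }</pre>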
1626 * @throws Zip64RequiredException if Zip64 is actually needed 1627 */ 1628 private void validateIfZip64IsNeededInEOCD() throws Zip64RequiredException { 1629 // exception will only be thrown if the Zip64 mode is never while Zip64 is actually needed 1630 if (zip64Mode != Zip64Mode.Never) { 1631 return; 1632 } 1633 1634 int numberOfThisDisk = 0; 1635 if (isSplitZip) { 1636 numberOfThisDisk = ((ZipSplitOutputStream) this.outputStream).getCurrentSplitSegmentIndex(); 1637 } 1638 if (numberOfThisDisk >= ZipConstants.ZIP64_MAGIC_SHORT) { 1639 throw new Zip64RequiredException(Zip64RequiredException 1640 .NUMBER_OF_THIS_DISK_TOO_BIG_MESSAGE); 1641 } 1642 1643 if (cdDiskNumberStart >= ZipConstants.ZIP64_MAGIC_SHORT) { 1644 throw new Zip64RequiredException(Zip64RequiredException 1645 .NUMBER_OF_THE_DISK_OF_CENTRAL_DIRECTORY_TOO_BIG_MESSAGE); 1646 } 1647 1648 final int numOfEntriesOnThisDisk = numberOfCDInDiskData.getOrDefault(numberOfThisDisk, 0); 1649 if (numOfEntriesOnThisDisk >= ZipConstants.ZIP64_MAGIC_SHORT) { 1650 throw new Zip64RequiredException(Zip64RequiredException 1651 .TOO_MANY_ENTRIES_ON_THIS_DISK_MESSAGE); 1652 } 1653 1654 // number of entries 1655 if (entries.size() >= ZipConstants.ZIP64_MAGIC_SHORT) { 1656 throw new Zip64RequiredException(Zip64RequiredException 1657 .TOO_MANY_ENTRIES_MESSAGE); 1658 } 1659 1660 if (cdLength >= ZipConstants.ZIP64_MAGIC) { 1661 throw new Zip64RequiredException(Zip64RequiredException 1662 .SIZE_OF_CENTRAL_DIRECTORY_TOO_BIG_MESSAGE); 1663 } 1664 1665 if (cdOffset >= ZipConstants.ZIP64_MAGIC) { 1666 throw new Zip64RequiredException(Zip64RequiredException 1667 .ARCHIVE_TOO_BIG_MESSAGE); 1668 } 1669 } 1670 1671 1672 /** 1673 * Throws an exception if the size is unknown for a stored entry 1674 * that is written to a non-seekable output or the entry is too 1675 * big to be written without Zip64 extra but the mode has been set 1676 * to Never. 1677 */ 1678 private void validateSizeInformation(final Zip64Mode effectiveMode) 1679 throws ZipException { 1680 // Size/CRC not required if SeekableByteChannel is used 1681 if (entry.entry.getMethod() == STORED && channel == null) { 1682 if (entry.entry.getSize() == ArchiveEntry.SIZE_UNKNOWN) { 1683 throw new ZipException("Uncompressed size is required for" 1684 + " STORED method when not writing to a" 1685 + " file"); 1686 } 1687 if (entry.entry.getCrc() == ZipArchiveEntry.CRC_UNKNOWN) { 1688 throw new ZipException("CRC checksum is required for STORED" 1689 + " method when not writing to a file"); 1690 } 1691 entry.entry.setCompressedSize(entry.entry.getSize()); 1692 } 1693 1694 if ((entry.entry.getSize() >= ZipConstants.ZIP64_MAGIC 1695 || entry.entry.getCompressedSize() >= ZipConstants.ZIP64_MAGIC) 1696 && effectiveMode == Zip64Mode.Never) { 1697 throw new Zip64RequiredException(Zip64RequiredException 1698 .getEntryTooBigMessage(entry.entry)); 1699 } 1700 } 1701 1702 1703 private int versionNeededToExtract(final int zipMethod, final boolean zip64, final boolean usedDataDescriptor) { 1704 if (zip64) { 1705 return ZipConstants.ZIP64_MIN_VERSION; 1706 } 1707 if (usedDataDescriptor) { 1708 return ZipConstants.DATA_DESCRIPTOR_MIN_VERSION; 1709 } 1710 return versionNeededToExtractMethod(zipMethod); 1711 } 1712 1713 private int versionNeededToExtractMethod(final int zipMethod) { 1714 return zipMethod == DEFLATED ? ZipConstants.DEFLATE_MIN_VERSION : ZipConstants.INITIAL_VERSION; 1715 } 1716 1717 /** 1718 * Writes bytes to ZIP entry. 
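 *
 * <p>A minimal usage sketch (illustrative only; {@code zipOut} is a placeholder for this stream):</p>
 *
 * <pre>{@code
 * zipOut.putArchiveEntry(new ZipArchiveEntry("data.txt"));
 * byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);
 * zipOut.write(payload, 0, payload.length);
 * zipOut.closeArchiveEntry();
 * }</pre>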
1719 * @param b the byte array to write 1720 * @param offset the start position to write from 1721 * @param length the number of bytes to write 1722 * @throws IOException on error 1723 */ 1724 @Override 1725 public void write(final byte[] b, final int offset, final int length) throws IOException { 1726 if (entry == null) { 1727 throw new IllegalStateException("No current entry"); 1728 } 1729 ZipUtil.checkRequestedFeatures(entry.entry); 1730 final long writtenThisTime = streamCompressor.write(b, offset, length, entry.entry.getMethod()); 1731 count(writtenThisTime); 1732 } 1733 1734 /** 1735 * Writes the "End of central dir record". 1736 * @throws IOException on error 1737 * @throws Zip64RequiredException if the archive's size exceeds 4 1738 * GByte or there are more than 65535 entries inside the archive 1739 * and {@link #setUseZip64(Zip64Mode)} is {@link Zip64Mode#Never}. 1740 */ 1741 protected void writeCentralDirectoryEnd() throws IOException { 1742 if (!hasUsedZip64 && isSplitZip) { 1743 ((ZipSplitOutputStream) this.outputStream).prepareToWriteUnsplittableContent(eocdLength); 1744 } 1745 1746 validateIfZip64IsNeededInEOCD(); 1747 1748 writeCounted(EOCD_SIG); 1749 1750 // number of this disk 1751 int numberOfThisDisk = 0; 1752 if (isSplitZip) { 1753 numberOfThisDisk = ((ZipSplitOutputStream) this.outputStream).getCurrentSplitSegmentIndex(); 1754 } 1755 writeCounted(ZipShort.getBytes(numberOfThisDisk)); 1756 1757 // disk number of the start of central directory 1758 writeCounted(ZipShort.getBytes((int) cdDiskNumberStart)); 1759 1760 // number of entries 1761 final int numberOfEntries = entries.size(); 1762 1763 // total number of entries in the central directory on this disk 1764 final int numOfEntriesOnThisDisk = isSplitZip 1765 ? numberOfCDInDiskData.getOrDefault(numberOfThisDisk, 0) 1766 : numberOfEntries; 1767 final byte[] numOfEntriesOnThisDiskData = ZipShort 1768 .getBytes(Math.min(numOfEntriesOnThisDisk, ZipConstants.ZIP64_MAGIC_SHORT)); 1769 writeCounted(numOfEntriesOnThisDiskData); 1770 1771 // number of entries 1772 final byte[] num = ZipShort.getBytes(Math.min(numberOfEntries, ZipConstants.ZIP64_MAGIC_SHORT)); 1773 writeCounted(num); 1774 1775 // length and location of CD 1776 writeCounted(ZipLong.getBytes(Math.min(cdLength, ZipConstants.ZIP64_MAGIC))); 1777 writeCounted(ZipLong.getBytes(Math.min(cdOffset, ZipConstants.ZIP64_MAGIC))); 1778 1779 // ZIP file comment 1780 final ByteBuffer data = this.zipEncoding.encode(comment); 1781 final int dataLen = data.limit() - data.position(); 1782 writeCounted(ZipShort.getBytes(dataLen)); 1783 streamCompressor.writeCounted(data.array(), data.arrayOffset(), dataLen); 1784 } 1785 1786 private void writeCentralDirectoryInChunks() throws IOException { 1787 final int NUM_PER_WRITE = 1000; 1788 final ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(70 * NUM_PER_WRITE); 1789 int count = 0; 1790 for (final ZipArchiveEntry ze : entries) { 1791 byteArrayOutputStream.write(createCentralFileHeader(ze)); 1792 if (++count > NUM_PER_WRITE){ 1793 writeCounted(byteArrayOutputStream.toByteArray()); 1794 byteArrayOutputStream.reset(); 1795 count = 0; 1796 } 1797 } 1798 writeCounted(byteArrayOutputStream.toByteArray()); 1799 } 1800 1801 /** 1802 * Writes the central file header entry. 1803 * @param ze the entry to write 1804 * @throws IOException on error 1805 * @throws Zip64RequiredException if the archive's size exceeds 4 1806 * GByte and {@link #setUseZip64(Zip64Mode)} is {@link 1807 * Zip64Mode#Never}. 
1808 */ 1809 protected void writeCentralFileHeader(final ZipArchiveEntry ze) throws IOException { 1810 final byte[] centralFileHeader = createCentralFileHeader(ze); 1811 writeCounted(centralFileHeader); 1812 } 1813 1814 /** 1815 * Write bytes to output or random access file. 1816 * @param data the byte array to write 1817 * @throws IOException on error 1818 */ 1819 private void writeCounted(final byte[] data) throws IOException { 1820 streamCompressor.writeCounted(data); 1821 } 1822 1823 /** 1824 * Writes the data descriptor entry. 1825 * @param ze the entry to write 1826 * @throws IOException on error 1827 */ 1828 protected void writeDataDescriptor(final ZipArchiveEntry ze) throws IOException { 1829 if (!usesDataDescriptor(ze.getMethod(), false)) { 1830 return; 1831 } 1832 writeCounted(DD_SIG); 1833 writeCounted(ZipLong.getBytes(ze.getCrc())); 1834 if (!hasZip64Extra(ze)) { 1835 writeCounted(ZipLong.getBytes(ze.getCompressedSize())); 1836 writeCounted(ZipLong.getBytes(ze.getSize())); 1837 } else { 1838 writeCounted(ZipEightByteInteger.getBytes(ze.getCompressedSize())); 1839 writeCounted(ZipEightByteInteger.getBytes(ze.getSize())); 1840 } 1841 } 1842 1843 /** 1844 * Writes the local file header entry 1845 * @param ze the entry to write 1846 * @throws IOException on error 1847 */ 1848 protected void writeLocalFileHeader(final ZipArchiveEntry ze) throws IOException { 1849 writeLocalFileHeader(ze, false); 1850 } 1851 1852 private void writeLocalFileHeader(final ZipArchiveEntry ze, final boolean phased) throws IOException { 1853 final boolean encodable = zipEncoding.canEncode(ze.getName()); 1854 final ByteBuffer name = getName(ze); 1855 1856 if (createUnicodeExtraFields != UnicodeExtraFieldPolicy.NEVER) { 1857 addUnicodeExtraFields(ze, encodable, name); 1858 } 1859 1860 long localHeaderStart = streamCompressor.getTotalBytesWritten(); 1861 if (isSplitZip) { 1862 // when creating a split zip, the offset should be 1863 // the offset to the corresponding segment disk 1864 final ZipSplitOutputStream splitOutputStream = (ZipSplitOutputStream) this.outputStream; 1865 ze.setDiskNumberStart(splitOutputStream.getCurrentSplitSegmentIndex()); 1866 localHeaderStart = splitOutputStream.getCurrentSplitSegmentBytesWritten(); 1867 } 1868 1869 final byte[] localHeader = createLocalFileHeader(ze, name, encodable, phased, localHeaderStart); 1870 metaData.put(ze, new EntryMetaData(localHeaderStart, usesDataDescriptor(ze.getMethod(), phased))); 1871 entry.localDataStart = localHeaderStart + LFH_CRC_OFFSET; // At crc offset 1872 writeCounted(localHeader); 1873 entry.dataStart = streamCompressor.getTotalBytesWritten(); 1874 } 1875 1876 /** 1877 * Write bytes to output or random access file. 1878 * @param data the byte array to write 1879 * @throws IOException on error 1880 */ 1881 protected final void writeOut(final byte[] data) throws IOException { 1882 streamCompressor.writeOut(data, 0, data.length); 1883 } 1884 1885 /** 1886 * Write bytes to output or random access file. 1887 * @param data the byte array to write 1888 * @param offset the start position to write from 1889 * @param length the number of bytes to write 1890 * @throws IOException on error 1891 */ 1892 protected final void writeOut(final byte[] data, final int offset, final int length) 1893 throws IOException { 1894 streamCompressor.writeOut(data, offset, length); 1895 } 1896 1897 /** 1898 * Write preamble data. For most of the time, this is used to 1899 * make self-extracting zips. 
1900 * 1901 * @param preamble data to write 1902 * @throws IOException on error 1903 * @since 1.21 1904 */ 1905 public void writePreamble(final byte[] preamble) throws IOException { 1906 writePreamble(preamble, 0, preamble.length); 1907 } 1908 1909 /** 1910 * Write preamble data. Most of the time, this is used to 1911 * make self-extracting zips. 1912 * 1913 * @param preamble data to write 1914 * @param offset the start offset in the data 1915 * @param length the number of bytes to write 1916 * @throws IOException on error 1917 * @since 1.21 1918 */ 1919 public void writePreamble(final byte[] preamble, final int offset, final int length) throws IOException { 1920 if (entry != null) { 1921 throw new IllegalStateException("Preamble must be written before creating an entry"); 1922 } 1923 this.streamCompressor.writeCounted(preamble, offset, length); 1924 } 1925 1926 /** 1927 * Writes the "ZIP64 End of central dir record" and 1928 * "ZIP64 End of central dir locator". 1929 * @throws IOException on error 1930 * @since 1.3 1931 */ 1932 protected void writeZip64CentralDirectory() throws IOException { 1933 if (zip64Mode == Zip64Mode.Never) { 1934 return; 1935 } 1936 1937 if (!hasUsedZip64 && shouldUseZip64EOCD()) { 1938 // actually "will use" 1939 hasUsedZip64 = true; 1940 } 1941 1942 if (!hasUsedZip64) { 1943 return; 1944 } 1945 1946 long offset = streamCompressor.getTotalBytesWritten(); 1947 long diskNumberStart = 0L; 1948 if (isSplitZip) { 1949 // when creating a split zip, the offset should be 1950 // the offset to the corresponding segment disk 1951 final ZipSplitOutputStream zipSplitOutputStream = (ZipSplitOutputStream) this.outputStream; 1952 offset = zipSplitOutputStream.getCurrentSplitSegmentBytesWritten(); 1953 diskNumberStart = zipSplitOutputStream.getCurrentSplitSegmentIndex(); 1954 } 1955 1956 1957 writeOut(ZIP64_EOCD_SIG); 1958 // size of zip64 end of central directory record; we don't have any variable length 1959 // as we don't support the extensible data sector, yet 1960 writeOut(ZipEightByteInteger 1961 .getBytes(ZipConstants.SHORT /* version made by */ 1962 + ZipConstants.SHORT /* version needed to extract */ 1963 + ZipConstants.WORD /* disk number */ 1964 + ZipConstants.WORD /* disk with central directory */ 1965 + ZipConstants.DWORD /* number of entries in CD on this disk */ 1966 + ZipConstants.DWORD /* total number of entries */ 1967 + ZipConstants.DWORD /* size of CD */ 1968 + (long) ZipConstants.DWORD /* offset of CD */ 1969 )); 1970 1971 // version made by and version needed to extract 1972 writeOut(ZipShort.getBytes(ZipConstants.ZIP64_MIN_VERSION)); 1973 writeOut(ZipShort.getBytes(ZipConstants.ZIP64_MIN_VERSION)); 1974 1975 // number of this disk 1976 int numberOfThisDisk = 0; 1977 if (isSplitZip) { 1978 numberOfThisDisk = ((ZipSplitOutputStream) this.outputStream).getCurrentSplitSegmentIndex(); 1979 } 1980 writeOut(ZipLong.getBytes(numberOfThisDisk)); 1981 1982 // disk number of the start of central directory 1983 writeOut(ZipLong.getBytes(cdDiskNumberStart)); 1984 1985 // total number of entries in the central directory on this disk 1986 final int numOfEntriesOnThisDisk = isSplitZip 1987 ?
numberOfCDInDiskData.getOrDefault(numberOfThisDisk, 0) 1988 : entries.size(); 1989 final byte[] numOfEntriesOnThisDiskData = ZipEightByteInteger.getBytes(numOfEntriesOnThisDisk); 1990 writeOut(numOfEntriesOnThisDiskData); 1991 1992 // number of entries 1993 final byte[] num = ZipEightByteInteger.getBytes(entries.size()); 1994 writeOut(num); 1995 1996 // length and location of CD 1997 writeOut(ZipEightByteInteger.getBytes(cdLength)); 1998 writeOut(ZipEightByteInteger.getBytes(cdOffset)); 1999 2000 // no "zip64 extensible data sector" for now 2001 2002 if (isSplitZip) { 2003 // based on the ZIP specification, the End Of Central Directory record and 2004 // the Zip64 End Of Central Directory locator record must be on the same segment 2005 final int zip64EOCDLOCLength = ZipConstants.WORD /* length of ZIP64_EOCD_LOC_SIG */ 2006 + ZipConstants.WORD /* disk number of ZIP64_EOCD_SIG */ 2007 + ZipConstants.DWORD /* offset of ZIP64_EOCD_SIG */ 2008 + ZipConstants.WORD /* total number of disks */; 2009 2010 final long unsplittableContentSize = zip64EOCDLOCLength + eocdLength; 2011 ((ZipSplitOutputStream) this.outputStream).prepareToWriteUnsplittableContent(unsplittableContentSize); 2012 } 2013 2014 // and now the "ZIP64 end of central directory locator" 2015 writeOut(ZIP64_EOCD_LOC_SIG); 2016 2017 // disk number holding the ZIP64 EOCD record 2018 writeOut(ZipLong.getBytes(diskNumberStart)); 2019 // relative offset of ZIP64 EOCD record 2020 writeOut(ZipEightByteInteger.getBytes(offset)); 2021 // total number of disks 2022 if (isSplitZip) { 2023 // the Zip64 End Of Central Directory Locator and the End Of Central Directory must be 2024 // in the same split segment, which means they must be located in the last disk 2025 final int totalNumberOfDisks = ((ZipSplitOutputStream) this.outputStream).getCurrentSplitSegmentIndex() + 1; 2026 writeOut(ZipLong.getBytes(totalNumberOfDisks)); 2027 } else { 2028 writeOut(ONE); 2029 } 2030 } 2031}