001/* 002 * Licensed to the Apache Software Foundation (ASF) under one or more 003 * contributor license agreements. See the NOTICE file distributed with 004 * this work for additional information regarding copyright ownership. 005 * The ASF licenses this file to You under the Apache License, Version 2.0 006 * (the "License"); you may not use this file except in compliance with 007 * the License. You may obtain a copy of the License at 008 * 009 * http://www.apache.org/licenses/LICENSE-2.0 010 * 011 * Unless required by applicable law or agreed to in writing, software 012 * distributed under the License is distributed on an "AS IS" BASIS, 013 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 014 * See the License for the specific language governing permissions and 015 * limitations under the License. 016 */ 017package org.apache.commons.compress.archivers.zip; 018 019import java.io.ByteArrayOutputStream; 020import java.io.File; 021import java.io.IOException; 022import java.io.InputStream; 023import java.io.OutputStream; 024import java.nio.ByteBuffer; 025import java.nio.channels.SeekableByteChannel; 026import java.nio.file.LinkOption; 027import java.nio.file.OpenOption; 028import java.nio.file.Path; 029import java.util.HashMap; 030import java.util.LinkedList; 031import java.util.List; 032import java.util.Map; 033import java.util.zip.Deflater; 034import java.util.zip.ZipException; 035 036import org.apache.commons.compress.archivers.ArchiveEntry; 037import org.apache.commons.compress.archivers.ArchiveOutputStream; 038import org.apache.commons.compress.utils.ByteUtils; 039import org.apache.commons.compress.utils.CharsetNames; 040 041/** 042 * Reimplementation of {@link java.util.zip.ZipOutputStream java.util.zip.ZipOutputStream} to handle the extended functionality of this package, especially 043 * internal/external file attributes and extra fields with different layouts for local file data and central directory entries. 
 * <p>
 * This class will try to use {@link java.nio.channels.SeekableByteChannel} when it knows that the output is going to go to a file and no split archive shall be
 * created.
 * </p>
 * <p>
 * If SeekableByteChannel cannot be used, this implementation will use a Data Descriptor to store size and CRC information for {@link #DEFLATED DEFLATED}
 * entries, you don't need to calculate them yourself. Unfortunately, this is not possible for the {@link #STORED STORED} method, where setting the CRC and
 * uncompressed size information is required before {@link #putArchiveEntry(ZipArchiveEntry)} can be called.
 * </p>
 * <p>
 * As of Apache Commons Compress 1.3, the class transparently supports Zip64 extensions and thus individual entries and archives larger than 4 GB or with more
 * than 65,536 entries in most cases but explicit control is provided via {@link #setUseZip64}. If the stream can not use SeekableByteChannel and you try to
 * write a ZipArchiveEntry of unknown size, then Zip64 extensions will be disabled by default.
 * </p>
 *
 * @NotThreadSafe
 */
public class ZipArchiveOutputStream extends ArchiveOutputStream<ZipArchiveEntry> {

    /**
     * Structure collecting information for the entry that is currently being written.
     */
    private static final class CurrentEntry {

        /**
         * Current ZIP entry.
         */
        private final ZipArchiveEntry entry;

        /**
         * Offset for CRC entry in the local file header data for the current entry starts here.
         */
        private long localDataStart;

        /**
         * Data for local header data
         */
        private long dataStart;

        /**
         * Number of bytes read for the current entry (can't rely on Deflater#getBytesRead) when using DEFLATED.
         */
        private long bytesRead;

        /**
         * Whether current entry was the first one using ZIP64 features.
         */
        private boolean causedUseOfZip64;

        /**
         * Whether write() has been called at all.
         *
         * <p>
         * In order to create a valid archive {@link #closeArchiveEntry closeArchiveEntry} will write an empty array to get the CRC right if nothing has been
         * written to the stream at all.
         * </p>
         */
        private boolean hasWritten;

        private CurrentEntry(final ZipArchiveEntry entry) {
            this.entry = entry;
        }
    }

    /**
     * Per-entry bookkeeping recorded when the local file header is written and consumed when the central directory is written.
     */
    private static final class EntryMetaData {
        // Offset of the entry's local file header within the archive.
        private final long offset;
        // Whether the entry's sizes/CRC were written via a trailing data descriptor.
        private final boolean usesDataDescriptor;

        private EntryMetaData(final long offset, final boolean usesDataDescriptor) {
            this.offset = offset;
            this.usesDataDescriptor = usesDataDescriptor;
        }
    }

    /**
     * enum that represents the possible policies for creating Unicode extra fields.
     */
    public static final class UnicodeExtraFieldPolicy {

        /**
         * Always create Unicode extra fields.
         */
        public static final UnicodeExtraFieldPolicy ALWAYS = new UnicodeExtraFieldPolicy("always");

        /**
         * Never create Unicode extra fields.
         */
        public static final UnicodeExtraFieldPolicy NEVER = new UnicodeExtraFieldPolicy("never");

        /**
         * Creates Unicode extra fields for file names that cannot be encoded using the specified encoding.
         */
        public static final UnicodeExtraFieldPolicy NOT_ENCODEABLE = new UnicodeExtraFieldPolicy("not encodeable");

        private final String name;

        private UnicodeExtraFieldPolicy(final String n) {
            name = n;
        }

        @Override
        public String toString() {
            return name;
        }
    }

    static final int BUFFER_SIZE = 512;

    // Byte offsets of the fields within a local file header (LFH) record,
    // as defined by the ZIP format (APPNOTE.TXT section 4.3.7).
    private static final int LFH_SIG_OFFSET = 0;
    private static final int LFH_VERSION_NEEDED_OFFSET = 4;
    private static final int LFH_GPB_OFFSET = 6;
    private static final int LFH_METHOD_OFFSET = 8;
    private static final int LFH_TIME_OFFSET = 10;
    private static final int LFH_CRC_OFFSET = 14;
    private static final int LFH_COMPRESSED_SIZE_OFFSET = 18;
    private static final int LFH_ORIGINAL_SIZE_OFFSET = 22;
    private static final int LFH_FILENAME_LENGTH_OFFSET = 26;
    private static final int LFH_EXTRA_LENGTH_OFFSET = 28;
    private static final int LFH_FILENAME_OFFSET = 30;

    // Byte offsets of the fields within a central file header (CFH) record,
    // as defined by the ZIP format (APPNOTE.TXT section 4.3.12).
    private static final int CFH_SIG_OFFSET = 0;
    private static final int CFH_VERSION_MADE_BY_OFFSET = 4;
    private static final int CFH_VERSION_NEEDED_OFFSET = 6;
    private static final int CFH_GPB_OFFSET = 8;
    private static final int CFH_METHOD_OFFSET = 10;
    private static final int CFH_TIME_OFFSET = 12;
    private static final int CFH_CRC_OFFSET = 16;
    private static final int CFH_COMPRESSED_SIZE_OFFSET = 20;
    private static final int CFH_ORIGINAL_SIZE_OFFSET = 24;
    private static final int CFH_FILENAME_LENGTH_OFFSET = 28;
    private static final int CFH_EXTRA_LENGTH_OFFSET = 30;
    private static final int CFH_COMMENT_LENGTH_OFFSET = 32;
    private static final int CFH_DISK_NUMBER_OFFSET = 34;
    private static final int CFH_INTERNAL_ATTRIBUTES_OFFSET = 36;

    private static final int CFH_EXTERNAL_ATTRIBUTES_OFFSET = 38;

    private static final int CFH_LFH_OFFSET = 42;

    private static final int CFH_FILENAME_OFFSET = 46;

    /**
     * Compression method for deflated
     * entries.
     */
    public static final int DEFLATED = java.util.zip.ZipEntry.DEFLATED;

    /**
     * Default compression level for deflated entries.
     */
    public static final int DEFAULT_COMPRESSION = Deflater.DEFAULT_COMPRESSION;

    /**
     * Compression method for stored entries.
     */
    public static final int STORED = java.util.zip.ZipEntry.STORED;

    /**
     * default encoding for file names and comment.
     */
    static final String DEFAULT_ENCODING = CharsetNames.UTF_8;

    /**
     * General purpose flag, which indicates that file names are written in UTF-8.
     *
     * @deprecated use {@link GeneralPurposeBit#UFT8_NAMES_FLAG} instead
     */
    @Deprecated
    public static final int EFS_FLAG = GeneralPurposeBit.UFT8_NAMES_FLAG;

    /**
     * Helper, a 0 as ZipShort.
     */
    private static final byte[] ZERO = { 0, 0 };

    /**
     * Helper, a 0 as ZipLong.
     */
    private static final byte[] LZERO = { 0, 0, 0, 0 };

    // Helper, a 1 as ZipLong.
    private static final byte[] ONE = ZipLong.getBytes(1L);

    /*
     * Various ZIP constants shared between this class, ZipArchiveInputStream and ZipFile
     */
    /**
     * local file header signature
     */
    static final byte[] LFH_SIG = ZipLong.LFH_SIG.getBytes(); // NOSONAR

    /**
     * data descriptor signature
     */
    static final byte[] DD_SIG = ZipLong.DD_SIG.getBytes(); // NOSONAR

    /**
     * central file header signature
     */
    static final byte[] CFH_SIG = ZipLong.CFH_SIG.getBytes(); // NOSONAR

    /**
     * end of central dir signature
     */
    static final byte[] EOCD_SIG = ZipLong.getBytes(0X06054B50L); // NOSONAR

    /**
     * ZIP64 end of central dir signature
     */
    static final byte[] ZIP64_EOCD_SIG = ZipLong.getBytes(0X06064B50L); // NOSONAR

    /**
     * ZIP64 end of central dir locator signature
     */
    static final byte[] ZIP64_EOCD_LOC_SIG = ZipLong.getBytes(0X07064B50L); // NOSONAR

    /** Indicates if this archive is finished. protected for use in Jar implementation */
    protected boolean finished;

    /**
     * Current entry.
     */
    private CurrentEntry entry;

    /**
     * The file comment.
     */
    private String comment = "";

    /**
     * Compression level for next entry.
     */
    private int level = DEFAULT_COMPRESSION;

    /**
     * Has the compression level changed when compared to the last entry?
     */
    private boolean hasCompressionLevelChanged;

    /**
     * Default compression method for next entry.
     */
    private int method = java.util.zip.ZipEntry.DEFLATED;

    /**
     * List of ZipArchiveEntries written so far.
     */
    private final List<ZipArchiveEntry> entries = new LinkedList<>();

    private final StreamCompressor streamCompressor;

    /**
     * Start of central directory.
     */
    private long cdOffset;

    /**
     * Length of central directory.
     */
    private long cdLength;

    /**
     * Disk number start of central directory.
     */
    private long cdDiskNumberStart;

    /**
     * Length of end of central directory
     */
    private long eocdLength;

    /**
     * Holds some book-keeping data for each entry.
     */
    private final Map<ZipArchiveEntry, EntryMetaData> metaData = new HashMap<>();

    /**
     * The encoding to use for file names and the file comment.
     *
     * <p>
     * For a list of possible values see <a href="https://docs.oracle.com/javase/8/docs/technotes/guides/intl/encoding.doc.html">Supported Encodings</a>.
     * Defaults to UTF-8.
     * </p>
     */
    private String charsetName = DEFAULT_ENCODING;

    /**
     * The ZIP encoding to use for file names and the file comment.
     *
     * This field is of internal use and will be set in {@link #setEncoding(String)}.
     */
    private ZipEncoding zipEncoding = ZipEncodingHelper.getZipEncoding(DEFAULT_ENCODING);

    /**
     * This Deflater object is used for output.
     */
    protected final Deflater def;

    private final OutputStream outputStream;

    /**
     * whether to use the general purpose bit flag when writing UTF-8 file names or not.
     */
    private boolean useUTF8Flag = true;

    /**
     * Whether to encode non-encodable file names as UTF-8.
     */
    private boolean fallbackToUTF8;

    /**
     * whether to create UnicodePathExtraField-s for each entry.
     */
    private UnicodeExtraFieldPolicy createUnicodeExtraFields = UnicodeExtraFieldPolicy.NEVER;

    /**
     * Whether anything inside this archive has used a ZIP64 feature.
     *
     * @since 1.3
     */
    private boolean hasUsedZip64;

    private Zip64Mode zip64Mode = Zip64Mode.AsNeeded;

    // Reused buffer for copying raw entry data, see copyFromZipInputStream.
    private final byte[] copyBuffer = new byte[32768];

    /**
     * Whether we are creating a split zip
     */
    private final boolean isSplitZip;

    /**
     * Holds the number of Central Directories on each disk, this is used when writing Zip64 End Of Central Directory and End Of Central Directory
     */
    private final Map<Integer, Integer> numberOfCDInDiskData = new HashMap<>();

    /**
     * Creates a new ZIP OutputStream writing to a File. Will use random access if possible.
     *
     * @param file the file to ZIP to
     * @throws IOException on error
     */
    public ZipArchiveOutputStream(final File file) throws IOException {
        this(file.toPath());
    }

    /**
     * Creates a split ZIP Archive.
     *
     * <p>
     * The files making up the archive will use Z01, Z02, ... extensions and the last part of it will be the given {@code
     * file}.
     * </p>
     *
     * <p>
     * Even though the stream writes to a file this stream will behave as if no random access was possible. This means the sizes of stored entries need to be
     * known before the actual entry data is written.
     * </p>
     *
     * @param file the file that will become the last part of the split archive
     * @param zipSplitSize maximum size of a single part of the split archive created by this stream. Must be between 64kB and about 4GB.
     *
     * @throws IOException on error
     * @throws IllegalArgumentException if zipSplitSize is not in the required range
     * @since 1.20
     */
    public ZipArchiveOutputStream(final File file, final long zipSplitSize) throws IOException {
        this(file.toPath(), zipSplitSize);
    }

    /**
     * Creates a new ZIP OutputStream filtering the underlying stream.
     *
     * @param out the outputstream to zip
     */
    public ZipArchiveOutputStream(final OutputStream out) {
        this.outputStream = out;
        // "true" requests raw deflate streams (no zlib header/trailer), as used in ZIP entries.
        this.def = new Deflater(level, true);
        this.streamCompressor = StreamCompressor.create(out, def);
        this.isSplitZip = false;
    }

    /**
     * Creates a split ZIP Archive.
     * <p>
     * The files making up the archive will use Z01, Z02, ... extensions and the last part of it will be the given {@code
     * file}.
     * </p>
     * <p>
     * Even though the stream writes to a file this stream will behave as if no random access was possible. This means the sizes of stored entries need to be
     * known before the actual entry data is written.
     * </p>
     *
     * @param path the path to the file that will become the last part of the split archive
     * @param zipSplitSize maximum size of a single part of the split archive created by this stream. Must be between 64kB and about 4GB.
     * @throws IOException on error
     * @throws IllegalArgumentException if zipSplitSize is not in the required range
     * @since 1.22
     */
    public ZipArchiveOutputStream(final Path path, final long zipSplitSize) throws IOException {
        this.def = new Deflater(level, true);
        this.outputStream = new ZipSplitOutputStream(path, zipSplitSize);
        this.streamCompressor = StreamCompressor.create(this.outputStream, def);
        this.isSplitZip = true;
    }

    /**
     * Creates a new ZIP OutputStream writing to a Path. Will use random access if possible.
     *
     * @param file the file to ZIP to
     * @param options options specifying how the file is opened.
     * @throws IOException on error
     * @since 1.21
     */
    public ZipArchiveOutputStream(final Path file, final OpenOption... options) throws IOException {
        this.def = new Deflater(level, true);
        this.outputStream = options.length == 0 ? new FileRandomAccessOutputStream(file) : new FileRandomAccessOutputStream(file, options);
        this.streamCompressor = StreamCompressor.create(outputStream, def);
        this.isSplitZip = false;
    }

    /**
     * Creates a new ZIP OutputStream writing to a SeekableByteChannel.
     *
     * <p>
     * {@link org.apache.commons.compress.utils.SeekableInMemoryByteChannel} allows you to write to an in-memory archive using random access.
     * </p>
     *
     * @param channel the channel to ZIP to
     * @since 1.13
     */
    public ZipArchiveOutputStream(final SeekableByteChannel channel) {
        this.outputStream = new SeekableChannelRandomAccessOutputStream(channel);
        this.def = new Deflater(level, true);
        this.streamCompressor = StreamCompressor.create(outputStream, def);
        this.isSplitZip = false;
    }

    /**
     * Adds an archive entry with a raw input stream.
     * <p>
     * If crc, size and compressed size are supplied on the entry, these values will be used as-is.
 Zip64 status is re-established based on the settings in this
     * stream, and the supplied value is ignored.
     * </p>
     * <p>
     * The entry is put and closed immediately.
     * </p>
     *
     * @param entry The archive entry to add
     * @param rawStream The raw input stream of a different entry. May be compressed/encrypted.
     * @throws IOException If copying fails
     */
    public void addRawArchiveEntry(final ZipArchiveEntry entry, final InputStream rawStream) throws IOException {
        // Work on a copy so the caller's entry is never mutated.
        final ZipArchiveEntry ae = new ZipArchiveEntry(entry);
        if (hasZip64Extra(ae)) {
            // Will be re-added as required. this may make the file generated with this method
            // somewhat smaller than standard mode,
            // since standard mode is unable to remove the ZIP 64 header.
            ae.removeExtraField(Zip64ExtendedInformationExtraField.HEADER_ID);
        }
        // "2-phase": CRC, size and compressed size are all already known,
        // so no data descriptor or header rewrite is needed.
        final boolean is2PhaseSource = ae.getCrc() != ZipArchiveEntry.CRC_UNKNOWN && ae.getSize() != ArchiveEntry.SIZE_UNKNOWN
                && ae.getCompressedSize() != ArchiveEntry.SIZE_UNKNOWN;
        putArchiveEntry(ae, is2PhaseSource);
        copyFromZipInputStream(rawStream);
        closeCopiedEntry(is2PhaseSource);
    }

    /**
     * Adds UnicodeExtra fields for name and file comment if mode is ALWAYS or the data cannot be encoded using the configured encoding.
     *
     * @param ze        the entry to decorate
     * @param encodable whether the entry's name can be encoded with the configured encoding
     * @param name      the already-encoded file name bytes
     * @throws IOException if encoding the comment fails
     */
    private void addUnicodeExtraFields(final ZipArchiveEntry ze, final boolean encodable, final ByteBuffer name) throws IOException {
        if (createUnicodeExtraFields == UnicodeExtraFieldPolicy.ALWAYS || !encodable) {
            ze.addExtraField(new UnicodePathExtraField(ze.getName(), name.array(), name.arrayOffset(), name.limit() - name.position()));
        }

        final String comm = ze.getComment();
        if (comm != null && !comm.isEmpty()) {

            final boolean commentEncodable = zipEncoding.canEncode(comm);

            if (createUnicodeExtraFields == UnicodeExtraFieldPolicy.ALWAYS || !commentEncodable) {
                final ByteBuffer commentB = getEntryEncoding(ze).encode(comm);
                ze.addExtraField(new UnicodeCommentExtraField(comm, commentB.array(), commentB.arrayOffset(), commentB.limit() - commentB.position()));
            }
        }
    }

    /**
     * Whether this stream is able to write the given entry.
     * <p>
     * May return false if it is set up to use encryption or a compression method that hasn't been implemented yet.
     * </p>
     *
     * @param ae the entry to test
     * @return true if the entry is a ZipArchiveEntry whose method and features this stream can write
     * @since 1.1
     */
    @Override
    public boolean canWriteEntryData(final ArchiveEntry ae) {
        if (ae instanceof ZipArchiveEntry) {
            final ZipArchiveEntry zae = (ZipArchiveEntry) ae;
            // IMPLODING and UNSHRINKING are read-only methods in this package.
            return zae.getMethod() != ZipMethod.IMPLODING.getCode() && zae.getMethod() != ZipMethod.UNSHRINKING.getCode() && ZipUtil.canHandleEntryData(zae);
        }
        return false;
    }

    /**
     * Verifies the sizes aren't too big in the Zip64Mode.Never case and returns whether the entry would require a Zip64 extra field.
     */
    private boolean checkIfNeedsZip64(final Zip64Mode effectiveMode) throws ZipException {
        final boolean actuallyNeedsZip64 = isZip64Required(entry.entry, effectiveMode);
        if (actuallyNeedsZip64 && effectiveMode == Zip64Mode.Never) {
            throw new Zip64RequiredException(Zip64RequiredException.getEntryTooBigMessage(entry.entry));
        }
        return actuallyNeedsZip64;
    }

    /**
     * Closes this output stream and releases any system resources associated with the stream.
     *
     * @throws IOException if an I/O error occurs.
     * @throws Zip64RequiredException if the archive's size exceeds 4 GByte or there are more than 65535 entries inside the archive and {@link #setUseZip64} is
     *             {@link Zip64Mode#Never}.
     */
    @Override
    public void close() throws IOException {
        try {
            if (!finished) {
                finish();
            }
        } finally {
            // Close the underlying stream even if finish() threw.
            destroy();
        }
    }

    /**
     * Writes all necessary data for this entry.
     *
     * @throws IOException on error
     * @throws Zip64RequiredException if the entry's uncompressed or compressed size exceeds 4 GByte and {@link #setUseZip64} is {@link Zip64Mode#Never}.
     */
    @Override
    public void closeArchiveEntry() throws IOException {
        preClose();

        flushDeflater();

        final long bytesWritten = streamCompressor.getTotalBytesWritten() - entry.dataStart;
        final long realCrc = streamCompressor.getCrc32();
        entry.bytesRead = streamCompressor.getBytesRead();
        final Zip64Mode effectiveMode = getEffectiveZip64Mode(entry.entry);
        final boolean actuallyNeedsZip64 = handleSizesAndCrc(bytesWritten, realCrc, effectiveMode);
        closeEntry(actuallyNeedsZip64, false);
        streamCompressor.reset();
    }

    /**
     * Writes all necessary data for this entry.
     *
     * @param phased This entry is second phase of a 2-phase ZIP creation, size, compressed size and crc are known in ZipArchiveEntry
     * @throws IOException on error
     * @throws Zip64RequiredException if the entry's uncompressed or compressed size exceeds 4 GByte and {@link #setUseZip64} is {@link Zip64Mode#Never}.
     */
    private void closeCopiedEntry(final boolean phased) throws IOException {
        preClose();
        entry.bytesRead = entry.entry.getSize();
        final Zip64Mode effectiveMode = getEffectiveZip64Mode(entry.entry);
        final boolean actuallyNeedsZip64 = checkIfNeedsZip64(effectiveMode);
        closeEntry(actuallyNeedsZip64, phased);
    }

    /**
     * Finishes the current entry: rewrites the local header sizes/CRC when the
     * output is seekable, otherwise emits a data descriptor (unless phased).
     */
    private void closeEntry(final boolean actuallyNeedsZip64, final boolean phased) throws IOException {
        if (!phased && outputStream instanceof RandomAccessOutputStream) {
            // Output is seekable: go back and patch the local file header with the now-known values.
            rewriteSizesAndCrc(actuallyNeedsZip64);
        }

        if (!phased) {
            writeDataDescriptor(entry.entry);
        }
        entry = null;
    }

    /**
     * Copies already-compressed entry data verbatim from the given stream.
     */
    private void copyFromZipInputStream(final InputStream src) throws IOException {
        if (entry == null) {
            throw new IllegalStateException("No current entry");
        }
        ZipUtil.checkRequestedFeatures(entry.entry);
        entry.hasWritten = true;
        int length;
        while ((length = src.read(copyBuffer)) >= 0) {
            streamCompressor.writeCounted(copyBuffer, 0, length);
            count(length);
        }
    }

    /**
     * Creates a new ZIP entry taking some information from the given file and using the provided name.
     * <p>
     * The name will be adjusted to end with a forward slash "/" if the file is a directory. If the file is not a directory a potential trailing forward slash
     * will be stripped from the entry name.
     * </p>
     * <p>
     * Must not be used if the stream has already been closed.
     * </p>
     *
     * @param inputFile file to create the entry from.
     * @param entryName name of the entry.
     * @return a new instance.
     * @throws IOException if the stream has already been finished.
     */
    @Override
    public ZipArchiveEntry createArchiveEntry(final File inputFile, final String entryName) throws IOException {
        if (finished) {
            throw new IOException("Stream has already been finished");
        }
        return new ZipArchiveEntry(inputFile, entryName);
    }

    /**
     * Creates a new ZIP entry taking some information from the given file and using the provided name.
     * <p>
     * The name will be adjusted to end with a forward slash "/" if the file is a directory. If the file is not a directory a potential trailing forward slash
     * will be stripped from the entry name.
     * </p>
     * <p>
     * Must not be used if the stream has already been closed.
     * </p>
     *
     * @param inputPath path to create the entry from.
     * @param entryName name of the entry.
     * @param options options indicating how symbolic links are handled.
     * @return a new instance.
     * @throws IOException if an I/O error occurs.
     * @since 1.21
     */
    @Override
    public ZipArchiveEntry createArchiveEntry(final Path inputPath, final String entryName, final LinkOption... options) throws IOException {
        if (finished) {
            throw new IOException("Stream has already been finished");
        }
        return new ZipArchiveEntry(inputPath, entryName);
    }

    /**
     * Builds the central file header record for the given entry, deciding
     * whether a Zip64 extra field is required.
     */
    private byte[] createCentralFileHeader(final ZipArchiveEntry ze) throws IOException {

        final EntryMetaData entryMetaData = metaData.get(ze);
        final boolean needsZip64Extra = hasZip64Extra(ze) || ze.getCompressedSize() >= ZipConstants.ZIP64_MAGIC || ze.getSize() >= ZipConstants.ZIP64_MAGIC
                || entryMetaData.offset >= ZipConstants.ZIP64_MAGIC || ze.getDiskNumberStart() >= ZipConstants.ZIP64_MAGIC_SHORT
                || zip64Mode == Zip64Mode.Always || zip64Mode == Zip64Mode.AlwaysWithCompatibility;

        if (needsZip64Extra && zip64Mode == Zip64Mode.Never) {
            // must be the offset that is too big, otherwise an
            // exception would have been thrown in putArchiveEntry or
            // closeArchiveEntry
            throw new Zip64RequiredException(Zip64RequiredException.ARCHIVE_TOO_BIG_MESSAGE);
        }

        handleZip64Extra(ze, entryMetaData.offset, needsZip64Extra);

        return createCentralFileHeader(ze, getName(ze), entryMetaData, needsZip64Extra);
    }

    /**
     * Writes the central file header entry.
     *
     * @param ze the entry to write
     * @param name The encoded name
     * @param entryMetaData meta data for this file
     * @throws IOException on error
     */
    private byte[] createCentralFileHeader(final ZipArchiveEntry ze, final ByteBuffer name, final EntryMetaData entryMetaData, final boolean needsZip64Extra)
            throws IOException {
        if (isSplitZip) {
            // calculate the disk number for every central file header,
            // this will be used in writing End Of Central Directory and Zip64 End Of Central Directory
            final int currentSplitSegment = ((ZipSplitOutputStream) this.outputStream).getCurrentSplitSegmentIndex();
            if (numberOfCDInDiskData.get(currentSplitSegment) == null) {
                numberOfCDInDiskData.put(currentSplitSegment, 1);
            } else {
                final int originalNumberOfCD = numberOfCDInDiskData.get(currentSplitSegment);
                numberOfCDInDiskData.put(currentSplitSegment, originalNumberOfCD + 1);
            }
        }

        final byte[] extra = ze.getCentralDirectoryExtra();
        final int extraLength = extra.length;

        // file comment length
        String comm = ze.getComment();
        if (comm == null) {
            comm = "";
        }

        final ByteBuffer commentB = getEntryEncoding(ze).encode(comm);
        final int nameLen = name.limit() - name.position();
        final int commentLen = commentB.limit() - commentB.position();
        final int len = CFH_FILENAME_OFFSET + nameLen + extraLength + commentLen;
        final byte[] buf = new byte[len];

        System.arraycopy(CFH_SIG, 0, buf, CFH_SIG_OFFSET, ZipConstants.WORD);

        // version made by
        // CheckStyle:MagicNumber OFF
        ZipShort.putShort(ze.getPlatform() << 8 | (!hasUsedZip64 ? ZipConstants.DATA_DESCRIPTOR_MIN_VERSION : ZipConstants.ZIP64_MIN_VERSION), buf,
                CFH_VERSION_MADE_BY_OFFSET);

        final int zipMethod = ze.getMethod();
        final boolean encodable = zipEncoding.canEncode(ze.getName());
        ZipShort.putShort(versionNeededToExtract(zipMethod, needsZip64Extra, entryMetaData.usesDataDescriptor), buf, CFH_VERSION_NEEDED_OFFSET);
        getGeneralPurposeBits(!encodable && fallbackToUTF8, entryMetaData.usesDataDescriptor).encode(buf, CFH_GPB_OFFSET);

        // compression method
        ZipShort.putShort(zipMethod, buf, CFH_METHOD_OFFSET);

        // last mod. time and date
        ZipUtil.toDosTime(ze.getTime(), buf, CFH_TIME_OFFSET);

        // CRC
        // compressed length
        // uncompressed length
        ZipLong.putLong(ze.getCrc(), buf, CFH_CRC_OFFSET);
        if (ze.getCompressedSize() >= ZipConstants.ZIP64_MAGIC || ze.getSize() >= ZipConstants.ZIP64_MAGIC || zip64Mode == Zip64Mode.Always
                || zip64Mode == Zip64Mode.AlwaysWithCompatibility) {
            // real sizes live in the Zip64 extra field; the header carries the 0xFFFFFFFF sentinel
            ZipLong.ZIP64_MAGIC.putLong(buf, CFH_COMPRESSED_SIZE_OFFSET);
            ZipLong.ZIP64_MAGIC.putLong(buf, CFH_ORIGINAL_SIZE_OFFSET);
        } else {
            ZipLong.putLong(ze.getCompressedSize(), buf, CFH_COMPRESSED_SIZE_OFFSET);
            ZipLong.putLong(ze.getSize(), buf, CFH_ORIGINAL_SIZE_OFFSET);
        }

        ZipShort.putShort(nameLen, buf, CFH_FILENAME_LENGTH_OFFSET);

        // extra field length
        ZipShort.putShort(extraLength, buf, CFH_EXTRA_LENGTH_OFFSET);

        ZipShort.putShort(commentLen, buf, CFH_COMMENT_LENGTH_OFFSET);

        // disk number start
        if (isSplitZip) {
            if (ze.getDiskNumberStart() >= ZipConstants.ZIP64_MAGIC_SHORT || zip64Mode == Zip64Mode.Always) {
                ZipShort.putShort(ZipConstants.ZIP64_MAGIC_SHORT, buf, CFH_DISK_NUMBER_OFFSET);
            } else {
                ZipShort.putShort((int) ze.getDiskNumberStart(), buf, CFH_DISK_NUMBER_OFFSET);
            }
        } else {
            System.arraycopy(ZERO, 0, buf, CFH_DISK_NUMBER_OFFSET, ZipConstants.SHORT);
        }

        // internal file attributes
        ZipShort.putShort(ze.getInternalAttributes(), buf, CFH_INTERNAL_ATTRIBUTES_OFFSET);

        // external file attributes
        ZipLong.putLong(ze.getExternalAttributes(), buf, CFH_EXTERNAL_ATTRIBUTES_OFFSET);

        // relative offset of LFH
        if (entryMetaData.offset >= ZipConstants.ZIP64_MAGIC || zip64Mode == Zip64Mode.Always) {
            ZipLong.putLong(ZipConstants.ZIP64_MAGIC, buf, CFH_LFH_OFFSET);
        } else {
            ZipLong.putLong(Math.min(entryMetaData.offset, ZipConstants.ZIP64_MAGIC), buf, CFH_LFH_OFFSET);
        }

        // file name
        System.arraycopy(name.array(), name.arrayOffset(), buf, CFH_FILENAME_OFFSET, nameLen);

        final int extraStart = CFH_FILENAME_OFFSET + nameLen;
        System.arraycopy(extra, 0, buf, extraStart, extraLength);

        final int commentStart = extraStart + extraLength;

        // file comment
        System.arraycopy(commentB.array(), commentB.arrayOffset(), buf, commentStart, commentLen);
        return buf;
    }

    /**
     * Builds the local file header record for the given entry, including any
     * alignment padding requested via a ResourceAlignmentExtraField.
     */
    private byte[] createLocalFileHeader(final ZipArchiveEntry ze, final ByteBuffer name, final boolean encodable, final boolean phased,
            final long archiveOffset) {
        final ZipExtraField oldEx = ze.getExtraField(ResourceAlignmentExtraField.ID);
        if (oldEx != null) {
            ze.removeExtraField(ResourceAlignmentExtraField.ID);
        }
        final ResourceAlignmentExtraField oldAlignmentEx = oldEx instanceof ResourceAlignmentExtraField ? (ResourceAlignmentExtraField) oldEx : null;

        int alignment = ze.getAlignment();
        if (alignment <= 0 && oldAlignmentEx != null) {
            alignment = oldAlignmentEx.getAlignment();
        }

        if (alignment > 1 || oldAlignmentEx != null && !oldAlignmentEx.allowMethodChange()) {
            final int oldLength = LFH_FILENAME_OFFSET + name.limit() - name.position() + ze.getLocalFileDataExtra().length;

            // padding needed so the entry data starts at a multiple of "alignment"
            // (alignment is assumed to be a power of two - TODO confirm caller enforces this)
            final int padding = (int) (-archiveOffset - oldLength - ZipExtraField.EXTRAFIELD_HEADER_SIZE - ResourceAlignmentExtraField.BASE_SIZE
                    & alignment - 1);
            ze.addExtraField(new ResourceAlignmentExtraField(alignment, oldAlignmentEx != null && oldAlignmentEx.allowMethodChange(), padding));
        }

        final byte[] extra = ze.getLocalFileDataExtra();
        final int nameLen = name.limit() - name.position();
        final int len = LFH_FILENAME_OFFSET + nameLen + extra.length;
        final byte[] buf = new byte[len];

        System.arraycopy(LFH_SIG, 0, buf, LFH_SIG_OFFSET, ZipConstants.WORD);

        // store method in local variable to prevent multiple method calls
        final int zipMethod = ze.getMethod();
        final boolean dataDescriptor = usesDataDescriptor(zipMethod, phased);

        ZipShort.putShort(versionNeededToExtract(zipMethod, hasZip64Extra(ze), dataDescriptor), buf, LFH_VERSION_NEEDED_OFFSET);

        final GeneralPurposeBit generalPurposeBit = getGeneralPurposeBits(!encodable && fallbackToUTF8, dataDescriptor);
        generalPurposeBit.encode(buf, LFH_GPB_OFFSET);

        // compression method
        ZipShort.putShort(zipMethod, buf, LFH_METHOD_OFFSET);

        ZipUtil.toDosTime(ze.getTime(), buf, LFH_TIME_OFFSET);

        // CRC
        if (phased || !(zipMethod == DEFLATED || outputStream instanceof RandomAccessOutputStream)) {
            ZipLong.putLong(ze.getCrc(), buf, LFH_CRC_OFFSET);
        } else {
            // CRC not yet known; zero for now (data descriptor or later rewrite supplies it)
            System.arraycopy(LZERO, 0, buf, LFH_CRC_OFFSET, ZipConstants.WORD);
        }

        // compressed length
        // uncompressed length
        if (hasZip64Extra(entry.entry)) {
            // point to ZIP64 extended information extra field for
            // sizes, may get rewritten once sizes are known if
            // stream is seekable
            ZipLong.ZIP64_MAGIC.putLong(buf, LFH_COMPRESSED_SIZE_OFFSET);
            ZipLong.ZIP64_MAGIC.putLong(buf, LFH_ORIGINAL_SIZE_OFFSET);
        } else if (phased) {
            ZipLong.putLong(ze.getCompressedSize(), buf, LFH_COMPRESSED_SIZE_OFFSET);
            ZipLong.putLong(ze.getSize(), buf, LFH_ORIGINAL_SIZE_OFFSET);
        } else if (zipMethod == DEFLATED || outputStream instanceof RandomAccessOutputStream) {
            System.arraycopy(LZERO, 0, buf, LFH_COMPRESSED_SIZE_OFFSET, ZipConstants.WORD);
            System.arraycopy(LZERO, 0, buf, LFH_ORIGINAL_SIZE_OFFSET, ZipConstants.WORD);
        } else { // Stored
            ZipLong.putLong(ze.getSize(), buf, LFH_COMPRESSED_SIZE_OFFSET);
            ZipLong.putLong(ze.getSize(), buf, LFH_ORIGINAL_SIZE_OFFSET);
        }
        // file name length
        ZipShort.putShort(nameLen, buf, LFH_FILENAME_LENGTH_OFFSET);

        // extra field length
        ZipShort.putShort(extra.length, buf, LFH_EXTRA_LENGTH_OFFSET);

        // file name
        System.arraycopy(name.array(), name.arrayOffset(), buf, LFH_FILENAME_OFFSET, nameLen);

        // extra fields
        System.arraycopy(extra, 0, buf, LFH_FILENAME_OFFSET + nameLen, extra.length);

        return buf;
    }

    /**
     * Writes next block of compressed data to the output stream.
     *
     * @throws IOException on error
     */
    protected final void deflate() throws IOException {
        streamCompressor.deflate();
    }

    /**
     * Closes the underlying stream/file without finishing the archive, the result will likely be a corrupt archive.
     * <p>
     * This method only exists to support tests that generate corrupt archives so they can clean up any temporary files.
     * </p>
     */
    void destroy() throws IOException {
        if (outputStream != null) {
            outputStream.close();
        }
    }

    /**
     * {@inheritDoc}
     *
     * @throws Zip64RequiredException if the archive's size exceeds 4 GByte or there are more than 65535 entries inside the archive and {@link #setUseZip64} is
     *                                {@link Zip64Mode#Never}.
     */
    @Override
    public void finish() throws IOException {
        if (finished) {
            throw new IOException("This archive has already been finished");
        }

        if (entry != null) {
            throw new IOException("This archive contains unclosed entries.");
        }

        // Remember where the central directory starts so its length can be
        // computed after all central file headers have been written.
        final long cdOverallOffset = streamCompressor.getTotalBytesWritten();
        cdOffset = cdOverallOffset;
        if (isSplitZip) {
            // when creating a split zip, the offset should be
            // the offset to the corresponding segment disk
            final ZipSplitOutputStream zipSplitOutputStream = (ZipSplitOutputStream) this.outputStream;
            cdOffset = zipSplitOutputStream.getCurrentSplitSegmentBytesWritten();
            cdDiskNumberStart = zipSplitOutputStream.getCurrentSplitSegmentIndex();
        }
        writeCentralDirectoryInChunks();

        cdLength = streamCompressor.getTotalBytesWritten() - cdOverallOffset;

        // calculate the length of end of central directory, as it may be used in writeZip64CentralDirectory
        final ByteBuffer commentData = this.zipEncoding.encode(comment);
        final long commentLength = (long) commentData.limit() - commentData.position();
        eocdLength = ZipConstants.WORD /* length of EOCD_SIG */
                + ZipConstants.SHORT /* number of this disk */
                + ZipConstants.SHORT /* disk number of start of central directory */
                + ZipConstants.SHORT /* total number of entries on this disk */
                + ZipConstants.SHORT /* total number of entries */
                + ZipConstants.WORD /* size of central directory */
                + ZipConstants.WORD /* offset of start of central directory */
                + ZipConstants.SHORT /* ZIP comment length */
                + commentLength /* ZIP comment */;

        // ZIP64 EOCD (if any) must precede the classic EOCD record.
        writeZip64CentralDirectory();
        writeCentralDirectoryEnd();
        metaData.clear();
        entries.clear();
        streamCompressor.close();
        if (isSplitZip) {
            // trigger the ZipSplitOutputStream to write the final split segment
            outputStream.close();
        }
        finished = true;
    }

    /**
     * Flushes this output stream and forces any buffered output bytes to be written out to the stream.
     *
     * @throws IOException if an I/O error occurs.
     */
    @Override
    public void flush() throws IOException {
        if (outputStream != null) {
            outputStream.flush();
        }
    }

    /**
     * Ensures all bytes sent to the deflater are written to the stream.
     */
    private void flushDeflater() throws IOException {
        // Only DEFLATED entries buffer data inside the deflater.
        if (entry.entry.getMethod() == DEFLATED) {
            streamCompressor.flushDeflater();
        }
    }

    /**
     * Returns the total number of bytes written to this stream.
     *
     * @return the number of written bytes
     * @since 1.22
     */
    @Override
    public long getBytesWritten() {
        return streamCompressor.getTotalBytesWritten();
    }

    /**
     * If the mode is AsNeeded and the entry is a compressed entry of unknown size that gets written to a non-seekable stream then change the default to Never.
     *
     * @since 1.3
     */
    private Zip64Mode getEffectiveZip64Mode(final ZipArchiveEntry ze) {
        // AsNeeded cannot work without either a known size or a random access
        // output, because an unnecessary ZIP64 extra could not be erased later.
        if (zip64Mode != Zip64Mode.AsNeeded || outputStream instanceof RandomAccessOutputStream ||
                ze.getMethod() != DEFLATED || ze.getSize() != ArchiveEntry.SIZE_UNKNOWN) {
            return zip64Mode;
        }
        return Zip64Mode.Never;
    }

    /**
     * The encoding to use for file names and the file comment.
     *
     * @return null if using the platform's default character encoding.
     */
    public String getEncoding() {
        return charsetName;
    }

    /**
     * Returns the encoding to use for the given entry's name: falls back to UTF-8 when the configured encoding cannot represent the name and
     * {@link #setFallbackToUTF8} has been enabled.
     */
    private ZipEncoding getEntryEncoding(final ZipArchiveEntry ze) {
        final boolean encodable = zipEncoding.canEncode(ze.getName());
        return !encodable && fallbackToUTF8 ? ZipEncodingHelper.ZIP_ENCODING_UTF_8 : zipEncoding;
    }

    /**
     * Builds the general purpose bit flag for an entry: language encoding flag and, optionally, the data descriptor bit.
     */
    private GeneralPurposeBit getGeneralPurposeBits(final boolean utfFallback, final boolean usesDataDescriptor) {
        final GeneralPurposeBit b = new GeneralPurposeBit();
        b.useUTF8ForNames(useUTF8Flag || utfFallback);
        if (usesDataDescriptor) {
            b.useDataDescriptor(true);
        }
        return b;
    }

    /**
     * Encodes the entry's name using the encoding chosen by {@link #getEntryEncoding}.
     */
    private ByteBuffer getName(final ZipArchiveEntry ze) throws IOException {
        return getEntryEncoding(ze).encode(ze.getName());
    }

    /**
     * Gets the existing ZIP64 extended information extra field or create a new one and add it to the entry.
     *
     * @since 1.3
     */
    private Zip64ExtendedInformationExtraField getZip64Extra(final ZipArchiveEntry ze) {
        // Record whether this entry is the first one to require ZIP64 so that
        // hasUsedZip64 can be reset again if the requirement proves unnecessary
        // (see rewriteSizesAndCrc).
        if (entry != null) {
            entry.causedUseOfZip64 = !hasUsedZip64;
        }
        hasUsedZip64 = true;
        final ZipExtraField extra = ze.getExtraField(Zip64ExtendedInformationExtraField.HEADER_ID);
        Zip64ExtendedInformationExtraField z64 = extra instanceof Zip64ExtendedInformationExtraField ?
                (Zip64ExtendedInformationExtraField) extra : null;
        if (z64 == null) {
            /*
             * System.err.println("Adding z64 for " + ze.getName() + ", method: " + ze.getMethod() + " (" + (ze.getMethod() == STORED) + ")" + ", channel: " +
             * (channel != null));
             */
            z64 = new Zip64ExtendedInformationExtraField();
        }

        // even if the field is there already, make sure it is the first one
        ze.addAsFirstExtraField(z64);

        return z64;
    }

    /**
     * Ensures the current entry's size and CRC information is set to the values just written, verifies it isn't too big in the Zip64Mode.Never case and returns
     * whether the entry would require a Zip64 extra field.
     */
    private boolean handleSizesAndCrc(final long bytesWritten, final long crc, final Zip64Mode effectiveMode) throws ZipException {
        if (entry.entry.getMethod() == DEFLATED) {
            /*
             * It turns out def.getBytesRead() returns wrong values if the size exceeds 4 GB on Java < Java7 entry.entry.setSize(def.getBytesRead());
             */
            entry.entry.setSize(entry.bytesRead);
            entry.entry.setCompressedSize(bytesWritten);
            entry.entry.setCrc(crc);

        } else if (!(outputStream instanceof RandomAccessOutputStream)) {
            // STORED to a non-seekable output: size and CRC had to be supplied
            // up-front, so verify them against what was actually written.
            if (entry.entry.getCrc() != crc) {
                throw new ZipException("Bad CRC checksum for entry " + entry.entry.getName() + ": " + Long.toHexString(entry.entry.getCrc()) + " instead of "
                        + Long.toHexString(crc));
            }

            if (entry.entry.getSize() != bytesWritten) {
                throw new ZipException("Bad size for entry " + entry.entry.getName() + ": " + entry.entry.getSize() + " instead of " + bytesWritten);
            }
        } else { /* method is STORED and we used SeekableByteChannel */
            entry.entry.setSize(bytesWritten);
            entry.entry.setCompressedSize(bytesWritten);
            entry.entry.setCrc(crc);
        }

        return checkIfNeedsZip64(effectiveMode);
    }

    /**
     * If the entry needs Zip64 extra information
inside the central directory then configure its data.
     */
    private void handleZip64Extra(final ZipArchiveEntry ze, final long lfhOffset, final boolean needsZip64Extra) {
        if (needsZip64Extra) {
            final Zip64ExtendedInformationExtraField z64 = getZip64Extra(ze);
            if (ze.getCompressedSize() >= ZipConstants.ZIP64_MAGIC || ze.getSize() >= ZipConstants.ZIP64_MAGIC || zip64Mode == Zip64Mode.Always
                    || zip64Mode == Zip64Mode.AlwaysWithCompatibility) {
                z64.setCompressedSize(new ZipEightByteInteger(ze.getCompressedSize()));
                z64.setSize(new ZipEightByteInteger(ze.getSize()));
            } else {
                // reset value that may have been set for LFH
                z64.setCompressedSize(null);
                z64.setSize(null);
            }

            final boolean needsToEncodeLfhOffset = lfhOffset >= ZipConstants.ZIP64_MAGIC || zip64Mode == Zip64Mode.Always;
            final boolean needsToEncodeDiskNumberStart = ze.getDiskNumberStart() >= ZipConstants.ZIP64_MAGIC_SHORT || zip64Mode == Zip64Mode.Always;

            // NOTE(review): encoding the disk number also forces encoding the header
            // offset - presumably because the ZIP64 extra stores its fields in a fixed
            // order; confirm against the field layout in Zip64ExtendedInformationExtraField.
            if (needsToEncodeLfhOffset || needsToEncodeDiskNumberStart) {
                z64.setRelativeHeaderOffset(new ZipEightByteInteger(lfhOffset));
            }
            if (needsToEncodeDiskNumberStart) {
                z64.setDiskStartNumber(new ZipLong(ze.getDiskNumberStart()));
            }
            // re-serialize the entry's extra field data after the changes above
            ze.setExtra();
        }
    }

    /**
     * Is there a ZIP64 extended information extra field for the entry?
     *
     * @since 1.3
     */
    private boolean hasZip64Extra(final ZipArchiveEntry ze) {
        return ze.getExtraField(Zip64ExtendedInformationExtraField.HEADER_ID) instanceof Zip64ExtendedInformationExtraField;
    }

    /**
     * This method indicates whether this archive is writing to a seekable stream (i.e., to a random access file).
     * <p>
     * For seekable streams, you don't need to calculate the CRC or uncompressed size for {@link #STORED} entries before invoking
     * {@link #putArchiveEntry(ZipArchiveEntry)}.
     * </p>
     *
     * @return true if seekable
     */
    public boolean isSeekable() {
        return outputStream instanceof RandomAccessOutputStream;
    }

    /**
     * Whether either size of the entry exceeds what a 32-bit ZIP field can hold.
     */
    private boolean isTooLargeForZip32(final ZipArchiveEntry zipArchiveEntry) {
        return zipArchiveEntry.getSize() >= ZipConstants.ZIP64_MAGIC || zipArchiveEntry.getCompressedSize() >= ZipConstants.ZIP64_MAGIC;
    }

    /**
     * Whether the entry must be written with ZIP64 extensions under the requested mode.
     */
    private boolean isZip64Required(final ZipArchiveEntry entry1, final Zip64Mode requestedMode) {
        return requestedMode == Zip64Mode.Always || requestedMode == Zip64Mode.AlwaysWithCompatibility || isTooLargeForZip32(entry1);
    }

    /**
     * Common checks performed before the current entry is closed.
     */
    private void preClose() throws IOException {
        if (finished) {
            throw new IOException("Stream has already been finished");
        }

        if (entry == null) {
            throw new IOException("No current entry to close");
        }

        // An entry that never received any data still gets a zero-length write
        // so the write-path bookkeeping runs for it.
        if (!entry.hasWritten) {
            write(ByteUtils.EMPTY_BYTE_ARRAY, 0, 0);
        }
    }

    /**
     * {@inheritDoc}
     *
     * @throws ClassCastException     if entry is not an instance of ZipArchiveEntry
     * @throws Zip64RequiredException if the entry's uncompressed or compressed size is known to exceed 4 GByte and {@link #setUseZip64} is
     *                                {@link Zip64Mode#Never}.
     */
    @Override
    public void putArchiveEntry(final ZipArchiveEntry archiveEntry) throws IOException {
        putArchiveEntry(archiveEntry, false);
    }

    /**
     * Writes the headers for an archive entry to the output stream. The caller must then write the content to the stream and call {@link #closeArchiveEntry()}
     * to complete the process.
     *
     * @param archiveEntry The archiveEntry
     * @param phased       If true size, compressedSize and crc required to be known up-front in the archiveEntry
     * @throws ClassCastException     if entry is not an instance of ZipArchiveEntry
     * @throws Zip64RequiredException if the entry's uncompressed or compressed size is known to exceed 4 GByte and {@link #setUseZip64} is
     *                                {@link Zip64Mode#Never}.
     */
    private void putArchiveEntry(final ZipArchiveEntry archiveEntry, final boolean phased) throws IOException {
        if (finished) {
            throw new IOException("Stream has already been finished");
        }

        // Implicitly close a previous entry that is still open.
        if (entry != null) {
            closeArchiveEntry();
        }

        entry = new CurrentEntry(archiveEntry);
        entries.add(entry.entry);

        setDefaults(entry.entry);

        final Zip64Mode effectiveMode = getEffectiveZip64Mode(entry.entry);
        validateSizeInformation(effectiveMode);

        if (shouldAddZip64Extra(entry.entry, effectiveMode)) {

            final Zip64ExtendedInformationExtraField z64 = getZip64Extra(entry.entry);

            final ZipEightByteInteger size;
            final ZipEightByteInteger compressedSize;
            if (phased) {
                // sizes are already known
                size = new ZipEightByteInteger(entry.entry.getSize());
                compressedSize = new ZipEightByteInteger(entry.entry.getCompressedSize());
            } else if (entry.entry.getMethod() == STORED && entry.entry.getSize() != ArchiveEntry.SIZE_UNKNOWN) {
                // actually, we already know the sizes
                compressedSize = size = new ZipEightByteInteger(entry.entry.getSize());
            } else {
                // just a placeholder, real data will be in data
                // descriptor or inserted later via SeekableByteChannel
                compressedSize = size = ZipEightByteInteger.ZERO;
            }
            z64.setSize(size);
            z64.setCompressedSize(compressedSize);
            entry.entry.setExtra();
        }

        // Apply a pending compression level change lazily, only when a DEFLATED
        // entry actually needs the deflater.
        if (entry.entry.getMethod() == DEFLATED && hasCompressionLevelChanged) {
            def.setLevel(level);
            hasCompressionLevelChanged = false;
        }
        writeLocalFileHeader(archiveEntry, phased);
    }

    /**
     * When using random access output, write the local file header and potentially the ZIP64 extra containing the correct CRC and compressed/uncompressed
     * sizes.
     */
    private void rewriteSizesAndCrc(final boolean actuallyNeedsZip64) throws IOException {
        final RandomAccessOutputStream randomStream = (RandomAccessOutputStream) outputStream;
        long dataStart = entry.localDataStart;
        if (randomStream instanceof ZipSplitOutputStream) {
            // NOTE(review): localDataStart is segment-relative for split archives;
            // calculateDiskPosition maps (disk, offset) to a writable position.
            dataStart = ((ZipSplitOutputStream) randomStream).calculateDiskPosition(entry.entry.getDiskNumberStart(), dataStart);
        }

        // localDataStart points at the CRC field of the local file header
        // (see writeLocalFileHeader), so CRC, compressed size and size follow.
        long position = dataStart;
        randomStream.writeFully(ZipLong.getBytes(entry.entry.getCrc()), position); position += ZipConstants.WORD;
        if (!hasZip64Extra(entry.entry) || !actuallyNeedsZip64) {
            randomStream.writeFully(ZipLong.getBytes(entry.entry.getCompressedSize()), position); position += ZipConstants.WORD;
            randomStream.writeFully(ZipLong.getBytes(entry.entry.getSize()), position); position += ZipConstants.WORD;
        } else {
            // The 32-bit fields hold the ZIP64 magic; real sizes go into the extra field below.
            randomStream.writeFully(ZipLong.ZIP64_MAGIC.getBytes(), position); position += ZipConstants.WORD;
            randomStream.writeFully(ZipLong.ZIP64_MAGIC.getBytes(), position); position += ZipConstants.WORD;
        }

        if (hasZip64Extra(entry.entry)) {
            final ByteBuffer name = getName(entry.entry);
            final int nameLen = name.limit() - name.position();
            // seek to ZIP64 extra, skip header and size information
            position = dataStart + 3 * ZipConstants.WORD + 2 * ZipConstants.SHORT + nameLen + 2 * ZipConstants.SHORT;
            // inside the ZIP64 extra uncompressed size comes
            // first, unlike the LFH, CD or data descriptor
            randomStream.writeFully(ZipEightByteInteger.getBytes(entry.entry.getSize()), position); position += ZipConstants.DWORD;
            randomStream.writeFully(ZipEightByteInteger.getBytes(entry.entry.getCompressedSize()), position); position += ZipConstants.DWORD;

            if (!actuallyNeedsZip64) {
                // do some cleanup:
                // * rewrite version needed to extract
                position = dataStart - 5 * ZipConstants.SHORT;
                randomStream.writeFully(ZipShort.getBytes(versionNeededToExtract(entry.entry.getMethod(), false, false)), position);
                position += ZipConstants.SHORT;

                // * remove ZIP64 extra, so it doesn't get written
                // to the central directory
                entry.entry.removeExtraField(Zip64ExtendedInformationExtraField.HEADER_ID);
                entry.entry.setExtra();

                // * reset hasUsedZip64 if it has been set because
                // of this entry
                if (entry.causedUseOfZip64) {
                    hasUsedZip64 = false;
                }
            }
        }
    }

    /**
     * Sets the file comment.
     *
     * @param comment the comment
     */
    public void setComment(final String comment) {
        this.comment = comment;
    }

    /**
     * Whether to create Unicode Extra Fields.
     * <p>
     * Defaults to NEVER.
     * </p>
     *
     * @param b whether to create Unicode Extra Fields.
     */
    public void setCreateUnicodeExtraFields(final UnicodeExtraFieldPolicy b) {
        createUnicodeExtraFields = b;
    }

    /**
     * Provides default values for compression method and last modification time.
     */
    private void setDefaults(final ZipArchiveEntry entry) {
        if (entry.getMethod() == -1) { // not specified
            entry.setMethod(method);
        }

        if (entry.getTime() == -1) { // not specified
            entry.setTime(System.currentTimeMillis());
        }
    }

    /**
     * The encoding to use for file names and the file comment.
     * <p>
     * For a list of possible values see <a href="https://docs.oracle.com/javase/8/docs/technotes/guides/intl/encoding.doc.html">Supported Encodings</a>.
     * Defaults to UTF-8.
1326 * </p> 1327 * 1328 * @param encoding the encoding to use for file names, use null for the platform's default encoding 1329 */ 1330 public void setEncoding(final String encoding) { 1331 this.charsetName = encoding; 1332 this.zipEncoding = ZipEncodingHelper.getZipEncoding(encoding); 1333 if (useUTF8Flag && !ZipEncodingHelper.isUTF8(encoding)) { 1334 useUTF8Flag = false; 1335 } 1336 } 1337 1338 /** 1339 * Whether to fall back to UTF and the language encoding flag if the file name cannot be encoded using the specified encoding. 1340 * <p> 1341 * Defaults to false. 1342 * </p> 1343 * 1344 * @param b whether to fall back to UTF and the language encoding flag if the file name cannot be encoded using the specified encoding. 1345 */ 1346 public void setFallbackToUTF8(final boolean b) { 1347 fallbackToUTF8 = b; 1348 } 1349 1350 /** 1351 * Sets the compression level for subsequent entries. 1352 * <p> 1353 * Default is Deflater.DEFAULT_COMPRESSION. 1354 * </p> 1355 * 1356 * @param level the compression level. 1357 * @throws IllegalArgumentException if an invalid compression level is specified. 1358 */ 1359 public void setLevel(final int level) { 1360 if (level < Deflater.DEFAULT_COMPRESSION || level > Deflater.BEST_COMPRESSION) { 1361 throw new IllegalArgumentException("Invalid compression level: " + level); 1362 } 1363 if (this.level == level) { 1364 return; 1365 } 1366 hasCompressionLevelChanged = true; 1367 this.level = level; 1368 } 1369 1370 /** 1371 * Sets the default compression method for subsequent entries. 1372 * <p> 1373 * Default is DEFLATED. 1374 * </p> 1375 * 1376 * @param method an {@code int} from java.util.zip.ZipEntry 1377 */ 1378 public void setMethod(final int method) { 1379 this.method = method; 1380 } 1381 1382 /** 1383 * Whether to set the language encoding flag if the file name encoding is UTF-8. 1384 * <p> 1385 * Defaults to true. 
1386 * </p> 1387 * 1388 * @param b whether to set the language encoding flag if the file name encoding is UTF-8 1389 */ 1390 public void setUseLanguageEncodingFlag(final boolean b) { 1391 useUTF8Flag = b && ZipEncodingHelper.isUTF8(charsetName); 1392 } 1393 1394 /** 1395 * Whether Zip64 extensions will be used. 1396 * <p> 1397 * When setting the mode to {@link Zip64Mode#Never Never}, {@link #putArchiveEntry}, {@link #closeArchiveEntry}, {@link #finish} or {@link #close} may throw 1398 * a {@link Zip64RequiredException} if the entry's size or the total size of the archive exceeds 4GB or there are more than 65,536 entries inside the 1399 * archive. Any archive created in this mode will be readable by implementations that don't support Zip64. 1400 * </p> 1401 * <p> 1402 * When setting the mode to {@link Zip64Mode#Always Always}, Zip64 extensions will be used for all entries. Any archive created in this mode may be 1403 * unreadable by implementations that don't support Zip64 even if all its contents would be. 1404 * </p> 1405 * <p> 1406 * When setting the mode to {@link Zip64Mode#AsNeeded AsNeeded}, Zip64 extensions will transparently be used for those entries that require them. This mode 1407 * can only be used if the uncompressed size of the {@link ZipArchiveEntry} is known when calling {@link #putArchiveEntry} or the archive is written to a 1408 * seekable output (i.e. you have used the {@link #ZipArchiveOutputStream(java.io.File) File-arg constructor}) - this mode is not valid when the output 1409 * stream is not seekable and the uncompressed size is unknown when {@link #putArchiveEntry} is called. 1410 * </p> 1411 * <p> 1412 * If no entry inside the resulting archive requires Zip64 extensions then {@link Zip64Mode#Never Never} will create the smallest archive. 
1413 * {@link Zip64Mode#AsNeeded AsNeeded} will create a slightly bigger archive if the uncompressed size of any entry has initially been unknown and create an 1414 * archive identical to {@link Zip64Mode#Never Never} otherwise. {@link Zip64Mode#Always Always} will create an archive that is at least 24 bytes per entry 1415 * bigger than the one {@link Zip64Mode#Never Never} would create. 1416 * </p> 1417 * <p> 1418 * Defaults to {@link Zip64Mode#AsNeeded AsNeeded} unless {@link #putArchiveEntry} is called with an entry of unknown size and data is written to a 1419 * non-seekable stream - in this case the default is {@link Zip64Mode#Never Never}. 1420 * </p> 1421 * 1422 * @since 1.3 1423 * @param mode Whether Zip64 extensions will be used. 1424 */ 1425 public void setUseZip64(final Zip64Mode mode) { 1426 zip64Mode = mode; 1427 } 1428 1429 /** 1430 * Whether to add a Zip64 extended information extra field to the local file header. 1431 * <p> 1432 * Returns true if 1433 * </p> 1434 * <ul> 1435 * <li>mode is Always</li> 1436 * <li>or we already know it is going to be needed</li> 1437 * <li>or the size is unknown and we can ensure it won't hurt other implementations if we add it (i.e. we can erase its usage</li> 1438 * </ul> 1439 */ 1440 private boolean shouldAddZip64Extra(final ZipArchiveEntry entry, final Zip64Mode mode) { 1441 return mode == Zip64Mode.Always || mode == Zip64Mode.AlwaysWithCompatibility || entry.getSize() >= ZipConstants.ZIP64_MAGIC 1442 || entry.getCompressedSize() >= ZipConstants.ZIP64_MAGIC 1443 || entry.getSize() == ArchiveEntry.SIZE_UNKNOWN && outputStream instanceof RandomAccessOutputStream && mode != Zip64Mode.Never; 1444 } 1445 1446 /** 1447 * 4.4.1.4 If one of the fields in the end of central directory record is too small to hold required data, the field SHOULD be set to -1 (0xFFFF or 1448 * 0xFFFFFFFF) and the ZIP64 format record SHOULD be created. 
1449 * 1450 * @return true if zip64 End Of Central Directory is needed 1451 */ 1452 private boolean shouldUseZip64EOCD() { 1453 int numberOfThisDisk = 0; 1454 if (isSplitZip) { 1455 numberOfThisDisk = ((ZipSplitOutputStream) this.outputStream).getCurrentSplitSegmentIndex(); 1456 } 1457 final int numOfEntriesOnThisDisk = numberOfCDInDiskData.getOrDefault(numberOfThisDisk, 0); 1458 return numberOfThisDisk >= ZipConstants.ZIP64_MAGIC_SHORT /* number of this disk */ 1459 || cdDiskNumberStart >= ZipConstants.ZIP64_MAGIC_SHORT /* number of the disk with the start of the central directory */ 1460 || numOfEntriesOnThisDisk >= ZipConstants.ZIP64_MAGIC_SHORT /* total number of entries in the central directory on this disk */ 1461 || entries.size() >= ZipConstants.ZIP64_MAGIC_SHORT /* total number of entries in the central directory */ 1462 || cdLength >= ZipConstants.ZIP64_MAGIC /* size of the central directory */ 1463 || cdOffset >= ZipConstants.ZIP64_MAGIC; /* 1464 * offset of start of central directory with respect to the starting disk number 1465 */ 1466 } 1467 1468 private boolean usesDataDescriptor(final int zipMethod, final boolean phased) { 1469 return !phased && zipMethod == DEFLATED && !(outputStream instanceof RandomAccessOutputStream); 1470 } 1471 1472 /** 1473 * If the Zip64 mode is set to never, then all the data in End Of Central Directory should not exceed their limits. 
     *
     * @throws Zip64RequiredException if Zip64 is actually needed
     */
    private void validateIfZip64IsNeededInEOCD() throws Zip64RequiredException {
        // exception will only be thrown if the Zip64 mode is never while Zip64 is actually needed
        if (zip64Mode != Zip64Mode.Never) {
            return;
        }

        // The checks below mirror the EOCD fields in record order.
        int numberOfThisDisk = 0;
        if (isSplitZip) {
            numberOfThisDisk = ((ZipSplitOutputStream) this.outputStream).getCurrentSplitSegmentIndex();
        }
        if (numberOfThisDisk >= ZipConstants.ZIP64_MAGIC_SHORT) {
            throw new Zip64RequiredException(Zip64RequiredException.DISK_NUMBER_TOO_BIG_MESSAGE);
        }

        if (cdDiskNumberStart >= ZipConstants.ZIP64_MAGIC_SHORT) {
            throw new Zip64RequiredException(Zip64RequiredException.CENTRAL_DIRECTORY_DISK_NUMBER_TOO_BIG_MESSAGE);
        }

        final int numOfEntriesOnThisDisk = numberOfCDInDiskData.getOrDefault(numberOfThisDisk, 0);
        if (numOfEntriesOnThisDisk >= ZipConstants.ZIP64_MAGIC_SHORT) {
            throw new Zip64RequiredException(Zip64RequiredException.TOO_MANY_ENTRIES_ON_DISK_MESSAGE);
        }

        // number of entries
        if (entries.size() >= ZipConstants.ZIP64_MAGIC_SHORT) {
            throw new Zip64RequiredException(Zip64RequiredException.TOO_MANY_ENTRIES_MESSAGE);
        }

        if (cdLength >= ZipConstants.ZIP64_MAGIC) {
            throw new Zip64RequiredException(Zip64RequiredException.CENTRAL_DIRECTORY_SIZE_TOO_BIG_MESSAGE);
        }

        if (cdOffset >= ZipConstants.ZIP64_MAGIC) {
            throw new Zip64RequiredException(Zip64RequiredException.ARCHIVE_TOO_BIG_MESSAGE);
        }
    }

    /**
     * Throws an exception if the size is unknown for a stored entry that is written to a non-seekable output or the entry is too big to be written without
     * Zip64 extra but the mode has been set to Never.
     */
    private void validateSizeInformation(final Zip64Mode effectiveMode) throws ZipException {
        // Size/CRC not required if SeekableByteChannel is used
        if (entry.entry.getMethod() == STORED && !(outputStream instanceof RandomAccessOutputStream)) {
            if (entry.entry.getSize() == ArchiveEntry.SIZE_UNKNOWN) {
                throw new ZipException("Uncompressed size is required for" + " STORED method when not writing to a" + " file");
            }
            if (entry.entry.getCrc() == ZipArchiveEntry.CRC_UNKNOWN) {
                throw new ZipException("CRC checksum is required for STORED" + " method when not writing to a file");
            }
            // STORED means no compression, so both sizes are identical.
            entry.entry.setCompressedSize(entry.entry.getSize());
        }

        if ((entry.entry.getSize() >= ZipConstants.ZIP64_MAGIC || entry.entry.getCompressedSize() >= ZipConstants.ZIP64_MAGIC)
                && effectiveMode == Zip64Mode.Never) {
            throw new Zip64RequiredException(Zip64RequiredException.getEntryTooBigMessage(entry.entry));
        }
    }

    /**
     * Computes the "version needed to extract" value: ZIP64 and data descriptor usage each raise the minimum beyond what the compression method alone requires.
     */
    private int versionNeededToExtract(final int zipMethod, final boolean zip64, final boolean usedDataDescriptor) {
        if (zip64) {
            return ZipConstants.ZIP64_MIN_VERSION;
        }
        if (usedDataDescriptor) {
            return ZipConstants.DATA_DESCRIPTOR_MIN_VERSION;
        }
        return versionNeededToExtractMethod(zipMethod);
    }

    /**
     * Minimum extractor version implied by the compression method alone.
     */
    private int versionNeededToExtractMethod(final int zipMethod) {
        return zipMethod == DEFLATED ? ZipConstants.DEFLATE_MIN_VERSION : ZipConstants.INITIAL_VERSION;
    }

    /**
     * Writes bytes to ZIP entry.
1552 * 1553 * @param b the byte array to write 1554 * @param offset the start position to write from 1555 * @param length the number of bytes to write 1556 * @throws IOException on error 1557 */ 1558 @Override 1559 public void write(final byte[] b, final int offset, final int length) throws IOException { 1560 if (entry == null) { 1561 throw new IllegalStateException("No current entry"); 1562 } 1563 ZipUtil.checkRequestedFeatures(entry.entry); 1564 final long writtenThisTime = streamCompressor.write(b, offset, length, entry.entry.getMethod()); 1565 count(writtenThisTime); 1566 } 1567 1568 /** 1569 * Writes the "End of central dir record". 1570 * 1571 * @throws IOException on error 1572 * @throws Zip64RequiredException if the archive's size exceeds 4 GByte or there are more than 65535 entries inside the archive and 1573 * {@link #setUseZip64(Zip64Mode)} is {@link Zip64Mode#Never}. 1574 */ 1575 protected void writeCentralDirectoryEnd() throws IOException { 1576 if (!hasUsedZip64 && isSplitZip) { 1577 ((ZipSplitOutputStream) this.outputStream).prepareToWriteUnsplittableContent(eocdLength); 1578 } 1579 1580 validateIfZip64IsNeededInEOCD(); 1581 1582 writeCounted(EOCD_SIG); 1583 1584 // number of this disk 1585 int numberOfThisDisk = 0; 1586 if (isSplitZip) { 1587 numberOfThisDisk = ((ZipSplitOutputStream) this.outputStream).getCurrentSplitSegmentIndex(); 1588 } 1589 writeCounted(ZipShort.getBytes(numberOfThisDisk)); 1590 1591 // disk number of the start of central directory 1592 writeCounted(ZipShort.getBytes((int) cdDiskNumberStart)); 1593 1594 // number of entries 1595 final int numberOfEntries = entries.size(); 1596 1597 // total number of entries in the central directory on this disk 1598 final int numOfEntriesOnThisDisk = isSplitZip ? 
numberOfCDInDiskData.getOrDefault(numberOfThisDisk, 0) : numberOfEntries; 1599 final byte[] numOfEntriesOnThisDiskData = ZipShort.getBytes(Math.min(numOfEntriesOnThisDisk, ZipConstants.ZIP64_MAGIC_SHORT)); 1600 writeCounted(numOfEntriesOnThisDiskData); 1601 1602 // number of entries 1603 final byte[] num = ZipShort.getBytes(Math.min(numberOfEntries, ZipConstants.ZIP64_MAGIC_SHORT)); 1604 writeCounted(num); 1605 1606 // length and location of CD 1607 writeCounted(ZipLong.getBytes(Math.min(cdLength, ZipConstants.ZIP64_MAGIC))); 1608 writeCounted(ZipLong.getBytes(Math.min(cdOffset, ZipConstants.ZIP64_MAGIC))); 1609 1610 // ZIP file comment 1611 final ByteBuffer data = this.zipEncoding.encode(comment); 1612 final int dataLen = data.limit() - data.position(); 1613 writeCounted(ZipShort.getBytes(dataLen)); 1614 streamCompressor.writeCounted(data.array(), data.arrayOffset(), dataLen); 1615 } 1616 1617 private void writeCentralDirectoryInChunks() throws IOException { 1618 final int NUM_PER_WRITE = 1000; 1619 final ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(70 * NUM_PER_WRITE); 1620 int count = 0; 1621 for (final ZipArchiveEntry ze : entries) { 1622 byteArrayOutputStream.write(createCentralFileHeader(ze)); 1623 if (++count > NUM_PER_WRITE) { 1624 writeCounted(byteArrayOutputStream.toByteArray()); 1625 byteArrayOutputStream.reset(); 1626 count = 0; 1627 } 1628 } 1629 writeCounted(byteArrayOutputStream.toByteArray()); 1630 } 1631 1632 /** 1633 * Writes the central file header entry. 1634 * 1635 * @param ze the entry to write 1636 * @throws IOException on error 1637 * @throws Zip64RequiredException if the archive's size exceeds 4 GByte and {@link #setUseZip64(Zip64Mode)} is {@link Zip64Mode#Never}. 
     */
    protected void writeCentralFileHeader(final ZipArchiveEntry ze) throws IOException {
        final byte[] centralFileHeader = createCentralFileHeader(ze);
        writeCounted(centralFileHeader);
    }

    /**
     * Write bytes to output or random access file.
     *
     * @param data the byte array to write
     * @throws IOException on error
     */
    private void writeCounted(final byte[] data) throws IOException {
        streamCompressor.writeCounted(data);
    }

    /**
     * Writes the data descriptor entry.
     *
     * @param ze the entry to write
     * @throws IOException on error
     */
    protected void writeDataDescriptor(final ZipArchiveEntry ze) throws IOException {
        // Only entries that actually rely on a data descriptor get one.
        if (!usesDataDescriptor(ze.getMethod(), false)) {
            return;
        }
        writeCounted(DD_SIG);
        writeCounted(ZipLong.getBytes(ze.getCrc()));
        if (!hasZip64Extra(ze)) {
            // Classic descriptor: 4-byte compressed and uncompressed sizes.
            writeCounted(ZipLong.getBytes(ze.getCompressedSize()));
            writeCounted(ZipLong.getBytes(ze.getSize()));
        } else {
            // ZIP64 descriptor: 8-byte sizes, matching the entry's ZIP64 extra field.
            writeCounted(ZipEightByteInteger.getBytes(ze.getCompressedSize()));
            writeCounted(ZipEightByteInteger.getBytes(ze.getSize()));
        }
    }

    /**
     * Writes the local file header entry
     *
     * @param ze the entry to write
     * @throws IOException on error
     */
    protected void writeLocalFileHeader(final ZipArchiveEntry ze) throws IOException {
        writeLocalFileHeader(ze, false);
    }

    // Writes the local file header for ze; when 'phased' the sizes/CRC are already known
    // (see usesDataDescriptor below), otherwise a data descriptor may be needed later.
    private void writeLocalFileHeader(final ZipArchiveEntry ze, final boolean phased) throws IOException {
        final boolean encodable = zipEncoding.canEncode(ze.getName());
        final ByteBuffer name = getName(ze);

        if (createUnicodeExtraFields != UnicodeExtraFieldPolicy.NEVER) {
            addUnicodeExtraFields(ze, encodable, name);
        }

        // Capture the header's offset BEFORE any header bytes are emitted; it is recorded
        // in metaData and later used to rewrite CRC/size fields in place.
        long localHeaderStart = streamCompressor.getTotalBytesWritten();
        if (isSplitZip) {
            // when creating a split zip, the offset should be
            // the offset to the corresponding segment disk
            final ZipSplitOutputStream splitOutputStream = (ZipSplitOutputStream) this.outputStream;
            ze.setDiskNumberStart(splitOutputStream.getCurrentSplitSegmentIndex());
            localHeaderStart = splitOutputStream.getCurrentSplitSegmentBytesWritten();
        }

        final byte[] localHeader = createLocalFileHeader(ze, name, encodable, phased, localHeaderStart);
        metaData.put(ze, new EntryMetaData(localHeaderStart, usesDataDescriptor(ze.getMethod(), phased)));
        entry.localDataStart = localHeaderStart + LFH_CRC_OFFSET; // At crc offset
        writeCounted(localHeader);
        // Offset where the entry's actual (possibly compressed) data begins.
        entry.dataStart = streamCompressor.getTotalBytesWritten();
    }

    /**
     * Write bytes to output or random access file.
     *
     * @param data the byte array to write
     * @throws IOException on error
     */
    protected final void writeOut(final byte[] data) throws IOException {
        streamCompressor.writeOut(data, 0, data.length);
    }

    /**
     * Write bytes to output or random access file.
     *
     * @param data   the byte array to write
     * @param offset the start position to write from
     * @param length the number of bytes to write
     * @throws IOException on error
     */
    protected final void writeOut(final byte[] data, final int offset, final int length) throws IOException {
        streamCompressor.writeOut(data, offset, length);
    }

    /**
     * Write preamble data. For most of the time, this is used to make self-extracting zips.
     *
     * @param preamble data to write
     * @throws IOException if an entry already exists
     * @since 1.21
     */
    public void writePreamble(final byte[] preamble) throws IOException {
        writePreamble(preamble, 0, preamble.length);
    }

    /**
     * Write preamble data. For most of the time, this is used to make self-extracting zips.
     *
     * @param preamble data to write
     * @param offset   the start offset in the data
     * @param length   the number of bytes to write
     * @throws IOException if an entry already exists
     * @since 1.21
     */
    public void writePreamble(final byte[] preamble, final int offset, final int length) throws IOException {
        // A preamble only makes sense before the first local file header.
        if (entry != null) {
            throw new IllegalStateException("Preamble must be written before creating an entry");
        }
        this.streamCompressor.writeCounted(preamble, offset, length);
    }

    /**
     * Writes the "ZIP64 End of central dir record" and "ZIP64 End of central dir locator".
     *
     * @throws IOException on error
     * @since 1.3
     */
    protected void writeZip64CentralDirectory() throws IOException {
        if (zip64Mode == Zip64Mode.Never) {
            return;
        }

        if (!hasUsedZip64 && shouldUseZip64EOCD()) {
            // actually "will use"
            hasUsedZip64 = true;
        }

        // Nothing ZIP64-specific to write if no entry (or the archive itself) needed ZIP64.
        if (!hasUsedZip64) {
            return;
        }

        long offset = streamCompressor.getTotalBytesWritten();
        long diskNumberStart = 0L;
        if (isSplitZip) {
            // when creating a split zip, the offset should be
            // the offset to the corresponding segment disk
            final ZipSplitOutputStream zipSplitOutputStream = (ZipSplitOutputStream) this.outputStream;
            offset = zipSplitOutputStream.getCurrentSplitSegmentBytesWritten();
            diskNumberStart = zipSplitOutputStream.getCurrentSplitSegmentIndex();
        }

        writeOut(ZIP64_EOCD_SIG);
        // size of zip64 end of central directory, we don't have any variable length
        // as we don't support the extensible data sector, yet
        writeOut(ZipEightByteInteger.getBytes(ZipConstants.SHORT /* version made by */
                + ZipConstants.SHORT /* version needed to extract */
                + ZipConstants.WORD /* disk number */
                + ZipConstants.WORD /* disk with central directory */
                + ZipConstants.DWORD /* number of entries in CD on this disk */
                + ZipConstants.DWORD /* total number of entries */
                + ZipConstants.DWORD /* size of CD */
                + (long) ZipConstants.DWORD /* offset of CD */
        ));

        // version made by and version needed to extract
        writeOut(ZipShort.getBytes(ZipConstants.ZIP64_MIN_VERSION));
        writeOut(ZipShort.getBytes(ZipConstants.ZIP64_MIN_VERSION));

        // number of this disk
        int numberOfThisDisk = 0;
        if (isSplitZip) {
            numberOfThisDisk = ((ZipSplitOutputStream) this.outputStream).getCurrentSplitSegmentIndex();
        }
        writeOut(ZipLong.getBytes(numberOfThisDisk));

        // disk number of the start of central directory
        writeOut(ZipLong.getBytes(cdDiskNumberStart));

        // total number of entries in the central directory on this disk
        final int numOfEntriesOnThisDisk = isSplitZip ? numberOfCDInDiskData.getOrDefault(numberOfThisDisk, 0) : entries.size();
        final byte[] numOfEntriesOnThisDiskData = ZipEightByteInteger.getBytes(numOfEntriesOnThisDisk);
        writeOut(numOfEntriesOnThisDiskData);

        // number of entries
        final byte[] num = ZipEightByteInteger.getBytes(entries.size());
        writeOut(num);

        // length and location of CD (full 8-byte values, no capping needed here)
        writeOut(ZipEightByteInteger.getBytes(cdLength));
        writeOut(ZipEightByteInteger.getBytes(cdOffset));

        // no "zip64 extensible data sector" for now

        if (isSplitZip) {
            // based on the ZIP specification, the End Of Central Directory record and
            // the Zip64 End Of Central Directory locator record must be on the same segment
            final int zip64EOCDLOCLength = ZipConstants.WORD /* length of ZIP64_EOCD_LOC_SIG */
                    + ZipConstants.WORD /* disk number of ZIP64_EOCD_SIG */
                    + ZipConstants.DWORD /* offset of ZIP64_EOCD_SIG */
                    + ZipConstants.WORD /* total number of disks */;

            final long unsplittableContentSize = zip64EOCDLOCLength + eocdLength;
            ((ZipSplitOutputStream) this.outputStream).prepareToWriteUnsplittableContent(unsplittableContentSize);
        }

        // and now the "ZIP64 end of central directory locator"
        writeOut(ZIP64_EOCD_LOC_SIG);

        // disk number holding the ZIP64 EOCD record
        writeOut(ZipLong.getBytes(diskNumberStart));
        // relative offset of ZIP64 EOCD record
        writeOut(ZipEightByteInteger.getBytes(offset));
        // total number of disks
        if (isSplitZip) {
            // the Zip64 End Of Central Directory Locator and the End Of Central Directory must be
            // in the same split disk, it means they must be located in the last disk
            final int totalNumberOfDisks = ((ZipSplitOutputStream) this.outputStream).getCurrentSplitSegmentIndex() + 1;
            writeOut(ZipLong.getBytes(totalNumberOfDisks));
        } else {
            writeOut(ONE);
        }
    }
}