/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.commons.compress.archivers.zip;

import java.util.zip.ZipException;

import static org.apache.commons.compress.archivers.zip.ZipConstants.DWORD;
import static org.apache.commons.compress.archivers.zip.ZipConstants.WORD;

/**
 * Holds size and other extended information for entries that use Zip64
 * features.
 *
 * <p>Currently Commons Compress doesn't support encrypting the
 * central directory so the note in APPNOTE.TXT about masking doesn't
 * apply.</p>
 *
 * <p>The implementation relies on data being read from the local file
 * header and assumes that both size values are always present.</p>
 *
 * @see <a href="http://www.pkware.com/documents/casestudies/APPNOTE.TXT">PKWARE
 * APPNOTE.TXT, section 4.5.3</a>
 *
 * @since 1.2
 * @NotThreadSafe
 */
public class Zip64ExtendedInformationExtraField implements ZipExtraField {

    static final ZipShort HEADER_ID = new ZipShort(0x0001);

    private static final String LFH_MUST_HAVE_BOTH_SIZES_MSG =
        "Zip64 extended information must contain"
        + " both size values in the local file header.";
    private static final byte[] EMPTY = new byte[0];

    private ZipEightByteInteger size, compressedSize, relativeHeaderOffset;
    private ZipLong diskStart;

    /**
     * Stored in {@link #parseFromCentralDirectoryData
     * parseFromCentralDirectoryData} so it can be reused when ZipFile
     * calls {@link #reparseCentralDirectoryData
     * reparseCentralDirectoryData}.
     *
     * <p>Not used for anything else</p>
     *
     * @since 1.3
     */
    private byte[] rawCentralDirectoryData;

    /**
     * This constructor should only be used by the code that reads
     * archives inside of Commons Compress.
     */
    public Zip64ExtendedInformationExtraField() { }

    /**
     * Creates an extra field based on the original and compressed size.
     *
     * @param size the entry's original size
     * @param compressedSize the entry's compressed size
     *
     * @throws IllegalArgumentException if size or compressedSize is null
     */
    public Zip64ExtendedInformationExtraField(ZipEightByteInteger size,
                                              ZipEightByteInteger compressedSize) {
        this(size, compressedSize, null, null);
    }

    /**
     * Creates an extra field based on all four possible values.
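     *
     * <p>A minimal usage sketch (illustrative only; {@code entry} is
     * assumed to be a {@code ZipArchiveEntry} and the numeric variables
     * are placeholders for the entry's actual values):</p>
     * <pre>
     * Zip64ExtendedInformationExtraField z64 =
     *     new Zip64ExtendedInformationExtraField(
     *         new ZipEightByteInteger(uncompressedSize),
     *         new ZipEightByteInteger(compressedSize),
     *         new ZipEightByteInteger(localHeaderOffset),
     *         new ZipLong(diskNumber));
     * entry.addExtraField(z64);
     * </pre>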
     *
     * @param size the entry's original size
     * @param compressedSize the entry's compressed size
     * @param relativeHeaderOffset the entry's local file header offset
     * @param diskStart the number of the disk the entry starts on
     *
     * @throws IllegalArgumentException if size or compressedSize is null
     */
    public Zip64ExtendedInformationExtraField(ZipEightByteInteger size,
                                              ZipEightByteInteger compressedSize,
                                              ZipEightByteInteger relativeHeaderOffset,
                                              ZipLong diskStart) {
        this.size = size;
        this.compressedSize = compressedSize;
        this.relativeHeaderOffset = relativeHeaderOffset;
        this.diskStart = diskStart;
    }

    public ZipShort getHeaderId() {
        return HEADER_ID;
    }

    public ZipShort getLocalFileDataLength() {
        return new ZipShort(size != null ? 2 * DWORD : 0);
    }

    public ZipShort getCentralDirectoryLength() {
        return new ZipShort((size != null ? DWORD : 0)
                            + (compressedSize != null ? DWORD : 0)
                            + (relativeHeaderOffset != null ? DWORD : 0)
                            + (diskStart != null ? WORD : 0));
    }

    public byte[] getLocalFileDataData() {
        if (size != null || compressedSize != null) {
            if (size == null || compressedSize == null) {
                throw new IllegalArgumentException(LFH_MUST_HAVE_BOTH_SIZES_MSG);
            }
            byte[] data = new byte[2 * DWORD];
            addSizes(data);
            return data;
        }
        return EMPTY;
    }

    public byte[] getCentralDirectoryData() {
        byte[] data = new byte[getCentralDirectoryLength().getValue()];
        int off = addSizes(data);
        if (relativeHeaderOffset != null) {
            System.arraycopy(relativeHeaderOffset.getBytes(), 0, data, off, DWORD);
            off += DWORD;
        }
        if (diskStart != null) {
            System.arraycopy(diskStart.getBytes(), 0, data, off, WORD);
            off += WORD;
        }
        return data;
    }

    public void parseFromLocalFileData(byte[] buffer, int offset, int length)
        throws ZipException {
        if (length == 0) {
            // no local file data at all, may happen if an archive
            // only holds a ZIP64 extended information extra field
            // inside the central directory but not inside the local
            // file header
            return;
        }
        if (length < 2 * DWORD) {
            throw new ZipException(LFH_MUST_HAVE_BOTH_SIZES_MSG);
        }
        size = new ZipEightByteInteger(buffer, offset);
        offset += DWORD;
        compressedSize = new ZipEightByteInteger(buffer, offset);
        offset += DWORD;
        int remaining = length - 2 * DWORD;
        if (remaining >= DWORD) {
            relativeHeaderOffset = new ZipEightByteInteger(buffer, offset);
            offset += DWORD;
            remaining -= DWORD;
        }
        if (remaining >= WORD) {
            diskStart = new ZipLong(buffer, offset);
            offset += WORD;
            remaining -= WORD;
        }
    }

    public void parseFromCentralDirectoryData(byte[] buffer, int offset,
                                              int length)
        throws ZipException {
        // store for processing in reparseCentralDirectoryData
        rawCentralDirectoryData = new byte[length];
        System.arraycopy(buffer, offset, rawCentralDirectoryData, 0, length);

        // if there is no size information in here, we are screwed and
        // can only hope things will get resolved by LFH data later
        // But there are some cases that can be detected
        // * all data is there
        // * length == 24 -> both sizes and offset
        // * length % 8 == 4 -> at least we can identify the diskStart field
        if (length >= 3 * DWORD + WORD) {
            parseFromLocalFileData(buffer, offset, length);
        } else if (length == 3 * DWORD) {
            size = new ZipEightByteInteger(buffer, offset);
            offset += DWORD;
            compressedSize = new ZipEightByteInteger(buffer, offset);
            offset += DWORD;
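            // exactly 3 * DWORD bytes can only mean all three eight-byte
            // fields are present - a disk start number would add WORD more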
            relativeHeaderOffset = new ZipEightByteInteger(buffer, offset);
        } else if (length % DWORD == WORD) {
            diskStart = new ZipLong(buffer, offset + length - WORD);
        }
    }

    /**
     * Parses the raw bytes read from the central directory extra
     * field with knowledge of which fields are expected to be there.
     *
     * <p>All four fields inside the zip64 extended information extra
     * field are optional and must only be present if their corresponding
     * entry inside the central directory contains the correct magic
     * value.</p>
     *
     * @param hasUncompressedSize flag to read the uncompressed size
     * @param hasCompressedSize flag to read the compressed size
     * @param hasRelativeHeaderOffset flag to read the relative header offset
     * @param hasDiskStart flag to read the disk start number
     *
     * @throws ZipException if the stored data's length doesn't match
     * the expected length
     */
    public void reparseCentralDirectoryData(boolean hasUncompressedSize,
                                            boolean hasCompressedSize,
                                            boolean hasRelativeHeaderOffset,
                                            boolean hasDiskStart)
        throws ZipException {
        if (rawCentralDirectoryData != null) {
            int expectedLength = (hasUncompressedSize ? DWORD : 0)
                + (hasCompressedSize ? DWORD : 0)
                + (hasRelativeHeaderOffset ? DWORD : 0)
                + (hasDiskStart ? WORD : 0);
            if (rawCentralDirectoryData.length < expectedLength) {
                throw new ZipException("central directory zip64 extended"
                                       + " information extra field's length"
                                       + " doesn't match central directory"
                                       + " data. Expected length "
                                       + expectedLength + " but is "
                                       + rawCentralDirectoryData.length);
            }
            int offset = 0;
            if (hasUncompressedSize) {
                size = new ZipEightByteInteger(rawCentralDirectoryData, offset);
                offset += DWORD;
            }
            if (hasCompressedSize) {
                compressedSize = new ZipEightByteInteger(rawCentralDirectoryData,
                                                         offset);
                offset += DWORD;
            }
            if (hasRelativeHeaderOffset) {
                relativeHeaderOffset =
                    new ZipEightByteInteger(rawCentralDirectoryData, offset);
                offset += DWORD;
            }
            if (hasDiskStart) {
                diskStart = new ZipLong(rawCentralDirectoryData, offset);
                offset += WORD;
            }
        }
    }

    /**
     * The uncompressed size stored in this extra field.
     */
    public ZipEightByteInteger getSize() {
        return size;
    }

    /**
     * The uncompressed size stored in this extra field.
     */
    public void setSize(ZipEightByteInteger size) {
        this.size = size;
    }

    /**
     * The compressed size stored in this extra field.
     */
    public ZipEightByteInteger getCompressedSize() {
        return compressedSize;
    }

    /**
     * The compressed size stored in this extra field.
     */
    public void setCompressedSize(ZipEightByteInteger compressedSize) {
        this.compressedSize = compressedSize;
    }

    /**
     * The relative header offset stored in this extra field.
     */
    public ZipEightByteInteger getRelativeHeaderOffset() {
        return relativeHeaderOffset;
    }

    /**
     * The relative header offset stored in this extra field.
     */
    public void setRelativeHeaderOffset(ZipEightByteInteger rho) {
        relativeHeaderOffset = rho;
    }

    /**
     * The disk start number stored in this extra field.
     */
    public ZipLong getDiskStartNumber() {
        return diskStart;
    }

    /**
     * The disk start number stored in this extra field.
     */
    public void setDiskStartNumber(ZipLong ds) {
        diskStart = ds;
    }

    private int addSizes(byte[] data) {
        int off = 0;
        if (size != null) {
            System.arraycopy(size.getBytes(), 0, data, 0, DWORD);
            off += DWORD;
        }
        if (compressedSize != null) {
            System.arraycopy(compressedSize.getBytes(), 0, data, off, DWORD);
            off += DWORD;
        }
        return off;
    }
}