/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.protocolPB;

import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.List;

import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateFlagProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeActionProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.StorageState;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto;
import org.apache.hadoop.hdfs.security.token.block.BlockKey;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
import org.apache.hadoop.hdfs.server.namenode.INodeId;
import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
import org.apache.hadoop.hdfs.server.protocol.CheckpointCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
import org.apache.hadoop.hdfs.server.protocol.JournalInfo;
import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.hdfs.util.ExactSizeInputStream;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.DataChecksum;

import com.google.common.collect.Lists;
import com.google.protobuf.ByteString;
import com.google.protobuf.CodedInputStream;

/**
 * Utilities for converting protobuf classes to and from implementation
 * classes, plus other helpers for dealing with protobuf.
 *
 * Note that when converting from an internal type to a protobuf type, the
 * converter never returns null for the protobuf type. The check for the
 * internal type being null must be done before calling the convert() method.
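 *
 * A minimal illustrative round trip (assuming an already-constructed
 * {@link ExtendedBlock}):
 * <pre>
 *   ExtendedBlockProto proto = PBHelper.convert(block);  // internal to proto
 *   ExtendedBlock copy = PBHelper.convert(proto);        // proto to internal
 * </pre>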
 */
public class PBHelper {
  private static final RegisterCommandProto REG_CMD_PROTO =
      RegisterCommandProto.newBuilder().build();
  private static final RegisterCommand REG_CMD = new RegisterCommand();

  private PBHelper() {
    /** Hidden constructor */
  }

  public static ByteString getByteString(byte[] bytes) {
    return ByteString.copyFrom(bytes);
  }

  public static NamenodeRole convert(NamenodeRoleProto role) {
    switch (role) {
    case NAMENODE:
      return NamenodeRole.NAMENODE;
    case BACKUP:
      return NamenodeRole.BACKUP;
    case CHECKPOINT:
      return NamenodeRole.CHECKPOINT;
    }
    return null;
  }

  public static NamenodeRoleProto convert(NamenodeRole role) {
    switch (role) {
    case NAMENODE:
      return NamenodeRoleProto.NAMENODE;
    case BACKUP:
      return NamenodeRoleProto.BACKUP;
    case CHECKPOINT:
      return NamenodeRoleProto.CHECKPOINT;
    }
    return null;
  }

  public static StorageInfoProto convert(StorageInfo info) {
    return StorageInfoProto.newBuilder().setClusterID(info.getClusterID())
        .setCTime(info.getCTime()).setLayoutVersion(info.getLayoutVersion())
        .setNamespceID(info.getNamespaceID()).build();
  }

  public static StorageInfo convert(StorageInfoProto info) {
    return new StorageInfo(info.getLayoutVersion(), info.getNamespceID(),
        info.getClusterID(), info.getCTime());
  }

  public static NamenodeRegistrationProto convert(NamenodeRegistration reg) {
    return NamenodeRegistrationProto.newBuilder()
        .setHttpAddress(reg.getHttpAddress()).setRole(convert(reg.getRole()))
        .setRpcAddress(reg.getAddress())
        .setStorageInfo(convert((StorageInfo) reg)).build();
  }

  public static NamenodeRegistration convert(NamenodeRegistrationProto reg) {
    return new NamenodeRegistration(reg.getRpcAddress(), reg.getHttpAddress(),
        convert(reg.getStorageInfo()), convert(reg.getRole()));
  }

  // DatanodeId
  public static DatanodeID convert(DatanodeIDProto dn) {
    return new DatanodeID(dn.getIpAddr(), dn.getHostName(), dn.getStorageID(),
        dn.getXferPort(), dn.getInfoPort(), dn.getIpcPort());
  }

  public static DatanodeIDProto convert(DatanodeID dn) {
    return DatanodeIDProto.newBuilder()
        .setIpAddr(dn.getIpAddr())
        .setHostName(dn.getHostName())
        .setStorageID(dn.getStorageID())
        .setXferPort(dn.getXferPort())
        .setInfoPort(dn.getInfoPort())
        .setIpcPort(dn.getIpcPort()).build();
  }

  // Arrays of DatanodeId
  public static DatanodeIDProto[] convert(DatanodeID[] did) {
    if (did == null)
      return null;
    final int len = did.length;
    DatanodeIDProto[] result = new DatanodeIDProto[len];
    for (int i = 0; i < len; ++i) {
      result[i] = convert(did[i]);
    }
    return result;
  }

  public static DatanodeID[] convert(DatanodeIDProto[] did) {
    if (did == null) return null;
    final int len = did.length;
    DatanodeID[] result = new DatanodeID[len];
    for (int i = 0; i < len; ++i) {
      result[i] = convert(did[i]);
    }
    return result;
  }

  // Block
  public static BlockProto convert(Block b) {
    return BlockProto.newBuilder().setBlockId(b.getBlockId())
        .setGenStamp(b.getGenerationStamp()).setNumBytes(b.getNumBytes())
        .build();
  }

  public static Block convert(BlockProto b) {
    return new Block(b.getBlockId(), b.getNumBytes(), b.getGenStamp());
  }

  public static BlockWithLocationsProto convert(BlockWithLocations blk) {
    return BlockWithLocationsProto.newBuilder()
        .setBlock(convert(blk.getBlock()))
        .addAllStorageIDs(Arrays.asList(blk.getStorageIDs())).build();
  }

  public static BlockWithLocations convert(BlockWithLocationsProto b) {
    return new BlockWithLocations(convert(b.getBlock()), b.getStorageIDsList()
        .toArray(new String[0]));
  }

  public static BlocksWithLocationsProto convert(BlocksWithLocations blks) {
    BlocksWithLocationsProto.Builder builder = BlocksWithLocationsProto
        .newBuilder();
    for (BlockWithLocations b : blks.getBlocks()) {
      builder.addBlocks(convert(b));
    }
    return builder.build();
  }

  public static BlocksWithLocations convert(BlocksWithLocationsProto blocks) {
    List<BlockWithLocationsProto> b = blocks.getBlocksList();
    BlockWithLocations[] ret = new BlockWithLocations[b.size()];
    int i = 0;
    for (BlockWithLocationsProto entry : b) {
      ret[i++] = convert(entry);
    }
    return new BlocksWithLocations(ret);
  }

  public static BlockKeyProto convert(BlockKey key) {
    byte[] encodedKey = key.getEncodedKey();
    ByteString keyBytes = ByteString.copyFrom(encodedKey == null ?
        DFSUtil.EMPTY_BYTES : encodedKey);
    return BlockKeyProto.newBuilder().setKeyId(key.getKeyId())
        .setKeyBytes(keyBytes).setExpiryDate(key.getExpiryDate()).build();
  }

  public static BlockKey convert(BlockKeyProto k) {
    return new BlockKey(k.getKeyId(), k.getExpiryDate(), k.getKeyBytes()
        .toByteArray());
  }

  public static ExportedBlockKeysProto convert(ExportedBlockKeys keys) {
    ExportedBlockKeysProto.Builder builder = ExportedBlockKeysProto
        .newBuilder();
    builder.setIsBlockTokenEnabled(keys.isBlockTokenEnabled())
        .setKeyUpdateInterval(keys.getKeyUpdateInterval())
        .setTokenLifeTime(keys.getTokenLifetime())
        .setCurrentKey(convert(keys.getCurrentKey()));
    for (BlockKey k : keys.getAllKeys()) {
      builder.addAllKeys(convert(k));
    }
    return builder.build();
  }

  public static ExportedBlockKeys convert(ExportedBlockKeysProto keys) {
    return new ExportedBlockKeys(keys.getIsBlockTokenEnabled(),
        keys.getKeyUpdateInterval(), keys.getTokenLifeTime(),
        convert(keys.getCurrentKey()), convertBlockKeys(keys.getAllKeysList()));
  }

  public static CheckpointSignatureProto convert(CheckpointSignature s) {
    return CheckpointSignatureProto.newBuilder()
        .setBlockPoolId(s.getBlockpoolID())
        .setCurSegmentTxId(s.getCurSegmentTxId())
        .setMostRecentCheckpointTxId(s.getMostRecentCheckpointTxId())
        .setStorageInfo(PBHelper.convert((StorageInfo) s)).build();
  }

  public static CheckpointSignature convert(CheckpointSignatureProto s) {
    return new CheckpointSignature(PBHelper.convert(s.getStorageInfo()),
        s.getBlockPoolId(), s.getMostRecentCheckpointTxId(),
        s.getCurSegmentTxId());
  }

  public static RemoteEditLogProto convert(RemoteEditLog log) {
    return RemoteEditLogProto.newBuilder()
        .setStartTxId(log.getStartTxId())
        .setEndTxId(log.getEndTxId())
        .setIsInProgress(log.isInProgress()).build();
  }

  public static RemoteEditLog convert(RemoteEditLogProto l) {
    return new RemoteEditLog(l.getStartTxId(), l.getEndTxId(),
        l.getIsInProgress());
  }

  public static RemoteEditLogManifestProto convert(
      RemoteEditLogManifest manifest) {
    RemoteEditLogManifestProto.Builder builder = RemoteEditLogManifestProto
        .newBuilder();
    for (RemoteEditLog log : manifest.getLogs()) {
      builder.addLogs(convert(log));
    }
    return builder.build();
  }

  public static RemoteEditLogManifest convert(
      RemoteEditLogManifestProto manifest) {
    List<RemoteEditLog> logs = new ArrayList<RemoteEditLog>(manifest
        .getLogsList().size());
    for (RemoteEditLogProto l : manifest.getLogsList()) {
      logs.add(convert(l));
    }
    return new RemoteEditLogManifest(logs);
  }

  public static CheckpointCommandProto convert(CheckpointCommand cmd) {
    return CheckpointCommandProto.newBuilder()
        .setSignature(convert(cmd.getSignature()))
        .setNeedToReturnImage(cmd.needToReturnImage()).build();
  }

  public static NamenodeCommandProto convert(NamenodeCommand cmd) {
    if (cmd instanceof CheckpointCommand) {
      return NamenodeCommandProto.newBuilder().setAction(cmd.getAction())
          .setType(NamenodeCommandProto.Type.CheckPointCommand)
          .setCheckpointCmd(convert((CheckpointCommand) cmd)).build();
    }
    return NamenodeCommandProto.newBuilder()
        .setType(NamenodeCommandProto.Type.NamenodeCommand)
        .setAction(cmd.getAction()).build();
  }

  public static BlockKey[] convertBlockKeys(List<BlockKeyProto> list) {
    BlockKey[] ret = new BlockKey[list.size()];
    int i = 0;
    for (BlockKeyProto k : list) {
      ret[i++] = convert(k);
    }
    return ret;
  }

  public static NamespaceInfo convert(NamespaceInfoProto info) {
    StorageInfoProto storage = info.getStorageInfo();
    return new NamespaceInfo(storage.getNamespceID(), storage.getClusterID(),
        info.getBlockPoolID(), storage.getCTime(), info.getBuildVersion(),
        info.getSoftwareVersion());
  }

  public static NamenodeCommand convert(NamenodeCommandProto cmd) {
    if (cmd == null) return null;
    switch (cmd.getType()) {
    case CheckPointCommand:
      CheckpointCommandProto chkPt = cmd.getCheckpointCmd();
      return new CheckpointCommand(PBHelper.convert(chkPt.getSignature()),
          chkPt.getNeedToReturnImage());
    default:
      return new NamenodeCommand(cmd.getAction());
    }
  }

  public static ExtendedBlock convert(ExtendedBlockProto eb) {
    if (eb == null) return null;
    return new ExtendedBlock(eb.getPoolId(), eb.getBlockId(), eb.getNumBytes(),
        eb.getGenerationStamp());
  }

  public static ExtendedBlockProto convert(final ExtendedBlock b) {
    if (b == null) return null;
    return ExtendedBlockProto.newBuilder().
        setPoolId(b.getBlockPoolId()).
        setBlockId(b.getBlockId()).
        setNumBytes(b.getNumBytes()).
        setGenerationStamp(b.getGenerationStamp()).
        build();
  }
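
  // A RecoveringBlockProto carries the block as a LocatedBlockProto plus the
  // new generation stamp to apply once recovery completes.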
  public static RecoveringBlockProto convert(RecoveringBlock b) {
    if (b == null) {
      return null;
    }
    LocatedBlockProto lb = PBHelper.convert((LocatedBlock)b);
    return RecoveringBlockProto.newBuilder().setBlock(lb)
        .setNewGenStamp(b.getNewGenerationStamp()).build();
  }

  public static RecoveringBlock convert(RecoveringBlockProto b) {
    ExtendedBlock block = convert(b.getBlock().getB());
    DatanodeInfo[] locs = convert(b.getBlock().getLocsList());
    return new RecoveringBlock(block, locs, b.getNewGenStamp());
  }

  public static DatanodeInfoProto.AdminState convert(
      final DatanodeInfo.AdminStates inAs) {
    switch (inAs) {
    case NORMAL: return DatanodeInfoProto.AdminState.NORMAL;
    case DECOMMISSION_INPROGRESS:
      return DatanodeInfoProto.AdminState.DECOMMISSION_INPROGRESS;
    case DECOMMISSIONED: return DatanodeInfoProto.AdminState.DECOMMISSIONED;
    default: return DatanodeInfoProto.AdminState.NORMAL;
    }
  }

  static public DatanodeInfo convert(DatanodeInfoProto di) {
    if (di == null) return null;
    return new DatanodeInfo(
        PBHelper.convert(di.getId()),
        di.hasLocation() ? di.getLocation() : null,
        di.getCapacity(), di.getDfsUsed(), di.getRemaining(),
        di.getBlockPoolUsed(), di.getLastUpdate(), di.getXceiverCount(),
        PBHelper.convert(di.getAdminState()));
  }

  static public DatanodeInfoProto convertDatanodeInfo(DatanodeInfo di) {
    if (di == null) return null;
    DatanodeInfoProto.Builder builder = DatanodeInfoProto.newBuilder();
    if (di.getNetworkLocation() != null) {
      builder.setLocation(di.getNetworkLocation());
    }

    return builder.
        setId(PBHelper.convert((DatanodeID) di)).
        setCapacity(di.getCapacity()).
        setDfsUsed(di.getDfsUsed()).
        setRemaining(di.getRemaining()).
        setBlockPoolUsed(di.getBlockPoolUsed()).
        setLastUpdate(di.getLastUpdate()).
        setXceiverCount(di.getXceiverCount()).
        setAdminState(PBHelper.convert(di.getAdminState())).
        build();
  }

  static public DatanodeInfo[] convert(DatanodeInfoProto di[]) {
    if (di == null) return null;
    DatanodeInfo[] result = new DatanodeInfo[di.length];
    for (int i = 0; i < di.length; i++) {
      result[i] = convert(di[i]);
    }
    return result;
  }

  public static List<? extends HdfsProtos.DatanodeInfoProto> convert(
      DatanodeInfo[] dnInfos) {
    return convert(dnInfos, 0);
  }

  /**
   * Copy {@code dnInfos}, starting at {@code startIdx}, into a list of the
   * corresponding protobuf objects.
   */
  public static List<? extends HdfsProtos.DatanodeInfoProto> convert(
      DatanodeInfo[] dnInfos, int startIdx) {
    if (dnInfos == null)
      return null;
    ArrayList<HdfsProtos.DatanodeInfoProto> protos = Lists
        .newArrayListWithCapacity(dnInfos.length);
    for (int i = startIdx; i < dnInfos.length; i++) {
      protos.add(convert(dnInfos[i]));
    }
    return protos;
  }

  public static DatanodeInfo[] convert(List<DatanodeInfoProto> list) {
    DatanodeInfo[] info = new DatanodeInfo[list.size()];
    for (int i = 0; i < info.length; i++) {
      info[i] = convert(list.get(i));
    }
    return info;
  }

  public static DatanodeInfoProto convert(DatanodeInfo info) {
    DatanodeInfoProto.Builder builder = DatanodeInfoProto.newBuilder();
    builder.setBlockPoolUsed(info.getBlockPoolUsed());
    builder.setAdminState(PBHelper.convert(info.getAdminState()));
    builder.setCapacity(info.getCapacity())
        .setDfsUsed(info.getDfsUsed())
        .setId(PBHelper.convert((DatanodeID)info))
        .setLastUpdate(info.getLastUpdate())
        .setLocation(info.getNetworkLocation())
        .setRemaining(info.getRemaining())
        .setXceiverCount(info.getXceiverCount())
        .build();
    return builder.build();
  }

  public static AdminStates convert(AdminState adminState) {
    switch(adminState) {
    case DECOMMISSION_INPROGRESS:
      return AdminStates.DECOMMISSION_INPROGRESS;
    case DECOMMISSIONED:
      return AdminStates.DECOMMISSIONED;
    case NORMAL:
    default:
      return AdminStates.NORMAL;
    }
  }

  public static LocatedBlockProto convert(LocatedBlock b) {
    if (b == null) return null;
    Builder builder = LocatedBlockProto.newBuilder();
    DatanodeInfo[] locs = b.getLocations();
    for (int i = 0; i < locs.length; i++) {
      builder.addLocs(i, PBHelper.convert(locs[i]));
    }
    return builder.setB(PBHelper.convert(b.getBlock()))
        .setBlockToken(PBHelper.convert(b.getBlockToken()))
        .setCorrupt(b.isCorrupt()).setOffset(b.getStartOffset()).build();
  }

  public static LocatedBlock convert(LocatedBlockProto proto) {
    if (proto == null) return null;
    List<DatanodeInfoProto> locs = proto.getLocsList();
    DatanodeInfo[] targets = new DatanodeInfo[locs.size()];
    for (int i = 0; i < locs.size(); i++) {
      targets[i] = PBHelper.convert(locs.get(i));
    }
    LocatedBlock lb = new LocatedBlock(PBHelper.convert(proto.getB()), targets,
        proto.getOffset(), proto.getCorrupt());
    lb.setBlockToken(PBHelper.convert(proto.getBlockToken()));
    return lb;
  }

  public static TokenProto convert(Token<?> tok) {
    return TokenProto.newBuilder().
        setIdentifier(ByteString.copyFrom(tok.getIdentifier())).
        setPassword(ByteString.copyFrom(tok.getPassword())).
        setKind(tok.getKind().toString()).
        setService(tok.getService().toString()).build();
  }
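
  // Block tokens and delegation tokens share the same TokenProto wire format;
  // the two converters below differ only in the identifier type of the
  // returned Token.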
  public static Token<BlockTokenIdentifier> convert(
      TokenProto blockToken) {
    return new Token<BlockTokenIdentifier>(blockToken.getIdentifier()
        .toByteArray(), blockToken.getPassword().toByteArray(), new Text(
        blockToken.getKind()), new Text(blockToken.getService()));
  }

  public static Token<DelegationTokenIdentifier> convertDelegationToken(
      TokenProto blockToken) {
    return new Token<DelegationTokenIdentifier>(blockToken.getIdentifier()
        .toByteArray(), blockToken.getPassword().toByteArray(), new Text(
        blockToken.getKind()), new Text(blockToken.getService()));
  }

  public static ReplicaState convert(ReplicaStateProto state) {
    switch (state) {
    case RBW:
      return ReplicaState.RBW;
    case RUR:
      return ReplicaState.RUR;
    case RWR:
      return ReplicaState.RWR;
    case TEMPORARY:
      return ReplicaState.TEMPORARY;
    case FINALIZED:
    default:
      return ReplicaState.FINALIZED;
    }
  }

  public static ReplicaStateProto convert(ReplicaState state) {
    switch (state) {
    case RBW:
      return ReplicaStateProto.RBW;
    case RUR:
      return ReplicaStateProto.RUR;
    case RWR:
      return ReplicaStateProto.RWR;
    case TEMPORARY:
      return ReplicaStateProto.TEMPORARY;
    case FINALIZED:
    default:
      return ReplicaStateProto.FINALIZED;
    }
  }

  public static DatanodeRegistrationProto convert(
      DatanodeRegistration registration) {
    DatanodeRegistrationProto.Builder builder = DatanodeRegistrationProto
        .newBuilder();
    return builder.setDatanodeID(PBHelper.convert((DatanodeID) registration))
        .setStorageInfo(PBHelper.convert(registration.getStorageInfo()))
        .setKeys(PBHelper.convert(registration.getExportedKeys()))
        .setSoftwareVersion(registration.getSoftwareVersion()).build();
  }

  public static DatanodeRegistration convert(DatanodeRegistrationProto proto) {
    return new DatanodeRegistration(PBHelper.convert(proto.getDatanodeID()),
        PBHelper.convert(proto.getStorageInfo()), PBHelper.convert(proto
            .getKeys()), proto.getSoftwareVersion());
  }

  public static DatanodeCommand convert(DatanodeCommandProto proto) {
    switch (proto.getCmdType()) {
    case BalancerBandwidthCommand:
      return PBHelper.convert(proto.getBalancerCmd());
    case BlockCommand:
      return PBHelper.convert(proto.getBlkCmd());
    case BlockRecoveryCommand:
      return PBHelper.convert(proto.getRecoveryCmd());
    case FinalizeCommand:
      return PBHelper.convert(proto.getFinalizeCmd());
    case KeyUpdateCommand:
      return PBHelper.convert(proto.getKeyUpdateCmd());
    case RegisterCommand:
      return REG_CMD;
    }
    return null;
  }

  public static BalancerBandwidthCommandProto convert(
      BalancerBandwidthCommand bbCmd) {
    return BalancerBandwidthCommandProto.newBuilder()
        .setBandwidth(bbCmd.getBalancerBandwidthValue()).build();
  }

  public static KeyUpdateCommandProto convert(KeyUpdateCommand cmd) {
    return KeyUpdateCommandProto.newBuilder()
        .setKeys(PBHelper.convert(cmd.getExportedKeys())).build();
  }

  public static BlockRecoveryCommandProto convert(BlockRecoveryCommand cmd) {
    BlockRecoveryCommandProto.Builder builder = BlockRecoveryCommandProto
        .newBuilder();
    for (RecoveringBlock b : cmd.getRecoveringBlocks()) {
      builder.addBlocks(PBHelper.convert(b));
    }
    return builder.build();
  }

  public static FinalizeCommandProto convert(FinalizeCommand cmd) {
    return FinalizeCommandProto.newBuilder()
        .setBlockPoolId(cmd.getBlockPoolId()).build();
  }

  public static BlockCommandProto convert(BlockCommand cmd) {
    BlockCommandProto.Builder builder = BlockCommandProto.newBuilder()
        .setBlockPoolId(cmd.getBlockPoolId());
    switch (cmd.getAction()) {
    case DatanodeProtocol.DNA_TRANSFER:
      builder.setAction(BlockCommandProto.Action.TRANSFER);
      break;
    case DatanodeProtocol.DNA_INVALIDATE:
      builder.setAction(BlockCommandProto.Action.INVALIDATE);
      break;
    case DatanodeProtocol.DNA_SHUTDOWN:
      builder.setAction(BlockCommandProto.Action.SHUTDOWN);
      break;
    default:
      throw new AssertionError("Invalid action");
    }
    Block[] blocks = cmd.getBlocks();
    for (int i = 0; i < blocks.length; i++) {
      builder.addBlocks(PBHelper.convert(blocks[i]));
    }
    builder.addAllTargets(PBHelper.convert(cmd.getTargets()));
    return builder.build();
  }

  private static List<DatanodeInfosProto> convert(DatanodeInfo[][] targets) {
    DatanodeInfosProto[] ret = new DatanodeInfosProto[targets.length];
    for (int i = 0; i < targets.length; i++) {
      ret[i] = DatanodeInfosProto.newBuilder()
          .addAllDatanodes(PBHelper.convert(targets[i])).build();
    }
    return Arrays.asList(ret);
  }

  public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) {
    DatanodeCommandProto.Builder builder = DatanodeCommandProto.newBuilder();
    if (datanodeCommand == null) {
      return builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand)
          .build();
    }
    switch (datanodeCommand.getAction()) {
    case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
      builder.setCmdType(DatanodeCommandProto.Type.BalancerBandwidthCommand)
          .setBalancerCmd(
              PBHelper.convert((BalancerBandwidthCommand) datanodeCommand));
      break;
    case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
      builder
          .setCmdType(DatanodeCommandProto.Type.KeyUpdateCommand)
          .setKeyUpdateCmd(PBHelper.convert((KeyUpdateCommand) datanodeCommand));
      break;
    case DatanodeProtocol.DNA_RECOVERBLOCK:
      builder.setCmdType(DatanodeCommandProto.Type.BlockRecoveryCommand)
          .setRecoveryCmd(
              PBHelper.convert((BlockRecoveryCommand) datanodeCommand));
      break;
    case DatanodeProtocol.DNA_FINALIZE:
      builder.setCmdType(DatanodeCommandProto.Type.FinalizeCommand)
          .setFinalizeCmd(PBHelper.convert((FinalizeCommand) datanodeCommand));
      break;
    case DatanodeProtocol.DNA_REGISTER:
      builder.setCmdType(DatanodeCommandProto.Type.RegisterCommand)
          .setRegisterCmd(REG_CMD_PROTO);
      break;
    case DatanodeProtocol.DNA_TRANSFER:
    case DatanodeProtocol.DNA_INVALIDATE:
    case DatanodeProtocol.DNA_SHUTDOWN:
      builder.setCmdType(DatanodeCommandProto.Type.BlockCommand).setBlkCmd(
          PBHelper.convert((BlockCommand) datanodeCommand));
      break;
    case DatanodeProtocol.DNA_UNKNOWN: //Not expected
    default:
      builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
    }
    return builder.build();
  }

  public static KeyUpdateCommand convert(KeyUpdateCommandProto keyUpdateCmd) {
    return new KeyUpdateCommand(PBHelper.convert(keyUpdateCmd.getKeys()));
  }

  public static FinalizeCommand convert(FinalizeCommandProto finalizeCmd) {
    return new FinalizeCommand(finalizeCmd.getBlockPoolId());
  }

  public static BlockRecoveryCommand convert(
      BlockRecoveryCommandProto recoveryCmd) {
    List<RecoveringBlockProto> list = recoveryCmd.getBlocksList();
    List<RecoveringBlock> recoveringBlocks = new ArrayList<RecoveringBlock>(
        list.size());

    for (RecoveringBlockProto rbp : list) {
      recoveringBlocks.add(PBHelper.convert(rbp));
    }
    return new BlockRecoveryCommand(recoveringBlocks);
  }

  public static BlockCommand convert(BlockCommandProto blkCmd) {
    List<BlockProto> blockProtoList = blkCmd.getBlocksList();
    Block[] blocks = new Block[blockProtoList.size()];
    for (int i = 0; i < blockProtoList.size(); i++) {
      blocks[i] = PBHelper.convert(blockProtoList.get(i));
    }
    List<DatanodeInfosProto> targetList = blkCmd.getTargetsList();
    DatanodeInfo[][] targets = new DatanodeInfo[targetList.size()][];
    for (int i = 0; i < targetList.size(); i++) {
      targets[i] = PBHelper.convert(targetList.get(i));
    }
    int action = DatanodeProtocol.DNA_UNKNOWN;
    switch (blkCmd.getAction()) {
    case TRANSFER:
      action = DatanodeProtocol.DNA_TRANSFER;
      break;
    case INVALIDATE:
      action = DatanodeProtocol.DNA_INVALIDATE;
      break;
    case SHUTDOWN:
      action = DatanodeProtocol.DNA_SHUTDOWN;
      break;
    }
    return new BlockCommand(action, blkCmd.getBlockPoolId(), blocks, targets);
  }

  public static DatanodeInfo[] convert(DatanodeInfosProto datanodeInfosProto) {
    List<DatanodeInfoProto> proto = datanodeInfosProto.getDatanodesList();
    DatanodeInfo[] infos = new DatanodeInfo[proto.size()];
    for (int i = 0; i < infos.length; i++) {
      infos[i] = PBHelper.convert(proto.get(i));
    }
    return infos;
  }

  public static BalancerBandwidthCommand convert(
      BalancerBandwidthCommandProto balancerCmd) {
    return new BalancerBandwidthCommand(balancerCmd.getBandwidth());
  }

  public static ReceivedDeletedBlockInfoProto convert(
      ReceivedDeletedBlockInfo receivedDeletedBlockInfo) {
    ReceivedDeletedBlockInfoProto.Builder builder =
        ReceivedDeletedBlockInfoProto.newBuilder();

    ReceivedDeletedBlockInfoProto.BlockStatus status;
    switch (receivedDeletedBlockInfo.getStatus()) {
    case RECEIVING_BLOCK:
      status = ReceivedDeletedBlockInfoProto.BlockStatus.RECEIVING;
      break;
    case RECEIVED_BLOCK:
      status = ReceivedDeletedBlockInfoProto.BlockStatus.RECEIVED;
      break;
    case DELETED_BLOCK:
      status = ReceivedDeletedBlockInfoProto.BlockStatus.DELETED;
      break;
    default:
      throw new IllegalArgumentException("Bad status: " +
          receivedDeletedBlockInfo.getStatus());
    }
    builder.setStatus(status);

    if (receivedDeletedBlockInfo.getDelHints() != null) {
      builder.setDeleteHint(receivedDeletedBlockInfo.getDelHints());
    }
    return builder.setBlock(PBHelper.convert(receivedDeletedBlockInfo.getBlock()))
        .build();
  }

  public static ReceivedDeletedBlockInfo convert(
      ReceivedDeletedBlockInfoProto proto) {
    ReceivedDeletedBlockInfo.BlockStatus status = null;
    switch (proto.getStatus()) {
    case RECEIVING:
      status = BlockStatus.RECEIVING_BLOCK;
      break;
    case RECEIVED:
      status = BlockStatus.RECEIVED_BLOCK;
      break;
    case DELETED:
      status = BlockStatus.DELETED_BLOCK;
      break;
    }
    return new ReceivedDeletedBlockInfo(
        PBHelper.convert(proto.getBlock()),
        status,
        proto.hasDeleteHint() ? proto.getDeleteHint() : null);
  }

  public static NamespaceInfoProto convert(NamespaceInfo info) {
    return NamespaceInfoProto.newBuilder()
        .setBlockPoolID(info.getBlockPoolID())
        .setBuildVersion(info.getBuildVersion())
        .setUnused(0)
        .setStorageInfo(PBHelper.convert((StorageInfo)info))
        .setSoftwareVersion(info.getSoftwareVersion()).build();
  }

  // Located Block Arrays and Lists
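  // The list converters below come in pairs; the "2" suffix marks the
  // internal-to-protobuf direction (a plain overload is not possible here,
  // because List<LocatedBlock> and List<LocatedBlockProto> erase to the same
  // parameter type).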
  public static LocatedBlockProto[] convertLocatedBlock(LocatedBlock[] lb) {
    if (lb == null) return null;
    return convertLocatedBlock2(Arrays.asList(lb)).toArray(
        new LocatedBlockProto[lb.length]);
  }

  public static LocatedBlock[] convertLocatedBlock(LocatedBlockProto[] lb) {
    if (lb == null) return null;
    return convertLocatedBlock(Arrays.asList(lb)).toArray(
        new LocatedBlock[lb.length]);
  }

  public static List<LocatedBlock> convertLocatedBlock(
      List<LocatedBlockProto> lb) {
    if (lb == null) return null;
    final int len = lb.size();
    List<LocatedBlock> result =
        new ArrayList<LocatedBlock>(len);
    for (int i = 0; i < len; ++i) {
      result.add(PBHelper.convert(lb.get(i)));
    }
    return result;
  }

  public static List<LocatedBlockProto> convertLocatedBlock2(List<LocatedBlock> lb) {
    if (lb == null) return null;
    final int len = lb.size();
    List<LocatedBlockProto> result = new ArrayList<LocatedBlockProto>(len);
    for (int i = 0; i < len; ++i) {
      result.add(PBHelper.convert(lb.get(i)));
    }
    return result;
  }

  // LocatedBlocks
  public static LocatedBlocks convert(LocatedBlocksProto lb) {
    return new LocatedBlocks(
        lb.getFileLength(), lb.getUnderConstruction(),
        PBHelper.convertLocatedBlock(lb.getBlocksList()),
        lb.hasLastBlock() ? PBHelper.convert(lb.getLastBlock()) : null,
        lb.getIsLastBlockComplete());
  }

  public static LocatedBlocksProto convert(LocatedBlocks lb) {
    if (lb == null) {
      return null;
    }
    LocatedBlocksProto.Builder builder =
        LocatedBlocksProto.newBuilder();
    if (lb.getLastLocatedBlock() != null) {
      builder.setLastBlock(PBHelper.convert(lb.getLastLocatedBlock()));
    }
    return builder.setFileLength(lb.getFileLength())
        .setUnderConstruction(lb.isUnderConstruction())
        .addAllBlocks(PBHelper.convertLocatedBlock2(lb.getLocatedBlocks()))
        .setIsLastBlockComplete(lb.isLastBlockComplete()).build();
  }

  // DataEncryptionKey
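  // A null encryption algorithm is simply left unset when encoding; an empty
  // string on the wire is mapped back to null when decoding.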
  public static DataEncryptionKey convert(DataEncryptionKeyProto bet) {
    String encryptionAlgorithm = bet.getEncryptionAlgorithm();
    return new DataEncryptionKey(bet.getKeyId(),
        bet.getBlockPoolId(),
        bet.getNonce().toByteArray(),
        bet.getEncryptionKey().toByteArray(),
        bet.getExpiryDate(),
        encryptionAlgorithm.isEmpty() ? null : encryptionAlgorithm);
  }

  public static DataEncryptionKeyProto convert(DataEncryptionKey bet) {
    DataEncryptionKeyProto.Builder b = DataEncryptionKeyProto.newBuilder()
        .setKeyId(bet.keyId)
        .setBlockPoolId(bet.blockPoolId)
        .setNonce(ByteString.copyFrom(bet.nonce))
        .setEncryptionKey(ByteString.copyFrom(bet.encryptionKey))
        .setExpiryDate(bet.expiryDate);
    if (bet.encryptionAlgorithm != null) {
      b.setEncryptionAlgorithm(bet.encryptionAlgorithm);
    }
    return b.build();
  }

  public static FsServerDefaults convert(FsServerDefaultsProto fs) {
    if (fs == null) return null;
    return new FsServerDefaults(
        fs.getBlockSize(), fs.getBytesPerChecksum(),
        fs.getWritePacketSize(), (short) fs.getReplication(),
        fs.getFileBufferSize(),
        fs.getEncryptDataTransfer(),
        fs.getTrashInterval(),
        PBHelper.convert(fs.getChecksumType()));
  }

  public static FsServerDefaultsProto convert(FsServerDefaults fs) {
    if (fs == null) return null;
    return FsServerDefaultsProto.newBuilder().
        setBlockSize(fs.getBlockSize()).
        setBytesPerChecksum(fs.getBytesPerChecksum()).
        setWritePacketSize(fs.getWritePacketSize())
        .setReplication(fs.getReplication())
        .setFileBufferSize(fs.getFileBufferSize())
        .setEncryptDataTransfer(fs.getEncryptDataTransfer())
        .setTrashInterval(fs.getTrashInterval())
        .setChecksumType(PBHelper.convert(fs.getChecksumType()))
        .build();
  }

  public static FsPermissionProto convert(FsPermission p) {
    if (p == null) return null;
    return FsPermissionProto.newBuilder().setPerm(p.toShort()).build();
  }

  public static FsPermission convert(FsPermissionProto p) {
    if (p == null) return null;
    return new FsPermission((short)p.getPerm());
  }


  // The createFlag field in PB is a bitmask whose values are the same as the
  // enum values of CreateFlag.
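  // For example, an EnumSetWritable containing CREATE and OVERWRITE is encoded
  // as CreateFlagProto.CREATE_VALUE | CreateFlagProto.OVERWRITE_VALUE, and
  // convert(int) reverses that mapping.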
  public static int convertCreateFlag(EnumSetWritable<CreateFlag> flag) {
    int value = 0;
    if (flag.contains(CreateFlag.APPEND)) {
      value |= CreateFlagProto.APPEND.getNumber();
    }
    if (flag.contains(CreateFlag.CREATE)) {
      value |= CreateFlagProto.CREATE.getNumber();
    }
    if (flag.contains(CreateFlag.OVERWRITE)) {
      value |= CreateFlagProto.OVERWRITE.getNumber();
    }
    return value;
  }

  public static EnumSetWritable<CreateFlag> convert(int flag) {
    EnumSet<CreateFlag> result =
        EnumSet.noneOf(CreateFlag.class);
    if ((flag & CreateFlagProto.APPEND_VALUE) == CreateFlagProto.APPEND_VALUE) {
      result.add(CreateFlag.APPEND);
    }
    if ((flag & CreateFlagProto.CREATE_VALUE) == CreateFlagProto.CREATE_VALUE) {
      result.add(CreateFlag.CREATE);
    }
    if ((flag & CreateFlagProto.OVERWRITE_VALUE)
        == CreateFlagProto.OVERWRITE_VALUE) {
      result.add(CreateFlag.OVERWRITE);
    }
    return new EnumSetWritable<CreateFlag>(result);
  }

  public static HdfsFileStatus convert(HdfsFileStatusProto fs) {
    if (fs == null)
      return null;
    return new HdfsLocatedFileStatus(
        fs.getLength(), fs.getFileType().equals(FileType.IS_DIR),
        fs.getBlockReplication(), fs.getBlocksize(),
        fs.getModificationTime(), fs.getAccessTime(),
        PBHelper.convert(fs.getPermission()), fs.getOwner(), fs.getGroup(),
        fs.getFileType().equals(FileType.IS_SYMLINK) ?
            fs.getSymlink().toByteArray() : null,
        fs.getPath().toByteArray(),
        fs.hasFileId() ? fs.getFileId() : INodeId.GRANDFATHER_INODE_ID,
        fs.hasLocations() ? PBHelper.convert(fs.getLocations()) : null,
        fs.hasChildrenNum() ? fs.getChildrenNum() : 0);
  }

  public static SnapshottableDirectoryStatus convert(
      SnapshottableDirectoryStatusProto sdirStatusProto) {
    if (sdirStatusProto == null) {
      return null;
    }
    final HdfsFileStatusProto status = sdirStatusProto.getDirStatus();
    return new SnapshottableDirectoryStatus(
        status.getModificationTime(),
        status.getAccessTime(),
        PBHelper.convert(status.getPermission()),
        status.getOwner(),
        status.getGroup(),
        status.getPath().toByteArray(),
        status.getFileId(),
        status.getChildrenNum(),
        sdirStatusProto.getSnapshotNumber(),
        sdirStatusProto.getSnapshotQuota(),
        sdirStatusProto.getParentFullpath().toByteArray());
  }

  public static HdfsFileStatusProto convert(HdfsFileStatus fs) {
    if (fs == null)
      return null;
    FileType fType = FileType.IS_FILE;
    if (fs.isDir()) {
      fType = FileType.IS_DIR;
    } else if (fs.isSymlink()) {
      fType = FileType.IS_SYMLINK;
    }

    HdfsFileStatusProto.Builder builder =
        HdfsFileStatusProto.newBuilder().
        setLength(fs.getLen()).
        setFileType(fType).
        setBlockReplication(fs.getReplication()).
        setBlocksize(fs.getBlockSize()).
        setModificationTime(fs.getModificationTime()).
        setAccessTime(fs.getAccessTime()).
        setPermission(PBHelper.convert(fs.getPermission())).
        setOwner(fs.getOwner()).
        setGroup(fs.getGroup()).
        setFileId(fs.getFileId()).
        setChildrenNum(fs.getChildrenNum()).
        setPath(ByteString.copyFrom(fs.getLocalNameInBytes()));
    if (fs.isSymlink()) {
      builder.setSymlink(ByteString.copyFrom(fs.getSymlinkInBytes()));
    }
    if (fs instanceof HdfsLocatedFileStatus) {
      LocatedBlocks locations = ((HdfsLocatedFileStatus)fs).getBlockLocations();
      if (locations != null) {
        builder.setLocations(PBHelper.convert(locations));
      }
    }
    return builder.build();
  }

  public static SnapshottableDirectoryStatusProto convert(
      SnapshottableDirectoryStatus status) {
    if (status == null) {
      return null;
    }
    int snapshotNumber = status.getSnapshotNumber();
    int snapshotQuota = status.getSnapshotQuota();
    byte[] parentFullPath = status.getParentFullPath();
    ByteString parentFullPathBytes = ByteString.copyFrom(
        parentFullPath == null ? DFSUtil.EMPTY_BYTES : parentFullPath);
    HdfsFileStatusProto fs = convert(status.getDirStatus());
    SnapshottableDirectoryStatusProto.Builder builder =
        SnapshottableDirectoryStatusProto
        .newBuilder().setSnapshotNumber(snapshotNumber)
        .setSnapshotQuota(snapshotQuota).setParentFullpath(parentFullPathBytes)
        .setDirStatus(fs);
    return builder.build();
  }

  public static HdfsFileStatusProto[] convert(HdfsFileStatus[] fs) {
    if (fs == null) return null;
    final int len = fs.length;
    HdfsFileStatusProto[] result = new HdfsFileStatusProto[len];
    for (int i = 0; i < len; ++i) {
      result[i] = PBHelper.convert(fs[i]);
    }
    return result;
  }

  public static HdfsFileStatus[] convert(HdfsFileStatusProto[] fs) {
    if (fs == null) return null;
    final int len = fs.length;
    HdfsFileStatus[] result = new HdfsFileStatus[len];
    for (int i = 0; i < len; ++i) {
      result[i] = PBHelper.convert(fs[i]);
    }
    return result;
  }

  public static DirectoryListing convert(DirectoryListingProto dl) {
    if (dl == null)
      return null;
    List<HdfsFileStatusProto> partList = dl.getPartialListingList();
    return new DirectoryListing(
        partList.isEmpty() ? new HdfsLocatedFileStatus[0]
            : PBHelper.convert(
                partList.toArray(new HdfsFileStatusProto[partList.size()])),
        dl.getRemainingEntries());
  }

  public static DirectoryListingProto convert(DirectoryListing d) {
    if (d == null)
      return null;
    return DirectoryListingProto.newBuilder().
        addAllPartialListing(Arrays.asList(
            PBHelper.convert(d.getPartialListing()))).
        setRemainingEntries(d.getRemainingEntries()).
        build();
  }
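
  // File system statistics travel as a fixed-position long array on the
  // ClientProtocol side; the positions are the ClientProtocol.GET_STATS_*_IDX
  // constants used below.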
  public static long[] convert(GetFsStatsResponseProto res) {
    long[] result = new long[6];
    result[ClientProtocol.GET_STATS_CAPACITY_IDX] = res.getCapacity();
    result[ClientProtocol.GET_STATS_USED_IDX] = res.getUsed();
    result[ClientProtocol.GET_STATS_REMAINING_IDX] = res.getRemaining();
    result[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX] = res.getUnderReplicated();
    result[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX] = res.getCorruptBlocks();
    result[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX] = res.getMissingBlocks();
    return result;
  }

  public static GetFsStatsResponseProto convert(long[] fsStats) {
    GetFsStatsResponseProto.Builder result = GetFsStatsResponseProto
        .newBuilder();
    if (fsStats.length >= ClientProtocol.GET_STATS_CAPACITY_IDX + 1)
      result.setCapacity(fsStats[ClientProtocol.GET_STATS_CAPACITY_IDX]);
    if (fsStats.length >= ClientProtocol.GET_STATS_USED_IDX + 1)
      result.setUsed(fsStats[ClientProtocol.GET_STATS_USED_IDX]);
    if (fsStats.length >= ClientProtocol.GET_STATS_REMAINING_IDX + 1)
      result.setRemaining(fsStats[ClientProtocol.GET_STATS_REMAINING_IDX]);
    if (fsStats.length >= ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX + 1)
      result.setUnderReplicated(
          fsStats[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX]);
    if (fsStats.length >= ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX + 1)
      result.setCorruptBlocks(
          fsStats[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX]);
    if (fsStats.length >= ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX + 1)
      result.setMissingBlocks(
          fsStats[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX]);
    return result.build();
  }

  public static DatanodeReportTypeProto convert(DatanodeReportType t) {
    switch (t) {
    case ALL: return DatanodeReportTypeProto.ALL;
    case LIVE: return DatanodeReportTypeProto.LIVE;
    case DEAD: return DatanodeReportTypeProto.DEAD;
    default:
      throw new IllegalArgumentException("Unexpected data type report:" + t);
    }
  }

  public static DatanodeReportType convert(DatanodeReportTypeProto t) {
    switch (t) {
    case ALL: return DatanodeReportType.ALL;
    case LIVE: return DatanodeReportType.LIVE;
    case DEAD: return DatanodeReportType.DEAD;
    default:
      throw new IllegalArgumentException("Unexpected data type report:" + t);
    }
  }

  public static SafeModeActionProto convert(
      SafeModeAction a) {
    switch (a) {
    case SAFEMODE_LEAVE:
      return SafeModeActionProto.SAFEMODE_LEAVE;
    case SAFEMODE_ENTER:
      return SafeModeActionProto.SAFEMODE_ENTER;
    case SAFEMODE_GET:
      return SafeModeActionProto.SAFEMODE_GET;
    default:
      throw new IllegalArgumentException("Unexpected SafeModeAction :" + a);
    }
  }

  public static SafeModeAction convert(
      ClientNamenodeProtocolProtos.SafeModeActionProto a) {
    switch (a) {
    case SAFEMODE_LEAVE:
      return SafeModeAction.SAFEMODE_LEAVE;
    case SAFEMODE_ENTER:
      return SafeModeAction.SAFEMODE_ENTER;
    case SAFEMODE_GET:
      return SafeModeAction.SAFEMODE_GET;
    default:
      throw new IllegalArgumentException("Unexpected SafeModeAction :" + a);
    }
  }

  public static CorruptFileBlocks convert(CorruptFileBlocksProto c) {
    if (c == null)
      return null;
    List<String> fileList = c.getFilesList();
    return new CorruptFileBlocks(fileList.toArray(new String[fileList.size()]),
        c.getCookie());
  }

  public static CorruptFileBlocksProto convert(CorruptFileBlocks c) {
    if (c == null)
      return null;
    return CorruptFileBlocksProto.newBuilder().
        addAllFiles(Arrays.asList(c.getFiles())).
        setCookie(c.getCookie()).
        build();
  }

  public static ContentSummary convert(ContentSummaryProto cs) {
    if (cs == null) return null;
    return new ContentSummary(
        cs.getLength(), cs.getFileCount(), cs.getDirectoryCount(), cs.getQuota(),
        cs.getSpaceConsumed(), cs.getSpaceQuota());
  }

  public static ContentSummaryProto convert(ContentSummary cs) {
    if (cs == null) return null;
    return ContentSummaryProto.newBuilder().
        setLength(cs.getLength()).
        setFileCount(cs.getFileCount()).
        setDirectoryCount(cs.getDirectoryCount()).
        setQuota(cs.getQuota()).
        setSpaceConsumed(cs.getSpaceConsumed()).
        setSpaceQuota(cs.getSpaceQuota()).
        build();
  }

  public static NNHAStatusHeartbeat convert(NNHAStatusHeartbeatProto s) {
    if (s == null) return null;
    switch (s.getState()) {
    case ACTIVE:
      return new NNHAStatusHeartbeat(HAServiceState.ACTIVE, s.getTxid());
    case STANDBY:
      return new NNHAStatusHeartbeat(HAServiceState.STANDBY, s.getTxid());
    default:
      throw new IllegalArgumentException("Unexpected NNHAStatusHeartbeat.State:" +
          s.getState());
    }
  }

  public static NNHAStatusHeartbeatProto convert(NNHAStatusHeartbeat hb) {
    if (hb == null) return null;
    NNHAStatusHeartbeatProto.Builder builder =
        NNHAStatusHeartbeatProto.newBuilder();
    switch (hb.getState()) {
    case ACTIVE:
      builder.setState(NNHAStatusHeartbeatProto.State.ACTIVE);
      break;
    case STANDBY:
      builder.setState(NNHAStatusHeartbeatProto.State.STANDBY);
      break;
    default:
      throw new IllegalArgumentException("Unexpected NNHAStatusHeartbeat.State:" +
          hb.getState());
    }
    builder.setTxid(hb.getTxId());
    return builder.build();
  }

  public static DatanodeStorageProto convert(DatanodeStorage s) {
    return DatanodeStorageProto.newBuilder()
        .setState(PBHelper.convert(s.getState()))
        .setStorageID(s.getStorageID()).build();
  }

  private static StorageState convert(State state) {
    switch(state) {
    case READ_ONLY:
      return StorageState.READ_ONLY;
    case NORMAL:
    default:
      return StorageState.NORMAL;
    }
  }

  public static DatanodeStorage convert(DatanodeStorageProto s) {
    return new DatanodeStorage(s.getStorageID(), PBHelper.convert(s.getState()));
  }

  private static State convert(StorageState state) {
    switch(state) {
    case READ_ONLY:
      return DatanodeStorage.State.READ_ONLY;
    case NORMAL:
    default:
      return DatanodeStorage.State.NORMAL;
    }
  }

  public static StorageReportProto convert(StorageReport r) {
    return StorageReportProto.newBuilder()
        .setBlockPoolUsed(r.getBlockPoolUsed()).setCapacity(r.getCapacity())
        .setDfsUsed(r.getDfsUsed()).setRemaining(r.getRemaining())
        .setStorageID(r.getStorageID()).build();
  }

  public static JournalInfo convert(JournalInfoProto info) {
    int lv = info.hasLayoutVersion() ? info.getLayoutVersion() : 0;
    int nsID = info.hasNamespaceID() ? info.getNamespaceID() : 0;
    return new JournalInfo(lv, info.getClusterID(), nsID);
  }

  /**
   * Method used for converting a {@link JournalInfo} into the
   * {@link JournalInfoProto} sent from the Namenode to journal receivers.
   */
  public static JournalInfoProto convert(JournalInfo j) {
    return JournalInfoProto.newBuilder().setClusterID(j.getClusterId())
        .setLayoutVersion(j.getLayoutVersion())
        .setNamespaceID(j.getNamespaceId()).build();
  }

  public static SnapshottableDirectoryStatus[] convert(
      SnapshottableDirectoryListingProto sdlp) {
    if (sdlp == null)
      return null;
    List<SnapshottableDirectoryStatusProto> list = sdlp
        .getSnapshottableDirListingList();
    if (list.isEmpty()) {
      return new SnapshottableDirectoryStatus[0];
    } else {
      SnapshottableDirectoryStatus[] result =
          new SnapshottableDirectoryStatus[list.size()];
      for (int i = 0; i < list.size(); i++) {
        result[i] = PBHelper.convert(list.get(i));
      }
      return result;
    }
  }

  public static SnapshottableDirectoryListingProto convert(
      SnapshottableDirectoryStatus[] status) {
    if (status == null)
      return null;
    SnapshottableDirectoryStatusProto[] protos =
        new SnapshottableDirectoryStatusProto[status.length];
    for (int i = 0; i < status.length; i++) {
      protos[i] = PBHelper.convert(status[i]);
    }
    List<SnapshottableDirectoryStatusProto> protoList = Arrays.asList(protos);
    return SnapshottableDirectoryListingProto.newBuilder()
        .addAllSnapshottableDirListing(protoList).build();
  }

  public static DiffReportEntry convert(SnapshotDiffReportEntryProto entry) {
    if (entry == null) {
      return null;
    }
    DiffType type = DiffType.getTypeFromLabel(entry
        .getModificationLabel());
    return type == null ? null :
        new DiffReportEntry(type, entry.getFullpath().toByteArray());
  }

  public static SnapshotDiffReportEntryProto convert(DiffReportEntry entry) {
    if (entry == null) {
      return null;
    }
    byte[] fullPath = entry.getRelativePath();
    ByteString fullPathString = ByteString
        .copyFrom(fullPath == null ? DFSUtil.EMPTY_BYTES : fullPath);

    String modification = entry.getType().getLabel();

    SnapshotDiffReportEntryProto entryProto = SnapshotDiffReportEntryProto
        .newBuilder().setFullpath(fullPathString)
        .setModificationLabel(modification).build();
    return entryProto;
  }

  public static SnapshotDiffReport convert(SnapshotDiffReportProto reportProto) {
    if (reportProto == null) {
      return null;
    }
    String snapshotDir = reportProto.getSnapshotRoot();
    String fromSnapshot = reportProto.getFromSnapshot();
    String toSnapshot = reportProto.getToSnapshot();
    List<SnapshotDiffReportEntryProto> list = reportProto
        .getDiffReportEntriesList();
    List<DiffReportEntry> entries = new ArrayList<DiffReportEntry>();
    for (SnapshotDiffReportEntryProto entryProto : list) {
      DiffReportEntry entry = convert(entryProto);
      if (entry != null)
        entries.add(entry);
    }
    return new SnapshotDiffReport(snapshotDir, fromSnapshot, toSnapshot,
        entries);
  }

  public static SnapshotDiffReportProto convert(SnapshotDiffReport report) {
    if (report == null) {
      return null;
    }
    List<DiffReportEntry> entries = report.getDiffList();
    List<SnapshotDiffReportEntryProto> entryProtos =
        new ArrayList<SnapshotDiffReportEntryProto>();
    for (DiffReportEntry entry : entries) {
      SnapshotDiffReportEntryProto entryProto = convert(entry);
      if (entryProto != null)
        entryProtos.add(entryProto);
    }

    SnapshotDiffReportProto reportProto = SnapshotDiffReportProto.newBuilder()
        .setSnapshotRoot(report.getSnapshotRoot())
        .setFromSnapshot(report.getFromSnapshot())
        .setToSnapshot(report.getLaterSnapshotName())
        .addAllDiffReportEntries(entryProtos).build();
    return reportProto;
  }

  public static DataChecksum.Type convert(HdfsProtos.ChecksumTypeProto type) {
    return DataChecksum.Type.valueOf(type.getNumber());
  }

  public static HdfsProtos.ChecksumTypeProto convert(DataChecksum.Type type) {
    return HdfsProtos.ChecksumTypeProto.valueOf(type.id);
  }

  public static InputStream vintPrefixed(final InputStream input)
      throws IOException {
    final int firstByte = input.read();
    if (firstByte == -1) {
      throw new EOFException("Premature EOF: no length prefix available");
    }

    int size = CodedInputStream.readRawVarint32(firstByte, input);
    assert size >= 0;
    return new ExactSizeInputStream(input, size);
  }
}