001 /** 002 * Licensed to the Apache Software Foundation (ASF) under one 003 * or more contributor license agreements. See the NOTICE file 004 * distributed with this work for additional information 005 * regarding copyright ownership. The ASF licenses this file 006 * to you under the Apache License, Version 2.0 (the 007 * "License"); you may not use this file except in compliance 008 * with the License. You may obtain a copy of the License at 009 * 010 * http://www.apache.org/licenses/LICENSE-2.0 011 * 012 * Unless required by applicable law or agreed to in writing, software 013 * distributed under the License is distributed on an "AS IS" BASIS, 014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 015 * See the License for the specific language governing permissions and 016 * limitations under the License. 017 */ 018 package org.apache.hadoop.hdfs.protocolPB; 019 020 import java.io.EOFException; 021 import java.io.IOException; 022 import java.io.InputStream; 023 import java.util.ArrayList; 024 import java.util.Arrays; 025 import java.util.EnumSet; 026 import java.util.List; 027 028 import org.apache.hadoop.fs.ContentSummary; 029 import org.apache.hadoop.fs.CreateFlag; 030 import org.apache.hadoop.fs.FsServerDefaults; 031 import org.apache.hadoop.fs.permission.FsPermission; 032 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; 033 import org.apache.hadoop.hdfs.DFSUtil; 034 import org.apache.hadoop.hdfs.protocol.Block; 035 import org.apache.hadoop.hdfs.protocol.ClientProtocol; 036 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; 037 import org.apache.hadoop.hdfs.protocol.DatanodeID; 038 import org.apache.hadoop.hdfs.protocol.DatanodeInfo; 039 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; 040 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; 041 import org.apache.hadoop.hdfs.protocol.DirectoryListing; 042 import org.apache.hadoop.hdfs.protocol.ExtendedBlock; 043 import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; 044 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; 045 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry; 046 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType; 047 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; 048 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus; 049 import org.apache.hadoop.hdfs.protocol.LocatedBlock; 050 import org.apache.hadoop.hdfs.protocol.LocatedBlocks; 051 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; 052 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos; 053 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateFlagProto; 054 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto; 055 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto; 056 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeActionProto; 057 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto; 058 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto; 059 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto; 060 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto; 061 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto; 062 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto; 063 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.StorageState; 064 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto; 065 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto; 066 import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto; 067 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto; 068 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto; 069 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto; 070 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos; 071 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto; 072 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto; 073 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto; 074 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto; 075 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto; 076 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto; 077 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto; 078 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto; 079 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto; 080 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto; 081 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto; 082 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState; 083 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto; 084 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto; 085 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto; 086 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto; 087 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto; 088 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto; 089 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto; 090 
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType; 091 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto; 092 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder; 093 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto; 094 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto; 095 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto; 096 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto; 097 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto; 098 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto; 099 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto; 100 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto; 101 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto; 102 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto; 103 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto; 104 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto; 105 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto; 106 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto; 107 import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto; 108 import org.apache.hadoop.hdfs.security.token.block.BlockKey; 109 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; 110 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; 111 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; 112 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; 113 import 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; 114 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; 115 import org.apache.hadoop.hdfs.server.common.StorageInfo; 116 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature; 117 import org.apache.hadoop.hdfs.server.namenode.INodeId; 118 import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand; 119 import org.apache.hadoop.hdfs.server.protocol.BlockCommand; 120 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand; 121 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; 122 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; 123 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; 124 import org.apache.hadoop.hdfs.server.protocol.CheckpointCommand; 125 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; 126 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; 127 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; 128 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; 129 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State; 130 import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand; 131 import org.apache.hadoop.hdfs.server.protocol.JournalInfo; 132 import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand; 133 import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat; 134 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand; 135 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration; 136 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; 137 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo; 138 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus; 139 import org.apache.hadoop.hdfs.server.protocol.RegisterCommand; 140 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; 141 
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; 142 import org.apache.hadoop.hdfs.server.protocol.StorageReport; 143 import org.apache.hadoop.hdfs.util.ExactSizeInputStream; 144 import org.apache.hadoop.io.EnumSetWritable; 145 import org.apache.hadoop.io.Text; 146 import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; 147 import org.apache.hadoop.security.token.Token; 148 import org.apache.hadoop.util.DataChecksum; 149 150 import com.google.common.collect.Lists; 151 import com.google.protobuf.ByteString; 152 import com.google.protobuf.CodedInputStream; 153 154 /** 155 * Utilities for converting protobuf classes to and from implementation classes 156 * and other helper utilities to help in dealing with protobuf. 157 * 158 * Note that when converting from an internal type to protobuf type, the 159 * converter never return null for protobuf type. The check for internal type 160 * being null must be done before calling the convert() method. 161 */ 162 public class PBHelper { 163 private static final RegisterCommandProto REG_CMD_PROTO = 164 RegisterCommandProto.newBuilder().build(); 165 private static final RegisterCommand REG_CMD = new RegisterCommand(); 166 167 private PBHelper() { 168 /** Hidden constructor */ 169 } 170 171 public static ByteString getByteString(byte[] bytes) { 172 return ByteString.copyFrom(bytes); 173 } 174 175 public static NamenodeRole convert(NamenodeRoleProto role) { 176 switch (role) { 177 case NAMENODE: 178 return NamenodeRole.NAMENODE; 179 case BACKUP: 180 return NamenodeRole.BACKUP; 181 case CHECKPOINT: 182 return NamenodeRole.CHECKPOINT; 183 } 184 return null; 185 } 186 187 public static NamenodeRoleProto convert(NamenodeRole role) { 188 switch (role) { 189 case NAMENODE: 190 return NamenodeRoleProto.NAMENODE; 191 case BACKUP: 192 return NamenodeRoleProto.BACKUP; 193 case CHECKPOINT: 194 return NamenodeRoleProto.CHECKPOINT; 195 } 196 return null; 197 } 198 199 public static StorageInfoProto 
convert(StorageInfo info) { 200 return StorageInfoProto.newBuilder().setClusterID(info.getClusterID()) 201 .setCTime(info.getCTime()).setLayoutVersion(info.getLayoutVersion()) 202 .setNamespceID(info.getNamespaceID()).build(); 203 } 204 205 public static StorageInfo convert(StorageInfoProto info) { 206 return new StorageInfo(info.getLayoutVersion(), info.getNamespceID(), 207 info.getClusterID(), info.getCTime()); 208 } 209 210 public static NamenodeRegistrationProto convert(NamenodeRegistration reg) { 211 return NamenodeRegistrationProto.newBuilder() 212 .setHttpAddress(reg.getHttpAddress()).setRole(convert(reg.getRole())) 213 .setRpcAddress(reg.getAddress()) 214 .setStorageInfo(convert((StorageInfo) reg)).build(); 215 } 216 217 public static NamenodeRegistration convert(NamenodeRegistrationProto reg) { 218 return new NamenodeRegistration(reg.getRpcAddress(), reg.getHttpAddress(), 219 convert(reg.getStorageInfo()), convert(reg.getRole())); 220 } 221 222 // DatanodeId 223 public static DatanodeID convert(DatanodeIDProto dn) { 224 return new DatanodeID(dn.getIpAddr(), dn.getHostName(), dn.getStorageID(), 225 dn.getXferPort(), dn.getInfoPort(), dn.hasInfoSecurePort() ? 
dn 226 .getInfoSecurePort() : 0, dn.getIpcPort()); 227 } 228 229 public static DatanodeIDProto convert(DatanodeID dn) { 230 return DatanodeIDProto.newBuilder() 231 .setIpAddr(dn.getIpAddr()) 232 .setHostName(dn.getHostName()) 233 .setStorageID(dn.getStorageID()) 234 .setXferPort(dn.getXferPort()) 235 .setInfoPort(dn.getInfoPort()) 236 .setInfoSecurePort(dn.getInfoSecurePort()) 237 .setIpcPort(dn.getIpcPort()).build(); 238 } 239 240 // Arrays of DatanodeId 241 public static DatanodeIDProto[] convert(DatanodeID[] did) { 242 if (did == null) 243 return null; 244 final int len = did.length; 245 DatanodeIDProto[] result = new DatanodeIDProto[len]; 246 for (int i = 0; i < len; ++i) { 247 result[i] = convert(did[i]); 248 } 249 return result; 250 } 251 252 public static DatanodeID[] convert(DatanodeIDProto[] did) { 253 if (did == null) return null; 254 final int len = did.length; 255 DatanodeID[] result = new DatanodeID[len]; 256 for (int i = 0; i < len; ++i) { 257 result[i] = convert(did[i]); 258 } 259 return result; 260 } 261 262 // Block 263 public static BlockProto convert(Block b) { 264 return BlockProto.newBuilder().setBlockId(b.getBlockId()) 265 .setGenStamp(b.getGenerationStamp()).setNumBytes(b.getNumBytes()) 266 .build(); 267 } 268 269 public static Block convert(BlockProto b) { 270 return new Block(b.getBlockId(), b.getNumBytes(), b.getGenStamp()); 271 } 272 273 public static BlockWithLocationsProto convert(BlockWithLocations blk) { 274 return BlockWithLocationsProto.newBuilder() 275 .setBlock(convert(blk.getBlock())) 276 .addAllStorageIDs(Arrays.asList(blk.getStorageIDs())).build(); 277 } 278 279 public static BlockWithLocations convert(BlockWithLocationsProto b) { 280 return new BlockWithLocations(convert(b.getBlock()), b.getStorageIDsList() 281 .toArray(new String[0])); 282 } 283 284 public static BlocksWithLocationsProto convert(BlocksWithLocations blks) { 285 BlocksWithLocationsProto.Builder builder = BlocksWithLocationsProto 286 .newBuilder(); 287 for 
(BlockWithLocations b : blks.getBlocks()) { 288 builder.addBlocks(convert(b)); 289 } 290 return builder.build(); 291 } 292 293 public static BlocksWithLocations convert(BlocksWithLocationsProto blocks) { 294 List<BlockWithLocationsProto> b = blocks.getBlocksList(); 295 BlockWithLocations[] ret = new BlockWithLocations[b.size()]; 296 int i = 0; 297 for (BlockWithLocationsProto entry : b) { 298 ret[i++] = convert(entry); 299 } 300 return new BlocksWithLocations(ret); 301 } 302 303 public static BlockKeyProto convert(BlockKey key) { 304 byte[] encodedKey = key.getEncodedKey(); 305 ByteString keyBytes = ByteString.copyFrom(encodedKey == null ? 306 DFSUtil.EMPTY_BYTES : encodedKey); 307 return BlockKeyProto.newBuilder().setKeyId(key.getKeyId()) 308 .setKeyBytes(keyBytes).setExpiryDate(key.getExpiryDate()).build(); 309 } 310 311 public static BlockKey convert(BlockKeyProto k) { 312 return new BlockKey(k.getKeyId(), k.getExpiryDate(), k.getKeyBytes() 313 .toByteArray()); 314 } 315 316 public static ExportedBlockKeysProto convert(ExportedBlockKeys keys) { 317 ExportedBlockKeysProto.Builder builder = ExportedBlockKeysProto 318 .newBuilder(); 319 builder.setIsBlockTokenEnabled(keys.isBlockTokenEnabled()) 320 .setKeyUpdateInterval(keys.getKeyUpdateInterval()) 321 .setTokenLifeTime(keys.getTokenLifetime()) 322 .setCurrentKey(convert(keys.getCurrentKey())); 323 for (BlockKey k : keys.getAllKeys()) { 324 builder.addAllKeys(convert(k)); 325 } 326 return builder.build(); 327 } 328 329 public static ExportedBlockKeys convert(ExportedBlockKeysProto keys) { 330 return new ExportedBlockKeys(keys.getIsBlockTokenEnabled(), 331 keys.getKeyUpdateInterval(), keys.getTokenLifeTime(), 332 convert(keys.getCurrentKey()), convertBlockKeys(keys.getAllKeysList())); 333 } 334 335 public static CheckpointSignatureProto convert(CheckpointSignature s) { 336 return CheckpointSignatureProto.newBuilder() 337 .setBlockPoolId(s.getBlockpoolID()) 338 .setCurSegmentTxId(s.getCurSegmentTxId()) 339 
.setMostRecentCheckpointTxId(s.getMostRecentCheckpointTxId()) 340 .setStorageInfo(PBHelper.convert((StorageInfo) s)).build(); 341 } 342 343 public static CheckpointSignature convert(CheckpointSignatureProto s) { 344 return new CheckpointSignature(PBHelper.convert(s.getStorageInfo()), 345 s.getBlockPoolId(), s.getMostRecentCheckpointTxId(), 346 s.getCurSegmentTxId()); 347 } 348 349 public static RemoteEditLogProto convert(RemoteEditLog log) { 350 return RemoteEditLogProto.newBuilder() 351 .setStartTxId(log.getStartTxId()) 352 .setEndTxId(log.getEndTxId()) 353 .setIsInProgress(log.isInProgress()).build(); 354 } 355 356 public static RemoteEditLog convert(RemoteEditLogProto l) { 357 return new RemoteEditLog(l.getStartTxId(), l.getEndTxId(), 358 l.getIsInProgress()); 359 } 360 361 public static RemoteEditLogManifestProto convert( 362 RemoteEditLogManifest manifest) { 363 RemoteEditLogManifestProto.Builder builder = RemoteEditLogManifestProto 364 .newBuilder(); 365 for (RemoteEditLog log : manifest.getLogs()) { 366 builder.addLogs(convert(log)); 367 } 368 return builder.build(); 369 } 370 371 public static RemoteEditLogManifest convert( 372 RemoteEditLogManifestProto manifest) { 373 List<RemoteEditLog> logs = new ArrayList<RemoteEditLog>(manifest 374 .getLogsList().size()); 375 for (RemoteEditLogProto l : manifest.getLogsList()) { 376 logs.add(convert(l)); 377 } 378 return new RemoteEditLogManifest(logs); 379 } 380 381 public static CheckpointCommandProto convert(CheckpointCommand cmd) { 382 return CheckpointCommandProto.newBuilder() 383 .setSignature(convert(cmd.getSignature())) 384 .setNeedToReturnImage(cmd.needToReturnImage()).build(); 385 } 386 387 public static NamenodeCommandProto convert(NamenodeCommand cmd) { 388 if (cmd instanceof CheckpointCommand) { 389 return NamenodeCommandProto.newBuilder().setAction(cmd.getAction()) 390 .setType(NamenodeCommandProto.Type.CheckPointCommand) 391 .setCheckpointCmd(convert((CheckpointCommand) cmd)).build(); 392 } 393 return 
NamenodeCommandProto.newBuilder() 394 .setType(NamenodeCommandProto.Type.NamenodeCommand) 395 .setAction(cmd.getAction()).build(); 396 } 397 398 public static BlockKey[] convertBlockKeys(List<BlockKeyProto> list) { 399 BlockKey[] ret = new BlockKey[list.size()]; 400 int i = 0; 401 for (BlockKeyProto k : list) { 402 ret[i++] = convert(k); 403 } 404 return ret; 405 } 406 407 public static NamespaceInfo convert(NamespaceInfoProto info) { 408 StorageInfoProto storage = info.getStorageInfo(); 409 return new NamespaceInfo(storage.getNamespceID(), storage.getClusterID(), 410 info.getBlockPoolID(), storage.getCTime(), info.getBuildVersion(), 411 info.getSoftwareVersion()); 412 } 413 414 public static NamenodeCommand convert(NamenodeCommandProto cmd) { 415 if (cmd == null) return null; 416 switch (cmd.getType()) { 417 case CheckPointCommand: 418 CheckpointCommandProto chkPt = cmd.getCheckpointCmd(); 419 return new CheckpointCommand(PBHelper.convert(chkPt.getSignature()), 420 chkPt.getNeedToReturnImage()); 421 default: 422 return new NamenodeCommand(cmd.getAction()); 423 } 424 } 425 426 public static ExtendedBlock convert(ExtendedBlockProto eb) { 427 if (eb == null) return null; 428 return new ExtendedBlock( eb.getPoolId(), eb.getBlockId(), eb.getNumBytes(), 429 eb.getGenerationStamp()); 430 } 431 432 public static ExtendedBlockProto convert(final ExtendedBlock b) { 433 if (b == null) return null; 434 return ExtendedBlockProto.newBuilder(). 435 setPoolId(b.getBlockPoolId()). 436 setBlockId(b.getBlockId()). 437 setNumBytes(b.getNumBytes()). 438 setGenerationStamp(b.getGenerationStamp()). 
439 build(); 440 } 441 442 public static RecoveringBlockProto convert(RecoveringBlock b) { 443 if (b == null) { 444 return null; 445 } 446 LocatedBlockProto lb = PBHelper.convert((LocatedBlock)b); 447 return RecoveringBlockProto.newBuilder().setBlock(lb) 448 .setNewGenStamp(b.getNewGenerationStamp()).build(); 449 } 450 451 public static RecoveringBlock convert(RecoveringBlockProto b) { 452 ExtendedBlock block = convert(b.getBlock().getB()); 453 DatanodeInfo[] locs = convert(b.getBlock().getLocsList()); 454 return new RecoveringBlock(block, locs, b.getNewGenStamp()); 455 } 456 457 public static DatanodeInfoProto.AdminState convert( 458 final DatanodeInfo.AdminStates inAs) { 459 switch (inAs) { 460 case NORMAL: return DatanodeInfoProto.AdminState.NORMAL; 461 case DECOMMISSION_INPROGRESS: 462 return DatanodeInfoProto.AdminState.DECOMMISSION_INPROGRESS; 463 case DECOMMISSIONED: return DatanodeInfoProto.AdminState.DECOMMISSIONED; 464 default: return DatanodeInfoProto.AdminState.NORMAL; 465 } 466 } 467 468 static public DatanodeInfo convert(DatanodeInfoProto di) { 469 if (di == null) return null; 470 return new DatanodeInfo( 471 PBHelper.convert(di.getId()), 472 di.hasLocation() ? di.getLocation() : null , 473 di.getCapacity(), di.getDfsUsed(), di.getRemaining(), 474 di.getBlockPoolUsed() , di.getLastUpdate() , di.getXceiverCount() , 475 PBHelper.convert(di.getAdminState())); 476 } 477 478 static public DatanodeInfoProto convertDatanodeInfo(DatanodeInfo di) { 479 if (di == null) return null; 480 DatanodeInfoProto.Builder builder = DatanodeInfoProto.newBuilder(); 481 if (di.getNetworkLocation() != null) { 482 builder.setLocation(di.getNetworkLocation()); 483 } 484 485 return builder. 486 setId(PBHelper.convert((DatanodeID) di)). 487 setCapacity(di.getCapacity()). 488 setDfsUsed(di.getDfsUsed()). 489 setRemaining(di.getRemaining()). 490 setBlockPoolUsed(di.getBlockPoolUsed()). 491 setLastUpdate(di.getLastUpdate()). 492 setXceiverCount(di.getXceiverCount()). 
493 setAdminState(PBHelper.convert(di.getAdminState())). 494 build(); 495 } 496 497 498 static public DatanodeInfo[] convert(DatanodeInfoProto di[]) { 499 if (di == null) return null; 500 DatanodeInfo[] result = new DatanodeInfo[di.length]; 501 for (int i = 0; i < di.length; i++) { 502 result[i] = convert(di[i]); 503 } 504 return result; 505 } 506 507 public static List<? extends HdfsProtos.DatanodeInfoProto> convert( 508 DatanodeInfo[] dnInfos) { 509 return convert(dnInfos, 0); 510 } 511 512 /** 513 * Copy from {@code dnInfos} to a target of list of same size starting at 514 * {@code startIdx}. 515 */ 516 public static List<? extends HdfsProtos.DatanodeInfoProto> convert( 517 DatanodeInfo[] dnInfos, int startIdx) { 518 if (dnInfos == null) 519 return null; 520 ArrayList<HdfsProtos.DatanodeInfoProto> protos = Lists 521 .newArrayListWithCapacity(dnInfos.length); 522 for (int i = startIdx; i < dnInfos.length; i++) { 523 protos.add(convert(dnInfos[i])); 524 } 525 return protos; 526 } 527 528 public static DatanodeInfo[] convert(List<DatanodeInfoProto> list) { 529 DatanodeInfo[] info = new DatanodeInfo[list.size()]; 530 for (int i = 0; i < info.length; i++) { 531 info[i] = convert(list.get(i)); 532 } 533 return info; 534 } 535 536 public static DatanodeInfoProto convert(DatanodeInfo info) { 537 DatanodeInfoProto.Builder builder = DatanodeInfoProto.newBuilder(); 538 builder.setBlockPoolUsed(info.getBlockPoolUsed()); 539 builder.setAdminState(PBHelper.convert(info.getAdminState())); 540 builder.setCapacity(info.getCapacity()) 541 .setDfsUsed(info.getDfsUsed()) 542 .setId(PBHelper.convert((DatanodeID)info)) 543 .setLastUpdate(info.getLastUpdate()) 544 .setLocation(info.getNetworkLocation()) 545 .setRemaining(info.getRemaining()) 546 .setXceiverCount(info.getXceiverCount()) 547 .build(); 548 return builder.build(); 549 } 550 551 public static AdminStates convert(AdminState adminState) { 552 switch(adminState) { 553 case DECOMMISSION_INPROGRESS: 554 return 
AdminStates.DECOMMISSION_INPROGRESS; 555 case DECOMMISSIONED: 556 return AdminStates.DECOMMISSIONED; 557 case NORMAL: 558 default: 559 return AdminStates.NORMAL; 560 } 561 } 562 563 public static LocatedBlockProto convert(LocatedBlock b) { 564 if (b == null) return null; 565 Builder builder = LocatedBlockProto.newBuilder(); 566 DatanodeInfo[] locs = b.getLocations(); 567 for (int i = 0; i < locs.length; i++) { 568 builder.addLocs(i, PBHelper.convert(locs[i])); 569 } 570 return builder.setB(PBHelper.convert(b.getBlock())) 571 .setBlockToken(PBHelper.convert(b.getBlockToken())) 572 .setCorrupt(b.isCorrupt()).setOffset(b.getStartOffset()).build(); 573 } 574 575 public static LocatedBlock convert(LocatedBlockProto proto) { 576 if (proto == null) return null; 577 List<DatanodeInfoProto> locs = proto.getLocsList(); 578 DatanodeInfo[] targets = new DatanodeInfo[locs.size()]; 579 for (int i = 0; i < locs.size(); i++) { 580 targets[i] = PBHelper.convert(locs.get(i)); 581 } 582 LocatedBlock lb = new LocatedBlock(PBHelper.convert(proto.getB()), targets, 583 proto.getOffset(), proto.getCorrupt()); 584 lb.setBlockToken(PBHelper.convert(proto.getBlockToken())); 585 return lb; 586 } 587 588 public static TokenProto convert(Token<?> tok) { 589 return TokenProto.newBuilder(). 590 setIdentifier(ByteString.copyFrom(tok.getIdentifier())). 591 setPassword(ByteString.copyFrom(tok.getPassword())). 592 setKind(tok.getKind().toString()). 
593 setService(tok.getService().toString()).build(); 594 } 595 596 public static Token<BlockTokenIdentifier> convert( 597 TokenProto blockToken) { 598 return new Token<BlockTokenIdentifier>(blockToken.getIdentifier() 599 .toByteArray(), blockToken.getPassword().toByteArray(), new Text( 600 blockToken.getKind()), new Text(blockToken.getService())); 601 } 602 603 604 public static Token<DelegationTokenIdentifier> convertDelegationToken( 605 TokenProto blockToken) { 606 return new Token<DelegationTokenIdentifier>(blockToken.getIdentifier() 607 .toByteArray(), blockToken.getPassword().toByteArray(), new Text( 608 blockToken.getKind()), new Text(blockToken.getService())); 609 } 610 611 public static ReplicaState convert(ReplicaStateProto state) { 612 switch (state) { 613 case RBW: 614 return ReplicaState.RBW; 615 case RUR: 616 return ReplicaState.RUR; 617 case RWR: 618 return ReplicaState.RWR; 619 case TEMPORARY: 620 return ReplicaState.TEMPORARY; 621 case FINALIZED: 622 default: 623 return ReplicaState.FINALIZED; 624 } 625 } 626 627 public static ReplicaStateProto convert(ReplicaState state) { 628 switch (state) { 629 case RBW: 630 return ReplicaStateProto.RBW; 631 case RUR: 632 return ReplicaStateProto.RUR; 633 case RWR: 634 return ReplicaStateProto.RWR; 635 case TEMPORARY: 636 return ReplicaStateProto.TEMPORARY; 637 case FINALIZED: 638 default: 639 return ReplicaStateProto.FINALIZED; 640 } 641 } 642 643 public static DatanodeRegistrationProto convert( 644 DatanodeRegistration registration) { 645 DatanodeRegistrationProto.Builder builder = DatanodeRegistrationProto 646 .newBuilder(); 647 return builder.setDatanodeID(PBHelper.convert((DatanodeID) registration)) 648 .setStorageInfo(PBHelper.convert(registration.getStorageInfo())) 649 .setKeys(PBHelper.convert(registration.getExportedKeys())) 650 .setSoftwareVersion(registration.getSoftwareVersion()).build(); 651 } 652 653 public static DatanodeRegistration convert(DatanodeRegistrationProto proto) { 654 return new 
DatanodeRegistration(PBHelper.convert(proto.getDatanodeID()),
        PBHelper.convert(proto.getStorageInfo()), PBHelper.convert(proto
            .getKeys()), proto.getSoftwareVersion());
  }

  /**
   * Converts a {@link DatanodeCommandProto} to the matching native
   * {@link DatanodeCommand} subtype, dispatching on the command type.
   * NOTE(review): NullDatanodeCommand and any unrecognized type fall through
   * and return {@code null} — callers appear to treat null as "no command".
   */
  public static DatanodeCommand convert(DatanodeCommandProto proto) {
    switch (proto.getCmdType()) {
    case BalancerBandwidthCommand:
      return PBHelper.convert(proto.getBalancerCmd());
    case BlockCommand:
      return PBHelper.convert(proto.getBlkCmd());
    case BlockRecoveryCommand:
      return PBHelper.convert(proto.getRecoveryCmd());
    case FinalizeCommand:
      return PBHelper.convert(proto.getFinalizeCmd());
    case KeyUpdateCommand:
      return PBHelper.convert(proto.getKeyUpdateCmd());
    case RegisterCommand:
      // REG_CMD is a shared singleton declared elsewhere in this class.
      return REG_CMD;
    }
    return null;
  }

  /** Converts a {@link BalancerBandwidthCommand} to its protobuf form. */
  public static BalancerBandwidthCommandProto convert(
      BalancerBandwidthCommand bbCmd) {
    return BalancerBandwidthCommandProto.newBuilder()
        .setBandwidth(bbCmd.getBalancerBandwidthValue()).build();
  }

  /** Converts a {@link KeyUpdateCommand} (exported block keys) to protobuf. */
  public static KeyUpdateCommandProto convert(KeyUpdateCommand cmd) {
    return KeyUpdateCommandProto.newBuilder()
        .setKeys(PBHelper.convert(cmd.getExportedKeys())).build();
  }

  /** Converts a {@link BlockRecoveryCommand} to protobuf, one entry per recovering block. */
  public static BlockRecoveryCommandProto convert(BlockRecoveryCommand cmd) {
    BlockRecoveryCommandProto.Builder builder = BlockRecoveryCommandProto
        .newBuilder();
    for (RecoveringBlock b : cmd.getRecoveringBlocks()) {
      builder.addBlocks(PBHelper.convert(b));
    }
    return builder.build();
  }

  /** Converts a {@link FinalizeCommand} (carries only the block pool id) to protobuf. */
  public static FinalizeCommandProto convert(FinalizeCommand cmd) {
    return FinalizeCommandProto.newBuilder()
        .setBlockPoolId(cmd.getBlockPoolId()).build();
  }

  /**
   * Converts a {@link BlockCommand} to protobuf.
   * Only TRANSFER/INVALIDATE/SHUTDOWN actions are representable; any other
   * action is a programming error (AssertionError).
   */
  public static BlockCommandProto convert(BlockCommand cmd) {
    BlockCommandProto.Builder builder = BlockCommandProto.newBuilder()
        .setBlockPoolId(cmd.getBlockPoolId());
    switch (cmd.getAction()) {
    case DatanodeProtocol.DNA_TRANSFER:
      builder.setAction(BlockCommandProto.Action.TRANSFER);
      break;
    case DatanodeProtocol.DNA_INVALIDATE:
      builder.setAction(BlockCommandProto.Action.INVALIDATE);
      break;
    case DatanodeProtocol.DNA_SHUTDOWN:
      builder.setAction(BlockCommandProto.Action.SHUTDOWN);
      break;
    default:
      throw new AssertionError("Invalid action");
    }
    Block[] blocks = cmd.getBlocks();
    for (int i = 0; i < blocks.length; i++) {
      builder.addBlocks(PBHelper.convert(blocks[i]));
    }
    // Per-block target datanode lists travel in a parallel repeated field.
    builder.addAllTargets(PBHelper.convert(cmd.getTargets()));
    return builder.build();
  }

  /** Converts a 2-D array of target datanodes into a list of per-block DatanodeInfosProto. */
  private static List<DatanodeInfosProto> convert(DatanodeInfo[][] targets) {
    DatanodeInfosProto[] ret = new DatanodeInfosProto[targets.length];
    for (int i = 0; i < targets.length; i++) {
      ret[i] = DatanodeInfosProto.newBuilder()
          .addAllDatanodes(PBHelper.convert(targets[i])).build();
    }
    return Arrays.asList(ret);
  }

  /**
   * Converts a native {@link DatanodeCommand} to protobuf, dispatching on the
   * command's action code. A {@code null} command, DNA_UNKNOWN, or any
   * unrecognized action maps to NullDatanodeCommand.
   */
  public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) {
    DatanodeCommandProto.Builder builder = DatanodeCommandProto.newBuilder();
    if (datanodeCommand == null) {
      return builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand)
          .build();
    }
    switch (datanodeCommand.getAction()) {
    case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
      builder.setCmdType(DatanodeCommandProto.Type.BalancerBandwidthCommand)
          .setBalancerCmd(
              PBHelper.convert((BalancerBandwidthCommand) datanodeCommand));
      break;
    case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
      builder
          .setCmdType(DatanodeCommandProto.Type.KeyUpdateCommand)
          .setKeyUpdateCmd(PBHelper.convert((KeyUpdateCommand) datanodeCommand));
      break;
    case DatanodeProtocol.DNA_RECOVERBLOCK:
      builder.setCmdType(DatanodeCommandProto.Type.BlockRecoveryCommand)
          .setRecoveryCmd(
              PBHelper.convert((BlockRecoveryCommand) datanodeCommand));
      break;
    case DatanodeProtocol.DNA_FINALIZE:
      builder.setCmdType(DatanodeCommandProto.Type.FinalizeCommand)
          .setFinalizeCmd(PBHelper.convert((FinalizeCommand) datanodeCommand));
      break;
    case DatanodeProtocol.DNA_REGISTER:
      builder.setCmdType(DatanodeCommandProto.Type.RegisterCommand)
          .setRegisterCmd(REG_CMD_PROTO);
      break;
    case DatanodeProtocol.DNA_TRANSFER:
    case DatanodeProtocol.DNA_INVALIDATE:
    case DatanodeProtocol.DNA_SHUTDOWN:
      // All three block actions share the BlockCommand wire representation;
      // the action itself is encoded inside the BlockCommandProto.
      builder.setCmdType(DatanodeCommandProto.Type.BlockCommand).setBlkCmd(
          PBHelper.convert((BlockCommand) datanodeCommand));
      break;
    case DatanodeProtocol.DNA_UNKNOWN: //Not expected
    default:
      builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
    }
    return builder.build();
  }

  /** Converts a KeyUpdateCommandProto back to the native command. */
  public static KeyUpdateCommand convert(KeyUpdateCommandProto keyUpdateCmd) {
    return new KeyUpdateCommand(PBHelper.convert(keyUpdateCmd.getKeys()));
  }

  /** Converts a FinalizeCommandProto back to the native command. */
  public static FinalizeCommand convert(FinalizeCommandProto finalizeCmd) {
    return new FinalizeCommand(finalizeCmd.getBlockPoolId());
  }

  /** Converts a BlockRecoveryCommandProto back to the native command. */
  public static BlockRecoveryCommand convert(
      BlockRecoveryCommandProto recoveryCmd) {
    List<RecoveringBlockProto> list = recoveryCmd.getBlocksList();
    List<RecoveringBlock> recoveringBlocks = new ArrayList<RecoveringBlock>(
        list.size());

    for (RecoveringBlockProto rbp : list) {
      recoveringBlocks.add(PBHelper.convert(rbp));
    }
    return new BlockRecoveryCommand(recoveringBlocks);
  }

  /**
   * Converts a BlockCommandProto back to the native {@link BlockCommand}.
   * NOTE(review): an unmapped proto action silently yields DNA_UNKNOWN rather
   * than failing — confirm downstream handling tolerates that.
   */
  public static BlockCommand convert(BlockCommandProto blkCmd) {
    List<BlockProto> blockProtoList = blkCmd.getBlocksList();
    Block[] blocks = new Block[blockProtoList.size()];
    for (int i = 0; i < blockProtoList.size(); i++) {
      blocks[i] = PBHelper.convert(blockProtoList.get(i));
    }
    List<DatanodeInfosProto> targetList = blkCmd.getTargetsList();
    DatanodeInfo[][] targets = new DatanodeInfo[targetList.size()][];
    for (int i = 0; i < targetList.size(); i++) {
      targets[i] = PBHelper.convert(targetList.get(i));
    }
    int action = DatanodeProtocol.DNA_UNKNOWN;
    switch (blkCmd.getAction()) {
    case TRANSFER:
      action = DatanodeProtocol.DNA_TRANSFER;
      break;
    case INVALIDATE:
      action = DatanodeProtocol.DNA_INVALIDATE;
      break;
    case SHUTDOWN:
      action = DatanodeProtocol.DNA_SHUTDOWN;
      break;
    }
    return new BlockCommand(action, blkCmd.getBlockPoolId(), blocks, targets);
  }

  /** Converts a DatanodeInfosProto (repeated datanodes) into a DatanodeInfo array. */
  public static DatanodeInfo[] convert(DatanodeInfosProto datanodeInfosProto) {
    List<DatanodeInfoProto> proto = datanodeInfosProto.getDatanodesList();
    DatanodeInfo[] infos = new DatanodeInfo[proto.size()];
    for (int i = 0; i < infos.length; i++) {
      infos[i] = PBHelper.convert(proto.get(i));
    }
    return infos;
  }

  /** Converts a BalancerBandwidthCommandProto back to the native command. */
  public static BalancerBandwidthCommand convert(
      BalancerBandwidthCommandProto balancerCmd) {
    return new BalancerBandwidthCommand(balancerCmd.getBandwidth());
  }

  /**
   * Converts a {@link ReceivedDeletedBlockInfo} to protobuf.
   * The delete hint is optional on the wire and only set when non-null;
   * an unrecognized status is rejected with IllegalArgumentException.
   */
  public static ReceivedDeletedBlockInfoProto convert(
      ReceivedDeletedBlockInfo receivedDeletedBlockInfo) {
    ReceivedDeletedBlockInfoProto.Builder builder =
        ReceivedDeletedBlockInfoProto.newBuilder();

    ReceivedDeletedBlockInfoProto.BlockStatus status;
    switch (receivedDeletedBlockInfo.getStatus()) {
    case RECEIVING_BLOCK:
      status = ReceivedDeletedBlockInfoProto.BlockStatus.RECEIVING;
      break;
    case RECEIVED_BLOCK:
      status = ReceivedDeletedBlockInfoProto.BlockStatus.RECEIVED;
      break;
    case DELETED_BLOCK:
      status = ReceivedDeletedBlockInfoProto.BlockStatus.DELETED;
      break;
    default:
      throw new IllegalArgumentException("Bad status: " +
          receivedDeletedBlockInfo.getStatus());
    }
    builder.setStatus(status);

    if (receivedDeletedBlockInfo.getDelHints() != null) {
      builder.setDeleteHint(receivedDeletedBlockInfo.getDelHints());
    }
    return builder.setBlock(PBHelper.convert(receivedDeletedBlockInfo.getBlock()))
        .build();
  }

  /**
   * Converts a ReceivedDeletedBlockInfoProto back to the native type.
   * NOTE(review): unlike the forward direction, an unmapped proto status
   * leaves {@code status} null and is passed through — verify callers cope.
   */
  public static ReceivedDeletedBlockInfo convert(
      ReceivedDeletedBlockInfoProto proto) {
    ReceivedDeletedBlockInfo.BlockStatus status = null;
    switch (proto.getStatus()) {
    case RECEIVING:
      status = BlockStatus.RECEIVING_BLOCK;
      break;
    case RECEIVED:
      status = BlockStatus.RECEIVED_BLOCK;
      break;
    case DELETED:
      status = BlockStatus.DELETED_BLOCK;
      break;
    }
    return new ReceivedDeletedBlockInfo(
        PBHelper.convert(proto.getBlock()),
        status,
        proto.hasDeleteHint() ? proto.getDeleteHint() : null);
  }

  /**
   * Converts a {@link NamespaceInfo} to protobuf. The "unused" field is a
   * required wire slot that is always written as 0.
   */
  public static NamespaceInfoProto convert(NamespaceInfo info) {
    return NamespaceInfoProto.newBuilder()
        .setBlockPoolID(info.getBlockPoolID())
        .setBuildVersion(info.getBuildVersion())
        .setUnused(0)
        .setStorageInfo(PBHelper.convert((StorageInfo)info))
        .setSoftwareVersion(info.getSoftwareVersion()).build();
  }

  // Located Block Arrays and Lists

  /** Converts an array of LocatedBlock to proto form; null-safe. */
  public static LocatedBlockProto[] convertLocatedBlock(LocatedBlock[] lb) {
    if (lb == null) return null;
    return convertLocatedBlock2(Arrays.asList(lb)).toArray(
        new LocatedBlockProto[lb.length]);
  }

  /** Converts an array of LocatedBlockProto to native form; null-safe. */
  public static LocatedBlock[] convertLocatedBlock(LocatedBlockProto[] lb) {
    if (lb == null) return null;
    return convertLocatedBlock(Arrays.asList(lb)).toArray(
        new LocatedBlock[lb.length]);
  }

  /** Converts a list of LocatedBlockProto to native form; null-safe. */
  public static List<LocatedBlock> convertLocatedBlock(
      List<LocatedBlockProto> lb) {
    if (lb == null) return null;
    final int len = lb.size();
    List<LocatedBlock> result =
        new ArrayList<LocatedBlock>(len);
    for (int i = 0; i < len; ++i) {
      result.add(PBHelper.convert(lb.get(i)));
    }
    return result;
  }

  /**
   * Converts a list of LocatedBlock to proto form; null-safe.
   * (Named "...2" only to avoid erasure clash with the proto-to-native overload.)
   */
  public static List<LocatedBlockProto> convertLocatedBlock2(List<LocatedBlock> lb) {
    if (lb == null) return null;
    final int len = lb.size();
    List<LocatedBlockProto> result = new ArrayList<LocatedBlockProto>(len);
    for (int i = 0; i < len; ++i) {
      result.add(PBHelper.convert(lb.get(i)));
    }
    return result;
  }


  // LocatedBlocks

  /** Converts a LocatedBlocksProto to the native LocatedBlocks. */
  public static LocatedBlocks convert(LocatedBlocksProto lb) {
    return new LocatedBlocks(
        lb.getFileLength(), lb.getUnderConstruction(),
        PBHelper.convertLocatedBlock(lb.getBlocksList()),
        lb.hasLastBlock() ? PBHelper.convert(lb.getLastBlock()) : null,
        lb.getIsLastBlockComplete());
  }

  /** Converts a LocatedBlocks to protobuf; null-safe; last block is optional. */
  public static LocatedBlocksProto convert(LocatedBlocks lb) {
    if (lb == null) {
      return null;
    }
    LocatedBlocksProto.Builder builder =
        LocatedBlocksProto.newBuilder();
    if (lb.getLastLocatedBlock() != null) {
      builder.setLastBlock(PBHelper.convert(lb.getLastLocatedBlock()));
    }
    return builder.setFileLength(lb.getFileLength())
        .setUnderConstruction(lb.isUnderConstruction())
        .addAllBlocks(PBHelper.convertLocatedBlock2(lb.getLocatedBlocks()))
        .setIsLastBlockComplete(lb.isLastBlockComplete()).build();
  }

  // DataEncryptionKey

  /**
   * Converts a DataEncryptionKeyProto to the native key. An empty algorithm
   * string on the wire represents "unset" and is mapped back to null.
   */
  public static DataEncryptionKey convert(DataEncryptionKeyProto bet) {
    String encryptionAlgorithm = bet.getEncryptionAlgorithm();
    return new DataEncryptionKey(bet.getKeyId(),
        bet.getBlockPoolId(),
        bet.getNonce().toByteArray(),
        bet.getEncryptionKey().toByteArray(),
        bet.getExpiryDate(),
        encryptionAlgorithm.isEmpty() ? null : encryptionAlgorithm);
  }

  /** Converts a DataEncryptionKey to protobuf; algorithm is set only when non-null. */
  public static DataEncryptionKeyProto convert(DataEncryptionKey bet) {
    DataEncryptionKeyProto.Builder b = DataEncryptionKeyProto.newBuilder()
        .setKeyId(bet.keyId)
        .setBlockPoolId(bet.blockPoolId)
        .setNonce(ByteString.copyFrom(bet.nonce))
        .setEncryptionKey(ByteString.copyFrom(bet.encryptionKey))
        .setExpiryDate(bet.expiryDate);
    if (bet.encryptionAlgorithm != null) {
      b.setEncryptionAlgorithm(bet.encryptionAlgorithm);
    }
    return b.build();
  }

  /** Converts an FsServerDefaultsProto to the native defaults; null-safe. */
  public static FsServerDefaults convert(FsServerDefaultsProto fs) {
    if (fs == null) return null;
    return new FsServerDefaults(
        fs.getBlockSize(), fs.getBytesPerChecksum(),
        fs.getWritePacketSize(), (short) fs.getReplication(),
        fs.getFileBufferSize(),
        fs.getEncryptDataTransfer(),
        fs.getTrashInterval(),
        PBHelper.convert(fs.getChecksumType()));
  }

  /** Converts FsServerDefaults to protobuf; null-safe. */
  public static FsServerDefaultsProto convert(FsServerDefaults fs) {
    if (fs == null) return null;
    return FsServerDefaultsProto.newBuilder().
      setBlockSize(fs.getBlockSize()).
      setBytesPerChecksum(fs.getBytesPerChecksum()).
      setWritePacketSize(fs.getWritePacketSize())
      .setReplication(fs.getReplication())
      .setFileBufferSize(fs.getFileBufferSize())
      .setEncryptDataTransfer(fs.getEncryptDataTransfer())
      .setTrashInterval(fs.getTrashInterval())
      .setChecksumType(PBHelper.convert(fs.getChecksumType()))
      .build();
  }

  /** Converts an FsPermission to protobuf via its short encoding; null-safe. */
  public static FsPermissionProto convert(FsPermission p) {
    if (p == null) return null;
    return FsPermissionProto.newBuilder().setPerm(p.toShort()).build();
  }

  /** Converts an FsPermissionProto back to FsPermission; null-safe. */
  public static FsPermission convert(FsPermissionProto p) {
    if (p == null) return null;
    return new FsPermission((short)p.getPerm());
  }


  // The createFlag field in PB is a bitmask whose values are the same as the
  // enum values of CreateFlag

  /** Packs a CreateFlag enum set into the protocol's int bitmask. */
  public static int convertCreateFlag(EnumSetWritable<CreateFlag> flag) {
    int value = 0;
    if (flag.contains(CreateFlag.APPEND)) {
      value |= CreateFlagProto.APPEND.getNumber();
    }
    if (flag.contains(CreateFlag.CREATE)) {
      value |= CreateFlagProto.CREATE.getNumber();
    }
    if (flag.contains(CreateFlag.OVERWRITE)) {
      value |= CreateFlagProto.OVERWRITE.getNumber();
    }
    return value;
  }

  /** Unpacks the protocol's int bitmask back into a CreateFlag enum set. */
  public static EnumSetWritable<CreateFlag> convert(int flag) {
    EnumSet<CreateFlag> result =
        EnumSet.noneOf(CreateFlag.class);
    if ((flag & CreateFlagProto.APPEND_VALUE) == CreateFlagProto.APPEND_VALUE) {
      result.add(CreateFlag.APPEND);
    }
    if ((flag & CreateFlagProto.CREATE_VALUE) == CreateFlagProto.CREATE_VALUE) {
      result.add(CreateFlag.CREATE);
    }
    if ((flag & CreateFlagProto.OVERWRITE_VALUE)
        == CreateFlagProto.OVERWRITE_VALUE) {
      result.add(CreateFlag.OVERWRITE);
    }
    return new EnumSetWritable<CreateFlag>(result);
  }

  /**
   * Converts an HdfsFileStatusProto to a native status; null-safe. Always
   * builds an HdfsLocatedFileStatus (locations null when absent); optional
   * fileId/childrenNum get sentinel defaults when unset.
   * NOTE(review): enum comparison uses equals() — == would be the idiom, but
   * behavior is the same for enum constants.
   */
  public static HdfsFileStatus convert(HdfsFileStatusProto fs) {
    if (fs == null)
      return null;
    return new HdfsLocatedFileStatus(
        fs.getLength(), fs.getFileType().equals(FileType.IS_DIR),
        fs.getBlockReplication(), fs.getBlocksize(),
        fs.getModificationTime(), fs.getAccessTime(),
        PBHelper.convert(fs.getPermission()), fs.getOwner(), fs.getGroup(),
        fs.getFileType().equals(FileType.IS_SYMLINK) ?
            fs.getSymlink().toByteArray() : null,
        fs.getPath().toByteArray(),
        fs.hasFileId()? fs.getFileId(): INodeId.GRANDFATHER_INODE_ID,
        fs.hasLocations() ? PBHelper.convert(fs.getLocations()) : null,
        fs.hasChildrenNum() ? fs.getChildrenNum() : -1);
  }

  /** Converts a SnapshottableDirectoryStatusProto to the native status; null-safe. */
  public static SnapshottableDirectoryStatus convert(
      SnapshottableDirectoryStatusProto sdirStatusProto) {
    if (sdirStatusProto == null) {
      return null;
    }
    final HdfsFileStatusProto status = sdirStatusProto.getDirStatus();
    return new SnapshottableDirectoryStatus(
        status.getModificationTime(),
        status.getAccessTime(),
        PBHelper.convert(status.getPermission()),
        status.getOwner(),
        status.getGroup(),
        status.getPath().toByteArray(),
        status.getFileId(),
        status.getChildrenNum(),
        sdirStatusProto.getSnapshotNumber(),
        sdirStatusProto.getSnapshotQuota(),
        sdirStatusProto.getParentFullpath().toByteArray());
  }

  /**
   * Converts a native HdfsFileStatus to protobuf; null-safe. Symlink target
   * and block locations are only set when present on the native object.
   */
  public static HdfsFileStatusProto convert(HdfsFileStatus fs) {
    if (fs == null)
      return null;
    FileType fType = FileType.IS_FILE;
    if (fs.isDir()) {
      fType = FileType.IS_DIR;
    } else if (fs.isSymlink()) {
      fType = FileType.IS_SYMLINK;
    }

    HdfsFileStatusProto.Builder builder =
        HdfsFileStatusProto.newBuilder().
        setLength(fs.getLen()).
        setFileType(fType).
        setBlockReplication(fs.getReplication()).
        setBlocksize(fs.getBlockSize()).
        setModificationTime(fs.getModificationTime()).
        setAccessTime(fs.getAccessTime()).
        setPermission(PBHelper.convert(fs.getPermission())).
        setOwner(fs.getOwner()).
        setGroup(fs.getGroup()).
        setFileId(fs.getFileId()).
        setChildrenNum(fs.getChildrenNum()).
        setPath(ByteString.copyFrom(fs.getLocalNameInBytes()));
    if (fs.isSymlink()) {
      builder.setSymlink(ByteString.copyFrom(fs.getSymlinkInBytes()));
    }
    if (fs instanceof HdfsLocatedFileStatus) {
      LocatedBlocks locations = ((HdfsLocatedFileStatus)fs).getBlockLocations();
      if (locations != null) {
        builder.setLocations(PBHelper.convert(locations));
      }
    }
    return builder.build();
  }

  /**
   * Converts a SnapshottableDirectoryStatus to protobuf; null-safe.
   * A null parent path is encoded as an empty byte string.
   */
  public static SnapshottableDirectoryStatusProto convert(
      SnapshottableDirectoryStatus status) {
    if (status == null) {
      return null;
    }
    int snapshotNumber = status.getSnapshotNumber();
    int snapshotQuota = status.getSnapshotQuota();
    byte[] parentFullPath = status.getParentFullPath();
    ByteString parentFullPathBytes = ByteString.copyFrom(
        parentFullPath == null ? DFSUtil.EMPTY_BYTES : parentFullPath);
    HdfsFileStatusProto fs = convert(status.getDirStatus());
    SnapshottableDirectoryStatusProto.Builder builder =
        SnapshottableDirectoryStatusProto
        .newBuilder().setSnapshotNumber(snapshotNumber)
        .setSnapshotQuota(snapshotQuota).setParentFullpath(parentFullPathBytes)
        .setDirStatus(fs);
    return builder.build();
  }

  /** Element-wise conversion of an HdfsFileStatus array to protos; null-safe. */
  public static HdfsFileStatusProto[] convert(HdfsFileStatus[] fs) {
    if (fs == null) return null;
    final int len = fs.length;
    HdfsFileStatusProto[] result = new HdfsFileStatusProto[len];
    for (int i = 0; i < len; ++i) {
      result[i] = PBHelper.convert(fs[i]);
    }
    return result;
  }

  /** Element-wise conversion of an HdfsFileStatusProto array to native; null-safe. */
  public static HdfsFileStatus[] convert(HdfsFileStatusProto[] fs) {
    if (fs == null) return null;
    final int len = fs.length;
    HdfsFileStatus[] result = new HdfsFileStatus[len];
    for (int i = 0; i < len; ++i) {
      result[i] = PBHelper.convert(fs[i]);
    }
    return result;
  }

  /** Converts a DirectoryListingProto to a native DirectoryListing; null-safe. */
  public static DirectoryListing convert(DirectoryListingProto dl) {
    if (dl == null)
      return null;
    List<HdfsFileStatusProto> partList = dl.getPartialListingList();
    return new DirectoryListing(
        partList.isEmpty() ? new HdfsLocatedFileStatus[0]
          : PBHelper.convert(
              partList.toArray(new HdfsFileStatusProto[partList.size()])),
        dl.getRemainingEntries());
  }

  /** Converts a DirectoryListing to protobuf; null-safe. */
  public static DirectoryListingProto convert(DirectoryListing d) {
    if (d == null)
      return null;
    return DirectoryListingProto.newBuilder().
        addAllPartialListing(Arrays.asList(
            PBHelper.convert(d.getPartialListing()))).
        setRemainingEntries(d.getRemainingEntries()).
        build();
  }

  /**
   * Unpacks the fs-stats response into the fixed-index long array defined by
   * {@link ClientProtocol}'s GET_STATS_*_IDX constants.
   */
  public static long[] convert(GetFsStatsResponseProto res) {
    long[] result = new long[6];
    result[ClientProtocol.GET_STATS_CAPACITY_IDX] = res.getCapacity();
    result[ClientProtocol.GET_STATS_USED_IDX] = res.getUsed();
    result[ClientProtocol.GET_STATS_REMAINING_IDX] = res.getRemaining();
    result[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX] = res.getUnderReplicated();
    result[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX] = res.getCorruptBlocks();
    result[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX] = res.getMissingBlocks();
    return result;
  }

  /**
   * Packs an fs-stats long array into the response proto. Each field is set
   * only if the array is long enough to contain its index, so shorter
   * (older) arrays are tolerated.
   */
  public static GetFsStatsResponseProto convert(long[] fsStats) {
    GetFsStatsResponseProto.Builder result = GetFsStatsResponseProto
        .newBuilder();
    if (fsStats.length >= ClientProtocol.GET_STATS_CAPACITY_IDX + 1)
      result.setCapacity(fsStats[ClientProtocol.GET_STATS_CAPACITY_IDX]);
    if (fsStats.length >= ClientProtocol.GET_STATS_USED_IDX + 1)
      result.setUsed(fsStats[ClientProtocol.GET_STATS_USED_IDX]);
    if (fsStats.length >= ClientProtocol.GET_STATS_REMAINING_IDX + 1)
      result.setRemaining(fsStats[ClientProtocol.GET_STATS_REMAINING_IDX]);
    if (fsStats.length >= ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX + 1)
      result.setUnderReplicated(
          fsStats[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX]);
    if (fsStats.length >= ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX + 1)
      result.setCorruptBlocks(
          fsStats[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX]);
    if (fsStats.length >= ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX + 1)
      result.setMissingBlocks(
          fsStats[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX]);
    return result.build();
  }

  /** Maps a DatanodeReportType to its proto enum; unknown values are rejected. */
  public static DatanodeReportTypeProto
    convert(DatanodeReportType t) {
    switch (t) {
      case ALL: return DatanodeReportTypeProto.ALL;
      case LIVE: return DatanodeReportTypeProto.LIVE;
      case DEAD: return DatanodeReportTypeProto.DEAD;
      default:
        throw new IllegalArgumentException("Unexpected data type report:" + t);
    }
  }

  /** Maps a DatanodeReportTypeProto to the native enum; unknown values are rejected. */
  public static DatanodeReportType
    convert(DatanodeReportTypeProto t) {
    switch (t) {
      case ALL: return DatanodeReportType.ALL;
      case LIVE: return DatanodeReportType.LIVE;
      case DEAD: return DatanodeReportType.DEAD;
      default:
        throw new IllegalArgumentException("Unexpected data type report:" + t);
    }
  }

  /** Maps a SafeModeAction to its proto enum; unknown values are rejected. */
  public static SafeModeActionProto convert(
      SafeModeAction a) {
    switch (a) {
    case SAFEMODE_LEAVE:
      return SafeModeActionProto.SAFEMODE_LEAVE;
    case SAFEMODE_ENTER:
      return SafeModeActionProto.SAFEMODE_ENTER;
    case SAFEMODE_GET:
      return SafeModeActionProto.SAFEMODE_GET;
    default:
      throw new IllegalArgumentException("Unexpected SafeModeAction :" + a);
    }
  }

  /** Maps a SafeModeActionProto to the native enum; unknown values are rejected. */
  public static SafeModeAction convert(
      ClientNamenodeProtocolProtos.SafeModeActionProto a) {
    switch (a) {
    case SAFEMODE_LEAVE:
      return SafeModeAction.SAFEMODE_LEAVE;
    case SAFEMODE_ENTER:
      return SafeModeAction.SAFEMODE_ENTER;
    case SAFEMODE_GET:
      return SafeModeAction.SAFEMODE_GET;
    default:
      throw new IllegalArgumentException("Unexpected SafeModeAction :" + a);
    }
  }

  /** Converts a CorruptFileBlocksProto to the native type; null-safe. */
  public static CorruptFileBlocks convert(CorruptFileBlocksProto c) {
    if (c == null)
      return null;
    List<String> fileList = c.getFilesList();
    return new CorruptFileBlocks(fileList.toArray(new String[fileList.size()]),
        c.getCookie());
  }

  /** Converts a CorruptFileBlocks to protobuf; null-safe. */
  public static CorruptFileBlocksProto convert(CorruptFileBlocks c) {
    if (c == null)
      return null;
    return CorruptFileBlocksProto.newBuilder().
        addAllFiles(Arrays.asList(c.getFiles())).
        setCookie(c.getCookie()).
        build();
  }

  /** Converts a ContentSummaryProto to the native summary; null-safe. */
  public static ContentSummary convert(ContentSummaryProto cs) {
    if (cs == null) return null;
    return new ContentSummary(
      cs.getLength(), cs.getFileCount(), cs.getDirectoryCount(), cs.getQuota(),
      cs.getSpaceConsumed(), cs.getSpaceQuota());
  }

  /** Converts a ContentSummary to protobuf; null-safe. */
  public static ContentSummaryProto convert(ContentSummary cs) {
    if (cs == null) return null;
    return ContentSummaryProto.newBuilder().
        setLength(cs.getLength()).
        setFileCount(cs.getFileCount()).
        setDirectoryCount(cs.getDirectoryCount()).
        setQuota(cs.getQuota()).
        setSpaceConsumed(cs.getSpaceConsumed()).
        setSpaceQuota(cs.getSpaceQuota()).
        build();
  }

  /** Converts an NN HA heartbeat proto to native; null-safe; unknown state is rejected. */
  public static NNHAStatusHeartbeat convert(NNHAStatusHeartbeatProto s) {
    if (s == null) return null;
    switch (s.getState()) {
    case ACTIVE:
      return new NNHAStatusHeartbeat(HAServiceState.ACTIVE, s.getTxid());
    case STANDBY:
      return new NNHAStatusHeartbeat(HAServiceState.STANDBY, s.getTxid());
    default:
      throw new IllegalArgumentException("Unexpected NNHAStatusHeartbeat.State:" + s.getState());
    }
  }

  /** Converts a native NN HA heartbeat to protobuf; null-safe; unknown state is rejected. */
  public static NNHAStatusHeartbeatProto convert(NNHAStatusHeartbeat hb) {
    if (hb == null) return null;
    NNHAStatusHeartbeatProto.Builder builder =
      NNHAStatusHeartbeatProto.newBuilder();
    switch (hb.getState()) {
      case ACTIVE:
        builder.setState(NNHAStatusHeartbeatProto.State.ACTIVE);
        break;
      case STANDBY:
        builder.setState(NNHAStatusHeartbeatProto.State.STANDBY);
        break;
      default:
        throw new IllegalArgumentException("Unexpected NNHAStatusHeartbeat.State:" +
            hb.getState());
    }
    builder.setTxid(hb.getTxId());
    return builder.build();
  }

  /** Converts a DatanodeStorage to protobuf. */
  public static DatanodeStorageProto convert(DatanodeStorage s) {
    return DatanodeStorageProto.newBuilder()
        .setState(PBHelper.convert(s.getState()))
        .setStorageID(s.getStorageID()).build();
  }

  /** Maps a native storage state to proto; anything but READ_ONLY maps to NORMAL. */
  private static StorageState convert(State state) {
    switch(state) {
    case READ_ONLY:
      return StorageState.READ_ONLY;
    case NORMAL:
    default:
      return StorageState.NORMAL;
    }
  }

  /** Converts a DatanodeStorageProto back to the native type. */
  public static DatanodeStorage convert(DatanodeStorageProto s) {
    return new DatanodeStorage(s.getStorageID(), PBHelper.convert(s.getState()));
  }

  /** Maps a proto storage state to native; anything but READ_ONLY maps to NORMAL. */
  private static State convert(StorageState state) {
    switch(state) {
    case READ_ONLY:
      return DatanodeStorage.State.READ_ONLY;
    case NORMAL:
    default:
      return DatanodeStorage.State.NORMAL;
    }
  }

  /** Converts a StorageReport to protobuf. */
  public static StorageReportProto convert(StorageReport r) {
    return StorageReportProto.newBuilder()
        .setBlockPoolUsed(r.getBlockPoolUsed()).setCapacity(r.getCapacity())
        .setDfsUsed(r.getDfsUsed()).setRemaining(r.getRemaining())
        .setStorageID(r.getStorageID()).build();
  }

  /**
   * Converts a JournalInfoProto to the native JournalInfo.
   * Optional layout version / namespace id default to 0 when unset.
   */
  public static JournalInfo convert(JournalInfoProto info) {
    int lv = info.hasLayoutVersion() ? info.getLayoutVersion() : 0;
    int nsID = info.hasNamespaceID() ? info.getNamespaceID() : 0;
    return new JournalInfo(lv, info.getClusterID(), nsID);
  }

  /**
   * Converts a {@link JournalInfo} to its protobuf form, as sent from the
   * Namenode to journal receivers.
   */
  public static JournalInfoProto convert(JournalInfo j) {
    return JournalInfoProto.newBuilder().setClusterID(j.getClusterId())
        .setLayoutVersion(j.getLayoutVersion())
        .setNamespaceID(j.getNamespaceId()).build();
  }

  /** Converts a snapshottable-dir listing proto to a status array; null-safe. */
  public static SnapshottableDirectoryStatus[] convert(
      SnapshottableDirectoryListingProto sdlp) {
    if (sdlp == null)
      return null;
    List<SnapshottableDirectoryStatusProto> list = sdlp
        .getSnapshottableDirListingList();
    if (list.isEmpty()) {
      return new SnapshottableDirectoryStatus[0];
    } else {
      SnapshottableDirectoryStatus[] result =
          new SnapshottableDirectoryStatus[list.size()];
      for (int i = 0; i < list.size(); i++) {
        result[i] = PBHelper.convert(list.get(i));
      }
      return result;
    }
  }

  /** Converts a snapshottable-dir status array to the listing proto; null-safe. */
  public static SnapshottableDirectoryListingProto convert(
      SnapshottableDirectoryStatus[] status) {
    if (status == null)
      return null;
    SnapshottableDirectoryStatusProto[] protos =
        new SnapshottableDirectoryStatusProto[status.length];
    for (int i = 0; i < status.length; i++) {
      protos[i] = PBHelper.convert(status[i]);
    }
    List<SnapshottableDirectoryStatusProto> protoList = Arrays.asList(protos);
    return SnapshottableDirectoryListingProto.newBuilder()
        .addAllSnapshottableDirListing(protoList).build();
  }

  /**
   * Converts a diff-report entry proto to native; null-safe.
   * Returns null when the modification label does not map to a known DiffType
   * (callers filter null entries out).
   */
  public static DiffReportEntry convert(SnapshotDiffReportEntryProto entry) {
    if (entry == null) {
      return null;
    }
    DiffType type = DiffType.getTypeFromLabel(entry
        .getModificationLabel());
    return type == null ? null :
      new DiffReportEntry(type, entry.getFullpath().toByteArray());
  }

  /**
   * Converts a native diff-report entry to protobuf; null-safe.
   * A null relative path is encoded as an empty byte string.
   */
  public static SnapshotDiffReportEntryProto convert(DiffReportEntry entry) {
    if (entry == null) {
      return null;
    }
    byte[] fullPath = entry.getRelativePath();
    ByteString fullPathString = ByteString
        .copyFrom(fullPath == null ? DFSUtil.EMPTY_BYTES : fullPath);

    String modification = entry.getType().getLabel();

    SnapshotDiffReportEntryProto entryProto = SnapshotDiffReportEntryProto
        .newBuilder().setFullpath(fullPathString)
        .setModificationLabel(modification).build();
    return entryProto;
  }

  /** Converts a SnapshotDiffReportProto to native, dropping unconvertible entries; null-safe. */
  public static SnapshotDiffReport convert(SnapshotDiffReportProto reportProto) {
    if (reportProto == null) {
      return null;
    }
    String snapshotDir = reportProto.getSnapshotRoot();
    String fromSnapshot = reportProto.getFromSnapshot();
    String toSnapshot = reportProto.getToSnapshot();
    List<SnapshotDiffReportEntryProto> list = reportProto
        .getDiffReportEntriesList();
    List<DiffReportEntry> entries = new ArrayList<DiffReportEntry>();
    for (SnapshotDiffReportEntryProto entryProto : list) {
      DiffReportEntry entry = convert(entryProto);
      if (entry != null)
        entries.add(entry);
    }
    return new SnapshotDiffReport(snapshotDir, fromSnapshot, toSnapshot,
        entries);
  }

  /** Converts a native SnapshotDiffReport to protobuf, dropping unconvertible entries; null-safe. */
  public static SnapshotDiffReportProto convert(SnapshotDiffReport report) {
    if (report == null) {
      return null;
    }
    List<DiffReportEntry> entries = report.getDiffList();
    List<SnapshotDiffReportEntryProto> entryProtos =
        new ArrayList<SnapshotDiffReportEntryProto>();
    for (DiffReportEntry entry : entries) {
      SnapshotDiffReportEntryProto entryProto = convert(entry);
      if (entryProto != null)
        entryProtos.add(entryProto);
    }

    SnapshotDiffReportProto reportProto = SnapshotDiffReportProto.newBuilder()
        .setSnapshotRoot(report.getSnapshotRoot())
        .setFromSnapshot(report.getFromSnapshot())
        .setToSnapshot(report.getLaterSnapshotName())
        .addAllDiffReportEntries(entryProtos).build();
    return reportProto;
  }

  /** Maps a checksum-type proto to the DataChecksum.Type with the same numeric id. */
  public static DataChecksum.Type convert(HdfsProtos.ChecksumTypeProto type) {
    return DataChecksum.Type.valueOf(type.getNumber());
  }

  /** Maps a DataChecksum.Type to the proto enum with the same numeric id. */
  public static HdfsProtos.ChecksumTypeProto convert(DataChecksum.Type type) {
    return HdfsProtos.ChecksumTypeProto.valueOf(type.id);
  }

  /**
   * Reads a varint32 length prefix from {@code input} and returns a stream
   * bounded to exactly that many bytes.
   *
   * @throws EOFException if the stream ends before the prefix is read
   */
  public static InputStream vintPrefixed(final InputStream input)
  throws IOException {
    final int firstByte = input.read();
    if (firstByte == -1) {
      throw new EOFException("Premature EOF: no length prefix available");
    }

    int size = CodedInputStream.readRawVarint32(firstByte, input);
    assert size >= 0;
    return new ExactSizeInputStream(input, size);
  }
}