/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.protocolPB;

import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.List;

import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateFlagProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeActionProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.StorageState;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.block.BlockKey;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
import org.apache.hadoop.hdfs.server.protocol.CheckpointCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
import org.apache.hadoop.hdfs.server.protocol.JournalInfo;
import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.hdfs.util.ExactSizeInputStream;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.DataChecksum;

import com.google.common.collect.Lists;
import com.google.protobuf.ByteString;
import com.google.protobuf.CodedInputStream;

/**
 * Utilities for converting protobuf classes to and from implementation classes
 * and other helper utilities to help in dealing with protobuf.
 *
 * Note that when converting from an internal type to a protobuf type, the
 * converter never returns null for the protobuf type. The check for the
 * internal type being null must be done before calling the convert() method.
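 *
 * For example, a caller is expected to guard against null before converting
 * (illustrative sketch only, with dnId a caller-supplied DatanodeID):
 *
 * <pre>
 *   DatanodeIDProto proto = (dnId == null) ? null : PBHelper.convert(dnId);
 * </pre>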
 */
public class PBHelper {
  private static final RegisterCommandProto REG_CMD_PROTO =
      RegisterCommandProto.newBuilder().build();
  private static final RegisterCommand REG_CMD = new RegisterCommand();

  private PBHelper() {
    /** Hidden constructor */
  }

  public static ByteString getByteString(byte[] bytes) {
    return ByteString.copyFrom(bytes);
  }

  public static NamenodeRole convert(NamenodeRoleProto role) {
    switch (role) {
    case NAMENODE:
      return NamenodeRole.NAMENODE;
    case BACKUP:
      return NamenodeRole.BACKUP;
    case CHECKPOINT:
      return NamenodeRole.CHECKPOINT;
    }
    return null;
  }

  public static NamenodeRoleProto convert(NamenodeRole role) {
    switch (role) {
    case NAMENODE:
      return NamenodeRoleProto.NAMENODE;
    case BACKUP:
      return NamenodeRoleProto.BACKUP;
    case CHECKPOINT:
      return NamenodeRoleProto.CHECKPOINT;
    }
    return null;
  }

  public static StorageInfoProto convert(StorageInfo info) {
    return StorageInfoProto.newBuilder().setClusterID(info.getClusterID())
        .setCTime(info.getCTime()).setLayoutVersion(info.getLayoutVersion())
        .setNamespceID(info.getNamespaceID()).build();
  }

  public static StorageInfo convert(StorageInfoProto info) {
    return new StorageInfo(info.getLayoutVersion(), info.getNamespceID(),
        info.getClusterID(), info.getCTime());
  }

  public static NamenodeRegistrationProto convert(NamenodeRegistration reg) {
    return NamenodeRegistrationProto.newBuilder()
        .setHttpAddress(reg.getHttpAddress()).setRole(convert(reg.getRole()))
        .setRpcAddress(reg.getAddress())
        .setStorageInfo(convert((StorageInfo) reg)).build();
  }

  public static NamenodeRegistration convert(NamenodeRegistrationProto reg) {
    return new NamenodeRegistration(reg.getRpcAddress(), reg.getHttpAddress(),
        convert(reg.getStorageInfo()), convert(reg.getRole()));
  }

  // DatanodeId
  public static DatanodeID convert(DatanodeIDProto dn) {
    return new DatanodeID(dn.getIpAddr(), dn.getHostName(), dn.getStorageID(),
        dn.getXferPort(), dn.getInfoPort(), dn.getIpcPort());
  }

  public static DatanodeIDProto convert(DatanodeID dn) {
    return DatanodeIDProto.newBuilder()
        .setIpAddr(dn.getIpAddr())
        .setHostName(dn.getHostName())
        .setStorageID(dn.getStorageID())
        .setXferPort(dn.getXferPort())
        .setInfoPort(dn.getInfoPort())
        .setIpcPort(dn.getIpcPort()).build();
  }

  // Arrays of DatanodeId
  public static DatanodeIDProto[] convert(DatanodeID[] did) {
    if (did == null)
      return null;
    final int len = did.length;
    DatanodeIDProto[] result = new DatanodeIDProto[len];
    for (int i = 0; i < len; ++i) {
      result[i] = convert(did[i]);
    }
    return result;
  }

  public static DatanodeID[] convert(DatanodeIDProto[] did) {
    if (did == null) return null;
    final int len = did.length;
    DatanodeID[] result = new DatanodeID[len];
    for (int i = 0; i < len; ++i) {
      result[i] = convert(did[i]);
    }
    return result;
  }

  // Block
  public static BlockProto convert(Block b) {
    return BlockProto.newBuilder().setBlockId(b.getBlockId())
        .setGenStamp(b.getGenerationStamp()).setNumBytes(b.getNumBytes())
        .build();
  }

  public static Block convert(BlockProto b) {
    return new Block(b.getBlockId(), b.getNumBytes(), b.getGenStamp());
  }
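
  // Illustrative round trip (sketch): converting a Block to its protobuf form
  // and back, e.g.
  //   Block copy = PBHelper.convert(PBHelper.convert(b));
  // yields a Block with the same id, length and generation stamp as b.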

  public static BlockWithLocationsProto convert(BlockWithLocations blk) {
    return BlockWithLocationsProto.newBuilder()
        .setBlock(convert(blk.getBlock()))
        .addAllStorageIDs(Arrays.asList(blk.getStorageIDs())).build();
  }

  public static BlockWithLocations convert(BlockWithLocationsProto b) {
    return new BlockWithLocations(convert(b.getBlock()), b.getStorageIDsList()
        .toArray(new String[0]));
  }

  public static BlocksWithLocationsProto convert(BlocksWithLocations blks) {
    BlocksWithLocationsProto.Builder builder = BlocksWithLocationsProto
        .newBuilder();
    for (BlockWithLocations b : blks.getBlocks()) {
      builder.addBlocks(convert(b));
    }
    return builder.build();
  }

  public static BlocksWithLocations convert(BlocksWithLocationsProto blocks) {
    List<BlockWithLocationsProto> b = blocks.getBlocksList();
    BlockWithLocations[] ret = new BlockWithLocations[b.size()];
    int i = 0;
    for (BlockWithLocationsProto entry : b) {
      ret[i++] = convert(entry);
    }
    return new BlocksWithLocations(ret);
  }

  public static BlockKeyProto convert(BlockKey key) {
    byte[] encodedKey = key.getEncodedKey();
    ByteString keyBytes = ByteString.copyFrom(encodedKey == null ? new byte[0]
        : encodedKey);
    return BlockKeyProto.newBuilder().setKeyId(key.getKeyId())
        .setKeyBytes(keyBytes).setExpiryDate(key.getExpiryDate()).build();
  }

  public static BlockKey convert(BlockKeyProto k) {
    return new BlockKey(k.getKeyId(), k.getExpiryDate(), k.getKeyBytes()
        .toByteArray());
  }

  public static ExportedBlockKeysProto convert(ExportedBlockKeys keys) {
    ExportedBlockKeysProto.Builder builder = ExportedBlockKeysProto
        .newBuilder();
    builder.setIsBlockTokenEnabled(keys.isBlockTokenEnabled())
        .setKeyUpdateInterval(keys.getKeyUpdateInterval())
        .setTokenLifeTime(keys.getTokenLifetime())
        .setCurrentKey(convert(keys.getCurrentKey()));
    for (BlockKey k : keys.getAllKeys()) {
      builder.addAllKeys(convert(k));
    }
    return builder.build();
  }

  public static ExportedBlockKeys convert(ExportedBlockKeysProto keys) {
    return new ExportedBlockKeys(keys.getIsBlockTokenEnabled(),
        keys.getKeyUpdateInterval(), keys.getTokenLifeTime(),
        convert(keys.getCurrentKey()), convertBlockKeys(keys.getAllKeysList()));
  }

  public static CheckpointSignatureProto convert(CheckpointSignature s) {
    return CheckpointSignatureProto.newBuilder()
        .setBlockPoolId(s.getBlockpoolID())
        .setCurSegmentTxId(s.getCurSegmentTxId())
        .setMostRecentCheckpointTxId(s.getMostRecentCheckpointTxId())
        .setStorageInfo(PBHelper.convert((StorageInfo) s)).build();
  }

  public static CheckpointSignature convert(CheckpointSignatureProto s) {
    return new CheckpointSignature(PBHelper.convert(s.getStorageInfo()),
        s.getBlockPoolId(), s.getMostRecentCheckpointTxId(),
        s.getCurSegmentTxId());
  }

  public static RemoteEditLogProto convert(RemoteEditLog log) {
    return RemoteEditLogProto.newBuilder()
        .setStartTxId(log.getStartTxId())
        .setEndTxId(log.getEndTxId())
        .setIsInProgress(log.isInProgress()).build();
  }

  public static RemoteEditLog convert(RemoteEditLogProto l) {
    return new RemoteEditLog(l.getStartTxId(), l.getEndTxId(),
        l.getIsInProgress());
  }

  public static RemoteEditLogManifestProto convert(
      RemoteEditLogManifest manifest) {
    RemoteEditLogManifestProto.Builder builder = RemoteEditLogManifestProto
        .newBuilder();
    for (RemoteEditLog log : manifest.getLogs()) {
      builder.addLogs(convert(log));
    }
    return builder.build();
  }

  public static RemoteEditLogManifest convert(
      RemoteEditLogManifestProto manifest) {
    List<RemoteEditLog> logs = new ArrayList<RemoteEditLog>(manifest
        .getLogsList().size());
    for (RemoteEditLogProto l : manifest.getLogsList()) {
      logs.add(convert(l));
    }
    return new RemoteEditLogManifest(logs);
  }

  public static CheckpointCommandProto convert(CheckpointCommand cmd) {
    return CheckpointCommandProto.newBuilder()
        .setSignature(convert(cmd.getSignature()))
        .setNeedToReturnImage(cmd.needToReturnImage()).build();
  }

  public static NamenodeCommandProto convert(NamenodeCommand cmd) {
    if (cmd instanceof CheckpointCommand) {
      return NamenodeCommandProto.newBuilder().setAction(cmd.getAction())
          .setType(NamenodeCommandProto.Type.CheckPointCommand)
          .setCheckpointCmd(convert((CheckpointCommand) cmd)).build();
    }
    return NamenodeCommandProto.newBuilder()
        .setType(NamenodeCommandProto.Type.NamenodeCommand)
        .setAction(cmd.getAction()).build();
  }

  public static BlockKey[] convertBlockKeys(List<BlockKeyProto> list) {
    BlockKey[] ret = new BlockKey[list.size()];
    int i = 0;
    for (BlockKeyProto k : list) {
      ret[i++] = convert(k);
    }
    return ret;
  }

  public static NamespaceInfo convert(NamespaceInfoProto info) {
    StorageInfoProto storage = info.getStorageInfo();
    return new NamespaceInfo(storage.getNamespceID(), storage.getClusterID(),
        info.getBlockPoolID(), storage.getCTime(), info.getBuildVersion(),
        info.getSoftwareVersion());
  }

  public static NamenodeCommand convert(NamenodeCommandProto cmd) {
    if (cmd == null) return null;
    switch (cmd.getType()) {
    case CheckPointCommand:
      CheckpointCommandProto chkPt = cmd.getCheckpointCmd();
      return new CheckpointCommand(PBHelper.convert(chkPt.getSignature()),
          chkPt.getNeedToReturnImage());
    default:
      return new NamenodeCommand(cmd.getAction());
    }
  }

  public static ExtendedBlock convert(ExtendedBlockProto eb) {
    if (eb == null) return null;
    return new ExtendedBlock(eb.getPoolId(), eb.getBlockId(), eb.getNumBytes(),
        eb.getGenerationStamp());
  }
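
  // Both ExtendedBlock converters (to and from ExtendedBlockProto) accept null
  // and return null, so a caller may pass an optional block straight through;
  // illustrative sketch, with maybeNullBlock a possibly-null ExtendedBlock:
  //   ExtendedBlockProto proto = PBHelper.convert(maybeNullBlock);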

  public static ExtendedBlockProto convert(final ExtendedBlock b) {
    if (b == null) return null;
    return ExtendedBlockProto.newBuilder().
        setPoolId(b.getBlockPoolId()).
        setBlockId(b.getBlockId()).
        setNumBytes(b.getNumBytes()).
        setGenerationStamp(b.getGenerationStamp()).
        build();
  }

  public static RecoveringBlockProto convert(RecoveringBlock b) {
    if (b == null) {
      return null;
    }
    LocatedBlockProto lb = PBHelper.convert((LocatedBlock) b);
    return RecoveringBlockProto.newBuilder().setBlock(lb)
        .setNewGenStamp(b.getNewGenerationStamp()).build();
  }

  public static RecoveringBlock convert(RecoveringBlockProto b) {
    ExtendedBlock block = convert(b.getBlock().getB());
    DatanodeInfo[] locs = convert(b.getBlock().getLocsList());
    return new RecoveringBlock(block, locs, b.getNewGenStamp());
  }

  public static DatanodeInfoProto.AdminState convert(
      final DatanodeInfo.AdminStates inAs) {
    switch (inAs) {
    case NORMAL: return DatanodeInfoProto.AdminState.NORMAL;
    case DECOMMISSION_INPROGRESS:
      return DatanodeInfoProto.AdminState.DECOMMISSION_INPROGRESS;
    case DECOMMISSIONED: return DatanodeInfoProto.AdminState.DECOMMISSIONED;
    default: return DatanodeInfoProto.AdminState.NORMAL;
    }
  }

  static public DatanodeInfo convert(DatanodeInfoProto di) {
    if (di == null) return null;
    return new DatanodeInfo(
        PBHelper.convert(di.getId()),
        di.hasLocation() ? di.getLocation() : null,
        di.getCapacity(), di.getDfsUsed(), di.getRemaining(),
        di.getBlockPoolUsed(), di.getLastUpdate(), di.getXceiverCount(),
        PBHelper.convert(di.getAdminState()));
  }

  static public DatanodeInfoProto convertDatanodeInfo(DatanodeInfo di) {
    if (di == null) return null;
    DatanodeInfoProto.Builder builder = DatanodeInfoProto.newBuilder();
    if (di.getNetworkLocation() != null) {
      builder.setLocation(di.getNetworkLocation());
    }

    return builder.
        setId(PBHelper.convert((DatanodeID) di)).
        setCapacity(di.getCapacity()).
        setDfsUsed(di.getDfsUsed()).
        setRemaining(di.getRemaining()).
        setBlockPoolUsed(di.getBlockPoolUsed()).
        setLastUpdate(di.getLastUpdate()).
        setXceiverCount(di.getXceiverCount()).
        setAdminState(PBHelper.convert(di.getAdminState())).
        build();
  }

  static public DatanodeInfo[] convert(DatanodeInfoProto di[]) {
    if (di == null) return null;
    DatanodeInfo[] result = new DatanodeInfo[di.length];
    for (int i = 0; i < di.length; i++) {
      result[i] = convert(di[i]);
    }
    return result;
  }

  public static List<? extends HdfsProtos.DatanodeInfoProto> convert(
      DatanodeInfo[] dnInfos) {
    return convert(dnInfos, 0);
  }

  /**
   * Copy from {@code dnInfos} into a list of protos of the same remaining
   * size, starting at {@code startIdx}.
   */
  public static List<? extends HdfsProtos.DatanodeInfoProto> convert(
      DatanodeInfo[] dnInfos, int startIdx) {
    if (dnInfos == null)
      return null;
    ArrayList<HdfsProtos.DatanodeInfoProto> protos = Lists
        .newArrayListWithCapacity(dnInfos.length);
    for (int i = startIdx; i < dnInfos.length; i++) {
      protos.add(convert(dnInfos[i]));
    }
    return protos;
  }

  public static DatanodeInfo[] convert(List<DatanodeInfoProto> list) {
    DatanodeInfo[] info = new DatanodeInfo[list.size()];
    for (int i = 0; i < info.length; i++) {
      info[i] = convert(list.get(i));
    }
    return info;
  }

  public static DatanodeInfoProto convert(DatanodeInfo info) {
    DatanodeInfoProto.Builder builder = DatanodeInfoProto.newBuilder();
    builder.setBlockPoolUsed(info.getBlockPoolUsed());
    builder.setAdminState(PBHelper.convert(info.getAdminState()));
    builder.setCapacity(info.getCapacity())
        .setDfsUsed(info.getDfsUsed())
        .setId(PBHelper.convert((DatanodeID) info))
        .setLastUpdate(info.getLastUpdate())
        .setLocation(info.getNetworkLocation())
        .setRemaining(info.getRemaining())
        .setXceiverCount(info.getXceiverCount())
        .build();
    return builder.build();
  }

  public static AdminStates convert(AdminState adminState) {
    switch(adminState) {
    case DECOMMISSION_INPROGRESS:
      return AdminStates.DECOMMISSION_INPROGRESS;
    case DECOMMISSIONED:
      return AdminStates.DECOMMISSIONED;
    case NORMAL:
    default:
      return AdminStates.NORMAL;
    }
  }

  public static LocatedBlockProto convert(LocatedBlock b) {
    if (b == null) return null;
    Builder builder = LocatedBlockProto.newBuilder();
    DatanodeInfo[] locs = b.getLocations();
    for (int i = 0; i < locs.length; i++) {
      builder.addLocs(i, PBHelper.convert(locs[i]));
    }
    return builder.setB(PBHelper.convert(b.getBlock()))
        .setBlockToken(PBHelper.convert(b.getBlockToken()))
        .setCorrupt(b.isCorrupt()).setOffset(b.getStartOffset()).build();
  }

  public static LocatedBlock convert(LocatedBlockProto proto) {
    if (proto == null) return null;
    List<DatanodeInfoProto> locs = proto.getLocsList();
    DatanodeInfo[] targets = new DatanodeInfo[locs.size()];
    for (int i = 0; i < locs.size(); i++) {
      targets[i] = PBHelper.convert(locs.get(i));
    }
    LocatedBlock lb = new LocatedBlock(PBHelper.convert(proto.getB()), targets,
        proto.getOffset(), proto.getCorrupt());
    lb.setBlockToken(PBHelper.convert(proto.getBlockToken()));
    return lb;
  }
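
  // Note (sketch): a LocatedBlock round trip through its protobuf form
  // preserves the block, its locations, the start offset, the corrupt flag
  // and the block access token, e.g.
  //   LocatedBlock copy = PBHelper.convert(PBHelper.convert(lb));
  // for a non-null LocatedBlock lb.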

  public static TokenProto convert(Token<?> tok) {
    return TokenProto.newBuilder().
        setIdentifier(ByteString.copyFrom(tok.getIdentifier())).
        setPassword(ByteString.copyFrom(tok.getPassword())).
        setKind(tok.getKind().toString()).
        setService(tok.getService().toString()).build();
  }

  public static Token<BlockTokenIdentifier> convert(
      TokenProto blockToken) {
    return new Token<BlockTokenIdentifier>(blockToken.getIdentifier()
        .toByteArray(), blockToken.getPassword().toByteArray(), new Text(
        blockToken.getKind()), new Text(blockToken.getService()));
  }

  public static Token<DelegationTokenIdentifier> convertDelegationToken(
      TokenProto blockToken) {
    return new Token<DelegationTokenIdentifier>(blockToken.getIdentifier()
        .toByteArray(), blockToken.getPassword().toByteArray(), new Text(
        blockToken.getKind()), new Text(blockToken.getService()));
  }

  public static ReplicaState convert(ReplicaStateProto state) {
    switch (state) {
    case RBW:
      return ReplicaState.RBW;
    case RUR:
      return ReplicaState.RUR;
    case RWR:
      return ReplicaState.RWR;
    case TEMPORARY:
      return ReplicaState.TEMPORARY;
    case FINALIZED:
    default:
      return ReplicaState.FINALIZED;
    }
  }

  public static ReplicaStateProto convert(ReplicaState state) {
    switch (state) {
    case RBW:
      return ReplicaStateProto.RBW;
    case RUR:
      return ReplicaStateProto.RUR;
    case RWR:
      return ReplicaStateProto.RWR;
    case TEMPORARY:
      return ReplicaStateProto.TEMPORARY;
    case FINALIZED:
    default:
      return ReplicaStateProto.FINALIZED;
    }
  }

  public static DatanodeRegistrationProto convert(
      DatanodeRegistration registration) {
    DatanodeRegistrationProto.Builder builder = DatanodeRegistrationProto
        .newBuilder();
    return builder.setDatanodeID(PBHelper.convert((DatanodeID) registration))
        .setStorageInfo(PBHelper.convert(registration.getStorageInfo()))
        .setKeys(PBHelper.convert(registration.getExportedKeys()))
        .setSoftwareVersion(registration.getSoftwareVersion()).build();
  }

  public static DatanodeRegistration convert(DatanodeRegistrationProto proto) {
    return new DatanodeRegistration(PBHelper.convert(proto.getDatanodeID()),
        PBHelper.convert(proto.getStorageInfo()), PBHelper.convert(proto
        .getKeys()), proto.getSoftwareVersion());
  }

  public static DatanodeCommand convert(DatanodeCommandProto proto) {
    switch (proto.getCmdType()) {
    case BalancerBandwidthCommand:
      return PBHelper.convert(proto.getBalancerCmd());
    case BlockCommand:
      return PBHelper.convert(proto.getBlkCmd());
    case BlockRecoveryCommand:
      return PBHelper.convert(proto.getRecoveryCmd());
    case FinalizeCommand:
      return PBHelper.convert(proto.getFinalizeCmd());
    case KeyUpdateCommand:
      return PBHelper.convert(proto.getKeyUpdateCmd());
    case RegisterCommand:
      return REG_CMD;
    }
    return null;
  }

  public static BalancerBandwidthCommandProto convert(
      BalancerBandwidthCommand bbCmd) {
    return BalancerBandwidthCommandProto.newBuilder()
        .setBandwidth(bbCmd.getBalancerBandwidthValue()).build();
  }

  public static KeyUpdateCommandProto convert(KeyUpdateCommand cmd) {
    return KeyUpdateCommandProto.newBuilder()
        .setKeys(PBHelper.convert(cmd.getExportedKeys())).build();
  }

  public static BlockRecoveryCommandProto convert(BlockRecoveryCommand cmd) {
    BlockRecoveryCommandProto.Builder builder = BlockRecoveryCommandProto
        .newBuilder();
    for (RecoveringBlock b : cmd.getRecoveringBlocks()) {
      builder.addBlocks(PBHelper.convert(b));
    }
    return builder.build();
  }

  public static FinalizeCommandProto convert(FinalizeCommand cmd) {
    return FinalizeCommandProto.newBuilder()
        .setBlockPoolId(cmd.getBlockPoolId()).build();
  }

  public static BlockCommandProto convert(BlockCommand cmd) {
    BlockCommandProto.Builder builder = BlockCommandProto.newBuilder()
        .setBlockPoolId(cmd.getBlockPoolId());
    switch (cmd.getAction()) {
    case DatanodeProtocol.DNA_TRANSFER:
      builder.setAction(BlockCommandProto.Action.TRANSFER);
      break;
    case DatanodeProtocol.DNA_INVALIDATE:
      builder.setAction(BlockCommandProto.Action.INVALIDATE);
      break;
    case DatanodeProtocol.DNA_SHUTDOWN:
      builder.setAction(BlockCommandProto.Action.SHUTDOWN);
      break;
    default:
      throw new AssertionError("Invalid action");
    }
    Block[] blocks = cmd.getBlocks();
    for (int i = 0; i < blocks.length; i++) {
      builder.addBlocks(PBHelper.convert(blocks[i]));
    }
    builder.addAllTargets(PBHelper.convert(cmd.getTargets()));
    return builder.build();
  }

  private static List<DatanodeInfosProto> convert(DatanodeInfo[][] targets) {
    DatanodeInfosProto[] ret = new DatanodeInfosProto[targets.length];
    for (int i = 0; i < targets.length; i++) {
      ret[i] = DatanodeInfosProto.newBuilder()
          .addAllDatanodes(PBHelper.convert(targets[i])).build();
    }
    return Arrays.asList(ret);
  }

  public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) {
    DatanodeCommandProto.Builder builder = DatanodeCommandProto.newBuilder();
    if (datanodeCommand == null) {
      return builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand)
          .build();
    }
    switch (datanodeCommand.getAction()) {
    case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
      builder.setCmdType(DatanodeCommandProto.Type.BalancerBandwidthCommand)
          .setBalancerCmd(
              PBHelper.convert((BalancerBandwidthCommand) datanodeCommand));
      break;
    case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
      builder
          .setCmdType(DatanodeCommandProto.Type.KeyUpdateCommand)
          .setKeyUpdateCmd(PBHelper.convert((KeyUpdateCommand) datanodeCommand));
      break;
    case DatanodeProtocol.DNA_RECOVERBLOCK:
      builder.setCmdType(DatanodeCommandProto.Type.BlockRecoveryCommand)
          .setRecoveryCmd(
              PBHelper.convert((BlockRecoveryCommand) datanodeCommand));
      break;
    case DatanodeProtocol.DNA_FINALIZE:
      builder.setCmdType(DatanodeCommandProto.Type.FinalizeCommand)
          .setFinalizeCmd(PBHelper.convert((FinalizeCommand) datanodeCommand));
      break;
    case DatanodeProtocol.DNA_REGISTER:
      builder.setCmdType(DatanodeCommandProto.Type.RegisterCommand)
          .setRegisterCmd(REG_CMD_PROTO);
      break;
    case DatanodeProtocol.DNA_TRANSFER:
    case DatanodeProtocol.DNA_INVALIDATE:
    case DatanodeProtocol.DNA_SHUTDOWN:
      builder.setCmdType(DatanodeCommandProto.Type.BlockCommand).setBlkCmd(
          PBHelper.convert((BlockCommand) datanodeCommand));
      break;
    case DatanodeProtocol.DNA_UNKNOWN: //Not expected
    default:
      builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
    }
    return builder.build();
  }

  public static KeyUpdateCommand convert(KeyUpdateCommandProto keyUpdateCmd) {
    return new KeyUpdateCommand(PBHelper.convert(keyUpdateCmd.getKeys()));
  }

  public static FinalizeCommand convert(FinalizeCommandProto finalizeCmd) {
    return new FinalizeCommand(finalizeCmd.getBlockPoolId());
  }

  public static BlockRecoveryCommand convert(
      BlockRecoveryCommandProto recoveryCmd) {
    List<RecoveringBlockProto> list = recoveryCmd.getBlocksList();
    List<RecoveringBlock> recoveringBlocks = new ArrayList<RecoveringBlock>(
        list.size());

    for (RecoveringBlockProto rbp : list) {
      recoveringBlocks.add(PBHelper.convert(rbp));
    }
    return new BlockRecoveryCommand(recoveringBlocks);
  }

  public static BlockCommand convert(BlockCommandProto blkCmd) {
    List<BlockProto> blockProtoList = blkCmd.getBlocksList();
    Block[] blocks = new Block[blockProtoList.size()];
    for (int i = 0; i < blockProtoList.size(); i++) {
      blocks[i] = PBHelper.convert(blockProtoList.get(i));
    }
    List<DatanodeInfosProto> targetList = blkCmd.getTargetsList();
    DatanodeInfo[][] targets = new DatanodeInfo[targetList.size()][];
    for (int i = 0; i < targetList.size(); i++) {
      targets[i] = PBHelper.convert(targetList.get(i));
    }
    int action = DatanodeProtocol.DNA_UNKNOWN;
    switch (blkCmd.getAction()) {
    case TRANSFER:
      action = DatanodeProtocol.DNA_TRANSFER;
      break;
    case INVALIDATE:
      action = DatanodeProtocol.DNA_INVALIDATE;
      break;
    case SHUTDOWN:
      action = DatanodeProtocol.DNA_SHUTDOWN;
      break;
    }
    return new BlockCommand(action, blkCmd.getBlockPoolId(), blocks, targets);
  }

  public static DatanodeInfo[] convert(DatanodeInfosProto datanodeInfosProto) {
    List<DatanodeInfoProto> proto = datanodeInfosProto.getDatanodesList();
    DatanodeInfo[] infos = new DatanodeInfo[proto.size()];
    for (int i = 0; i < infos.length; i++) {
      infos[i] = PBHelper.convert(proto.get(i));
    }
    return infos;
  }

  public static BalancerBandwidthCommand convert(
      BalancerBandwidthCommandProto balancerCmd) {
    return new BalancerBandwidthCommand(balancerCmd.getBandwidth());
  }

  public static ReceivedDeletedBlockInfoProto convert(
      ReceivedDeletedBlockInfo receivedDeletedBlockInfo) {
    ReceivedDeletedBlockInfoProto.Builder builder =
        ReceivedDeletedBlockInfoProto.newBuilder();

    ReceivedDeletedBlockInfoProto.BlockStatus status;
    switch (receivedDeletedBlockInfo.getStatus()) {
    case RECEIVING_BLOCK:
      status = ReceivedDeletedBlockInfoProto.BlockStatus.RECEIVING;
      break;
    case RECEIVED_BLOCK:
      status = ReceivedDeletedBlockInfoProto.BlockStatus.RECEIVED;
      break;
    case DELETED_BLOCK:
      status = ReceivedDeletedBlockInfoProto.BlockStatus.DELETED;
      break;
    default:
      throw new IllegalArgumentException("Bad status: " +
          receivedDeletedBlockInfo.getStatus());
    }
    builder.setStatus(status);

    if (receivedDeletedBlockInfo.getDelHints() != null) {
      builder.setDeleteHint(receivedDeletedBlockInfo.getDelHints());
    }
    return builder.setBlock(PBHelper.convert(receivedDeletedBlockInfo.getBlock()))
        .build();
  }

  public static ReceivedDeletedBlockInfo convert(
      ReceivedDeletedBlockInfoProto proto) {
    ReceivedDeletedBlockInfo.BlockStatus status = null;
    switch (proto.getStatus()) {
    case RECEIVING:
      status = BlockStatus.RECEIVING_BLOCK;
      break;
    case RECEIVED:
      status = BlockStatus.RECEIVED_BLOCK;
      break;
    case DELETED:
      status = BlockStatus.DELETED_BLOCK;
      break;
    }
    return new ReceivedDeletedBlockInfo(
        PBHelper.convert(proto.getBlock()),
        status,
        proto.hasDeleteHint() ? proto.getDeleteHint() : null);
  }

  public static NamespaceInfoProto convert(NamespaceInfo info) {
    return NamespaceInfoProto.newBuilder()
        .setBlockPoolID(info.getBlockPoolID())
        .setBuildVersion(info.getBuildVersion())
        .setUnused(0)
        .setStorageInfo(PBHelper.convert((StorageInfo) info))
        .setSoftwareVersion(info.getSoftwareVersion()).build();
  }

  // Located Block Arrays and Lists
  public static LocatedBlockProto[] convertLocatedBlock(LocatedBlock[] lb) {
    if (lb == null) return null;
    return convertLocatedBlock2(Arrays.asList(lb)).toArray(
        new LocatedBlockProto[lb.length]);
  }

  public static LocatedBlock[] convertLocatedBlock(LocatedBlockProto[] lb) {
    if (lb == null) return null;
    return convertLocatedBlock(Arrays.asList(lb)).toArray(
        new LocatedBlock[lb.length]);
  }

  public static List<LocatedBlock> convertLocatedBlock(
      List<LocatedBlockProto> lb) {
    if (lb == null) return null;
    final int len = lb.size();
    List<LocatedBlock> result =
        new ArrayList<LocatedBlock>(len);
    for (int i = 0; i < len; ++i) {
      result.add(PBHelper.convert(lb.get(i)));
    }
    return result;
  }

  public static List<LocatedBlockProto> convertLocatedBlock2(List<LocatedBlock> lb) {
    if (lb == null) return null;
    final int len = lb.size();
    List<LocatedBlockProto> result = new ArrayList<LocatedBlockProto>(len);
    for (int i = 0; i < len; ++i) {
      result.add(PBHelper.convert(lb.get(i)));
    }
    return result;
  }

  // LocatedBlocks
  public static LocatedBlocks convert(LocatedBlocksProto lb) {
    return new LocatedBlocks(
        lb.getFileLength(), lb.getUnderConstruction(),
        PBHelper.convertLocatedBlock(lb.getBlocksList()),
        lb.hasLastBlock() ? PBHelper.convert(lb.getLastBlock()) : null,
        lb.getIsLastBlockComplete());
  }

  public static LocatedBlocksProto convert(LocatedBlocks lb) {
    if (lb == null) {
      return null;
    }
    LocatedBlocksProto.Builder builder =
        LocatedBlocksProto.newBuilder();
    if (lb.getLastLocatedBlock() != null) {
      builder.setLastBlock(PBHelper.convert(lb.getLastLocatedBlock()));
    }
    return builder.setFileLength(lb.getFileLength())
        .setUnderConstruction(lb.isUnderConstruction())
        .addAllBlocks(PBHelper.convertLocatedBlock2(lb.getLocatedBlocks()))
        .setIsLastBlockComplete(lb.isLastBlockComplete()).build();
  }
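
  // Protobuf strings cannot be null, so in the DataEncryptionKey converters
  // below a missing encryption algorithm travels as the empty string and is
  // mapped back to null on the way in.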

  // DataEncryptionKey
  public static DataEncryptionKey convert(DataEncryptionKeyProto bet) {
    String encryptionAlgorithm = bet.getEncryptionAlgorithm();
    return new DataEncryptionKey(bet.getKeyId(),
        bet.getBlockPoolId(),
        bet.getNonce().toByteArray(),
        bet.getEncryptionKey().toByteArray(),
        bet.getExpiryDate(),
        encryptionAlgorithm.isEmpty() ? null : encryptionAlgorithm);
  }

  public static DataEncryptionKeyProto convert(DataEncryptionKey bet) {
    DataEncryptionKeyProto.Builder b = DataEncryptionKeyProto.newBuilder()
        .setKeyId(bet.keyId)
        .setBlockPoolId(bet.blockPoolId)
        .setNonce(ByteString.copyFrom(bet.nonce))
        .setEncryptionKey(ByteString.copyFrom(bet.encryptionKey))
        .setExpiryDate(bet.expiryDate);
    if (bet.encryptionAlgorithm != null) {
      b.setEncryptionAlgorithm(bet.encryptionAlgorithm);
    }
    return b.build();
  }

  public static FsServerDefaults convert(FsServerDefaultsProto fs) {
    if (fs == null) return null;
    return new FsServerDefaults(
        fs.getBlockSize(), fs.getBytesPerChecksum(),
        fs.getWritePacketSize(), (short) fs.getReplication(),
        fs.getFileBufferSize(),
        fs.getEncryptDataTransfer(),
        fs.getTrashInterval(),
        PBHelper.convert(fs.getChecksumType()));
  }

  public static FsServerDefaultsProto convert(FsServerDefaults fs) {
    if (fs == null) return null;
    return FsServerDefaultsProto.newBuilder().
        setBlockSize(fs.getBlockSize()).
        setBytesPerChecksum(fs.getBytesPerChecksum()).
        setWritePacketSize(fs.getWritePacketSize())
        .setReplication(fs.getReplication())
        .setFileBufferSize(fs.getFileBufferSize())
        .setEncryptDataTransfer(fs.getEncryptDataTransfer())
        .setTrashInterval(fs.getTrashInterval())
        .setChecksumType(PBHelper.convert(fs.getChecksumType()))
        .build();
  }

  public static FsPermissionProto convert(FsPermission p) {
    if (p == null) return null;
    return FsPermissionProto.newBuilder().setPerm(p.toShort()).build();
  }

  public static FsPermission convert(FsPermissionProto p) {
    if (p == null) return null;
    return new FsPermission((short) p.getPerm());
  }

  // The createFlag field in PB is a bitmask whose values are the same as the
  // enum values of CreateFlag.
  public static int convertCreateFlag(EnumSetWritable<CreateFlag> flag) {
    int value = 0;
    if (flag.contains(CreateFlag.APPEND)) {
      value |= CreateFlagProto.APPEND.getNumber();
    }
    if (flag.contains(CreateFlag.CREATE)) {
      value |= CreateFlagProto.CREATE.getNumber();
    }
    if (flag.contains(CreateFlag.OVERWRITE)) {
      value |= CreateFlagProto.OVERWRITE.getNumber();
    }
    return value;
  }

  public static EnumSetWritable<CreateFlag> convert(int flag) {
    EnumSet<CreateFlag> result =
        EnumSet.noneOf(CreateFlag.class);
    if ((flag & CreateFlagProto.APPEND_VALUE) == CreateFlagProto.APPEND_VALUE) {
      result.add(CreateFlag.APPEND);
    }
    if ((flag & CreateFlagProto.CREATE_VALUE) == CreateFlagProto.CREATE_VALUE) {
      result.add(CreateFlag.CREATE);
    }
    if ((flag & CreateFlagProto.OVERWRITE_VALUE)
        == CreateFlagProto.OVERWRITE_VALUE) {
      result.add(CreateFlag.OVERWRITE);
    }
    return new EnumSetWritable<CreateFlag>(result);
  }
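
  // Illustrative sketch of the bitmask mapping above: an EnumSetWritable
  // containing CREATE and OVERWRITE converts to the integer
  //   CreateFlagProto.CREATE_VALUE | CreateFlagProto.OVERWRITE_VALUE
  // and convert(int) turns that integer back into the same set.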

  public static HdfsFileStatus convert(HdfsFileStatusProto fs) {
    if (fs == null)
      return null;
    return new HdfsLocatedFileStatus(
        fs.getLength(), fs.getFileType().equals(FileType.IS_DIR),
        fs.getBlockReplication(), fs.getBlocksize(),
        fs.getModificationTime(), fs.getAccessTime(),
        PBHelper.convert(fs.getPermission()), fs.getOwner(), fs.getGroup(),
        fs.getFileType().equals(FileType.IS_SYMLINK) ?
            fs.getSymlink().toByteArray() : null,
        fs.getPath().toByteArray(),
        fs.hasLocations() ? PBHelper.convert(fs.getLocations()) : null);
  }

  public static HdfsFileStatusProto convert(HdfsFileStatus fs) {
    if (fs == null)
      return null;
    FileType fType = FileType.IS_FILE;
    if (fs.isDir()) {
      fType = FileType.IS_DIR;
    } else if (fs.isSymlink()) {
      fType = FileType.IS_SYMLINK;
    }

    HdfsFileStatusProto.Builder builder =
        HdfsFileStatusProto.newBuilder().
        setLength(fs.getLen()).
        setFileType(fType).
        setBlockReplication(fs.getReplication()).
        setBlocksize(fs.getBlockSize()).
        setModificationTime(fs.getModificationTime()).
        setAccessTime(fs.getAccessTime()).
        setPermission(PBHelper.convert(fs.getPermission())).
        setOwner(fs.getOwner()).
        setGroup(fs.getGroup()).
        setPath(ByteString.copyFrom(fs.getLocalNameInBytes()));
    if (fs.isSymlink()) {
      builder.setSymlink(ByteString.copyFrom(fs.getSymlinkInBytes()));
    }
    if (fs instanceof HdfsLocatedFileStatus) {
      LocatedBlocks locations = ((HdfsLocatedFileStatus) fs).getBlockLocations();
      if (locations != null) {
        builder.setLocations(PBHelper.convert(locations));
      }
    }
    return builder.build();
  }

  public static HdfsFileStatusProto[] convert(HdfsFileStatus[] fs) {
    if (fs == null) return null;
    final int len = fs.length;
    HdfsFileStatusProto[] result = new HdfsFileStatusProto[len];
    for (int i = 0; i < len; ++i) {
      result[i] = PBHelper.convert(fs[i]);
    }
    return result;
  }

  public static HdfsFileStatus[] convert(HdfsFileStatusProto[] fs) {
    if (fs == null) return null;
    final int len = fs.length;
    HdfsFileStatus[] result = new HdfsFileStatus[len];
    for (int i = 0; i < len; ++i) {
      result[i] = PBHelper.convert(fs[i]);
    }
    return result;
  }

  public static DirectoryListing convert(DirectoryListingProto dl) {
    if (dl == null)
      return null;
    List<HdfsFileStatusProto> partList = dl.getPartialListingList();
    return new DirectoryListing(
        partList.isEmpty() ? new HdfsLocatedFileStatus[0]
            : PBHelper.convert(
                partList.toArray(new HdfsFileStatusProto[partList.size()])),
        dl.getRemainingEntries());
  }

  public static DirectoryListingProto convert(DirectoryListing d) {
    if (d == null)
      return null;
    return DirectoryListingProto.newBuilder().
        addAllPartialListing(Arrays.asList(
            PBHelper.convert(d.getPartialListing()))).
        setRemainingEntries(d.getRemainingEntries()).
        build();
  }
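
  // The long[] filesystem statistics handled below are indexed by the
  // GET_STATS_*_IDX constants in ClientProtocol (capacity, used, remaining,
  // under-replicated, corrupt-block and missing-block counts).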

  public static long[] convert(GetFsStatsResponseProto res) {
    long[] result = new long[6];
    result[ClientProtocol.GET_STATS_CAPACITY_IDX] = res.getCapacity();
    result[ClientProtocol.GET_STATS_USED_IDX] = res.getUsed();
    result[ClientProtocol.GET_STATS_REMAINING_IDX] = res.getRemaining();
    result[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX] = res.getUnderReplicated();
    result[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX] = res.getCorruptBlocks();
    result[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX] = res.getMissingBlocks();
    return result;
  }

  public static GetFsStatsResponseProto convert(long[] fsStats) {
    GetFsStatsResponseProto.Builder result = GetFsStatsResponseProto
        .newBuilder();
    if (fsStats.length >= ClientProtocol.GET_STATS_CAPACITY_IDX + 1)
      result.setCapacity(fsStats[ClientProtocol.GET_STATS_CAPACITY_IDX]);
    if (fsStats.length >= ClientProtocol.GET_STATS_USED_IDX + 1)
      result.setUsed(fsStats[ClientProtocol.GET_STATS_USED_IDX]);
    if (fsStats.length >= ClientProtocol.GET_STATS_REMAINING_IDX + 1)
      result.setRemaining(fsStats[ClientProtocol.GET_STATS_REMAINING_IDX]);
    if (fsStats.length >= ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX + 1)
      result.setUnderReplicated(
          fsStats[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX]);
    if (fsStats.length >= ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX + 1)
      result.setCorruptBlocks(
          fsStats[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX]);
    if (fsStats.length >= ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX + 1)
      result.setMissingBlocks(
          fsStats[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX]);
    return result.build();
  }

  public static DatanodeReportTypeProto convert(DatanodeReportType t) {
    switch (t) {
    case ALL: return DatanodeReportTypeProto.ALL;
    case LIVE: return DatanodeReportTypeProto.LIVE;
    case DEAD: return DatanodeReportTypeProto.DEAD;
    default:
      throw new IllegalArgumentException("Unexpected data type report:" + t);
    }
  }

  public static DatanodeReportType convert(DatanodeReportTypeProto t) {
    switch (t) {
    case ALL: return DatanodeReportType.ALL;
    case LIVE: return DatanodeReportType.LIVE;
    case DEAD: return DatanodeReportType.DEAD;
    default:
      throw new IllegalArgumentException("Unexpected data type report:" + t);
    }
  }

  public static SafeModeActionProto convert(SafeModeAction a) {
    switch (a) {
    case SAFEMODE_LEAVE:
      return SafeModeActionProto.SAFEMODE_LEAVE;
    case SAFEMODE_ENTER:
      return SafeModeActionProto.SAFEMODE_ENTER;
    case SAFEMODE_GET:
      return SafeModeActionProto.SAFEMODE_GET;
    default:
      throw new IllegalArgumentException("Unexpected SafeModeAction :" + a);
    }
  }

  public static SafeModeAction convert(
      ClientNamenodeProtocolProtos.SafeModeActionProto a) {
    switch (a) {
    case SAFEMODE_LEAVE:
      return SafeModeAction.SAFEMODE_LEAVE;
    case SAFEMODE_ENTER:
      return SafeModeAction.SAFEMODE_ENTER;
    case SAFEMODE_GET:
      return SafeModeAction.SAFEMODE_GET;
    default:
      throw new IllegalArgumentException("Unexpected SafeModeAction :" + a);
    }
  }

  public static CorruptFileBlocks convert(CorruptFileBlocksProto c) {
    if (c == null)
      return null;
    List<String> fileList = c.getFilesList();
    return new CorruptFileBlocks(fileList.toArray(new String[fileList.size()]),
        c.getCookie());
  }

  public static CorruptFileBlocksProto convert(CorruptFileBlocks c) {
    if (c == null)
      return null;
    return CorruptFileBlocksProto.newBuilder().
        addAllFiles(Arrays.asList(c.getFiles())).
        setCookie(c.getCookie()).
        build();
  }

  public static ContentSummary convert(ContentSummaryProto cs) {
    if (cs == null) return null;
    return new ContentSummary(
        cs.getLength(), cs.getFileCount(), cs.getDirectoryCount(), cs.getQuota(),
        cs.getSpaceConsumed(), cs.getSpaceQuota());
  }

  public static ContentSummaryProto convert(ContentSummary cs) {
    if (cs == null) return null;
    return ContentSummaryProto.newBuilder().
        setLength(cs.getLength()).
        setFileCount(cs.getFileCount()).
        setDirectoryCount(cs.getDirectoryCount()).
        setQuota(cs.getQuota()).
        setSpaceConsumed(cs.getSpaceConsumed()).
        setSpaceQuota(cs.getSpaceQuota()).
        build();
  }

  public static NNHAStatusHeartbeat convert(NNHAStatusHeartbeatProto s) {
    if (s == null) return null;
    switch (s.getState()) {
    case ACTIVE:
      return new NNHAStatusHeartbeat(HAServiceState.ACTIVE, s.getTxid());
    case STANDBY:
      return new NNHAStatusHeartbeat(HAServiceState.STANDBY, s.getTxid());
    default:
      throw new IllegalArgumentException("Unexpected NNHAStatusHeartbeat.State:" +
          s.getState());
    }
  }

  public static NNHAStatusHeartbeatProto convert(NNHAStatusHeartbeat hb) {
    if (hb == null) return null;
    NNHAStatusHeartbeatProto.Builder builder =
        NNHAStatusHeartbeatProto.newBuilder();
    switch (hb.getState()) {
    case ACTIVE:
      builder.setState(NNHAStatusHeartbeatProto.State.ACTIVE);
      break;
    case STANDBY:
      builder.setState(NNHAStatusHeartbeatProto.State.STANDBY);
      break;
    default:
      throw new IllegalArgumentException("Unexpected NNHAStatusHeartbeat.State:" +
          hb.getState());
    }
    builder.setTxid(hb.getTxId());
    return builder.build();
  }

  public static DatanodeStorageProto convert(DatanodeStorage s) {
    return DatanodeStorageProto.newBuilder()
        .setState(PBHelper.convert(s.getState()))
        .setStorageID(s.getStorageID()).build();
  }

  private static StorageState convert(State state) {
    switch(state) {
    case READ_ONLY:
      return StorageState.READ_ONLY;
    case NORMAL:
    default:
      return StorageState.NORMAL;
    }
  }

  public static DatanodeStorage convert(DatanodeStorageProto s) {
    return new DatanodeStorage(s.getStorageID(), PBHelper.convert(s.getState()));
  }

  private static State convert(StorageState state) {
    switch(state) {
    case READ_ONLY:
      return DatanodeStorage.State.READ_ONLY;
    case NORMAL:
    default:
      return DatanodeStorage.State.NORMAL;
    }
  }

  public static StorageReportProto convert(StorageReport r) {
    return StorageReportProto.newBuilder()
        .setBlockPoolUsed(r.getBlockPoolUsed()).setCapacity(r.getCapacity())
        .setDfsUsed(r.getDfsUsed()).setRemaining(r.getRemaining())
        .setStorageID(r.getStorageID()).build();
  }

  public static JournalInfo convert(JournalInfoProto info) {
    int lv = info.hasLayoutVersion() ? info.getLayoutVersion() : 0;
    int nsID = info.hasNamespaceID() ? info.getNamespaceID() : 0;
    return new JournalInfo(lv, info.getClusterID(), nsID);
  }

  /**
   * Method used for converting a {@link JournalInfo}, sent from the Namenode
   * to journal receivers, into its protobuf form {@link JournalInfoProto}.
   */
  public static JournalInfoProto convert(JournalInfo j) {
    return JournalInfoProto.newBuilder().setClusterID(j.getClusterId())
        .setLayoutVersion(j.getLayoutVersion())
        .setNamespaceID(j.getNamespaceId()).build();
  }

  public static DataChecksum.Type convert(HdfsProtos.ChecksumTypeProto type) {
    return DataChecksum.Type.valueOf(type.getNumber());
  }

  public static HdfsProtos.ChecksumTypeProto convert(DataChecksum.Type type) {
    return HdfsProtos.ChecksumTypeProto.valueOf(type.id);
  }
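
  // vintPrefixed() reads a protobuf varint length prefix from the stream and
  // returns an InputStream bounded to exactly that many bytes. Illustrative
  // sketch, with SomeResponseProto standing in for a generated message class:
  //   SomeResponseProto resp = SomeResponseProto.parseFrom(PBHelper.vintPrefixed(in));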

  public static InputStream vintPrefixed(final InputStream input)
      throws IOException {
    final int firstByte = input.read();
    if (firstByte == -1) {
      throw new EOFException("Premature EOF: no length prefix available");
    }

    int size = CodedInputStream.readRawVarint32(firstByte, input);
    assert size >= 0;
    return new ExactSizeInputStream(input, size);
  }
}