001 /** 002 * Licensed to the Apache Software Foundation (ASF) under one 003 * or more contributor license agreements. See the NOTICE file 004 * distributed with this work for additional information 005 * regarding copyright ownership. The ASF licenses this file 006 * to you under the Apache License, Version 2.0 (the 007 * "License"); you may not use this file except in compliance 008 * with the License. You may obtain a copy of the License at 009 * 010 * http://www.apache.org/licenses/LICENSE-2.0 011 * 012 * Unless required by applicable law or agreed to in writing, software 013 * distributed under the License is distributed on an "AS IS" BASIS, 014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 015 * See the License for the specific language governing permissions and 016 * limitations under the License. 017 */ 018 019 package org.apache.hadoop.hdfs.protocolPB; 020 021 import java.io.IOException; 022 import java.util.List; 023 024 import org.apache.hadoop.hdfs.protocol.DatanodeID; 025 import org.apache.hadoop.hdfs.protocol.LocatedBlock; 026 import org.apache.hadoop.hdfs.protocol.RollingUpgradeStatus; 027 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto; 028 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto; 029 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto; 030 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto; 031 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto; 032 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto; 033 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto; 034 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto; 035 import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto; 036 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto; 037 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto; 038 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto; 039 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto; 040 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto; 041 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto; 042 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto; 043 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto; 044 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto; 045 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto; 046 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto; 047 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto; 048 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto; 049 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto; 050 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; 051 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; 052 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; 053 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse; 054 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; 055 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo; 056 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport; 057 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks; 058 import 
org.apache.hadoop.hdfs.server.protocol.StorageReport;

import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;

/**
 * Server-side protobuf translator for {@link DatanodeProtocol}.
 *
 * Each RPC method unwraps the incoming protobuf request, delegates to the
 * wrapped {@link DatanodeProtocol} implementation, and packages the result
 * back into the corresponding protobuf response. Any {@link IOException}
 * thrown by the implementation is rethrown as a {@link ServiceException},
 * as the protobuf RPC layer requires.
 */
public class DatanodeProtocolServerSideTranslatorPB implements
    DatanodeProtocolPB {

  /** The local implementation all RPCs are forwarded to. */
  private final DatanodeProtocol impl;

  // Shared singleton responses for RPCs whose reply carries no payload.
  private static final ErrorReportResponseProto
      VOID_ERROR_REPORT_RESPONSE_PROTO =
          ErrorReportResponseProto.newBuilder().build();
  private static final BlockReceivedAndDeletedResponseProto
      VOID_BLOCK_RECEIVED_AND_DELETE_RESPONSE =
          BlockReceivedAndDeletedResponseProto.newBuilder().build();
  private static final ReportBadBlocksResponseProto
      VOID_REPORT_BAD_BLOCK_RESPONSE =
          ReportBadBlocksResponseProto.newBuilder().build();
  private static final CommitBlockSynchronizationResponseProto
      VOID_COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO =
          CommitBlockSynchronizationResponseProto.newBuilder().build();

  public DatanodeProtocolServerSideTranslatorPB(DatanodeProtocol impl) {
    this.impl = impl;
  }

  /** Registers a datanode and echoes back the (possibly updated) registration. */
  @Override
  public RegisterDatanodeResponseProto registerDatanode(
      RpcController controller, RegisterDatanodeRequestProto request)
      throws ServiceException {
    final DatanodeRegistration nodeReg =
        PBHelper.convert(request.getRegistration());
    final DatanodeRegistration result;
    try {
      result = impl.registerDatanode(nodeReg);
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return RegisterDatanodeResponseProto.newBuilder()
        .setRegistration(PBHelper.convert(result))
        .build();
  }

  /**
   * Forwards a datanode heartbeat and translates the commands, HA status and
   * (optional) rolling-upgrade status of the reply into protobuf form.
   */
  @Override
  public HeartbeatResponseProto sendHeartbeat(RpcController controller,
      HeartbeatRequestProto request) throws ServiceException {
    final HeartbeatResponse resp;
    try {
      final StorageReport[] reports =
          PBHelper.convertStorageReports(request.getReportsList());
      resp = impl.sendHeartbeat(PBHelper.convert(request.getRegistration()),
          reports, request.getCacheCapacity(), request.getCacheUsed(),
          request.getXmitsInProgress(), request.getXceiverCount(),
          request.getFailedVolumes());
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    final HeartbeatResponseProto.Builder reply =
        HeartbeatResponseProto.newBuilder();
    final DatanodeCommand[] cmds = resp.getCommands();
    if (cmds != null) {
      // Null entries are legal in the command array; skip them.
      for (DatanodeCommand cmd : cmds) {
        if (cmd != null) {
          reply.addCmds(PBHelper.convert(cmd));
        }
      }
    }
    reply.setHaStatus(PBHelper.convert(resp.getNameNodeHaState()));
    final RollingUpgradeStatus rollingUpgrade = resp.getRollingUpdateStatus();
    if (rollingUpgrade != null) {
      reply.setRollingUpgradeStatus(
          PBHelper.convertRollingUpgradeStatus(rollingUpgrade));
    }
    return reply.build();
  }

  /**
   * Forwards a full block report (one entry per storage) and returns the
   * namenode's command, if any.
   */
  @Override
  public BlockReportResponseProto blockReport(RpcController controller,
      BlockReportRequestProto request) throws ServiceException {
    final List<StorageBlockReportProto> protoReports = request.getReportsList();
    final StorageBlockReport[] reports =
        new StorageBlockReport[protoReports.size()];
    for (int i = 0; i < reports.length; i++) {
      final StorageBlockReportProto proto = protoReports.get(i);
      final List<Long> blockIds = proto.getBlocksList();
      // Unbox the encoded block list into the primitive array the
      // server-side API expects.
      final long[] blocks = new long[blockIds.size()];
      int j = 0;
      for (Long id : blockIds) {
        blocks[j++] = id;
      }
      reports[i] =
          new StorageBlockReport(PBHelper.convert(proto.getStorage()), blocks);
    }
    DatanodeCommand cmd;
    try {
      cmd = impl.blockReport(PBHelper.convert(request.getRegistration()),
          request.getBlockPoolId(), reports);
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    final BlockReportResponseProto.Builder reply =
        BlockReportResponseProto.newBuilder();
    if (cmd != null) {
      reply.setCmd(PBHelper.convert(cmd));
    }
    return reply.build();
  }

  /** Forwards a cache report and returns the namenode's command, if any. */
  @Override
  public CacheReportResponseProto cacheReport(RpcController controller,
      CacheReportRequestProto request) throws ServiceException {
    DatanodeCommand cmd;
    try {
      cmd = impl.cacheReport(
          PBHelper.convert(request.getRegistration()),
          request.getBlockPoolId(),
          request.getBlocksList());
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    final CacheReportResponseProto.Builder reply =
        CacheReportResponseProto.newBuilder();
    if (cmd != null) {
      reply.setCmd(PBHelper.convert(cmd));
    }
    return reply.build();
  }

  /**
   * Forwards incremental block received/deleted notifications, grouped per
   * storage. Returns a shared void response.
   */
  @Override
  public BlockReceivedAndDeletedResponseProto blockReceivedAndDeleted(
      RpcController controller, BlockReceivedAndDeletedRequestProto request)
      throws ServiceException {
    final List<StorageReceivedDeletedBlocksProto> protoBlocks =
        request.getBlocksList();
    final StorageReceivedDeletedBlocks[] parsed =
        new StorageReceivedDeletedBlocks[protoBlocks.size()];
    int idx = 0;
    for (StorageReceivedDeletedBlocksProto storageProto : protoBlocks) {
      final List<ReceivedDeletedBlockInfoProto> infoProtos =
          storageProto.getBlocksList();
      final ReceivedDeletedBlockInfo[] infos =
          new ReceivedDeletedBlockInfo[infoProtos.size()];
      for (int j = 0; j < infos.length; j++) {
        infos[j] = PBHelper.convert(infoProtos.get(j));
      }
      // The storage may arrive as a full DatanodeStorage message or only as
      // a storage uuid; construct the matching variant.
      parsed[idx++] = storageProto.hasStorage()
          ? new StorageReceivedDeletedBlocks(
              PBHelper.convert(storageProto.getStorage()), infos)
          : new StorageReceivedDeletedBlocks(
              storageProto.getStorageUuid(), infos);
    }
    try {
      impl.blockReceivedAndDeleted(PBHelper.convert(request.getRegistration()),
          request.getBlockPoolId(), parsed);
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return VOID_BLOCK_RECEIVED_AND_DELETE_RESPONSE;
  }

  /** Forwards an error report from a datanode. Returns a shared void response. */
  @Override
  public ErrorReportResponseProto errorReport(RpcController controller,
      ErrorReportRequestProto request) throws ServiceException {
    try {
      // NOTE: getRegistartion() is the generated getter's actual (misspelled)
      // name; do not "fix" it here, it must match the proto definition.
      impl.errorReport(PBHelper.convert(request.getRegistartion()),
          request.getErrorCode(), request.getMsg());
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return VOID_ERROR_REPORT_RESPONSE_PROTO;
  }

  /** Returns the namespace info of this namenode. */
  @Override
  public VersionResponseProto versionRequest(RpcController controller,
      VersionRequestProto request) throws ServiceException {
    final NamespaceInfo nsInfo;
    try {
      nsInfo = impl.versionRequest();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return VersionResponseProto.newBuilder()
        .setInfo(PBHelper.convert(nsInfo))
        .build();
  }

  /** Forwards a bad-blocks report. Returns a shared void response. */
  @Override
  public ReportBadBlocksResponseProto reportBadBlocks(RpcController controller,
      ReportBadBlocksRequestProto request) throws ServiceException {
    final List<LocatedBlockProto> protos = request.getBlocksList();
    final LocatedBlock[] badBlocks = new LocatedBlock[protos.size()];
    int i = 0;
    for (LocatedBlockProto proto : protos) {
      badBlocks[i++] = PBHelper.convert(proto);
    }
    try {
      impl.reportBadBlocks(badBlocks);
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return VOID_REPORT_BAD_BLOCK_RESPONSE;
  }

  /**
   * Forwards a commit-block-synchronization request (block recovery result).
   * Returns a shared void response.
   */
  @Override
  public CommitBlockSynchronizationResponseProto commitBlockSynchronization(
      RpcController controller, CommitBlockSynchronizationRequestProto request)
      throws ServiceException {
    // NOTE: getNewTaragetsList() is the generated getter's actual (misspelled)
    // name; do not "fix" it here, it must match the proto definition.
    final List<DatanodeIDProto> targetProtos = request.getNewTaragetsList();
    final DatanodeID[] targets = new DatanodeID[targetProtos.size()];
    for (int i = 0; i < targets.length; i++) {
      targets[i] = PBHelper.convert(targetProtos.get(i));
    }
    final List<String> storages = request.getNewTargetStoragesList();
    final String[] targetStorageIDs =
        storages.toArray(new String[storages.size()]);
    try {
      impl.commitBlockSynchronization(PBHelper.convert(request.getBlock()),
          request.getNewGenStamp(), request.getNewLength(),
          request.getCloseFile(), request.getDeleteBlock(), targets,
          targetStorageIDs);
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return VOID_COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO;
  }
}