/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hdfs.protocolPB;

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;

import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;

/**
 * Server-side translator for {@link DatanodeProtocolPB}: decodes protobuf
 * request messages received over RPC into the corresponding
 * {@link DatanodeProtocol} argument types, delegates the call to the wrapped
 * {@link DatanodeProtocol} implementation, and encodes the result back into a
 * protobuf response.
 */
public class DatanodeProtocolServerSideTranslatorPB implements
    DatanodeProtocolPB {

  private final DatanodeProtocol impl;

  // Reusable, immutable responses for calls whose replies carry no payload.
  private static final ErrorReportResponseProto ERROR_REPORT_RESPONSE_PROTO =
      ErrorReportResponseProto.newBuilder().build();
  private static final BlockReceivedAndDeletedResponseProto
      BLOCK_RECEIVED_AND_DELETE_RESPONSE =
          BlockReceivedAndDeletedResponseProto.newBuilder().build();
  private static final ReportBadBlocksResponseProto REPORT_BAD_BLOCK_RESPONSE =
      ReportBadBlocksResponseProto.newBuilder().build();
  private static final CommitBlockSynchronizationResponseProto
      COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO =
          CommitBlockSynchronizationResponseProto.newBuilder().build();

  public DatanodeProtocolServerSideTranslatorPB(DatanodeProtocol impl) {
    this.impl = impl;
  }

  @Override
  public RegisterDatanodeResponseProto registerDatanode(
      RpcController controller, RegisterDatanodeRequestProto request)
      throws ServiceException {
    DatanodeRegistration registration = PBHelper.convert(request
        .getRegistration());
    DatanodeRegistration registrationResp;
    try {
      registrationResp = impl.registerDatanode(registration);
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return RegisterDatanodeResponseProto.newBuilder()
        .setRegistration(PBHelper.convert(registrationResp)).build();
  }

  @Override
  public HeartbeatResponseProto sendHeartbeat(RpcController controller,
      HeartbeatRequestProto request) throws ServiceException {
    HeartbeatResponse response;
    try {
      // Convert each per-storage report in the request into a StorageReport.
      List<StorageReportProto> list = request.getReportsList();
      StorageReport[] report = new StorageReport[list.size()];
      int i = 0;
      for (StorageReportProto p : list) {
        report[i++] = new StorageReport(p.getStorageID(), p.getFailed(),
            p.getCapacity(), p.getDfsUsed(), p.getRemaining(),
            p.getBlockPoolUsed());
      }
      response = impl.sendHeartbeat(PBHelper.convert(request.getRegistration()),
          report, request.getXmitsInProgress(), request.getXceiverCount(),
          request.getFailedVolumes());
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    // Translate any commands returned by the NameNode into their protobuf form.
    HeartbeatResponseProto.Builder builder = HeartbeatResponseProto
        .newBuilder();
    DatanodeCommand[] cmds = response.getCommands();
    if (cmds != null) {
      for (int i = 0; i < cmds.length; i++) {
        if (cmds[i] != null) {
          builder.addCmds(PBHelper.convert(cmds[i]));
        }
      }
    }
    builder.setHaStatus(PBHelper.convert(response.getNameNodeHaState()));
    return builder.build();
  }

  @Override
  public BlockReportResponseProto blockReport(RpcController controller,
      BlockReportRequestProto request) throws ServiceException {
    DatanodeCommand cmd = null;
    StorageBlockReport[] report =
        new StorageBlockReport[request.getReportsCount()];

    // Unpack each per-storage block list into the long[] form the server expects.
    int index = 0;
    for (StorageBlockReportProto s : request.getReportsList()) {
      List<Long> blockIds = s.getBlocksList();
      long[] blocks = new long[blockIds.size()];
      for (int i = 0; i < blockIds.size(); i++) {
        blocks[i] = blockIds.get(i);
      }
      report[index++] = new StorageBlockReport(PBHelper.convert(s.getStorage()),
          blocks);
    }
    try {
      cmd = impl.blockReport(PBHelper.convert(request.getRegistration()),
          request.getBlockPoolId(), report);
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    BlockReportResponseProto.Builder builder =
        BlockReportResponseProto.newBuilder();
    if (cmd != null) {
      builder.setCmd(PBHelper.convert(cmd));
    }
    return builder.build();
  }

  @Override
  public BlockReceivedAndDeletedResponseProto blockReceivedAndDeleted(
      RpcController controller, BlockReceivedAndDeletedRequestProto request)
      throws ServiceException {
    List<StorageReceivedDeletedBlocksProto> sBlocks = request.getBlocksList();
    StorageReceivedDeletedBlocks[] info =
        new StorageReceivedDeletedBlocks[sBlocks.size()];
    for (int i = 0; i < sBlocks.size(); i++) {
      StorageReceivedDeletedBlocksProto sBlock = sBlocks.get(i);
      List<ReceivedDeletedBlockInfoProto> list = sBlock.getBlocksList();
      ReceivedDeletedBlockInfo[] rdBlocks =
          new ReceivedDeletedBlockInfo[list.size()];
      for (int j = 0; j < list.size(); j++) {
        rdBlocks[j] = PBHelper.convert(list.get(j));
      }
      info[i] = new StorageReceivedDeletedBlocks(sBlock.getStorageID(), rdBlocks);
    }
    try {
      impl.blockReceivedAndDeleted(PBHelper.convert(request.getRegistration()),
          request.getBlockPoolId(), info);
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return BLOCK_RECEIVED_AND_DELETE_RESPONSE;
  }

  @Override
  public ErrorReportResponseProto errorReport(RpcController controller,
      ErrorReportRequestProto request) throws ServiceException {
    try {
      // Accessor name follows the field spelling in the .proto definition.
      impl.errorReport(PBHelper.convert(request.getRegistartion()),
          request.getErrorCode(), request.getMsg());
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return ERROR_REPORT_RESPONSE_PROTO;
  }

  @Override
  public VersionResponseProto versionRequest(RpcController controller,
      VersionRequestProto request) throws ServiceException {
    NamespaceInfo info;
    try {
      info = impl.versionRequest();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return VersionResponseProto.newBuilder()
        .setInfo(PBHelper.convert(info)).build();
  }

  @Override
  public ReportBadBlocksResponseProto reportBadBlocks(RpcController controller,
      ReportBadBlocksRequestProto request) throws ServiceException {
    List<LocatedBlockProto> lbps = request.getBlocksList();
    LocatedBlock[] blocks = new LocatedBlock[lbps.size()];
    for (int i = 0; i < lbps.size(); i++) {
      blocks[i] = PBHelper.convert(lbps.get(i));
    }
    try {
      impl.reportBadBlocks(blocks);
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return REPORT_BAD_BLOCK_RESPONSE;
  }

  @Override
  public CommitBlockSynchronizationResponseProto commitBlockSynchronization(
      RpcController controller, CommitBlockSynchronizationRequestProto request)
      throws ServiceException {
    // Accessor name follows the field spelling in the .proto definition.
    List<DatanodeIDProto> dnprotos = request.getNewTaragetsList();
    DatanodeID[] dns = new DatanodeID[dnprotos.size()];
    for (int i = 0; i < dnprotos.size(); i++) {
      dns[i] = PBHelper.convert(dnprotos.get(i));
    }
    final List<String> sidprotos = request.getNewTargetStoragesList();
    final String[] storageIDs = sidprotos.toArray(new String[sidprotos.size()]);
    try {
      impl.commitBlockSynchronization(PBHelper.convert(request.getBlock()),
          request.getNewGenStamp(), request.getNewLength(),
          request.getCloseFile(), request.getDeleteBlock(), dns, storageIDs);
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO;
  }
}