/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hdfs.protocolPB;

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeStatus;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;

import com.google.common.base.Preconditions;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;

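/**
 * Implementation for the protobuf service that forwards requests
 * received on {@link DatanodeProtocolPB} to the
 * {@link DatanodeProtocol} server implementation.
 *
 * <p>Typical wiring, as a rough sketch (the surrounding RPC server
 * setup is elided; {@code impl} stands for whatever object implements
 * {@link DatanodeProtocol} on the namenode side):
 * <pre>{@code
 * DatanodeProtocolServerSideTranslatorPB translator =
 *     new DatanodeProtocolServerSideTranslatorPB(impl);
 * BlockingService service =
 *     DatanodeProtocolService.newReflectiveBlockingService(translator);
 * // "service" is then registered with the RPC server so that
 * // datanode-to-namenode RPCs are dispatched through this class.
 * }</pre>
 */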
public class DatanodeProtocolServerSideTranslatorPB implements
    DatanodeProtocolPB {

  private final DatanodeProtocol impl;
  private static final ErrorReportResponseProto
      VOID_ERROR_REPORT_RESPONSE_PROTO =
          ErrorReportResponseProto.newBuilder().build();
  private static final BlockReceivedAndDeletedResponseProto
      VOID_BLOCK_RECEIVED_AND_DELETED_RESPONSE =
          BlockReceivedAndDeletedResponseProto.newBuilder().build();
  private static final ReportBadBlocksResponseProto
      VOID_REPORT_BAD_BLOCK_RESPONSE =
          ReportBadBlocksResponseProto.newBuilder().build();
  private static final CommitBlockSynchronizationResponseProto
      VOID_COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO =
          CommitBlockSynchronizationResponseProto.newBuilder().build();

  public DatanodeProtocolServerSideTranslatorPB(DatanodeProtocol impl) {
    this.impl = impl;
  }

  @Override
  public RegisterDatanodeResponseProto registerDatanode(
      RpcController controller, RegisterDatanodeRequestProto request)
      throws ServiceException {
    DatanodeRegistration registration = PBHelper.convert(request
        .getRegistration());
    DatanodeRegistration registrationResp;
    try {
      registrationResp = impl.registerDatanode(registration);
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return RegisterDatanodeResponseProto.newBuilder()
        .setRegistration(PBHelper.convert(registrationResp)).build();
  }

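  // Heartbeat translation: unpacks per-storage utilization, cache stats,
  // and (if present) the volume-failure summary, then repacks any
  // DatanodeCommands, HA status, and rolling-upgrade state for the reply.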
  @Override
  public HeartbeatResponseProto sendHeartbeat(RpcController controller,
      HeartbeatRequestProto request) throws ServiceException {
    HeartbeatResponse response;
    try {
      final StorageReport[] report = PBHelper.convertStorageReports(
          request.getReportsList());
      VolumeFailureSummary volumeFailureSummary =
          request.hasVolumeFailureSummary() ? PBHelper.convertVolumeFailureSummary(
              request.getVolumeFailureSummary()) : null;
      response = impl.sendHeartbeat(PBHelper.convert(request.getRegistration()),
          report, request.getCacheCapacity(), request.getCacheUsed(),
          request.getXmitsInProgress(),
          request.getXceiverCount(), request.getFailedVolumes(),
          volumeFailureSummary);
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    HeartbeatResponseProto.Builder builder = HeartbeatResponseProto
        .newBuilder();
    DatanodeCommand[] cmds = response.getCommands();
    if (cmds != null) {
      for (int i = 0; i < cmds.length; i++) {
        if (cmds[i] != null) {
          builder.addCmds(PBHelper.convert(cmds[i]));
        }
      }
    }
    builder.setHaStatus(PBHelper.convert(response.getNameNodeHaState()));
    RollingUpgradeStatus rollingUpgradeStatus = response
        .getRollingUpdateStatus();
    if (rollingUpgradeStatus != null) {
      // The V2 field is always set for newer datanodes. For compatibility
      // with older datanodes, the V1 field is only set while the rolling
      // upgrade has not yet been finalized.
      RollingUpgradeStatusProto rus = PBHelper.convertRollingUpgradeStatus(
          rollingUpgradeStatus);
      builder.setRollingUpgradeStatusV2(rus);
      if (!rollingUpgradeStatus.isFinalized()) {
        builder.setRollingUpgradeStatus(rus);
      }
    }
    return builder.build();
  }

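  // Full block reports arrive in one of two encodings: newer datanodes
  // send buffer-based reports (numberOfBlocks is set), older ones send
  // the repeated-longs encoding; decode whichever form is present.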
  @Override
  public BlockReportResponseProto blockReport(RpcController controller,
      BlockReportRequestProto request) throws ServiceException {
    DatanodeCommand cmd = null;
    StorageBlockReport[] report =
        new StorageBlockReport[request.getReportsCount()];

    int index = 0;
    for (StorageBlockReportProto s : request.getReportsList()) {
      final BlockListAsLongs blocks;
      if (s.hasNumberOfBlocks()) { // new-style buffer-based report
        int num = (int) s.getNumberOfBlocks();
        Preconditions.checkState(s.getBlocksCount() == 0,
            "cannot send both blocks list and buffers");
        blocks = BlockListAsLongs.decodeBuffers(num, s.getBlocksBuffersList());
      } else {
        blocks = BlockListAsLongs.decodeLongs(s.getBlocksList());
      }
      report[index++] = new StorageBlockReport(PBHelper.convert(s.getStorage()),
          blocks);
    }
    try {
      cmd = impl.blockReport(PBHelper.convert(request.getRegistration()),
          request.getBlockPoolId(), report,
          request.hasContext() ?
              PBHelper.convert(request.getContext()) : null);
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    BlockReportResponseProto.Builder builder =
        BlockReportResponseProto.newBuilder();
    if (cmd != null) {
      builder.setCmd(PBHelper.convert(cmd));
    }
    return builder.build();
  }

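  // Cache report translation: the request carries the IDs of the blocks
  // currently cached by the datanode for the given block pool.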
  @Override
  public CacheReportResponseProto cacheReport(RpcController controller,
      CacheReportRequestProto request) throws ServiceException {
    DatanodeCommand cmd = null;
    try {
      cmd = impl.cacheReport(
          PBHelper.convert(request.getRegistration()),
          request.getBlockPoolId(),
          request.getBlocksList());
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    CacheReportResponseProto.Builder builder =
        CacheReportResponseProto.newBuilder();
    if (cmd != null) {
      builder.setCmd(PBHelper.convert(cmd));
    }
    return builder.build();
  }

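  // Incremental block report translation: each entry groups
  // received/deleted block notifications by storage. Older datanodes
  // identify the storage by UUID only; newer ones send the full
  // DatanodeStorage object.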
  @Override
  public BlockReceivedAndDeletedResponseProto blockReceivedAndDeleted(
      RpcController controller, BlockReceivedAndDeletedRequestProto request)
      throws ServiceException {
    List<StorageReceivedDeletedBlocksProto> sBlocks = request.getBlocksList();
    StorageReceivedDeletedBlocks[] info =
        new StorageReceivedDeletedBlocks[sBlocks.size()];
    for (int i = 0; i < sBlocks.size(); i++) {
      StorageReceivedDeletedBlocksProto sBlock = sBlocks.get(i);
      List<ReceivedDeletedBlockInfoProto> list = sBlock.getBlocksList();
      ReceivedDeletedBlockInfo[] rdBlocks =
          new ReceivedDeletedBlockInfo[list.size()];
      for (int j = 0; j < list.size(); j++) {
        rdBlocks[j] = PBHelper.convert(list.get(j));
      }
      if (sBlock.hasStorage()) {
        info[i] = new StorageReceivedDeletedBlocks(
            PBHelper.convert(sBlock.getStorage()), rdBlocks);
      } else {
        info[i] = new StorageReceivedDeletedBlocks(sBlock.getStorageUuid(),
            rdBlocks);
      }
    }
    try {
      impl.blockReceivedAndDeleted(PBHelper.convert(request.getRegistration()),
          request.getBlockPoolId(), info);
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return VOID_BLOCK_RECEIVED_AND_DELETED_RESPONSE;
  }

  @Override
  public ErrorReportResponseProto errorReport(RpcController controller,
      ErrorReportRequestProto request) throws ServiceException {
    try {
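      // Note: the misspelled getter below mirrors the "registartion"
      // field name in DatanodeProtocol.proto; the generated protobuf
      // accessor follows the proto field name, so it cannot be
      // "fixed" here.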
      impl.errorReport(PBHelper.convert(request.getRegistartion()),
          request.getErrorCode(), request.getMsg());
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return VOID_ERROR_REPORT_RESPONSE_PROTO;
  }

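  // Returns the namenode's namespace info (layout version, namespace ID,
  // cluster ID, and related fields) so the datanode can verify that it is
  // talking to a compatible namenode.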
  @Override
  public VersionResponseProto versionRequest(RpcController controller,
      VersionRequestProto request) throws ServiceException {
    NamespaceInfo info;
    try {
      info = impl.versionRequest();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return VersionResponseProto.newBuilder()
        .setInfo(PBHelper.convert(info)).build();
  }

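  // Translates the corrupt replicas the datanode has detected into
  // LocatedBlocks before handing them to the namenode.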
  @Override
  public ReportBadBlocksResponseProto reportBadBlocks(RpcController controller,
      ReportBadBlocksRequestProto request) throws ServiceException {
    List<LocatedBlockProto> lbps = request.getBlocksList();
    LocatedBlock[] blocks = new LocatedBlock[lbps.size()];
    for (int i = 0; i < lbps.size(); i++) {
      blocks[i] = PBHelper.convert(lbps.get(i));
    }
    try {
      impl.reportBadBlocks(blocks);
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return VOID_REPORT_BAD_BLOCK_RESPONSE;
  }

  @Override
  public CommitBlockSynchronizationResponseProto commitBlockSynchronization(
      RpcController controller, CommitBlockSynchronizationRequestProto request)
      throws ServiceException {
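    // Note: the misspelled getter below mirrors the "newTaragets"
    // field name in DatanodeProtocol.proto; the generated protobuf
    // accessor follows the proto field name.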
    List<DatanodeIDProto> dnprotos = request.getNewTaragetsList();
    DatanodeID[] dns = new DatanodeID[dnprotos.size()];
    for (int i = 0; i < dnprotos.size(); i++) {
      dns[i] = PBHelper.convert(dnprotos.get(i));
    }
    final List<String> sidprotos = request.getNewTargetStoragesList();
    final String[] storageIDs = sidprotos.toArray(new String[sidprotos.size()]);
    try {
      impl.commitBlockSynchronization(PBHelper.convert(request.getBlock()),
          request.getNewGenStamp(), request.getNewLength(),
          request.getCloseFile(), request.getDeleteBlock(), dns, storageIDs);
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return VOID_COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO;
  }
}