/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.datanode;

import java.io.File;

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;

/**
 * This class represents a replica that is under block recovery.
 * It has a recovery id that is equal to the generation stamp
 * that the replica will be bumped to after recovery.
 * The recovery id is used to handle multiple concurrent block recoveries:
 * a recovery with a higher recovery id preempts recoveries with a lower id.
 *
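 * For example (a sketch, where {@code replica} is assumed to be any
 * recoverable replica):
 * <pre>{@code
 * ReplicaUnderRecovery rur = new ReplicaUnderRecovery(replica, 10);
 * rur.setRecoveryID(11); // ok: the higher id preempts the current recovery
 * rur.setRecoveryID(9);  // throws IllegalArgumentException
 * }</pre>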
 */
public class ReplicaUnderRecovery extends ReplicaInfo {
  private ReplicaInfo original; // the original replica that needs to be recovered
  private long recoveryId; // recovery id; it is also the generation stamp
                           // that the replica will be bumped to after recovery

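  /**
   * Constructor. Only a replica in the FINALIZED, RBW or RWR state
   * can be put under recovery.
   * @param replica the replica to be recovered
   * @param recoveryId the recovery id, i.e. the generation stamp that the
   *          replica will be bumped to after recovery
   * @throws IllegalArgumentException if the replica is in any other state
   */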
  public ReplicaUnderRecovery(ReplicaInfo replica, long recoveryId) {
    super(replica, replica.getVolume(), replica.getDir());
    if ( replica.getState() != ReplicaState.FINALIZED &&
         replica.getState() != ReplicaState.RBW &&
         replica.getState() != ReplicaState.RWR ) {
      throw new IllegalArgumentException("Cannot recover replica: " + replica);
    }
    this.original = replica;
    this.recoveryId = recoveryId;
  }

  /**
   * Copy constructor.
   * @param from where to copy from
   */
  public ReplicaUnderRecovery(ReplicaUnderRecovery from) {
    super(from);
    this.original = from.getOriginalReplica();
    this.recoveryId = from.getRecoveryID();
  }

  /**
   * Get the recovery id
   * @return the generation stamp that the replica will be bumped to
   */
  public long getRecoveryID() {
    return recoveryId;
  }

  /**
   * Set the recovery id
   * @param recoveryId the new recovery id; must be greater than the
   *          current recovery id
   * @throws IllegalArgumentException if the new recovery id is not greater
   *           than the current one
   */
  public void setRecoveryID(long recoveryId) {
    if (recoveryId > this.recoveryId) {
      this.recoveryId = recoveryId;
    } else {
      throw new IllegalArgumentException("The new recovery id: " + recoveryId
          + " must be greater than the current one: " + this.recoveryId);
    }
  }


  /**
   * Get the original replica that's under recovery
   * @return the original replica under recovery
   */
  public ReplicaInfo getOriginalReplica() {
    return original;
  }

  @Override //ReplicaInfo
  public boolean isUnlinked() {
    return original.isUnlinked();
  }

  @Override //ReplicaInfo
  public void setUnlinked() {
    original.setUnlinked();
  }

  @Override //ReplicaInfo
  public ReplicaState getState() {
    return ReplicaState.RUR;
  }

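  // Length queries are delegated to the original replica under recovery.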
  @Override
  public long getVisibleLength() {
    return original.getVisibleLength();
  }

  @Override
  public long getBytesOnDisk() {
    return original.getBytesOnDisk();
  }

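  // Note: the metadata setters below are applied both to this replica and
  // to the wrapped original replica, so the two views stay consistent.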
  @Override  //org.apache.hadoop.hdfs.protocol.Block
  public void setBlockId(long blockId) {
    super.setBlockId(blockId);
    original.setBlockId(blockId);
  }

  @Override //org.apache.hadoop.hdfs.protocol.Block
  public void setGenerationStamp(long gs) {
    super.setGenerationStamp(gs);
    original.setGenerationStamp(gs);
  }

  @Override //org.apache.hadoop.hdfs.protocol.Block
  public void setNumBytes(long numBytes) {
    super.setNumBytes(numBytes);
    original.setNumBytes(numBytes);
  }

  @Override //ReplicaInfo
  public void setDir(File dir) {
    super.setDir(dir);
    original.setDir(dir);
  }

  @Override //ReplicaInfo
  void setVolume(FsVolumeSpi vol) {
    super.setVolume(vol);
    original.setVolume(vol);
  }

  @Override  // Object
  public boolean equals(Object o) {
    return super.equals(o);
  }

  @Override  // Object
  public int hashCode() {
    return super.hashCode();
  }

  @Override
  public String toString() {
    return super.toString()
        + "\n  recoveryId=" + recoveryId
        + "\n  original=" + original;
  }

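  /**
   * Create a {@link ReplicaRecoveryInfo} snapshot of the original replica
   * (block id, bytes on disk, generation stamp and state) for use in the
   * block recovery protocol.
   */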
  public ReplicaRecoveryInfo createInfo() {
    return new ReplicaRecoveryInfo(original.getBlockId(),
        original.getBytesOnDisk(), original.getGenerationStamp(),
        original.getState());
  }
}