Projects >> hadoop-common >> bd23905c89910fb3ef5b7611b53c4e5e56b575b9

Chunk
Conflicting content
   * 
   * @see ClientProtocol#append(String, String) 
   */
<<<<<<< HEAD
  OutputStream append(String src, int buffersize, Progressable progress) 
      throws IOException {
    checkOpen();
    HdfsFileStatus stat = getFileInfo(src);
    if (stat == null) { // No file found
      throw new FileNotFoundException("failed to append to non-existent file "
          + src + " on client " + clientName);
    }
=======
  OutputStream append(String src, int buffersize, Progressable progress)
      throws IOException {
    checkOpen();
    HdfsFileStatus stat = getFileInfo(src);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    OutputStream result = callAppend(stat, src, buffersize, progress);
    leasechecker.put(src, result);
    return result;
Solution content
   * 
   * @see ClientProtocol#append(String, String) 
   */
  OutputStream append(String src, int buffersize, Progressable progress) 
      throws IOException {
    checkOpen();
    HdfsFileStatus stat = getFileInfo(src);
    if (stat == null) { // No file found
      throw new FileNotFoundException("failed to append to non-existent file "
          + src + " on client " + clientName);
    }
    OutputStream result = callAppend(stat, src, buffersize, progress);
    leasechecker.put(src, result);
    return result;
File
DFSClient.java
Developer's decision
Version 1
Kind of conflict
If statement
Method invocation
Method signature
Variable
Chunk
Conflicting content
  public static final String  DFS_SECONDARY_NAMENODE_KRB_HTTPS_USER_NAME_KEY = "dfs.secondary.namenode.kerberos.https.principal";
  public static final String  DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY = "dfs.namenode.name.cache.threshold";
  public static final int     DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT = 10;
<<<<<<< HEAD
  public static final String  DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY = "dfs.namenode.resource.check.interval";
  public static final int     DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT = 5000;
  public static final String  DFS_NAMENODE_DU_RESERVED_KEY = "dfs.namenode.resource.du.reserved";
  public static final long    DFS_NAMENODE_DU_RESERVED_DEFAULT = 1024 * 1024 * 100; // 100 MB
  public static final String  DFS_NAMENODE_CHECKED_VOLUMES_KEY = "dfs.namenode.resource.checked.volumes";
=======
  
  public static final String DFS_FEDERATION_NAMESERVICES = "dfs.federation.nameservices";
  public static final String DFS_FEDERATION_NAMESERVICE_ID = "dfs.federation.nameservice.id";
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
}
Solution content
  public static final String  DFS_SECONDARY_NAMENODE_KRB_HTTPS_USER_NAME_KEY = "dfs.secondary.namenode.kerberos.https.principal";
  public static final String  DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY = "dfs.namenode.name.cache.threshold";
  public static final int     DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT = 10;
  
  public static final String DFS_FEDERATION_NAMESERVICES = "dfs.federation.nameservices";
  public static final String DFS_FEDERATION_NAMESERVICE_ID = "dfs.federation.nameservice.id";
  public static final String  DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY = "dfs.namenode.resource.check.interval";
  public static final int     DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT = 5000;
  public static final String  DFS_NAMENODE_DU_RESERVED_KEY = "dfs.namenode.resource.du.reserved";
  public static final long    DFS_NAMENODE_DU_RESERVED_DEFAULT = 1024 * 1024 * 100; // 100 MB
  public static final String  DFS_NAMENODE_CHECKED_VOLUMES_KEY = "dfs.namenode.resource.checked.volumes";
}
File
DFSConfigKeys.java
Developer's decision
Concatenation
Kind of conflict
Attribute
Comment
Chunk
Conflicting content
import java.io.IOException;
import java.io.UnsupportedEncodingException;
<<<<<<< HEAD
=======
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
import java.util.Comparator;
import java.util.StringTokenizer;
Solution content
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Comparator;
import java.util.StringTokenizer;
File
DFSUtil.java
Developer's decision
Version 2
Kind of conflict
Import
Chunk
Conflicting content
    setConf(conf);
    this.ugi = UserGroupInformation.getCurrentUser(); 
    nnAddr = NetUtils.createSocketAddr(name.toString());
<<<<<<< HEAD
    
    // in case we open connection to hftp of a different cluster
    // we need to know this cluster https port
    // if it is not set we assume it is the same cluster or same port
    int urlPort = conf.getInt("dfs.hftp.https.port", -1);
    if(urlPort == -1)
      urlPort = conf.getInt(DFSConfigKeys.DFS_HTTPS_PORT_KEY, 
          DFSConfigKeys.DFS_HTTPS_PORT_DEFAULT);

    nnHttpUrl = 
      buildUri("https://", NetUtils.normalizeHostName(name.getHost()), urlPort);
    LOG.debug("using url to get DT:" + nnHttpUrl);
=======
   
    nnHttpUrl = buildUri("https://", 
                         NetUtils.normalizeHostName(name.getHost()), 
                         conf.getInt(DFSConfigKeys.DFS_HTTPS_PORT_KEY, 
                                     DFSConfigKeys.DFS_HTTPS_PORT_DEFAULT));
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0

    
    
Solution content
    setConf(conf);
    this.ugi = UserGroupInformation.getCurrentUser(); 
    nnAddr = NetUtils.createSocketAddr(name.toString());
    
    // in case we open connection to hftp of a different cluster
    // we need to know this cluster https port
    // if it is not set we assume it is the same cluster or same port
    int urlPort = conf.getInt("dfs.hftp.https.port", -1);
    if(urlPort == -1)
      urlPort = conf.getInt(DFSConfigKeys.DFS_HTTPS_PORT_KEY, 
          DFSConfigKeys.DFS_HTTPS_PORT_DEFAULT);

    nnHttpUrl = 
      buildUri("https://", NetUtils.normalizeHostName(name.getHost()), urlPort);
    LOG.debug("using url to get DT:" + nnHttpUrl);

    
    
File
HftpFileSystem.java
Developer's decision
Version 1
Kind of conflict
Attribute
Comment
If statement
Method invocation
Variable
Chunk
Conflicting content
   * Compared to the previous version the following changes have been introduced:
   * (Only the latest change is reflected.
   * The log of historical changes can be retrieved from the svn).
<<<<<<< HEAD
   * 66: Add getAdditionalDatanode(..)
   */
  public static final long versionID = 66L;
=======
   * 67: Add block pool ID to Block
   */
  public static final long versionID = 67L;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
  
  ///////////////////////////////////////
  // File contents
Solution content
   * Compared to the previous version the following changes have been introduced:
   * (Only the latest change is reflected.
   * The log of historical changes can be retrieved from the svn).
   * 67: Add block pool ID to Block
   */
  public static final long versionID = 67L;
  
  ///////////////////////////////////////
  // File contents
File
ClientProtocol.java
Developer's decision
Version 2
Kind of conflict
Attribute
Comment
Chunk
Conflicting content
   * @throws UnresolvedLinkException If src contains a symlink
   * @throws IOException If an I/O error occurred
   */
<<<<<<< HEAD
  public LocatedBlock getAdditionalDatanode(final String src, final Block blk,
=======
  public LocatedBlock getAdditionalDatanode(final String src, final ExtendedBlock blk,
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      final DatanodeInfo[] existings, final DatanodeInfo[] excludes,
      final int numAdditionalNodes, final String clientName
      ) throws AccessControlException, FileNotFoundException,
Solution content
   * @throws UnresolvedLinkException If src contains a symlink
   * @throws IOException If an I/O error occurred
   */
  public LocatedBlock getAdditionalDatanode(final String src, final ExtendedBlock blk,
      final DatanodeInfo[] existings, final DatanodeInfo[] excludes,
      final int numAdditionalNodes, final String clientName
      ) throws AccessControlException, FileNotFoundException,
File
ClientProtocol.java
Developer's decision
Version 2
Kind of conflict
Method signature
Chunk
Conflicting content
   * when protocol changes. It is not very obvious. 
   */
  /*
<<<<<<< HEAD
   * Version 22:
   *    Add a new feature to replace datanode on failure.
   */
  public static final int DATA_TRANSFER_VERSION = 22;
=======
   * Version 23:
   *    Changed the protocol methods to use ExtendedBlock instead
   *    of Block.
   */
  public static final int DATA_TRANSFER_VERSION = 23;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0

  /** Operation */
  public enum Op {
Solution content
   * when protocol changes. It is not very obvious. 
   */
  /*
   * Version 23:
   *    Changed the protocol methods to use ExtendedBlock instead
   *    of Block.
   */
  public static final int DATA_TRANSFER_VERSION = 23;

  /** Operation */
  public enum Op {
File
DataTransferProtocol.java
Developer's decision
Version 2
Kind of conflict
Attribute
Comment
Chunk
Conflicting content
    }

    /** Send {@link Op#TRANSFER_BLOCK} */
<<<<<<< HEAD
    public static void opTransferBlock(DataOutputStream out, Block blk,
=======
    public static void opTransferBlock(DataOutputStream out, ExtendedBlock blk,
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
        String client, DatanodeInfo[] targets,
        Token blockToken) throws IOException {
      op(out, Op.TRANSFER_BLOCK);
Solution content
    }

    /** Send {@link Op#TRANSFER_BLOCK} */
    public static void opTransferBlock(DataOutputStream out, ExtendedBlock blk,
        String client, DatanodeInfo[] targets,
        Token blockToken) throws IOException {
      op(out, Op.TRANSFER_BLOCK);
File
DataTransferProtocol.java
Developer's decision
Version 2
Kind of conflict
Method signature
Chunk
Conflicting content
    /** Receive {@link Op#TRANSFER_BLOCK} */
    private void opTransferBlock(DataInputStream in) throws IOException {
<<<<<<< HEAD
      final Block blk = new Block();
=======
      final ExtendedBlock blk = new ExtendedBlock();
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      blk.readId(in);
      final String client = Text.readString(in);
      final DatanodeInfo targets[] = readDatanodeInfos(in);
Solution content
    /** Receive {@link Op#TRANSFER_BLOCK} */
    private void opTransferBlock(DataInputStream in) throws IOException {
      final ExtendedBlock blk = new ExtendedBlock();
      blk.readId(in);
      final String client = Text.readString(in);
      final DatanodeInfo targets[] = readDatanodeInfos(in);
File
DataTransferProtocol.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
     * For {@link BlockConstructionStage#TRANSFER_RBW}
     * or {@link BlockConstructionStage#TRANSFER_FINALIZED}.
     */
<<<<<<< HEAD
    protected abstract void opTransferBlock(DataInputStream in, Block blk,
=======
    protected abstract void opTransferBlock(DataInputStream in, ExtendedBlock blk,
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
        String client, DatanodeInfo[] targets,
        Token blockToken)
        throws IOException;
Solution content
     * For {@link BlockConstructionStage#TRANSFER_RBW}
     * or {@link BlockConstructionStage#TRANSFER_FINALIZED}.
     */
    protected abstract void opTransferBlock(DataInputStream in, ExtendedBlock blk,
        String client, DatanodeInfo[] targets,
        Token blockToken)
        throws IOException;
File
DataTransferProtocol.java
Developer's decision
Version 2
Kind of conflict
Method signature
Chunk
Conflicting content
  // Version is reflected in the data storage file.
  // Versions are negative.
  // Decrement LAYOUT_VERSION to define a new version.
<<<<<<< HEAD
  public static final int LAYOUT_VERSION = -34;
  // Current version: 
  // -31, -32 and -33 are reserved for 0.20.203, 0.20.204 and 0.22.
=======
  public static final int LAYOUT_VERSION = -31;
  // Current version: 
  // -31: Adding support for block pools and multiple namenodes
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
}
Solution content
  // Version is reflected in the data storage file.
  // Versions are negative.
  // Decrement LAYOUT_VERSION to define a new version.
  public static final int LAYOUT_VERSION = -35;
  // Current version: 
  // -35: Adding support for block pools and multiple namenodes
}
File
FSConstants.java
Developer's decision
Manual
Kind of conflict
Attribute
Comment
Chunk
Conflicting content
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
<<<<<<< HEAD
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
=======
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
Solution content
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
File
Balancer.java
Developer's decision
Version 2
Kind of conflict
Import
Chunk
Conflicting content
              -threshold)*datanodeS.datanode.getCapacity()/100.0);
        }
      } else {
<<<<<<< HEAD
        datanodeS = new BalancerDatanode(datanode, avgUtilization, threshold);
=======
        datanodeS = new BalancerDatanode(datanode, policy, threshold);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
        if ( isBelowOrEqualAvgUtilized(datanodeS)) {
          this.belowAvgUtilizedDatanodes.add(datanodeS);
        } else {
Solution content
              -threshold)*datanodeS.datanode.getCapacity()/100.0);
        }
      } else {
        datanodeS = new BalancerDatanode(datanode, policy, threshold);
        if ( isBelowOrEqualAvgUtilized(datanodeS)) {
          this.belowAvgUtilizedDatanodes.add(datanodeS);
        } else {
File
Balancer.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
      movedBlocks.add(new HashMap());
    }

<<<<<<< HEAD
    /* set the win width */
    private void setWinWidth(Configuration conf) {
      winWidth = conf.getLong(
          DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY, 
          DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_DEFAULT);
    }
    
=======
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    /* add a block thus marking a block to be moved */
    synchronized private void add(BalancerBlock block) {
      movedBlocks.get(CUR_WIN).put(block.getBlock(), block);
Solution content
      movedBlocks.add(new HashMap());
    }

    /* add a block thus marking a block to be moved */
    synchronized private void add(BalancerBlock block) {
      movedBlocks.get(CUR_WIN).put(block.getBlock(), block);
File
Balancer.java
Developer's decision
Version 2
Kind of conflict
Comment
Method declaration
Chunk
Conflicting content
  /* Return true if the given datanode is below average utilized 
   * but not underUtilized */
  private boolean isBelowOrEqualAvgUtilized(BalancerDatanode datanode) {
<<<<<<< HEAD
        return (datanode.utilization >= (avgUtilization-threshold))
                 && (datanode.utilization <= avgUtilization);
=======
    final double avg = policy.getAvgUtilization();
    return (datanode.utilization >= (avg-threshold))
             && (datanode.utilization <= avg);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
  }

  // Exit status
Solution content
  /* Return true if the given datanode is below average utilized 
   * but not underUtilized */
  private boolean isBelowOrEqualAvgUtilized(BalancerDatanode datanode) {
    final double avg = policy.getAvgUtilization();
    return (datanode.utilization >= (avg-threshold))
             && (datanode.utilization <= avg);
  }

  // Exit status
File
Balancer.java
Developer's decision
Version 2
Kind of conflict
Attribute
Method invocation
Return statement
Variable
Chunk
Conflicting content
            " in this iteration");
      }

<<<<<<< HEAD
      Formatter formatter = new Formatter(System.out);
      System.out.println("Time Stamp               Iteration#  Bytes Already Moved  Bytes Left To Move  Bytes Being Moved");
      int iterations = 0;
      while (true ) {
        /* get all live datanodes of a cluster and their disk usage
         * decide the number of bytes need to be moved
         */
        long bytesLeftToMove = initNodes();
        if (bytesLeftToMove == 0) {
          System.out.println("The cluster is balanced. Exiting...");
          return SUCCESS;
        } else {
          LOG.info( "Need to move "+ StringUtils.byteDesc(bytesLeftToMove)
              +" bytes to make the cluster balanced." );
        }
        
        /* Decide all the nodes that will participate in the block move and
         * the number of bytes that need to be moved from one node to another
         * in this iteration. Maximum bytes to be moved per node is
         * Min(1 Band worth of bytes,  MAX_SIZE_TO_MOVE).
         */
        long bytesToMove = chooseNodes();
        if (bytesToMove == 0) {
          System.out.println("No block can be moved. Exiting...");
          return NO_MOVE_BLOCK;
        } else {
          LOG.info( "Will move " + StringUtils.byteDesc(bytesToMove) +
              "bytes in this iteration");
        }
   
        formatter.format("%-24s %10d  %19s  %18s  %17s\n", 
            DateFormat.getDateTimeInstance().format(new Date()),
            iterations,
            StringUtils.byteDesc(bytesMoved.get()),
            StringUtils.byteDesc(bytesLeftToMove),
            StringUtils.byteDesc(bytesToMove)
            );
        
        /* For each pair of , start a thread that repeatedly 
         * decide a block to be moved and its proxy source, 
         * then initiates the move until all bytes are moved or no more block
         * available to move.
         * Exit no byte has been moved for 5 consecutive iterations.
         */
        if (dispatchBlockMoves() > 0) {
          notChangedIterations = 0;
        } else {
          notChangedIterations++;
          if (notChangedIterations >= 5) {
            System.out.println(
                "No block has been moved for 5 iterations. Exiting...");
            return NO_MOVE_PROGRESS;
          }
        }

        // clean all lists
        resetData();
        
        try {
          Thread.sleep(2000*conf.getLong(
              DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
              DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT));
        } catch (InterruptedException ignored) {
=======
      formatter.format("%-24s %10d  %19s  %18s  %17s\n", 
          DateFormat.getDateTimeInstance().format(new Date()),
          iteration,
          StringUtils.byteDesc(bytesMoved.get()),
          StringUtils.byteDesc(bytesLeftToMove),
          StringUtils.byteDesc(bytesToMove)
          );
      
      /* For each pair of , start a thread that repeatedly 
       * decide a block to be moved and its proxy source, 
       * then initiates the move until all bytes are moved or no more block
       * available to move.
       * Exit no byte has been moved for 5 consecutive iterations.
       */
      if (dispatchBlockMoves() > 0) {
        notChangedIterations = 0;
      } else {
        notChangedIterations++;
        if (notChangedIterations >= 5) {
          System.out.println(
              "No block has been moved for 5 iterations. Exiting...");
          return ReturnStatus.NO_MOVE_PROGRESS;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
        }
      }
Solution content
            " in this iteration");
      }

      formatter.format("%-24s %10d  %19s  %18s  %17s\n", 
          DateFormat.getDateTimeInstance().format(new Date()),
          iteration,
          StringUtils.byteDesc(bytesMoved.get()),
          StringUtils.byteDesc(bytesLeftToMove),
          StringUtils.byteDesc(bytesToMove)
          );
      
      /* For each pair of , start a thread that repeatedly 
       * decide a block to be moved and its proxy source, 
       * then initiates the move until all bytes are moved or no more block
       * available to move.
       * Exit no byte has been moved for 5 consecutive iterations.
       */
      if (dispatchBlockMoves() > 0) {
        notChangedIterations = 0;
      } else {
        notChangedIterations++;
        if (notChangedIterations >= 5) {
          System.out.println(
              "No block has been moved for 5 iterations. Exiting...");
          return ReturnStatus.NO_MOVE_PROGRESS;
        }
      }
File
Balancer.java
Developer's decision
Version 2
Kind of conflict
Attribute
Catch clause
Comment
If statement
Method invocation
Return statement
Try statement
Variable
While statement
Chunk
Conflicting content
  // last layout version that did not support persistent rbw replicas
  public static final int PRE_RBW_LAYOUT_VERSION = -19;
  
<<<<<<< HEAD
  /** Layout versions of 0.20.203 release */
  public static final int[] LAYOUT_VERSIONS_203 = {-19, -31};

  private   static final String STORAGE_FILE_LOCK     = "in_use.lock";
  protected static final String STORAGE_FILE_VERSION  = "VERSION";
  public    static final String STORAGE_DIR_CURRENT   = "current";
  private   static final String STORAGE_DIR_PREVIOUS  = "previous";
  private   static final String STORAGE_TMP_REMOVED   = "removed.tmp";
  private   static final String STORAGE_TMP_PREVIOUS  = "previous.tmp";
  private   static final String STORAGE_TMP_FINALIZED = "finalized.tmp";
  private   static final String STORAGE_TMP_LAST_CKPT = "lastcheckpoint.tmp";
  private   static final String STORAGE_PREVIOUS_CKPT = "previous.checkpoint";
=======
  // last layout version that is before federation
  public static final int LAST_PRE_FEDERATION_LAYOUT_VERSION = -30;
  
  private   static final String STORAGE_FILE_LOCK     = "in_use.lock";
  protected static final String STORAGE_FILE_VERSION  = "VERSION";
  public    static final String STORAGE_DIR_CURRENT   = "current";
  public    static final String STORAGE_DIR_PREVIOUS  = "previous";
  public    static final String STORAGE_TMP_REMOVED   = "removed.tmp";
  public    static final String STORAGE_TMP_PREVIOUS  = "previous.tmp";
  public    static final String STORAGE_TMP_FINALIZED = "finalized.tmp";
  public    static final String STORAGE_TMP_LAST_CKPT = "lastcheckpoint.tmp";
  public    static final String STORAGE_PREVIOUS_CKPT = "previous.checkpoint";
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
  
  public enum StorageState {
    NON_EXISTENT,
Solution content
  // last layout version that did not support persistent rbw replicas
  public static final int PRE_RBW_LAYOUT_VERSION = -19;
  
  // last layout version that is before federation
  public static final int LAST_PRE_FEDERATION_LAYOUT_VERSION = -30;
  
  /** Layout versions of 0.20.203 release */
  public static final int[] LAYOUT_VERSIONS_203 = {-19, -31};
  private   static final String STORAGE_FILE_LOCK     = "in_use.lock";
  protected static final String STORAGE_FILE_VERSION  = "VERSION";
  public    static final String STORAGE_DIR_CURRENT   = "current";
  public    static final String STORAGE_DIR_PREVIOUS  = "previous";
  public    static final String STORAGE_TMP_REMOVED   = "removed.tmp";
  public    static final String STORAGE_TMP_PREVIOUS  = "previous.tmp";
  public    static final String STORAGE_TMP_FINALIZED = "finalized.tmp";
  public    static final String STORAGE_TMP_LAST_CKPT = "lastcheckpoint.tmp";
  public    static final String STORAGE_PREVIOUS_CKPT = "previous.checkpoint";
  
  public enum StorageState {
    NON_EXISTENT,
File
Storage.java
Developer's decision
Combination
Kind of conflict
Array initializer
Attribute
Comment
Chunk
Conflicting content
  
      + "-" + Integer.toString(storage.getLayoutVersion())
      + "-" + Long.toString(storage.getCTime());
  }
<<<<<<< HEAD
  
  public static boolean is203LayoutVersion(int layoutVersion) {
    for (int lv203 : LAYOUT_VERSIONS_203) {
      if (lv203 == layoutVersion) {
        return true;
      }
    }
    return false;
=======
  
  String getProperty(Properties props, StorageDirectory sd,
      String name) throws InconsistentFSStateException {
    String property = props.getProperty(name);
    if (property == null) {
      throw new InconsistentFSStateException(sd.root, "file "
          + STORAGE_FILE_VERSION + " has " + name + " mising.");
    }
    return property;
  }
  /** Validate and set storage type from {@link Properties}*/
  protected void setStorageType(Properties props, StorageDirectory sd)
      throws InconsistentFSStateException {
    NodeType type = NodeType.valueOf(getProperty(props, sd, "storageType"));
    if (!storageType.equals(type)) {
      throw new InconsistentFSStateException(sd.root,
          "node type is incompatible with others.");
    }
    storageType = type;
  }
  
  /** Validate and set ctime from {@link Properties}*/
  protected void setcTime(Properties props, StorageDirectory sd)
      throws InconsistentFSStateException {
    cTime = Long.parseLong(getProperty(props, sd, "cTime"));
  }

  /** Validate and set clusterId from {@link Properties}*/
  protected void setClusterId(Properties props, int layoutVersion,
      StorageDirectory sd) throws InconsistentFSStateException {
    // No Cluster ID in version LAST_PRE_FEDERATION_LAYOUT_VERSION or before
    if (layoutVersion < Storage.LAST_PRE_FEDERATION_LAYOUT_VERSION) {
      String cid = getProperty(props, sd, "clusterID");
      if (!(clusterID.equals("") || cid.equals("") || clusterID.equals(cid))) {
        throw new InconsistentFSStateException(sd.getRoot(),
            "cluster Id is incompatible with others.");
      }
      clusterID = cid;
    }
  }
  
  /** Validate and set layout version from {@link Properties}*/
  protected void setLayoutVersion(Properties props, StorageDirectory sd)
      throws IncorrectVersionException, InconsistentFSStateException {
    int lv = Integer.parseInt(getProperty(props, sd, "layoutVersion"));
    if (lv < FSConstants.LAYOUT_VERSION) { // future version
      throw new IncorrectVersionException(lv, "storage directory "
          + sd.root.getAbsolutePath());
    }
    layoutVersion = lv;
  }
  
  /** Validate and set namespaceID version from {@link Properties}*/
  protected void setNamespaceID(Properties props, StorageDirectory sd)
      throws InconsistentFSStateException {
    int nsId = Integer.parseInt(getProperty(props, sd, "namespaceID"));
    if (namespaceID != 0 && nsId != 0 && namespaceID != nsId) {
      throw new InconsistentFSStateException(sd.root,
          "namespaceID is incompatible with others.");
    }
    namespaceID = nsId;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
  }
}
Solution content
      + "-" + Integer.toString(storage.getLayoutVersion())
      + "-" + Long.toString(storage.getCTime());
  }
  
  String getProperty(Properties props, StorageDirectory sd,
      String name) throws InconsistentFSStateException {
    String property = props.getProperty(name);
    if (property == null) {
      throw new InconsistentFSStateException(sd.root, "file "
          + STORAGE_FILE_VERSION + " has " + name + " mising.");
    }
    return property;
  }
  
  /** Validate and set storage type from {@link Properties}*/
  protected void setStorageType(Properties props, StorageDirectory sd)
      throws InconsistentFSStateException {
    NodeType type = NodeType.valueOf(getProperty(props, sd, "storageType"));
    if (!storageType.equals(type)) {
      throw new InconsistentFSStateException(sd.root,
          "node type is incompatible with others.");
    }
    storageType = type;
  }
  
  /** Validate and set ctime from {@link Properties}*/
  protected void setcTime(Properties props, StorageDirectory sd)
      throws InconsistentFSStateException {
    cTime = Long.parseLong(getProperty(props, sd, "cTime"));
  }

  /** Validate and set clusterId from {@link Properties}*/
  protected void setClusterId(Properties props, int layoutVersion,
      StorageDirectory sd) throws InconsistentFSStateException {
    // No Cluster ID in version LAST_PRE_FEDERATION_LAYOUT_VERSION or before
    if (layoutVersion < Storage.LAST_PRE_FEDERATION_LAYOUT_VERSION) {
      String cid = getProperty(props, sd, "clusterID");
      if (!(clusterID.equals("") || cid.equals("") || clusterID.equals(cid))) {
        throw new InconsistentFSStateException(sd.getRoot(),
            "cluster Id is incompatible with others.");
      }
      clusterID = cid;
    }
  }
  
  /** Validate and set layout version from {@link Properties}*/
  protected void setLayoutVersion(Properties props, StorageDirectory sd)
      throws IncorrectVersionException, InconsistentFSStateException {
    int lv = Integer.parseInt(getProperty(props, sd, "layoutVersion"));
    if (lv < FSConstants.LAYOUT_VERSION) { // future version
      throw new IncorrectVersionException(lv, "storage directory "
          + sd.root.getAbsolutePath());
    }
    layoutVersion = lv;
  }
  
  /** Validate and set namespaceID version from {@link Properties}*/
  protected void setNamespaceID(Properties props, StorageDirectory sd)
      throws InconsistentFSStateException {
    int nsId = Integer.parseInt(getProperty(props, sd, "namespaceID"));
    if (namespaceID != 0 && nsId != 0 && namespaceID != nsId) {
      throw new InconsistentFSStateException(sd.root,
          "namespaceID is incompatible with others.");
    }
    namespaceID = nsId;
  }
  
  public static boolean is203LayoutVersion(int layoutVersion) {
    for (int lv203 : LAYOUT_VERSIONS_203) {
      if (lv203 == layoutVersion) {
        return true;
      }
    }
    return false;
  }
}
File
Storage.java
Developer's decision
Concatenation
Kind of conflict
Comment
For statement
If statement
Method declaration
Method invocation
Method signature
Return statement
Variable
Chunk
Conflicting content
import org.apache.commons.logging.Log;
import org.apache.hadoop.fs.FSOutputSummer;
<<<<<<< HEAD
import org.apache.hadoop.hdfs.protocol.Block;
=======
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage;
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PacketHeader;
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck;
Solution content
import org.apache.commons.logging.Log;
import org.apache.hadoop.fs.FSOutputSummer;
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage;
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PacketHeader;
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck;
File
BlockReceiver.java
Developer's decision
Version 2
Kind of conflict
Import
Chunk
Conflicting content
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck;
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
<<<<<<< HEAD
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
=======
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.Daemon;
Solution content
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck;
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.Daemon;
File
BlockReceiver.java
Developer's decision
Version 2
Kind of conflict
Import
Chunk
Conflicting content
  private final boolean isDatanode;

  /** the block to receive */
<<<<<<< HEAD
  private final Block block; 
=======
  private final ExtendedBlock block; 
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
  /** the replica to write */
  private final ReplicaInPipelineInterface replicaInfo;
  /** pipeline stage */
Solution content
  private final boolean isDatanode;

  /** the block to receive */
  private final ExtendedBlock block; 
  /** the replica to write */
  private final ReplicaInPipelineInterface replicaInfo;
  /** pipeline stage */
File
BlockReceiver.java
Developer's decision
Version 2
Kind of conflict
Attribute
Chunk
Conflicting content
  private final BlockConstructionStage stage;
  private final boolean isTransfer;

<<<<<<< HEAD
  BlockReceiver(final Block block, final DataInputStream in,
=======
  BlockReceiver(final ExtendedBlock block, final DataInputStream in,
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      final String inAddr, final String myAddr,
      final BlockConstructionStage stage, 
      final long newGs, final long minBytesRcvd, final long maxBytesRcvd, 
Solution content
  private final BlockConstructionStage stage;
  private final boolean isTransfer;

  BlockReceiver(final ExtendedBlock block, final DataInputStream in,
      final String inAddr, final String myAddr,
      final BlockConstructionStage stage, 
      final long newGs, final long minBytesRcvd, final long maxBytesRcvd, 
File
BlockReceiver.java
Developer's decision
Version 2
Kind of conflict
Method signature
Chunk
Conflicting content
                ClientTraceLog.info(String.format(DN_CLIENTTRACE_FORMAT,
                      inAddr, myAddr, block.getNumBytes(),
                      "HDFS_WRITE", clientname, offset,
<<<<<<< HEAD
                      datanode.dnRegistration.getStorageID(), block, endTime-startTime));
=======
                      dnR.getStorageID(), block, endTime-startTime));
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
              } else {
                LOG.info("Received block " + block + " of size "
                    + block.getNumBytes() + " from " + inAddr);
Solution content
                ClientTraceLog.info(String.format(DN_CLIENTTRACE_FORMAT,
                      inAddr, myAddr, block.getNumBytes(),
                      "HDFS_WRITE", clientname, offset,
                      dnR.getStorageID(), block, endTime-startTime));
              } else {
                LOG.info("Received block " + block + " of size "
                    + block.getNumBytes() + " from " + inAddr);
File
BlockReceiver.java
Developer's decision
Version 2
Kind of conflict
Attribute
Method invocation
Variable
Chunk
Conflicting content
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.Block;
<<<<<<< HEAD
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.StringUtils;
=======
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataNode.BPOfferService;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0

/**
 * DataBlockScanner manages block scanning for all the block pools. For each
Solution content
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataNode.BPOfferService;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

/**
 * DataBlockScanner manages block scanning for all the block pools. For each
File
DataBlockScanner.java
Developer's decision
Version 2
Kind of conflict
Import
Chunk
Conflicting content
  private final FSDataset dataset;
  private final Configuration conf;
  
<<<<<<< HEAD
  private static final int MAX_SCAN_RATE = 8 * 1024 * 1024; // 8MB per sec
  private static final int MIN_SCAN_RATE = 1 * 1024 * 1024; // 1MB per sec
  
  static final long DEFAULT_SCAN_PERIOD_HOURS = 21*24L; // three weeks
  private static final long ONE_DAY = 24*3600*1000L;
  
  static final DateFormat dateFormat = 
                    new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS");
  
  static final String verificationLogFile = "dncp_block_verification.log";
  static final int verficationLogLimit = 5; // * numBlocks.

  private long scanPeriod = DEFAULT_SCAN_PERIOD_HOURS * 3600 * 1000;
  DataNode datanode;
  FSDataset dataset;
  
  // sorted set
  TreeSet blockInfoSet;
  HashMap blockMap;
  
  long totalScans = 0;
  long totalVerifications = 0; // includes remote verification by clients.
  long totalScanErrors = 0;
  long totalTransientErrors = 0;
  
  long currentPeriodStart = System.currentTimeMillis();
  long bytesLeft = 0; // Bytes to scan in this period
  long totalBytesToScan = 0;
  
  private LogFileHandler verificationLog;
  
  Random random = new Random();
  
  DataTransferThrottler throttler = null;
  
  // Reconciles blocks on disk to blocks in memory
  DirectoryScanner dirScanner;

  private static enum ScanType {
    REMOTE_READ,           // Verified when a block read by a client etc
    VERIFICATION_SCAN,     // scanned as part of periodic verfication
    NONE,
  }
  
  static class BlockScanInfo implements Comparable {
    Block block;
    long lastScanTime = 0;
    long lastLogTime = 0;
    ScanType lastScanType = ScanType.NONE; 
    boolean lastScanOk = true;
    
    public int compareTo(BlockScanInfo other) {
      long t1 = lastScanTime;
    BlockScanInfo(Block block) {
      this.block = block;
    }
    
    public int hashCode() {
      return block.hashCode();
    }
    
    public boolean equals(Object other) {
      return other instanceof BlockScanInfo &&
             compareTo((BlockScanInfo)other) == 0;
    }
    
    long getLastScanTime() {
      return ( lastScanType == ScanType.NONE) ? 0 : lastScanTime;
    }
    
      long t2 = other.lastScanTime;
      return ( t1 < t2 ) ? -1 : 
                          (( t1 > t2 ) ? 1 : block.compareTo(other.block)); 
    }
  }
=======
  /**
   * Map to find the BlockPoolScanner for a given block pool id. This is updated
   * when a BPOfferService becomes alive or dies.
   */
  private final TreeMap blockPoolScannerMap = 
    new TreeMap();
  Thread blockScannerThread = null;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
  
  DataBlockScanner(DataNode datanode, FSDataset dataset, Configuration conf) {
    this.datanode = datanode;
Solution content
  private final FSDataset dataset;
  private final Configuration conf;
  
  /**
   * Map to find the BlockPoolScanner for a given block pool id. This is updated
   * when a BPOfferService becomes alive or dies.
   */
  private final TreeMap blockPoolScannerMap = 
    new TreeMap();
  Thread blockScannerThread = null;
  
  DataBlockScanner(DataNode datanode, FSDataset dataset, Configuration conf) {
    this.datanode = datanode;
File
DataBlockScanner.java
Developer's decision
Version 2
Kind of conflict
Attribute
Class declaration
Comment
Enum declaration
Method invocation
Chunk
Conflicting content
  DataBlockScanner(DataNode datanode, FSDataset dataset, Configuration conf) {
    this.datanode = datanode;
    this.dataset = dataset;
<<<<<<< HEAD
    scanPeriod = conf.getInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 
                             DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT);
    if ( scanPeriod <= 0 ) {
      scanPeriod = DEFAULT_SCAN_PERIOD_HOURS;
    }
    scanPeriod *= 3600 * 1000;
    // initialized when the scanner thread is started.

    dirScanner = new DirectoryScanner(dataset, conf);
=======
    this.conf = conf;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
  }
  
  public void run() {
Solution content
  DataBlockScanner(DataNode datanode, FSDataset dataset, Configuration conf) {
    this.datanode = datanode;
    this.dataset = dataset;
    this.conf = conf;
  }
  
  public void run() {
File
DataBlockScanner.java
Developer's decision
Version 2
Kind of conflict
Attribute
Comment
If statement
Method invocation
Chunk
Conflicting content
      }
      bpScanner.scanBlockPoolSlice();
    }
<<<<<<< HEAD
    if (dir == null) {
      dir = volumes[0].getDir();
    }
    
    try {
      // max lines will be updated later during initialization.
      verificationLog = new LogFileHandler(dir, verificationLogFile, 100);
    } catch (IOException e) {
      LOG.warn("Could not open verfication log. " +
               "Verification times are not stored.");
    }
    
    synchronized (this) {
      throttler = new DataTransferThrottler(200, MAX_SCAN_RATE);
    }
  }

  private synchronized long getNewBlockScanTime() {
    /* If there are a lot of blocks, this returns a random time with in 
     * the scan period. Otherwise something sooner.
     */
    long period = Math.min(scanPeriod, 
                           Math.max(blockMap.size(),1) * 600 * 1000L);
    return System.currentTimeMillis() - scanPeriod + 
           random.nextInt((int)period);    
=======
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
  }

  // Wait for at least one block pool to be up
Solution content
      }
      bpScanner.scanBlockPoolSlice();
    }
  }

  // Wait for at least one block pool to be up
File
DataBlockScanner.java
Developer's decision
Version 2
Kind of conflict
Attribute
Comment
If statement
Method invocation
Method signature
Return statement
Synchronized statement
Try statement
Variable
Chunk
Conflicting content
package org.apache.hadoop.hdfs.server.datanode;


<<<<<<< HEAD
=======
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status.ERROR_ACCESS_TOKEN;
import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status.SUCCESS;
import static org.apache.hadoop.hdfs.server.common.Util.now;
Solution content
package org.apache.hadoop.hdfs.server.datanode;


import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status.ERROR_ACCESS_TOKEN;
import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status.SUCCESS;
import static org.apache.hadoop.hdfs.server.common.Util.now;
File
DataNode.java
Developer's decision
Version 2
Kind of conflict
Import
Chunk
Conflicting content
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
<<<<<<< HEAD
=======
import org.apache.hadoop.hdfs.security.token.block.BlockPoolTokenSecretManager;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
Solution content
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
import org.apache.hadoop.hdfs.security.token.block.BlockPoolTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
File
DataNode.java
Developer's decision
Version 2
Kind of conflict
Import
Chunk
Conflicting content
  boolean transferToAllowed = true;
  int writePacketSize = 0;
  boolean isBlockTokenEnabled;
<<<<<<< HEAD
  BlockTokenSecretManager blockTokenSecretManager;
  boolean isBlockTokenInitialized = false;
=======
  BlockPoolTokenSecretManager blockPoolTokenSecretManager;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
  boolean syncOnClose;
  
  public DataBlockScanner blockScanner = null;
Solution content
  
  boolean transferToAllowed = true;
  int writePacketSize = 0;
  boolean isBlockTokenEnabled;
  BlockPoolTokenSecretManager blockPoolTokenSecretManager;
  boolean syncOnClose;
  public DataBlockScanner blockScanner = null;
File
DataNode.java
Developer's decision
Version 2
Kind of conflict
Attribute
Chunk
Conflicting content
  private void initConfig(Configuration conf) {
    this.socketTimeout =  conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,
                                      HdfsConstants.READ_TIMEOUT);
<<<<<<< HEAD
    this.socketWriteTimeout = conf.getInt(DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
=======
    this.socketWriteTimeout = conf.getInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
                                          HdfsConstants.WRITE_TIMEOUT);
    /* Based on results on different platforms, we might need set the default 
     * to false on some of them. */
Solution content
  private void initConfig(Configuration conf) {
    this.socketTimeout =  conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,
                                      HdfsConstants.READ_TIMEOUT);
    this.socketWriteTimeout = conf.getInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
                                          HdfsConstants.WRITE_TIMEOUT);
    /* Based on results on different platforms, we might need set the default 
     * to false on some of them. */
File
DataNode.java
Developer's decision
Version 2
Kind of conflict
Attribute
Method invocation
Chunk
Conflicting content
                                       DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);

    this.blockReportInterval =
<<<<<<< HEAD
      conf.getLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, BLOCKREPORT_INTERVAL);
    this.initialBlockReportDelay = conf.getLong(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY,
=======
      conf.getLong(DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, BLOCKREPORT_INTERVAL);
    this.initialBlockReportDelay = conf.getLong(DFS_BLOCKREPORT_INITIAL_DELAY_KEY,
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
                                            BLOCKREPORT_INITIAL_DELAY)* 1000L; 
    if (this.initialBlockReportDelay >= blockReportInterval) {
      this.initialBlockReportDelay = 0;
Solution content
                                       DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);

    this.blockReportInterval =
      conf.getLong(DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, BLOCKREPORT_INTERVAL);
    this.initialBlockReportDelay = conf.getLong(DFS_BLOCKREPORT_INITIAL_DELAY_KEY,
                                            BLOCKREPORT_INITIAL_DELAY)* 1000L; 
    if (this.initialBlockReportDelay >= blockReportInterval) {
      this.initialBlockReportDelay = 0;
File
DataNode.java
Developer's decision
Version 2
Kind of conflict
Attribute
Method invocation
Chunk
Conflicting content
      LOG.info("dfs.blockreport.initialDelay is greater than " +
        "dfs.blockreport.intervalMsec." + " Setting initial delay to 0 msec:");
    }
<<<<<<< HEAD
    this.heartBeatInterval = conf.getLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, HEARTBEAT_INTERVAL) * 1000L;

    // do we need to sync block file contents to disk when blockfile is closed?
    this.syncOnClose = conf.getBoolean(DFSConfigKeys.DFS_DATANODE_SYNCONCLOSE_KEY, 
                                       DFSConfigKeys.DFS_DATANODE_SYNCONCLOSE_DEFAULT);
=======
    this.heartBeatInterval = conf.getLong(DFS_HEARTBEAT_INTERVAL_KEY, HEARTBEAT_INTERVAL) * 1000L;

    // do we need to sync block file contents to disk when blockfile is closed?
    this.syncOnClose = conf.getBoolean(DFS_DATANODE_SYNCONCLOSE_KEY, 
                                       DFS_DATANODE_SYNCONCLOSE_DEFAULT);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
  }
  
  private void startInfoServer(Configuration conf) throws IOException {
Solution content
      LOG.info("dfs.blockreport.initialDelay is greater than " +
        "dfs.blockreport.intervalMsec." + " Setting initial delay to 0 msec:");
    }
    this.heartBeatInterval = conf.getLong(DFS_HEARTBEAT_INTERVAL_KEY, HEARTBEAT_INTERVAL) * 1000L;

    // do we need to sync block file contents to disk when blockfile is closed?
    this.syncOnClose = conf.getBoolean(DFS_DATANODE_SYNCONCLOSE_KEY, 
                                       DFS_DATANODE_SYNCONCLOSE_DEFAULT);
  }
  
  private void startInfoServer(Configuration conf) throws IOException {
File
DataNode.java
Developer's decision
Version 2
Kind of conflict
Attribute
Comment
Method invocation
Chunk
Conflicting content
        conf.get("dfs.datanode.ipc.address"));
    ipcServer = RPC.getServer(DataNode.class, this, ipcAddr.getHostName(),
                              ipcAddr.getPort(), 
<<<<<<< HEAD
                              conf.getInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 
                                          DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_DEFAULT), 
                              false, conf, blockTokenSecretManager);
=======
                              conf.getInt(DFS_DATANODE_HANDLER_COUNT_KEY, 
                                          DFS_DATANODE_HANDLER_COUNT_DEFAULT), 
                              false, conf, blockPoolTokenSecretManager);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    // set service-level authorization security policy
    if (conf.getBoolean(
        CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
Solution content
        conf.get("dfs.datanode.ipc.address"));
    ipcServer = RPC.getServer(DataNode.class, this, ipcAddr.getHostName(),
                              ipcAddr.getPort(), 
                              conf.getInt(DFS_DATANODE_HANDLER_COUNT_KEY, 
                                          DFS_DATANODE_HANDLER_COUNT_DEFAULT), 
                              false, conf, blockPoolTokenSecretManager);
    // set service-level authorization security policy
    if (conf.getBoolean(
        CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
File
DataNode.java
Developer's decision
Version 2
Kind of conflict
Attribute
Method invocation
Variable
Chunk
Conflicting content
      return;
    }
    String reason = null;
<<<<<<< HEAD
    if (conf.getInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 
                    DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT) < 0) {
=======
    if (conf.getInt(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY,
                    DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT) < 0) {
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      reason = "verification is turned off by configuration";
    } else if (!(data instanceof FSDataset)) {
      reason = "verifcation is supported only with FSDataset";
Solution content
      return;
    }
    String reason = null;
    if (conf.getInt(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY,
                    DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT) < 0) {
      reason = "verification is turned off by configuration";
    } else if (!(data instanceof FSDataset)) {
      reason = "verifcation is supported only with FSDataset";
File
DataNode.java
Developer's decision
Version 2
Kind of conflict
If statement
Chunk
Conflicting content
    return (socketWriteTimeout > 0) ? 
           SocketChannel.open().socket() : new Socket();                                   
  }
<<<<<<< HEAD
  
  private NamespaceInfo handshake() throws IOException {
    NamespaceInfo nsInfo = new NamespaceInfo();
    while (shouldRun) {
      try {
        nsInfo = namenode.versionRequest();
        break;
      } catch(IOException e) {  // namenode cannot be contacted
        LOG.info("Problem connecting to server: " + getNameNodeAddr(), e);
      }
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ie) {}
    }
    String errorMsg = null;
    // verify build version
    if( ! nsInfo.getBuildVersion().equals( Storage.getBuildVersion() )) {
      errorMsg = "Incompatible build versions: namenode BV = " 
        + nsInfo.getBuildVersion() + "; datanode BV = "
        + Storage.getBuildVersion();
      LOG.fatal( errorMsg );
      try {
        namenode.errorReport( dnRegistration,
                              DatanodeProtocol.NOTIFY, errorMsg );
      } catch( SocketTimeoutException e ) {  // namenode is busy
        LOG.info("Problem connecting to server: " + getNameNodeAddr());
      }
      throw new IOException( errorMsg );
    }
    assert FSConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() :
      "Data-node and name-node layout versions must be the same."
      + "Expected: "+ FSConstants.LAYOUT_VERSION + " actual "+ nsInfo.getLayoutVersion();
    return nsInfo;
  }
=======
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0

  private static void setDataNode(DataNode node) {
    datanodeObject = node;
Solution content
    return (socketWriteTimeout > 0) ? 
           SocketChannel.open().socket() : new Socket();                                   
  }

  private static void setDataNode(DataNode node) {
    datanodeObject = node;
File
DataNode.java
Developer's decision
Version 2
Kind of conflict
Method declaration
Chunk
Conflicting content
    } catch (NoSuchAlgorithmException e) {
      LOG.warn("Could not use SecureRandom");
      rand = R.nextInt(Integer.MAX_VALUE);
<<<<<<< HEAD
    }
    dnReg.storageID = "DS-" + rand + "-"+ ip + "-" + dnReg.getPort() + "-" + 
                      System.currentTimeMillis();
  }
  /**
   * Register datanode
   * 

   * The datanode needs to register with the namenode on startup in order
   * 1) to report which storage it is serving now and
   * 2) to receive a registrationID
   * issued by the namenode to recognize registered datanodes.
   *
   * @see FSNamesystem#registerDatanode(DatanodeRegistration)
   * @throws IOException
   */
  private void register() throws IOException {
    if (dnRegistration.getStorageID().equals("")) {
      setNewStorageID(dnRegistration);
    }
    while(shouldRun) {
      try {
        // reset name to machineName. Mainly for web interface.
        dnRegistration.name = machineName + ":" + dnRegistration.getPort();
        dnRegistration = namenode.registerDatanode(dnRegistration);
        break;
      } catch(RemoteException re) {
        IOException ue = re.unwrapRemoteException(
            UnregisteredNodeException.class,
            DisallowedDatanodeException.class,
            IncorrectVersionException.class);
        if (ue != re) {
          LOG.warn("DataNode is shutting down: ", re);
          throw ue;
        }
      } catch(IOException e) { // namenode cannot be contacted
        LOG.info("Problem connecting to server: " + getNameNodeAddr(), e);
      }
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ie) {}
    }
    assert ("".equals(storage.getStorageID())
            && !"".equals(dnRegistration.getStorageID()))
        || storage.getStorageID().equals(dnRegistration.getStorageID()) :
        "New storageID can be assigned only if data-node is not formatted";
    if (storage.getStorageID().equals("")) {
      storage.setStorageID(dnRegistration.getStorageID());
      storage.writeAll();
      LOG.info("New storage id " + dnRegistration.getStorageID()
          + " is assigned to data-node " + dnRegistration.getName());
    }
    if(! storage.getStorageID().equals(dnRegistration.getStorageID())) {
      throw new IOException("Inconsistent storage IDs. Name-node returned "
          + dnRegistration.getStorageID() + ". Expecting "
          + storage.getStorageID());
    }
    if (!isBlockTokenInitialized) {
      /* first time registering with NN */
      ExportedBlockKeys keys = dnRegistration.exportedKeys;
      this.isBlockTokenEnabled = keys.isBlockTokenEnabled();
      if (isBlockTokenEnabled) {
        long blockKeyUpdateInterval = keys.getKeyUpdateInterval();
        long blockTokenLifetime = keys.getTokenLifetime();
        LOG.info("Block token params received from NN: keyUpdateInterval="
            + blockKeyUpdateInterval / (60 * 1000) + " min(s), tokenLifetime="
            + blockTokenLifetime / (60 * 1000) + " min(s)");
        blockTokenSecretManager.setTokenLifetime(blockTokenLifetime);
      }
      isBlockTokenInitialized = true;
    }
    if (isBlockTokenEnabled) {
      blockTokenSecretManager.setKeys(dnRegistration.exportedKeys);
      dnRegistration.exportedKeys = ExportedBlockKeys.DUMMY_KEYS;
=======
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    }
    return "DS-" + rand + "-" + ip + "-" + port + "-"
        + System.currentTimeMillis();

Solution content
    } catch (NoSuchAlgorithmException e) {
      LOG.warn("Could not use SecureRandom");
      rand = R.nextInt(Integer.MAX_VALUE);
    }
    return "DS-" + rand + "-" + ip + "-" + port + "-"
        + System.currentTimeMillis();
File
DataNode.java
Developer's decision
Version 2
Kind of conflict
Assert statement
Attribute
Comment
If statement
Method invocation
Method signature
Variable
While statement
Chunk
Conflicting content
  private void handleDiskError(String errMsgr) {
    final boolean hasEnoughResources = data.hasEnoughResource();
    LOG.warn("DataNode.handleDiskError: Keep Running: " + hasEnoughResources);
<<<<<<< HEAD

    // If we have enough active valid volumes then we do not want to 
    // shutdown the DN completely.
    int dpError = hasEnoughResources ? DatanodeProtocol.DISK_ERROR  
                                     : DatanodeProtocol.FATAL_DISK_ERROR;  
    
    myMetrics.volumesFailed.inc(1);
    try {
      namenode.errorReport(dnRegistration, dpError, errMsgr);
    } catch (IOException e) {
      LOG.warn("Error reporting disk failure to NameNode: " + 
          StringUtils.stringifyException(e));
    }
    
    if (hasEnoughResources) {
      scheduleBlockReport(0);
=======
    
    // If we have enough active valid volumes then we do not want to 
    // shutdown the DN completely.
    int dpError = hasEnoughResources ? DatanodeProtocol.DISK_ERROR  
                                     : DatanodeProtocol.FATAL_DISK_ERROR;  
    myMetrics.volumesFailed.inc(1);

    //inform NameNodes
    for(BPOfferService bpos: blockPoolManager.getAllNamenodeThreads()) {
      DatanodeProtocol nn = bpos.bpNamenode;
      try {
        nn.errorReport(bpos.bpRegistration, dpError, errMsgr);
      } catch(IOException e) {
        LOG.warn("Error reporting disk failure to NameNode: " + 
            StringUtils.stringifyException(e));
      }
    }
    
    if(hasEnoughResources) {
      scheduleAllBlockReport(0);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      return; // do not shutdown
    }
    
Solution content
  private void handleDiskError(String errMsgr) {
    final boolean hasEnoughResources = data.hasEnoughResource();
    LOG.warn("DataNode.handleDiskError: Keep Running: " + hasEnoughResources);
    
    // If we have enough active valid volumes then we do not want to 
    // shutdown the DN completely.
    int dpError = hasEnoughResources ? DatanodeProtocol.DISK_ERROR  
                                     : DatanodeProtocol.FATAL_DISK_ERROR;  
    myMetrics.volumesFailed.inc(1);

    //inform NameNodes
    for(BPOfferService bpos: blockPoolManager.getAllNamenodeThreads()) {
      DatanodeProtocol nn = bpos.bpNamenode;
      try {
        nn.errorReport(bpos.bpRegistration, dpError, errMsgr);
      } catch(IOException e) {
        LOG.warn("Error reporting disk failure to NameNode: " + 
            StringUtils.stringifyException(e));
      }
    }
    
    if(hasEnoughResources) {
      scheduleAllBlockReport(0);
      return; // do not shutdown
    }
    
File
DataNode.java
Developer's decision
Version 2
Kind of conflict
Comment
For statement
If statement
Method invocation
Try statement
Variable
Chunk
Conflicting content
    
    }
    
    LOG.warn("DataNode is shutting down: " + errMsgr);
<<<<<<< HEAD
    shouldRun = false; 
=======
    shouldRun = false;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
  }
  /** Number of concurrent xceivers per node. */
Solution content
    }
    
    LOG.warn("DataNode is shutting down: " + errMsgr);
    shouldRun = false;
  }
    
  /** Number of concurrent xceivers per node. */
File
DataNode.java
Developer's decision
Version 1
Kind of conflict
Attribute
Chunk
Conflicting content
  int getXceiverCount() {
    return threadGroup == null ? 0 : threadGroup.activeCount();
  }
<<<<<<< HEAD
    
  /**
   * Main loop for the DataNode.  Runs until shutdown,
   * forever calling remote NameNode functions.
   */
  public void offerService() throws Exception {
     
    LOG.info("using BLOCKREPORT_INTERVAL of " + blockReportInterval + "msec" + 
       " Initial delay: " + initialBlockReportDelay + "msec");

    //
    // Now loop for a long time....
    //
    while (shouldRun) {
      try {
        long startTime = now();

        //
        // Every so often, send heartbeat or block-report
        //
        
        if (startTime - lastHeartbeat > heartBeatInterval) {
          //
          // All heartbeat messages include following info:
          // -- Datanode name
          // -- data transfer port
          // -- Total capacity
          // -- Bytes remaining
          //
          lastHeartbeat = startTime;
          DatanodeCommand[] cmds = namenode.sendHeartbeat(dnRegistration,
                                                       data.getCapacity(),
                                                       data.getDfsUsed(),
                                                       data.getRemaining(),
                                                       xmitsInProgress.get(),
                                                       getXceiverCount());
          myMetrics.heartbeats.inc(now() - startTime);
          if (!processCommand(cmds))
            continue;
        }
            
        reportReceivedBlocks();

        DatanodeCommand cmd = blockReport();
        processCommand(cmd);

        // start block scanner
        if (blockScanner != null && blockScannerThread == null &&
            upgradeManager.isUpgradeCompleted()) {
          LOG.info("Starting Periodic block scanner.");
          blockScannerThread = new Daemon(blockScanner);
          blockScannerThread.start();
        }
            
        //
        // There is no work to do;  sleep until hearbeat timer elapses, 
        // or work arrives, and then iterate again.
        //
        long waitTime = heartBeatInterval - (System.currentTimeMillis() - lastHeartbeat);
        synchronized(receivedBlockList) {
          if (waitTime > 0 && receivedBlockList.size() == 0) {
            try {
              receivedBlockList.wait(waitTime);
            } catch (InterruptedException ie) {
            }
          }
        } // synchronized
      } catch(RemoteException re) {
        String reClass = re.getClassName();
        if (UnregisteredNodeException.class.getName().equals(reClass) ||
            DisallowedDatanodeException.class.getName().equals(reClass) ||
            IncorrectVersionException.class.getName().equals(reClass)) {
          LOG.warn("DataNode is shutting down: " + 
                   StringUtils.stringifyException(re));
          shutdown();
          return;
        }
        LOG.warn(StringUtils.stringifyException(re));
        try {
          long sleepTime = Math.min(1000, heartBeatInterval);
          Thread.sleep(sleepTime);
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
        }
      } catch (IOException e) {
        LOG.warn(StringUtils.stringifyException(e));
      }
    } // while (shouldRun)
  } // offerService

  /**
   * Process an array of datanode commands
   * 
   * @param cmds an array of datanode commands
   * @return true if further processing may be required or false otherwise. 
   */
  private boolean processCommand(DatanodeCommand[] cmds) {
    if (cmds != null) {
      for (DatanodeCommand cmd : cmds) {
        try {
          if (processCommand(cmd) == false) {
            return false;
          }
        } catch (IOException ioe) {
          LOG.warn("Error processing datanode Command", ioe);
        }
      }
    }
    return true;
  }
  
    /**
     * 
     * @param cmd
     * @return true if further processing may be required or false otherwise. 
     * @throws IOException
     */
  private boolean processCommand(DatanodeCommand cmd) throws IOException {
    if (cmd == null)
      return true;
    final BlockCommand bcmd = cmd instanceof BlockCommand? (BlockCommand)cmd: null;

    switch(cmd.getAction()) {
    case DatanodeProtocol.DNA_TRANSFER:
      // Send a copy of a block to another datanode
      transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
      myMetrics.blocksReplicated.inc(bcmd.getBlocks().length);
      break;
    case DatanodeProtocol.DNA_INVALIDATE:
      //
      // Some local block(s) are obsolete and can be 
      // safely garbage-collected.
      //
      Block toDelete[] = bcmd.getBlocks();
      try {
        if (blockScanner != null) {
          blockScanner.deleteBlocks(toDelete);
        }
        data.invalidate(toDelete);
      } catch(IOException e) {
        checkDiskError();
        throw e;
      }
      myMetrics.blocksRemoved.inc(toDelete.length);
      break;
    case DatanodeProtocol.DNA_SHUTDOWN:
      // shut down the data node
      this.shutdown();
      return false;
    case DatanodeProtocol.DNA_REGISTER:
      // namenode requested a registration - at start or if NN lost contact
      LOG.info("DatanodeCommand action: DNA_REGISTER");
      if (shouldRun) {
        register();
      }
      break;
    case DatanodeProtocol.DNA_FINALIZE:
      storage.finalizeUpgrade();
      break;
    case UpgradeCommand.UC_ACTION_START_UPGRADE:
      // start distributed upgrade here
      processDistributedUpgradeCommand((UpgradeCommand)cmd);
      break;
    case DatanodeProtocol.DNA_RECOVERBLOCK:
      recoverBlocks(((BlockRecoveryCommand)cmd).getRecoveringBlocks());
      break;
    case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
      LOG.info("DatanodeCommand action: DNA_ACCESSKEYUPDATE");
      if (isBlockTokenEnabled) {
        blockTokenSecretManager.setKeys(((KeyUpdateCommand) cmd).getExportedKeys());
      }
      break;
    default:
      LOG.warn("Unknown DatanodeCommand action: " + cmd.getAction());
    }
    return true;
  }

  // Distributed upgrade manager
  UpgradeManagerDatanode upgradeManager = new UpgradeManagerDatanode(this);

  private void processDistributedUpgradeCommand(UpgradeCommand comm
                                               ) throws IOException {
    assert upgradeManager != null : "DataNode.upgradeManager is null.";
    upgradeManager.processUpgradeCommand(comm);
  }

  /**
   * Report received blocks and delete hints to the Namenode
   * @throws IOException
   */
  private void reportReceivedBlocks() throws IOException {
    //check if there are newly received blocks
    Block [] blockArray=null;
    String [] delHintArray=null;
    synchronized(receivedBlockList) {
      synchronized(delHints){
        int numBlocks = receivedBlockList.size();
        if (numBlocks > 0) {
          if(numBlocks!=delHints.size()) {
            LOG.warn("Panic: receiveBlockList and delHints are not of the same length" );
          }
          //
          // Send newly-received blockids to namenode
          //
          blockArray = receivedBlockList.toArray(new Block[numBlocks]);
          delHintArray = delHints.toArray(new String[numBlocks]);
        }
      }
    }
    if (blockArray != null) {
      if(delHintArray == null || delHintArray.length != blockArray.length ) {
        LOG.warn("Panic: block array & delHintArray are not the same" );
      }
      namenode.blockReceived(dnRegistration, blockArray, delHintArray);
      synchronized(receivedBlockList) {
        synchronized(delHints){
          for(int i=0; i<blockArray.length; i++) {
            receivedBlockList.remove(blockArray[i]);
            delHints.remove(delHintArray[i]);
          }
        }
      }
    }
  }

  /**
   * Report the list of blocks to the Namenode
   * @throws IOException
   */
  DatanodeCommand blockReport() throws IOException {
    // send block report if timer has expired.
    DatanodeCommand cmd = null;
    long startTime = now();
    if (startTime - lastBlockReport > blockReportInterval) {

      // Create block report
      long brCreateStartTime = now();
      BlockListAsLongs bReport = data.getBlockReport();

      // Send block report
      long brSendStartTime = now();
      cmd = namenode.blockReport(dnRegistration, bReport.getBlockListAsLongs());

      // Log the block report processing stats from Datanode perspective
      long brSendCost = now() - brSendStartTime;
      long brCreateCost = brSendStartTime - brCreateStartTime;
      myMetrics.blockReports.inc(brSendCost);
      LOG.info("BlockReport of " + bReport.getNumberOfBlocks()
          + " blocks took " + brCreateCost + " msec to generate and "
          + brSendCost + " msecs for RPC and NN processing");

      //
      // If we have sent the first block report, then wait a random
      // time before we start the periodic block reports.
      //
      if (resetBlockReportTime) {
        lastBlockReport = startTime - R.nextInt((int)(blockReportInterval));
        resetBlockReportTime = false;
      } else {
        /* say the last block report was at 8:20:14. The current report
         * should have started around 9:20:14 (default 1 hour interval).
         * If current time is :
         *   1) normal like 9:20:18, next report should be at 10:20:14
         *   2) unexpected like 11:35:43, next report should be at 12:20:14
         */
        lastBlockReport += (now() - lastBlockReport) /
                           blockReportInterval * blockReportInterval;
      }
=======

  static UpgradeManagerDatanode getUpgradeManagerDatanode(String bpid) {
    DataNode dn = getDataNode();
    BPOfferService bpos = dn.blockPoolManager.get(bpid);
    if(bpos==null) {
      return null;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    }
    return bpos.getUpgradeManager();
  }
Solution content
  int getXceiverCount() {
    return threadGroup == null ? 0 : threadGroup.activeCount();
  }

  static UpgradeManagerDatanode getUpgradeManagerDatanode(String bpid) {
    DataNode dn = getDataNode();
    BPOfferService bpos = dn.blockPoolManager.get(bpid);
    if(bpos==null) {
      return null;
    }
    return bpos.getUpgradeManager();
  }
File
DataNode.java
Developer's decision
Version 2
Kind of conflict
Attribute
Comment
If statement
Method declaration
Method invocation
Method signature
Return statement
Variable
Chunk
Conflicting content
    /**
   */
  private class DataTransfer implements Runnable {
    final DatanodeInfo[] targets;
<<<<<<< HEAD
    final Block b;
    final BlockConstructionStage stage;
=======
    final ExtendedBlock b;
    final BlockConstructionStage stage;
    final private DatanodeRegistration bpReg;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    final String clientname;
Solution content
   */
  private class DataTransfer implements Runnable {
    final DatanodeInfo[] targets;
    final ExtendedBlock b;
    final BlockConstructionStage stage;
    final private DatanodeRegistration bpReg;
    final String clientname;

    /**
File
DataNode.java
Developer's decision
Version 2
Kind of conflict
Attribute
Chunk
Conflicting content
     * Connect to the first item in the target list.  Pass along the 
     * entire target list, the block, and the data.
     */
<<<<<<< HEAD
    DataTransfer(DatanodeInfo targets[], Block b, BlockConstructionStage stage,
=======
    DataTransfer(DatanodeInfo targets[], ExtendedBlock b, BlockConstructionStage stage,
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
        final String clientname) throws IOException {
      if (DataTransferProtocol.LOG.isDebugEnabled()) {
        DataTransferProtocol.LOG.debug(getClass().getSimpleName() + ": "
Solution content
     * Connect to the first item in the target list.  Pass along the 
     * entire target list, the block, and the data.
     */
    DataTransfer(DatanodeInfo targets[], ExtendedBlock b, BlockConstructionStage stage,
        final String clientname) throws IOException {
      if (DataTransferProtocol.LOG.isDebugEnabled()) {
        DataTransferProtocol.LOG.debug(getClass().getSimpleName() + ": "
File
DataNode.java
Developer's decision
Version 2
Kind of conflict
Method signature
Chunk
Conflicting content
      this.targets = targets;
      this.b = b;
      this.stage = stage;
<<<<<<< HEAD
=======
      BPOfferService bpos = blockPoolManager.get(b.getBlockPoolId());
      bpReg = bpos.bpRegistration;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      this.clientname = clientname;
    }
Solution content
      this.targets = targets;
      this.b = b;
      this.stage = stage;
      BPOfferService bpos = blockPoolManager.get(b.getBlockPoolId());
      bpReg = bpos.bpRegistration;
      this.clientname = clientname;
    }
File
DataNode.java
Developer's decision
Version 2
Kind of conflict
Attribute
Method invocation
Variable
Chunk
Conflicting content
                                                            SMALL_BUFFER_SIZE));
        blockSender = new BlockSender(b, 0, b.getNumBytes(), 
            false, false, false, DataNode.this);
<<<<<<< HEAD
        DatanodeInfo srcNode = new DatanodeInfo(dnRegistration);
=======
        DatanodeInfo srcNode = new DatanodeInfo(bpReg);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0

        //
        // Header info
Solution content
                                                            SMALL_BUFFER_SIZE));
        blockSender = new BlockSender(b, 0, b.getNumBytes(), 
            false, false, false, DataNode.this);
        DatanodeInfo srcNode = new DatanodeInfo(bpReg);

        //
        // Header info
File
DataNode.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
  static Collection<URI> getStorageDirs(Configuration conf) {
    Collection<String> dirNames =
<<<<<<< HEAD
      conf.getTrimmedStringCollection(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
=======
      conf.getTrimmedStringCollection(DFS_DATANODE_DATA_DIR_KEY);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    return Util.stringCollectionAsURIs(dirNames);
  }
Solution content
  static Collection<URI> getStorageDirs(Configuration conf) {
    Collection<String> dirNames =
      conf.getTrimmedStringCollection(DFS_DATANODE_DATA_DIR_KEY);
    return Util.stringCollectionAsURIs(dirNames);
  }
File
DataNode.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Chunk
Conflicting content
        dirs.add(data);
      } catch (IOException e) {
        LOG.warn("Invalid directory in: "
<<<<<<< HEAD
                 + DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY + ": ", e);
=======
                 + DFS_DATANODE_DATA_DIR_KEY + ": ", e);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
        invalidDirs.append("\"").append(data.getCanonicalPath()).append("\" ");
      }
    }
Solution content
        dirs.add(data);
      } catch (IOException e) {
        LOG.warn("Invalid directory in: "
                 + DFS_DATANODE_DATA_DIR_KEY + ": ", e);
        invalidDirs.append("\"").append(data.getCanonicalPath()).append("\" ");
      }
    }
File
DataNode.java
Developer's decision
Version 2
Kind of conflict
Variable
Chunk
Conflicting content
    }
    if (dirs.size() == 0)
      throw new IOException("All directories in "
<<<<<<< HEAD
          + DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY + " are invalid: "
=======
          + DFS_DATANODE_DATA_DIR_KEY + " are invalid: "
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
          + invalidDirs);
    return dirs;
  }
Solution content
    }
    if (dirs.size() == 0)
      throw new IOException("All directories in "
          + DFS_DATANODE_DATA_DIR_KEY + " are invalid: "
          + invalidDirs);
    return dirs;
  }
File
DataNode.java
Developer's decision
Version 2
Kind of conflict
Throw statement
Chunk
Conflicting content
  // ClientDataNodeProtocol implementation
  /** {@inheritDoc} */
  @Override // ClientDataNodeProtocol
<<<<<<< HEAD
  public long getReplicaVisibleLength(final Block block) throws IOException {
=======
  public long getReplicaVisibleLength(final ExtendedBlock block) throws IOException {
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    checkWriteAccess(block);
    return data.getReplicaVisibleLength(block);
  }
Solution content
  // ClientDataNodeProtocol implementation
  /** {@inheritDoc} */
  @Override // ClientDataNodeProtocol
  public long getReplicaVisibleLength(final ExtendedBlock block) throws IOException {
    checkWriteAccess(block);
    return data.getReplicaVisibleLength(block);
  }
File
DataNode.java
Developer's decision
Version 2
Kind of conflict
Method signature
Chunk
Conflicting content
    return data.getReplicaVisibleLength(block);
  }

<<<<<<< HEAD
  private void checkWriteAccess(final Block block) throws IOException {
=======
  private void checkWriteAccess(final ExtendedBlock block) throws IOException {
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    if (isBlockTokenEnabled) {
      Set<TokenIdentifier> tokenIds = UserGroupInformation.getCurrentUser()
          .getTokenIdentifiers();
Solution content
    return data.getReplicaVisibleLength(block);
  }

  private void checkWriteAccess(final ExtendedBlock block) throws IOException {
    if (isBlockTokenEnabled) {
      Set<TokenIdentifier> tokenIds = UserGroupInformation.getCurrentUser()
          .getTokenIdentifiers();
File
DataNode.java
Developer's decision
Version 2
Kind of conflict
Method signature
Chunk
Conflicting content
   * @param targets
   * @param client
   */
<<<<<<< HEAD
  void transferReplicaForPipelineRecovery(final Block b,
=======
  void transferReplicaForPipelineRecovery(final ExtendedBlock b,
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      final DatanodeInfo[] targets, final String client) throws IOException {
    final long storedGS;
    final long visible;
Solution content
   * @param targets
   * @param client
   */
  void transferReplicaForPipelineRecovery(final ExtendedBlock b,
      final DatanodeInfo[] targets, final String client) throws IOException {
    final long storedGS;
    final long visible;
File
DataNode.java
Developer's decision
Version 2
Kind of conflict
Method signature
Chunk
Conflicting content
      } else if (data.isValidBlock(b)) {
        stage = BlockConstructionStage.TRANSFER_FINALIZED;
      } else {
<<<<<<< HEAD
        final String r = data.getReplicaString(b.getBlockId());
        throw new IOException(b + " is neither a RBW nor a Finalized, r=" + r);
      }

      storedGS = data.getStoredBlock(b.getBlockId()).getGenerationStamp();
=======
        final String r = data.getReplicaString(b.getBlockPoolId(), b.getBlockId());
        throw new IOException(b + " is neither a RBW nor a Finalized, r=" + r);
      }

      storedGS = data.getStoredBlock(b.getBlockPoolId(),
          b.getBlockId()).getGenerationStamp();
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      if (storedGS < b.getGenerationStamp()) {
        throw new IOException(
            storedGS + " = storedGS < b.getGenerationStamp(), b=" + b);        
Solution content
      } else if (data.isValidBlock(b)) {
        stage = BlockConstructionStage.TRANSFER_FINALIZED;
      } else {
        final String r = data.getReplicaString(b.getBlockPoolId(), b.getBlockId());
        throw new IOException(b + " is neither a RBW nor a Finalized, r=" + r);
      }

      storedGS = data.getStoredBlock(b.getBlockPoolId(),
          b.getBlockId()).getGenerationStamp();
      if (storedGS < b.getGenerationStamp()) {
        throw new IOException(
            storedGS + " = storedGS < b.getGenerationStamp(), b=" + b);        
File
DataNode.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Throw statement
Variable
Chunk
Conflicting content
import java.util.regex.Pattern;

import org.apache.hadoop.classification.InterfaceAudience;
<<<<<<< HEAD
import org.apache.hadoop.fs.HardLink;
=======
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.HardLink;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
Solution content
import java.util.regex.Pattern;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.HardLink;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
File
DataStorage.java
Developer's decision
Version 2
Kind of conflict
Import
Chunk
Conflicting content
      SUCCESS.write(out); // send op status
      long read = blockSender.sendBlock(out, baseStream, null); // send data
<<<<<<< HEAD

      // If client verification succeeded, and if it's for the whole block,
      // tell the DataBlockScanner that it's good. This is an optional response
      // from the client. If absent, we close the connection (which is what we
      // always do anyways).
      try {
        if (DataTransferProtocol.Status.read(in) == CHECKSUM_OK) {
          if (blockSender.isBlockReadFully() && datanode.blockScanner != null) {
            datanode.blockScanner.verifiedByClient(block);
          }
        }
      } catch (IOException ignored) {}
=======
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      
      datanode.myMetrics.bytesRead.inc((int) read);
      datanode.myMetrics.blocksRead.inc();
Solution content
      SUCCESS.write(out); // send op status
      long read = blockSender.sendBlock(out, baseStream, null); // send data
      
      datanode.myMetrics.bytesRead.inc((int) read);
      datanode.myMetrics.blocksRead.inc();
File
DataXceiver.java
Developer's decision
Version 2
Kind of conflict
Comment
Try statement
Chunk
Conflicting content
   * Write a block to disk.
   */
  @Override
<<<<<<< HEAD
  protected void opWriteBlock(final DataInputStream in, final Block block, 
=======
  protected void opWriteBlock(final DataInputStream in, final ExtendedBlock block, 
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      final int pipelineSize, final BlockConstructionStage stage,
      final long newGs, final long minBytesRcvd, final long maxBytesRcvd,
      final String clientname, final DatanodeInfo srcDataNode,
Solution content
   * Write a block to disk.
   */
  @Override
  protected void opWriteBlock(final DataInputStream in, final ExtendedBlock block, 
      final int pipelineSize, final BlockConstructionStage stage,
      final long newGs, final long minBytesRcvd, final long maxBytesRcvd,
      final String clientname, final DatanodeInfo srcDataNode,
File
DataXceiver.java
Developer's decision
Version 2
Kind of conflict
Method signature
Chunk
Conflicting content
    // We later mutate block's generation stamp and length, but we need to
    // forward the original version of the block to downstream mirrors, so
    // make a copy here.
<<<<<<< HEAD
    final Block originalBlock = new Block(block);

=======
    final ExtendedBlock originalBlock = new ExtendedBlock(block);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    block.setNumBytes(dataXceiverServer.estimateBlockSize);
    LOG.info("Receiving block " + block + 
             " src: " + remoteAddress +
Solution content
    // We later mutate block's generation stamp and length, but we need to
    // forward the original version of the block to downstream mirrors, so
    // make a copy here.
    final ExtendedBlock originalBlock = new ExtendedBlock(block);
    block.setNumBytes(dataXceiverServer.estimateBlockSize);
    LOG.info("Receiving block " + block + 
             " src: " + remoteAddress +
File
DataXceiver.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
  @Override
  protected void opTransferBlock(final DataInputStream in,
<<<<<<< HEAD
      final Block blk, final String client,
=======
      final ExtendedBlock blk, final String client,
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      final DatanodeInfo[] targets,
      final Token<BlockTokenIdentifier> blockToken) throws IOException {
    checkAccess(null, true, blk, blockToken,
Solution content
  @Override
  protected void opTransferBlock(final DataInputStream in,
      final ExtendedBlock blk, final String client,
      final DatanodeInfo[] targets,
      final Token<BlockTokenIdentifier> blockToken) throws IOException {
    checkAccess(null, true, blk, blockToken,
File
DataXceiver.java
Developer's decision
Version 2
Kind of conflict
Variable
Chunk
Conflicting content
    checkAccess(out, true, block, blockToken,
        DataTransferProtocol.Op.BLOCK_CHECKSUM,
        BlockTokenSecretManager.AccessMode.READ);
<<<<<<< HEAD

=======
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    updateCurrentThreadName("Reading metadata for block " + block);
    final MetaDataInputStream metadataIn = 
      datanode.data.getMetaDataInputStream(block);
Solution content
    checkAccess(out, true, block, blockToken,
        DataTransferProtocol.Op.BLOCK_CHECKSUM,
        BlockTokenSecretManager.AccessMode.READ);
    updateCurrentThreadName("Reading metadata for block " + block);
    final MetaDataInputStream metadataIn = 
      datanode.data.getMetaDataInputStream(block);
File
DataXceiver.java
Developer's decision
Version 1
Kind of conflict
Blank
Chunk
Conflicting content
  }

  private void checkAccess(DataOutputStream out, final boolean reply, 
<<<<<<< HEAD
      final Block blk,
=======
      final ExtendedBlock blk,
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      final Token<BlockTokenIdentifier> t,
      final DataTransferProtocol.Op op,
      final BlockTokenSecretManager.AccessMode mode) throws IOException {
Solution content
  }

  private void checkAccess(DataOutputStream out, final boolean reply, 
      final ExtendedBlock blk,
      final Token<BlockTokenIdentifier> t,
      final DataTransferProtocol.Op op,
      final BlockTokenSecretManager.AccessMode mode) throws IOException {
File
DataXceiver.java
Developer's decision
Version 2
Kind of conflict
Variable
Chunk
Conflicting content
      final BlockTokenSecretManager.AccessMode mode) throws IOException {
    if (datanode.isBlockTokenEnabled) {
      try {
<<<<<<< HEAD
        datanode.blockTokenSecretManager.checkAccess(t, null, blk, mode);
=======
        datanode.blockPoolTokenSecretManager.checkAccess(t, null, blk, mode);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      } catch(InvalidToken e) {
        try {
          if (reply) {
Solution content
      final BlockTokenSecretManager.AccessMode mode) throws IOException {
    if (datanode.isBlockTokenEnabled) {
      try {
        datanode.blockPoolTokenSecretManager.checkAccess(t, null, blk, mode);
      } catch(InvalidToken e) {
        try {
          if (reply) {
File
DataXceiver.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Chunk
Conflicting content
            }
            ERROR_ACCESS_TOKEN.write(out);
            if (mode == BlockTokenSecretManager.AccessMode.WRITE) {
<<<<<<< HEAD
              Text.writeString(out, datanode.dnRegistration.getName());
=======
              DatanodeRegistration dnR = 
                datanode.getDNRegistrationForBP(blk.getBlockPoolId());
              Text.writeString(out, dnR.getName());
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
            }
            out.flush();
          }
Solution content
            }
            ERROR_ACCESS_TOKEN.write(out);
            if (mode == BlockTokenSecretManager.AccessMode.WRITE) {
              DatanodeRegistration dnR = 
                datanode.getDNRegistrationForBP(blk.getBlockPoolId());
              Text.writeString(out, dnR.getName());
            }
            out.flush();
          }
File
DataXceiver.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
   * Enforcing the limit is required in order to avoid data-node
   * running out of memory.
   */
<<<<<<< HEAD
  int maxXceiverCount =
    DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT;
=======
  int maxXceiverCount = DFSConfigKeys.DFS_DATANODE_MAX_XCIEVERS_DEFAULT;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0

  /** A manager to make sure that cluster balancing does not
   * take too much resources.
Solution content
   * Enforcing the limit is required in order to avoid data-node
   * running out of memory.
   */
  int maxXceiverCount =
    DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT;

  /** A manager to make sure that cluster balancing does not
   * take too much resources.
File
DataXceiverServer.java
Developer's decision
Version 1
Kind of conflict
Attribute
Chunk
Conflicting content
    this.datanode = datanode;
    
    this.maxXceiverCount = 
<<<<<<< HEAD
      conf.getInt(DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY,
                  DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT);
=======
      conf.getInt(DFSConfigKeys.DFS_DATANODE_MAX_XCIEVERS_KEY,
                  DFSConfigKeys.DFS_DATANODE_MAX_XCIEVERS_DEFAULT);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    
    this.estimateBlockSize = 
      conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
Solution content
    this.datanode = datanode;
    
    this.maxXceiverCount = 
      conf.getInt(DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY,
                  DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT);
    
    this.estimateBlockSize = 
      conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
File
DataXceiverServer.java
Developer's decision
Version 1
Kind of conflict
Method invocation
Chunk
Conflicting content
    private final File rbwDir; // directory store RBW replica
    private final File tmpDir; // directory store Temporary replica
    
<<<<<<< HEAD
    FSVolume(File currentDir, Configuration conf) throws IOException {
      this.reserved = conf.getLong(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY,
                                   DFSConfigKeys.DFS_DATANODE_DU_RESERVED_DEFAULT);
      this.currentDir = currentDir; 
      File parent = currentDir.getParentFile();
=======
    // TODO:FEDERATION scalability issue - a thread per DU is needed
    private final DU dfsUsage;

    /**
     * 
     * @param bpid Block pool Id
     * @param volume {@link FSVolume} to which this BlockPool belongs to
     * @param bpDir directory corresponding to the BlockPool
     * @param conf
     * @throws IOException
     */
    BlockPoolSlice(String bpid, FSVolume volume, File bpDir, Configuration conf)
        throws IOException {
      this.bpid = bpid;
      this.volume = volume;
      this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT); 
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      final File finalizedDir = new File(
          currentDir, DataStorage.STORAGE_DIR_FINALIZED);
Solution content
    // TODO:FEDERATION scalability issue - a thread per DU is needed
    private final DU dfsUsage;
    private final File rbwDir; // directory store RBW replica
    private final File tmpDir; // directory store Temporary replica
    
    /**
     * 
     * @param bpid Block pool Id
     * @param volume {@link FSVolume} to which this BlockPool belongs to
     * @param bpDir directory corresponding to the BlockPool
     * @param conf
     * @throws IOException
     */
    BlockPoolSlice(String bpid, FSVolume volume, File bpDir, Configuration conf)
        throws IOException {
      this.bpid = bpid;
      this.volume = volume;
      this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT); 
      final File finalizedDir = new File(
          currentDir, DataStorage.STORAGE_DIR_FINALIZED);
File
FSDataset.java
Developer's decision
Version 2
Kind of conflict
Attribute
Comment
Method invocation
Method signature
Variable
Chunk
Conflicting content
    FSVolumeSet(FSVolume[] volumes, BlockVolumeChoosingPolicy blockChooser) {
    private final DF usage;           
    private final long reserved;
    
<<<<<<< HEAD
  static class FSVolumeSet {
    FSVolume[] volumes = null;
    BlockVolumeChoosingPolicy blockChooser;
      
      this.volumes = volumes;
      this.blockChooser = blockChooser;
=======
    FSVolume(File currentDir, Configuration conf) throws IOException {
      this.reserved = conf.getLong(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY,
                                   DFSConfigKeys.DFS_DATANODE_DU_RESERVED_DEFAULT);
      this.currentDir = currentDir; 
      File parent = currentDir.getParentFile();
      this.usage = new DF(parent, conf);
    }

    /** Return storage directory corresponding to the volume */
    public File getDir() {
      return currentDir.getParentFile();
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    }
    
    public File getCurrentDir() {
Solution content
    private final DF usage;           
    private final long reserved;
    
    FSVolume(File currentDir, Configuration conf) throws IOException {
      this.reserved = conf.getLong(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY,
                                   DFSConfigKeys.DFS_DATANODE_DU_RESERVED_DEFAULT);
      this.currentDir = currentDir; 
      File parent = currentDir.getParentFile();
      this.usage = new DF(parent, conf);
    }

    /** Return storage directory corresponding to the volume */
    public File getDir() {
      return currentDir.getParentFile();
    }
    
    public File getCurrentDir() {
File
FSDataset.java
Developer's decision
Version 2
Kind of conflict
Attribute
Class signature
Comment
Method declaration
Method invocation
Method signature
Return statement
Chunk
Conflicting content
      }
      return remaining > 0 ? remaining : 0;
    }
      
<<<<<<< HEAD
    synchronized FSVolume getNextVolume(long blockSize) throws IOException {
      return blockChooser.chooseVolume(volumes, blockSize);
=======
    long getAvailable() throws IOException {
      long remaining = getCapacity()-getDfsUsed();
      long available = usage.getAvailable();
      if (remaining>available) {
        remaining = available;
      }
      return (remaining > 0) ? remaining : 0;
    }
      
    long getReserved(){
      return reserved;
    }
    
    String getMount() throws IOException {
      return usage.getMount();
    }
    
    BlockPoolSlice getBlockPoolSlice(String bpid) throws IOException {
      BlockPoolSlice bp = map.get(bpid);
      if (bp == null) {
        throw new IOException("block pool " + bpid + " is not found");
      }
      return bp;
    }
    
    /**
     * Make a deep copy of the list of currently active BPIDs
     */
    String[] getBlockPoolList() {
      synchronized(FSDataset.this) {
        return map.keySet().toArray(new String[map.keySet().size()]);   
      }
    }
      
    /**
     * Temporary files. They get moved to the finalized block directory when
     * the block is finalized.
     */
    File createTmpFile(String bpid, Block b) throws IOException {
      BlockPoolSlice bp = getBlockPoolSlice(bpid);
      return bp.createTmpFile(b);
    }

    /**
     * RBW files. They get moved to the finalized block directory when
     * the block is finalized.
     */
    File createRbwFile(String bpid, Block b) throws IOException {
      BlockPoolSlice bp = getBlockPoolSlice(bpid);
      return bp.createRbwFile(b);
    }

    File addBlock(String bpid, Block b, File f) throws IOException {
      BlockPoolSlice bp = getBlockPoolSlice(bpid);
      return bp.addBlock(b, f);
    }
      
    void checkDirs() throws DiskErrorException {
      // TODO:FEDERATION valid synchronization
      Set<Entry<String, BlockPoolSlice>> set = map.entrySet();
      for (Entry<String, BlockPoolSlice> entry : set) {
        entry.getValue().checkDirs();
    /**
        }
    }
      
    void getVolumeMap(ReplicasMap volumeMap) throws IOException {
      Set<Entry<String, BlockPoolSlice>> set = map.entrySet();
      for (Entry<String, BlockPoolSlice> entry : set) {
        entry.getValue().getVolumeMap(volumeMap);
      }
    }
    
    void getVolumeMap(String bpid, ReplicasMap volumeMap) throws IOException {
      BlockPoolSlice bp = getBlockPoolSlice(bpid);
      bp.getVolumeMap(volumeMap);
    }
    
     * Add replicas under the given directory to the volume map
     * @param volumeMap the replicas map
     * @param dir an input directory
     * @param isFinalized true if the directory has finalized replicas;
     *                    false if the directory has rbw replicas
     * @throws IOException 
     */
    private void addToReplicasMap(String bpid, ReplicasMap volumeMap, 
        File dir, boolean isFinalized) throws IOException {
      BlockPoolSlice bp = getBlockPoolSlice(bpid);
      // TODO move this up
      // dfsUsage.incDfsUsed(b.getNumBytes()+metaFile.length());
      bp.addToReplicasMap(volumeMap, dir, isFinalized);
    }
    
    void clearPath(String bpid, File f) throws IOException {
      BlockPoolSlice bp = getBlockPoolSlice(bpid);
      bp.clearPath(f);
    }
      
    public String toString() {
      return currentDir.getAbsolutePath();
    }

    public void shutdown() {
      Set<Entry<String, BlockPoolSlice>> set = map.entrySet();
      for (Entry<String, BlockPoolSlice> entry : set) {
        entry.getValue().shutdown();
      }
    }

    public void addBlockPool(String bpid, Configuration conf)
        throws IOException {
      File bpdir = new File(currentDir, bpid);
      BlockPoolSlice bp = new BlockPoolSlice(bpid, this, bpdir, conf);
      map.put(bpid, bp);
    }
    
    public void shutdownBlockPool(String bpid) {
      BlockPoolSlice bp = map.get(bpid);
      if (bp!=null) {
        bp.shutdown();
      }
      map.remove(bpid);
    }

    private boolean isBPDirEmpty(String bpid)
        throws IOException {
      File volumeCurrentDir = this.getCurrentDir();
      File bpDir = new File(volumeCurrentDir, bpid);
      File bpCurrentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
      File finalizedDir = new File(bpCurrentDir,
          DataStorage.STORAGE_DIR_FINALIZED);
      File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);
      if (finalizedDir.exists() && finalizedDir.list().length != 0) {
        return false;
      }
      if (rbwDir.exists() && rbwDir.list().length != 0) {
        return false;
      }
      return true;
    }
    
    private void deleteBPDirectories(String bpid, boolean force)
        throws IOException {
      File volumeCurrentDir = this.getCurrentDir();
      File bpDir = new File(volumeCurrentDir, bpid);
      if (!bpDir.isDirectory()) {
        // nothing to be deleted
        return;
      }
      File tmpDir = new File(bpDir, DataStorage.STORAGE_DIR_TMP);
      File bpCurrentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
      File finalizedDir = new File(bpCurrentDir,
          DataStorage.STORAGE_DIR_FINALIZED);
      File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);
      if (force) {
        FileUtil.fullyDelete(bpDir);
      } else {
        if (!rbwDir.delete()) {
          throw new IOException("Failed to delete " + rbwDir);
        }
        if (!finalizedDir.delete()) {
          throw new IOException("Failed to delete " + finalizedDir);
        }
        FileUtil.fullyDelete(tmpDir);
        for (File f : bpCurrentDir.listFiles()) {
          if (!f.delete()) {
            throw new IOException("Failed to delete " + f);
          }
        }
        if (!bpCurrentDir.delete()) {
          throw new IOException("Failed to delete " + bpCurrentDir);
        }
        for (File f : bpDir.listFiles()) {
          if (!f.delete()) {
            throw new IOException("Failed to delete " + f);
          }
        if (!bpDir.delete()) {
          throw new IOException("Failed to delete " + bpDir);
        }
      }
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    }
  }
    
Solution content
      return remaining > 0 ? remaining : 0;
    }
      
    long getAvailable() throws IOException {
      long remaining = getCapacity()-getDfsUsed();
      long available = usage.getAvailable();
      if (remaining>available) {
        remaining = available;
      }
      return (remaining > 0) ? remaining : 0;
    }
      
    long getReserved(){
      return reserved;
    }
    
    String getMount() throws IOException {
      return usage.getMount();
    }
    
    BlockPoolSlice getBlockPoolSlice(String bpid) throws IOException {
      BlockPoolSlice bp = map.get(bpid);
      if (bp == null) {
        throw new IOException("block pool " + bpid + " is not found");
      }
      return bp;
    }
    
    /**
     * Make a deep copy of the list of currently active BPIDs
     */
    String[] getBlockPoolList() {
      synchronized(FSDataset.this) {
        return map.keySet().toArray(new String[map.keySet().size()]);   
      }
    }
      
    /**
     * Temporary files. They get moved to the finalized block directory when
     * the block is finalized.
     */
    File createTmpFile(String bpid, Block b) throws IOException {
      BlockPoolSlice bp = getBlockPoolSlice(bpid);
      return bp.createTmpFile(b);
    }

    /**
     * RBW files. They get moved to the finalized block directory when
     * the block is finalized.
     */
    File createRbwFile(String bpid, Block b) throws IOException {
      BlockPoolSlice bp = getBlockPoolSlice(bpid);
      return bp.createRbwFile(b);
    }

    File addBlock(String bpid, Block b, File f) throws IOException {
      BlockPoolSlice bp = getBlockPoolSlice(bpid);
      return bp.addBlock(b, f);
    }
      
    void checkDirs() throws DiskErrorException {
      // TODO:FEDERATION valid synchronization
      Set<Entry<String, BlockPoolSlice>> set = map.entrySet();
      for (Entry<String, BlockPoolSlice> entry : set) {
        entry.getValue().checkDirs();
      }
    }
      
    void getVolumeMap(ReplicasMap volumeMap) throws IOException {
      Set<Entry<String, BlockPoolSlice>> set = map.entrySet();
      for (Entry<String, BlockPoolSlice> entry : set) {
        entry.getValue().getVolumeMap(volumeMap);
      }
    }
    
    void getVolumeMap(String bpid, ReplicasMap volumeMap) throws IOException {
      BlockPoolSlice bp = getBlockPoolSlice(bpid);
      bp.getVolumeMap(volumeMap);
    }
    
    /**
     * Add replicas under the given directory to the volume map
     * @param volumeMap the replicas map
     * @param dir an input directory
     * @param isFinalized true if the directory has finalized replicas;
     *                    false if the directory has rbw replicas
     * @throws IOException 
     */
    private void addToReplicasMap(String bpid, ReplicasMap volumeMap, 
        File dir, boolean isFinalized) throws IOException {
      BlockPoolSlice bp = getBlockPoolSlice(bpid);
      // TODO move this up
      // dfsUsage.incDfsUsed(b.getNumBytes()+metaFile.length());
      bp.addToReplicasMap(volumeMap, dir, isFinalized);
    }
    
    void clearPath(String bpid, File f) throws IOException {
      BlockPoolSlice bp = getBlockPoolSlice(bpid);
      bp.clearPath(f);
    }
      
    public String toString() {
      return currentDir.getAbsolutePath();
    }

    public void shutdown() {
      Set<Entry<String, BlockPoolSlice>> set = map.entrySet();
      for (Entry<String, BlockPoolSlice> entry : set) {
        entry.getValue().shutdown();
      }
    }

    public void addBlockPool(String bpid, Configuration conf)
        throws IOException {
      File bpdir = new File(currentDir, bpid);
      BlockPoolSlice bp = new BlockPoolSlice(bpid, this, bpdir, conf);
      map.put(bpid, bp);
    }
    
    public void shutdownBlockPool(String bpid) {
      BlockPoolSlice bp = map.get(bpid);
      if (bp!=null) {
        bp.shutdown();
      }
      map.remove(bpid);
    }

    private boolean isBPDirEmpty(String bpid)
        throws IOException {
      File volumeCurrentDir = this.getCurrentDir();
      File bpDir = new File(volumeCurrentDir, bpid);
      File bpCurrentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
      File finalizedDir = new File(bpCurrentDir,
          DataStorage.STORAGE_DIR_FINALIZED);
      File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);
      if (finalizedDir.exists() && finalizedDir.list().length != 0) {
        return false;
      }
      if (rbwDir.exists() && rbwDir.list().length != 0) {
        return false;
      }
      return true;
    }
    
    private void deleteBPDirectories(String bpid, boolean force)
        throws IOException {
      File volumeCurrentDir = this.getCurrentDir();
      File bpDir = new File(volumeCurrentDir, bpid);
      if (!bpDir.isDirectory()) {
        // nothing to be deleted
        return;
      }
      File tmpDir = new File(bpDir, DataStorage.STORAGE_DIR_TMP);
      File bpCurrentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
      File finalizedDir = new File(bpCurrentDir,
          DataStorage.STORAGE_DIR_FINALIZED);
      File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);
      if (force) {
        FileUtil.fullyDelete(bpDir);
      } else {
        if (!rbwDir.delete()) {
          throw new IOException("Failed to delete " + rbwDir);
        }
        if (!finalizedDir.delete()) {
          throw new IOException("Failed to delete " + finalizedDir);
        }
        FileUtil.fullyDelete(tmpDir);
        for (File f : bpCurrentDir.listFiles()) {
          if (!f.delete()) {
            throw new IOException("Failed to delete " + f);
          }
        }
        if (!bpCurrentDir.delete()) {
          throw new IOException("Failed to delete " + bpCurrentDir);
        }
        for (File f : bpDir.listFiles()) {
          if (!f.delete()) {
            throw new IOException("Failed to delete " + f);
          }
        }
        if (!bpDir.delete()) {
          throw new IOException("Failed to delete " + bpDir);
        }
      }
    }
  }
    
File
FSDataset.java
Developer's decision
Version 2
Kind of conflict
Comment
If statement
Method declaration
Method invocation
Method signature
Return statement
Variable
Chunk
Conflicting content
    /**
     * Calls {@link FSVolume#checkDirs()} on each volume, removing any
     * volumes from the active list that result in a DiskErrorException.
<<<<<<< HEAD
     * @return list of all the removed volumes.
     */
    synchronized List<FSVolume> checkDirs() {
      ArrayList<FSVolume> removedVols = null;  
=======
     * 
     * This method is synchronized to allow only one instance of checkDirs() 
     * call
     * @return list of all the removed volumes.
     */
    private synchronized List<FSVolume> checkDirs() {
      ArrayList<FSVolume> removedVols = null;
      
      // Make a copy of volumes for performing modification 
      List<FSVolume> volumeList = new ArrayList<FSVolume>(getVolumes());
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      
      for (int idx = 0; idx < volumeList.size(); idx++) {
        FSVolume fsv = volumeList.get(idx);
Solution content
    /**
     * Calls {@link FSVolume#checkDirs()} on each volume, removing any
     * volumes from the active list that result in a DiskErrorException.
     * 
     * This method is synchronized to allow only one instance of checkDirs() 
     * call
     * @return list of all the removed volumes.
     */
    private synchronized List<FSVolume> checkDirs() {
      ArrayList<FSVolume> removedVols = null;
      
      // Make a copy of volumes for performing modification 
      List<FSVolume> volumeList = new ArrayList<FSVolume>(getVolumes());
      
      for (int idx = 0; idx < volumeList.size(); idx++) {
        FSVolume fsv = volumeList.get(idx);
File
FSDataset.java
Developer's decision
Version 2
Kind of conflict
Comment
Method invocation
Method signature
Variable
Chunk
Conflicting content
          if (removedVols == null) {
            removedVols = new ArrayList<FSVolume>(1);
          }
<<<<<<< HEAD
          removedVols.add(volumes[idx]);
          volumes[idx] = null; // Remove the volume
=======
          removedVols.add(volumeList.get(idx));
          volumeList.set(idx, null); // Remove the volume
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
        }
      }
      
Solution content
          if (removedVols == null) {
            removedVols = new ArrayList<FSVolume>(1);
          }
          removedVols.add(volumeList.get(idx));
          volumeList.set(idx, null); // Remove the volume
        }
      }
      
File
FSDataset.java
Developer's decision
Version 2
Kind of conflict
Array access
Comment
Method invocation
Chunk
Conflicting content
      
      // Remove null volumes from the volumes array
      if (removedVols != null && removedVols.size() > 0) {
<<<<<<< HEAD
        FSVolume newVols[] = new FSVolume[volumes.length - removedVols.size()];
        int i = 0;
        for (FSVolume vol : volumes) {
          if (vol != null) {
            newVols[i++] = vol;
          }
        }
        volumes = newVols; // Replace array of volumes
=======
        List<FSVolume> newVols = new ArrayList<FSVolume>();
        for (FSVolume vol : volumeList) {
          if (vol != null) {
            newVols.add(vol);
          }
        }
        volumes = Collections.unmodifiableList(newVols); // Replace volume list
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
        DataNode.LOG.info("Completed FSVolumeSet.checkDirs. Removed "
            + removedVols.size() + " volumes. List of current volumes: "
            + this);
Solution content
      
      // Remove null volumes from the volumes array
      if (removedVols != null && removedVols.size() > 0) {
        List<FSVolume> newVols = new ArrayList<FSVolume>();
        for (FSVolume vol : volumeList) {
          if (vol != null) {
            newVols.add(vol);
          }
        }
        volumes = Collections.unmodifiableList(newVols); // Replace volume list
        DataNode.LOG.info("Completed FSVolumeSet.checkDirs. Removed "
            + removedVols.size() + " volumes. List of current volumes: "
            + this);
File
FSDataset.java
Developer's decision
Version 2
Kind of conflict
Attribute
Comment
For statement
Method invocation
Variable
Chunk
Conflicting content
    }
    FSVolume[] volArray = new FSVolume[storage.getNumStorageDirs()];
    for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
<<<<<<< HEAD
      volArray[idx] = new FSVolume(storage.getStorageDir(idx).getCurrentDir(), conf);
    }
=======
      volArray[idx] = new FSVolume(storage.getStorageDir(idx).getCurrentDir(),
          conf);
      DataNode.LOG.info("FSDataset added volume - "
          + storage.getStorageDir(idx).getCurrentDir());
    }
    volumeMap = new ReplicasMap(this);

>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    BlockVolumeChoosingPolicy blockChooserImpl =
      (BlockVolumeChoosingPolicy) ReflectionUtils.newInstance(
        conf.getClass(DFSConfigKeys.DFS_DATANODE_BLOCKVOLUMECHOICEPOLICY,
Solution content
    }
    FSVolume[] volArray = new FSVolume[storage.getNumStorageDirs()];
    for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
      volArray[idx] = new FSVolume(storage.getStorageDir(idx).getCurrentDir(),
          conf);
      DataNode.LOG.info("FSDataset added volume - "
          + storage.getStorageDir(idx).getCurrentDir());
    }
    volumeMap = new ReplicasMap(this);

    BlockVolumeChoosingPolicy blockChooserImpl =
      (BlockVolumeChoosingPolicy) ReflectionUtils.newInstance(
        conf.getClass(DFSConfigKeys.DFS_DATANODE_BLOCKVOLUMECHOICEPOLICY,
File
FSDataset.java
Developer's decision
Version 2
Kind of conflict
Array access
Method invocation
Variable
Chunk
Conflicting content
  
  @Override // FSDatasetInterface
  public synchronized ReplicaInPipelineInterface convertTemporaryToRbw(
<<<<<<< HEAD
      final Block b) throws IOException {
=======
      final ExtendedBlock b) throws IOException {
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    final long blockId = b.getBlockId();
    final long expectedGs = b.getGenerationStamp();
    final long visible = b.getNumBytes();
Solution content
  
  @Override // FSDatasetInterface
  public synchronized ReplicaInPipelineInterface convertTemporaryToRbw(
      final ExtendedBlock b) throws IOException {
    final long blockId = b.getBlockId();
    final long expectedGs = b.getGenerationStamp();
    final long visible = b.getNumBytes();
File
FSDataset.java
Developer's decision
Version 2
Kind of conflict
Variable
Chunk
Conflicting content
    final ReplicaInPipeline temp;
    {
      // get replica
<<<<<<< HEAD
      final ReplicaInfo r = volumeMap.get(blockId);
=======
      final ReplicaInfo r = volumeMap.get(b.getBlockPoolId(), blockId);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      if (r == null) {
        throw new ReplicaNotFoundException(
            ReplicaNotFoundException.NON_EXISTENT_REPLICA + b);
Solution content
    final ReplicaInPipeline temp;
    {
      // get replica
      final ReplicaInfo r = volumeMap.get(b.getBlockPoolId(), blockId);
      if (r == null) {
        throw new ReplicaNotFoundException(
            ReplicaNotFoundException.NON_EXISTENT_REPLICA + b);
File
FSDataset.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
    }
    
    // move block files to the rbw directory
<<<<<<< HEAD
    final File dest = moveBlockFiles(b, temp.getBlockFile(), v.rbwDir);
=======
    BlockPoolSlice bpslice = v.getBlockPoolSlice(b.getBlockPoolId());
    final File dest = moveBlockFiles(b.getLocalBlock(), temp.getBlockFile(), 
        bpslice.getRbwDir());
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    // create RBW
    final ReplicaBeingWritten rbw = new ReplicaBeingWritten(
        blockId, numBytes, expectedGs,
Solution content
    }
    
    // move block files to the rbw directory
    BlockPoolSlice bpslice = v.getBlockPoolSlice(b.getBlockPoolId());
    final File dest = moveBlockFiles(b.getLocalBlock(), temp.getBlockFile(), 
        bpslice.getRbwDir());
    // create RBW
    final ReplicaBeingWritten rbw = new ReplicaBeingWritten(
        blockId, numBytes, expectedGs,
File
FSDataset.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
        v, dest.getParentFile(), Thread.currentThread());
    rbw.setBytesAcked(visible);
    // overwrite the RBW in the volume map
<<<<<<< HEAD
    volumeMap.add(rbw);
=======
    volumeMap.add(b.getBlockPoolId(), rbw);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    return rbw;
  }
Solution content
        v, dest.getParentFile(), Thread.currentThread());
    rbw.setBytesAcked(visible);
    // overwrite the RBW in the volume map
    volumeMap.add(b.getBlockPoolId(), rbw);
    return rbw;
  }
File
FSDataset.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Chunk
Conflicting content
  }

  @Override // FSDatasetInterface
<<<<<<< HEAD
  public synchronized ReplicaInPipelineInterface createTemporary(Block b)
=======
  public synchronized ReplicaInPipelineInterface createTemporary(ExtendedBlock b)
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      throws IOException {
    ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), b.getBlockId());
    if (replicaInfo != null) {
Solution content
  }

  @Override // FSDatasetInterface
  public synchronized ReplicaInPipelineInterface createTemporary(ExtendedBlock b)
      throws IOException {
    ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), b.getBlockId());
    if (replicaInfo != null) {
File
FSDataset.java
Developer's decision
Version 2
Kind of conflict
Method signature
Chunk
Conflicting content
   * valid means finalized
   */
  @Override // FSDatasetInterface
<<<<<<< HEAD
  public boolean isValidBlock(Block b) {
=======
  public boolean isValidBlock(ExtendedBlock b) {
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    return isValid(b, ReplicaState.FINALIZED);
  }
Solution content
   * valid means finalized
   */
  @Override // FSDatasetInterface
  public boolean isValidBlock(ExtendedBlock b) {
    return isValid(b, ReplicaState.FINALIZED);
  }
File
FSDataset.java
Developer's decision
Version 2
Kind of conflict
Method signature
Chunk
Conflicting content
   * Check whether the given block is a valid RBW.
   */
  @Override // {@link FSDatasetInterface}
<<<<<<< HEAD
  public boolean isValidRbw(final Block b) {
=======
  public boolean isValidRbw(final ExtendedBlock b) {
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    return isValid(b, ReplicaState.RBW);
  }
Solution content
   * Check whether the given block is a valid RBW.
   */
  @Override // {@link FSDatasetInterface}
  public boolean isValidRbw(final ExtendedBlock b) {
    return isValid(b, ReplicaState.RBW);
  }
File
FSDataset.java
Developer's decision
Version 2
Kind of conflict
Method signature
Chunk
Conflicting content
  }

  /** Does the block exist and have the given state? */
<<<<<<< HEAD
  private boolean isValid(final Block b, final ReplicaState state) {
    final ReplicaInfo replicaInfo = volumeMap.get(b);
=======
  private boolean isValid(final ExtendedBlock b, final ReplicaState state) {
    final ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), 
        b.getLocalBlock());
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    return replicaInfo != null
        && replicaInfo.getState() == state
        && replicaInfo.getBlockFile().exists();
Solution content
  }

  /** Does the block exist and have the given state? */
  private boolean isValid(final ExtendedBlock b, final ReplicaState state) {
    final ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), 
        b.getLocalBlock());
    return replicaInfo != null
        && replicaInfo.getState() == state
        && replicaInfo.getBlockFile().exists();
File
FSDataset.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Method signature
Variable
Chunk
Conflicting content
    // Otherwise remove blocks for the failed volumes
    long mlsec = System.currentTimeMillis();
    synchronized (this) {
<<<<<<< HEAD
      Iterator<ReplicaInfo> ib = volumeMap.replicas().iterator();
      while (ib.hasNext()) {
        ReplicaInfo b = ib.next();
        totalBlocks++;
        // check if the volume block belongs to still valid
        FSVolume vol = b.getVolume();
        for (FSVolume fv: failedVols) {
          if (vol == fv) {
            DataNode.LOG.warn("Removing replica info for block " + 
              b.getBlockId() + " on failed volume " + 
              vol.dataDir.dir.getAbsolutePath());
            ib.remove();
            removedBlocks++;
            break;
=======
      for (FSVolume fv: failedVols) {
        for (String bpid : fv.map.keySet()) {
          Iterator<ReplicaInfo> ib = volumeMap.replicas(bpid).iterator();
          while(ib.hasNext()) {
            ReplicaInfo b = ib.next();
            totalBlocks++;
            // check if the volume block belongs to still valid
            if(b.getVolume() == fv) {
              DataNode.LOG.warn("Removing replica " + bpid + ":" + b.getBlockId()
                  + " on failed volume " + fv.currentDir.getAbsolutePath());
              ib.remove();
              removedBlocks++;
            }
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
          }
        }
      }
Solution content
    // Otherwise remove blocks for the failed volumes
    long mlsec = System.currentTimeMillis();
    synchronized (this) {
      for (FSVolume fv: failedVols) {
        for (String bpid : fv.map.keySet()) {
          Iterator<ReplicaInfo> ib = volumeMap.replicas(bpid).iterator();
          while(ib.hasNext()) {
            ReplicaInfo b = ib.next();
            totalBlocks++;
            // check if the volume block belongs to still valid
            if(b.getVolume() == fv) {
              DataNode.LOG.warn("Removing replica " + bpid + ":" + b.getBlockId()
                  + " on failed volume " + fv.currentDir.getAbsolutePath());
              ib.remove();
              removedBlocks++;
            }
          }
        }
      }
File
FSDataset.java
Developer's decision
Version 2
Kind of conflict
Break statement
Comment
For statement
If statement
Method invocation
Variable
While statement
Chunk
Conflicting content
    // report the error
    StringBuilder sb = new StringBuilder();
    for (FSVolume fv : failedVols) {
<<<<<<< HEAD
      sb.append(fv.dataDir.dir.getAbsolutePath() + ";");
=======
      sb.append(fv.currentDir.getAbsolutePath() + ";");
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    }

    throw  new DiskErrorException("DataNode failed volumes:" + sb);
Solution content
    // report the error
    StringBuilder sb = new StringBuilder();
    for (FSVolume fv : failedVols) {
      sb.append(fv.currentDir.getAbsolutePath() + ";");
    }

    throw  new DiskErrorException("DataNode failed volumes:" + sb);
File
FSDataset.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Chunk
Conflicting content
   * @return the result RBW
   */
  public ReplicaInPipelineInterface convertTemporaryToRbw(
<<<<<<< HEAD
      Block temporary) throws IOException;
=======
      ExtendedBlock temporary) throws IOException;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0

  /**
   * Append to a finalized replica and returns the meta info of the replica
Solution content
   * @return the result RBW
   */
  public ReplicaInPipelineInterface convertTemporaryToRbw(
      ExtendedBlock temporary) throws IOException;

  /**
   * Append to a finalized replica and returns the meta info of the replica
File
FSDatasetInterface.java
Developer's decision
Version 2
Kind of conflict
Variable
Chunk
Conflicting content
      return storedBlock;
    }

<<<<<<< HEAD
    // do not try to handle over/under-replicated blocks during safe mode
    if (!namesystem.isPopulatingReplQueues()) {
=======
    // do not handle mis-replicated blocks during start up
    if (!namesystem.isPopulatingReplQueues())
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      return storedBlock;
    }
Solution content
      return storedBlock;
    }

    // do not try to handle over/under-replicated blocks during safe mode
    if (!namesystem.isPopulatingReplQueues()) {
      return storedBlock;
    }
File
BlockManager.java
Developer's decision
Version 1
Kind of conflict
Comment
If statement
Chunk
Conflicting content
  long editsTime = -1L;
  long checkpointTime = -1L;
  MD5Hash imageDigest = null;
<<<<<<< HEAD
=======
  String blockpoolID = "";
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0

  public CheckpointSignature() {}
Solution content
  long editsTime = -1L;
  long checkpointTime = -1L;
  MD5Hash imageDigest = null;
  String blockpoolID = "";

  public CheckpointSignature() {}
File
CheckpointSignature.java
Developer's decision
Version 2
Kind of conflict
Attribute
Chunk
Conflicting content
  CheckpointSignature(FSImage fsImage) {
    super(fsImage.getStorage());
<<<<<<< HEAD
    editsTime = fsImage.getEditLog().getFsEditTime();
    checkpointTime = fsImage.getStorage().getCheckpointTime();
    imageDigest = fsImage.getStorage().getImageDigest();
=======
    blockpoolID = fsImage.getBlockPoolID();
    editsTime = fsImage.getEditLog().getFsEditTime();
    checkpointTime = fsImage.getStorage().getCheckpointTime();
    imageDigest = fsImage.getStorage().getImageDigest();
    checkpointTime = fsImage.getStorage().getCheckpointTime();
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
  }

  CheckpointSignature(String str) {
Solution content
  CheckpointSignature(FSImage fsImage) {
    super(fsImage.getStorage());
    blockpoolID = fsImage.getBlockPoolID();
    editsTime = fsImage.getEditLog().getFsEditTime();
    checkpointTime = fsImage.getStorage().getCheckpointTime();
    imageDigest = fsImage.getStorage().getImageDigest();
    checkpointTime = fsImage.getStorage().getCheckpointTime();
  }

  CheckpointSignature(String str) {
File
CheckpointSignature.java
Developer's decision
Version 2
Kind of conflict
Attribute
Method invocation
Chunk
Conflicting content
  CheckpointSignature(String str) {
    String[] fields = str.split(FIELD_SEPARATOR);
<<<<<<< HEAD
    assert fields.length == 6 : "Must be 6 fields in CheckpointSignature";
=======
    assert fields.length == 8 : "Must be 8 fields in CheckpointSignature";
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    layoutVersion = Integer.valueOf(fields[0]);
    namespaceID = Integer.valueOf(fields[1]);
    cTime = Long.valueOf(fields[2]);
Solution content
  CheckpointSignature(String str) {
    String[] fields = str.split(FIELD_SEPARATOR);
    assert fields.length == 8 : "Must be 8 fields in CheckpointSignature";
    layoutVersion = Integer.valueOf(fields[0]);
    namespaceID = Integer.valueOf(fields[1]);
    cTime = Long.valueOf(fields[2]);
File
CheckpointSignature.java
Developer's decision
Version 2
Kind of conflict
Assert statement
Chunk
Conflicting content
    editsTime = Long.valueOf(fields[3]);
    checkpointTime = Long.valueOf(fields[4]);
    imageDigest = new MD5Hash(fields[5]);
<<<<<<< HEAD
  }

  /**
   * Get the MD5 image digest
   * @return the MD5 image digest
   */
  MD5Hash getImageDigest() {
    return imageDigest;
=======
    clusterID = fields[6];
    blockpoolID = fields[7];
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
  }

  /**
Solution content
    editsTime = Long.valueOf(fields[3]);
    checkpointTime = Long.valueOf(fields[4]);
    imageDigest = new MD5Hash(fields[5]);
    clusterID = fields[6];
    blockpoolID = fields[7];
  }
  /**
File
CheckpointSignature.java
Developer's decision
Version 2
Kind of conflict
Array access
Attribute
Comment
Method signature
Return statement
Chunk
Conflicting content
         + String.valueOf(cTime) + FIELD_SEPARATOR
         + String.valueOf(editsTime) + FIELD_SEPARATOR
         + String.valueOf(checkpointTime) + FIELD_SEPARATOR
<<<<<<< HEAD
         +  imageDigest.toString();
  }

  void validateStorageInfo(FSImage si) throws IOException {
    if(layoutVersion != si.getStorage().layoutVersion
       || namespaceID != si.getStorage().namespaceID 
       || cTime != si.getStorage().cTime
       || checkpointTime != si.getStorage().getCheckpointTime() ||
       !imageDigest.equals(si.getStorage().getImageDigest())) {
=======
         + imageDigest.toString() + FIELD_SEPARATOR
         + clusterID + FIELD_SEPARATOR
         + blockpoolID ;
  }

  void validateStorageInfo(FSImage si) throws IOException {
    if(layoutVersion != si.getLayoutVersion()
        || namespaceID != si.getNamespaceID() 
        || cTime != si.getStorage().cTime
        || checkpointTime != si.getStorage().getCheckpointTime() 
        || !imageDigest.equals(si.getStorage().imageDigest)
        || !clusterID.equals(si.getClusterID())
        || !blockpoolID.equals(si.getBlockPoolID())) {
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      // checkpointTime can change when the image is saved - do not compare
      throw new IOException("Inconsistent checkpoint fields.\n"
          + "LV = " + layoutVersion + " namespaceID = " + namespaceID
Solution content
         + String.valueOf(cTime) + FIELD_SEPARATOR
         + String.valueOf(editsTime) + FIELD_SEPARATOR
         + String.valueOf(checkpointTime) + FIELD_SEPARATOR
         + imageDigest.toString() + FIELD_SEPARATOR
         + clusterID + FIELD_SEPARATOR
         + blockpoolID ;
  }

  void validateStorageInfo(FSImage si) throws IOException {
    if(layoutVersion != si.getLayoutVersion()
        || namespaceID != si.getNamespaceID() 
        || cTime != si.getStorage().cTime
        || checkpointTime != si.getStorage().getCheckpointTime() 
        || !imageDigest.equals(si.getStorage().imageDigest)
        || !clusterID.equals(si.getClusterID())
        || !blockpoolID.equals(si.getBlockPoolID())) {
      // checkpointTime can change when the image is saved - do not compare
      throw new IOException("Inconsistent checkpoint fields.\n"
          + "LV = " + layoutVersion + " namespaceID = " + namespaceID
File
CheckpointSignature.java
Developer's decision
Version 2
Kind of conflict
Attribute
If statement
Method invocation
Method signature
Chunk
Conflicting content
          + "LV = " + layoutVersion + " namespaceID = " + namespaceID
          + " cTime = " + cTime + "; checkpointTime = " + checkpointTime
          + " ; imageDigest = " + imageDigest
<<<<<<< HEAD
          + ".\nExpecting respectively: "
          + si.getStorage().layoutVersion + "; " 
          + si.getStorage().namespaceID + "; " + si.getStorage().cTime
          + "; " + si.getStorage().getCheckpointTime() + "; " 
          + si.getStorage().getImageDigest());
=======
          + " ; clusterId = " + clusterID
          + " ; blockpoolId = " + blockpoolID
          + ".\nExpecting respectively: "
          + si.getLayoutVersion() + "; " 
          + si.getNamespaceID() + "; " + si.getStorage().cTime
          + "; " + si.getStorage().getCheckpointTime() + "; " 
          + si.getStorage().imageDigest
          + "; " + si.getClusterID() + "; " 
          + si.getBlockPoolID() + ".");
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    }
  }
Solution content
          + "LV = " + layoutVersion + " namespaceID = " + namespaceID
          + " cTime = " + cTime + "; checkpointTime = " + checkpointTime
          + " ; imageDigest = " + imageDigest
          + " ; clusterId = " + clusterID
          + " ; blockpoolId = " + blockpoolID
          + ".\nExpecting respectively: "
          + si.getLayoutVersion() + "; " 
          + si.getNamespaceID() + "; " + si.getStorage().cTime
          + "; " + si.getStorage().getCheckpointTime() + "; " 
          + si.getStorage().imageDigest
          + "; " + si.getClusterID() + "; " 
          + si.getBlockPoolID() + ".");
    }
  }
File
CheckpointSignature.java
Developer's decision
Version 2
Kind of conflict
Throw statement
Chunk
Conflicting content
      (editsTime < o.editsTime) ? -1 : (editsTime > o.editsTime) ? 1 :
      (checkpointTime < o.checkpointTime) ? -1 : 
                  (checkpointTime > o.checkpointTime) ? 1 :
<<<<<<< HEAD
=======
      (clusterID.compareTo(o.clusterID) < 0) ? -1 : 
                  (clusterID.compareTo(o.clusterID) > 0) ? 1 :
      (blockpoolID.compareTo(o.blockpoolID) < 0) ? -1 : 
                  (blockpoolID.compareTo(o.blockpoolID) > 0) ? 1 :
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
                    imageDigest.compareTo(o.imageDigest);
  }
Solution content
      (editsTime < o.editsTime) ? -1 : (editsTime > o.editsTime) ? 1 :
      (checkpointTime < o.checkpointTime) ? -1 : 
                  (checkpointTime > o.checkpointTime) ? 1 :
      (clusterID.compareTo(o.clusterID) < 0) ? -1 : 
                  (clusterID.compareTo(o.clusterID) > 0) ? 1 :
      (blockpoolID.compareTo(o.blockpoolID) < 0) ? -1 : 
                  (blockpoolID.compareTo(o.blockpoolID) > 0) ? 1 :
                    imageDigest.compareTo(o.imageDigest);
  }
File
CheckpointSignature.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Chunk
Conflicting content
<<<<<<< HEAD
  public int hashCode() {
    return layoutVersion ^ namespaceID ^
            (int)(cTime ^ editsTime ^ checkpointTime) ^
            imageDigest.hashCode();
=======
            imageDigest.hashCode() ^ clusterID.hashCode()
            ^ blockpoolID.hashCode();
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
  }

  /////////////////////////////////////////////////
Solution content
  public int hashCode() {
    return layoutVersion ^ namespaceID ^
            (int)(cTime ^ editsTime ^ checkpointTime) ^
            imageDigest.hashCode() ^ clusterID.hashCode()
            ^ blockpoolID.hashCode();
  }

  /////////////////////////////////////////////////
File
CheckpointSignature.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Chunk
Conflicting content
    }

    BackupImage bnImage = getFSImage();
<<<<<<< HEAD
=======
    bnImage.getStorage().setBlockPoolID(backupNode.getBlockPoolId());
    bnImage.getStorage().setClusterID(backupNode.getClusterId());
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    bnImage.loadCheckpoint(sig);
    sig.validateStorageInfo(bnImage);
    bnImage.saveCheckpoint();
Solution content
    }

    BackupImage bnImage = getFSImage();
    bnImage.getStorage().setBlockPoolID(backupNode.getBlockPoolId());
    bnImage.getStorage().setClusterID(backupNode.getClusterId());
    bnImage.loadCheckpoint(sig);
    sig.validateStorageInfo(bnImage);
    bnImage.saveCheckpoint();
File
Checkpointer.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Chunk
Conflicting content
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
<<<<<<< HEAD
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
=======
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportIterator;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
import org.apache.hadoop.io.Text;
import org.apache.hadoop.hdfs.DeprecatedUTF8;
import org.apache.hadoop.io.WritableUtils;
Solution content
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.hdfs.DeprecatedUTF8;
import org.apache.hadoop.io.WritableUtils;
File
DatanodeDescriptor.java
Developer's decision
None
Kind of conflict
Import
Chunk
Conflicting content
  private long lastBlocksScheduledRollTime = 0;
  private static final int BLOCKS_SCHEDULED_ROLL_INTERVAL = 600*1000; //10min
  private int volumeFailures = 0;
<<<<<<< HEAD
  
=======
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
  /** 
   * When set to true, the node is not in include list and is not allowed
   * to communicate with the namenode
Solution content
  private long lastBlocksScheduledRollTime = 0;
  private static final int BLOCKS_SCHEDULED_ROLL_INTERVAL = 600*1000; //10min
  private int volumeFailures = 0;
  /** 
   * When set to true, the node is not in include list and is not allowed
   * to communicate with the namenode
File
DatanodeDescriptor.java
Developer's decision
Version 1
Kind of conflict
Blank
Chunk
Conflicting content
    return blockarray;
  }

<<<<<<< HEAD
=======
  void reportDiff(BlockManager blockManager,
                  BlockListAsLongs newReport,
                  Collection toAdd,    // add to DatanodeDescriptor
                  Collection toRemove, // remove from DatanodeDescriptor
                  Collection toInvalidate, // should be removed from DN
                  Collection toCorrupt) {// add to corrupt replicas
    // place a delimiter in the list which separates blocks 
    // that have been reported from those that have not
    BlockInfo delimiter = new BlockInfo(new Block(), 1);
    boolean added = this.addBlock(delimiter);
    assert added : "Delimiting block cannot be present in the node";
    if(newReport == null)
      newReport = new BlockListAsLongs();
    // scan the report and process newly reported blocks
    BlockReportIterator itBR = newReport.getBlockReportIterator();
    while(itBR.hasNext()) {
      Block iblk = itBR.next();
      ReplicaState iState = itBR.getCurrentReplicaState();
      BlockInfo storedBlock = processReportedBlock(blockManager, iblk, iState,
                                               toAdd, toInvalidate, toCorrupt);
      // move block to the head of the list
      if(storedBlock != null && storedBlock.findDatanode(this) >= 0)
        this.moveBlockToHead(storedBlock);
    }
    // collect blocks that have not been reported
    // all of them are next to the delimiter
    Iterator it = new BlockIterator(delimiter.getNext(0),this);
    while(it.hasNext())
      toRemove.add(it.next());
    this.removeBlock(delimiter);
  }

  /**
   * Process a block replica reported by the data-node.
   * 
   * 
   * <ul>
   * <li>If the block is not known to the system (not in blocksMap) then the
   * data-node should be notified to invalidate this block.</li>
   * <li>If the reported replica is valid that is has the same generation stamp
   * and length as recorded on the name-node, then the replica location is
   * added to the name-node.</li>
   * <li>If the reported replica is not valid, then it is marked as corrupt,
   * which triggers replication of the existing valid replicas.
   * Corrupt replicas are removed from the system when the block
   * is fully replicated.</li>
   * </ul>
   *
   * @param blockManager
   * @param block reported block replica
   * @param rState reported replica state
   * @param toAdd add to DatanodeDescriptor
   * @param toInvalidate missing blocks (not in the blocks map)
   *        should be removed from the data-node
   * @param toCorrupt replicas with unexpected length or generation stamp;
   *        add to corrupt replicas
   * @return
   */
  BlockInfo processReportedBlock(
                  BlockManager blockManager,
                  Block block,             // reported block replica
                  ReplicaState rState,     // reported replica state
                  Collection toAdd,        // add to DatanodeDescriptor
                  Collection toInvalidate, // should be removed from DN
                  Collection toCorrupt) {  // add to corrupt replicas
    if(FSNamesystem.LOG.isDebugEnabled()) {
      FSNamesystem.LOG.debug("Reported block " + block
          + " on " + getName() + " size " + block.getNumBytes()
          + " replicaState = " + rState);
    }
    // find block by blockId
    BlockInfo storedBlock = blockManager.blocksMap.getStoredBlock(block);
    if(storedBlock == null) {
      // If blocksMap does not contain reported block id,
      // the replica should be removed from the data-node.
      toInvalidate.add(new Block(block));
      return null;
    }
    if(FSNamesystem.LOG.isDebugEnabled()) {
      FSNamesystem.LOG.debug("In memory blockUCState = "
          + storedBlock.getBlockUCState());
    }
    // Ignore replicas already scheduled to be removed from the DN
    if(blockManager.belongsToInvalidates(getStorageID(), block)) {
      assert storedBlock.findDatanode(this) < 0 : "Block " + block
          + " in recentInvalidatesSet should not appear in DN " + this;
      return storedBlock;
    }
    // Block is on the DN
    boolean isCorrupt = false;
    switch(rState) {
    case FINALIZED:
      switch(storedBlock.getBlockUCState()) {
      case COMPLETE:
      case COMMITTED:
        if(storedBlock.getGenerationStamp() != block.getGenerationStamp()
            || storedBlock.getNumBytes() != block.getNumBytes())
          isCorrupt = true;
        break;
      case UNDER_CONSTRUCTION:
      case UNDER_RECOVERY:
        ((BlockInfoUnderConstruction)storedBlock).addReplicaIfNotPresent(
            this, block, rState);
      }
      if(!isCorrupt && storedBlock.findDatanode(this) < 0)
        if (storedBlock.getNumBytes() != block.getNumBytes()) {
          toAdd.add(new Block(block));
        } else {
          toAdd.add(storedBlock);
        }
      break;
    case RBW:
    case RWR:
      if(!storedBlock.isComplete())
        ((BlockInfoUnderConstruction)storedBlock).addReplicaIfNotPresent(
            this, block, rState);
      else
        isCorrupt = true;
      break;
    case RUR:       // should not be reported
    case TEMPORARY: // should not be reported
    default:
      FSNamesystem.LOG.warn("Unexpected replica state " + rState
          + " for block: " + storedBlock
          + " on " + getName() + " size " + storedBlock.getNumBytes());
      break;
    }
    if(isCorrupt)
      toCorrupt.add(storedBlock);
    return storedBlock;
  }
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0

  /** Serialization for FSEditLog */
  void readFieldsFromFSEditLog(DataInput in) throws IOException {
    this.name = DeprecatedUTF8.readString(in);
Solution content
    return blockarray;
  }

  /** Serialization for FSEditLog */
  void readFieldsFromFSEditLog(DataInput in) throws IOException {
    this.name = DeprecatedUTF8.readString(in);
File
DatanodeDescriptor.java
Developer's decision
Version 1
Kind of conflict
Comment
Method declaration
Chunk
Conflicting content
  public void incVolumeFailure() {
    volumeFailures++;
  }
<<<<<<< HEAD
  
=======
  
  /**
   * Set the flag to indicate if this datanode is disallowed from communicating
   * with the namenode.
   */
  void setDisallowed(boolean flag) {
    disallowed = flag;
  }
  
  boolean isDisallowed() {
    return disallowed;
  }

>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
  /**
   * @return number of failed volumes in the datanode.
   */
Solution content
  public void incVolumeFailure() {
    volumeFailures++;
  }
  
  /**
   * Set the flag to indicate if this datanode is disallowed from communicating
   * with the namenode.
   */
  void setDisallowed(boolean flag) {
    disallowed = flag;
  }
  
  boolean isDisallowed() {
    return disallowed;
  }

  /**
   * @return number of failed volumes in the datanode.
   */
File
DatanodeDescriptor.java
Developer's decision
Version 2
Kind of conflict
Comment
Method declaration
Chunk
Conflicting content
    super.updateRegInfo(nodeReg);
    volumeFailures = 0;
  }
<<<<<<< HEAD
  
  /**
   * Set the flag to indicate if this datanode is disallowed from communicating
   * with the namenode.
   */
  void setDisallowed(boolean flag) {
    disallowed = flag;
  }
  
  boolean isDisallowed() {
    return disallowed;
  }
=======
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
}
Solution content
    super.updateRegInfo(nodeReg);
    volumeFailures = 0;
  }
}
File
DatanodeDescriptor.java
Developer's decision
Version 2
Kind of conflict
Comment
Method declaration
Chunk
Conflicting content
    // format before starting up if requested
    if (startOpt == StartupOption.FORMAT) {
      fsImage.getStorage().setStorageDirectories(dataDirs, editsDirs);
<<<<<<< HEAD
      fsImage.getStorage().format();
=======
      fsImage.getStorage().format(fsImage.getStorage().determineClusterId()); // reuse current id
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      startOpt = StartupOption.REGULAR;
    }
    try {
Solution content
    // format before starting up if requested
    if (startOpt == StartupOption.FORMAT) {
      fsImage.getStorage().setStorageDirectories(dataDirs, editsDirs);
      fsImage.getStorage().format(fsImage.getStorage().determineClusterId()); // reuse current id
      startOpt = StartupOption.REGULAR;
    }
    try {
File
FSDirectory.java
Developer's decision
Version 2
Kind of conflict
Comment
Method invocation
Chunk
Conflicting content
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.Storage.StorageState;
<<<<<<< HEAD
=======
import org.apache.hadoop.hdfs.server.common.StorageInfo;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
import org.apache.hadoop.hdfs.server.common.Util;
import static org.apache.hadoop.hdfs.server.common.Util.now;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
Solution content
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.Storage.StorageState;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.common.Util;
import static org.apache.hadoop.hdfs.server.common.Util.now;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
File
FSImage.java
Developer's decision
Version 2
Kind of conflict
Import
Chunk
Conflicting content
      throws IOException {
    this();
    storage.setStorageDirectories(fsDirs, fsEditsDirs);
<<<<<<< HEAD
=======
  }

  public FSImage(StorageInfo storageInfo, String bpid) {
    storage = new NNStorage(storageInfo, bpid);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
  }

  /**
Solution content
      throws IOException {
    this();
    storage.setStorageDirectories(fsDirs, fsEditsDirs);
  }

  public FSImage(StorageInfo storageInfo, String bpid) {
    storage = new NNStorage(storageInfo, bpid);
  }

  /**
File
FSImage.java
Developer's decision
Version 2
Kind of conflict
Attribute
Method invocation
Method signature
Chunk
Conflicting content
    }
    if (startOpt != StartupOption.UPGRADE
        && storage.getLayoutVersion() < Storage.LAST_PRE_UPGRADE_LAYOUT_VERSION
<<<<<<< HEAD
        && storage.getLayoutVersion() != FSConstants.LAYOUT_VERSION)
=======
        && storage.getLayoutVersion() != FSConstants.LAYOUT_VERSION) {
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      throw new IOException(
          "\nFile system image contains an old layout version " 
          + storage.getLayoutVersion() + ".\nAn upgrade to version "
Solution content
    }
    if (startOpt != StartupOption.UPGRADE
        && storage.getLayoutVersion() < Storage.LAST_PRE_UPGRADE_LAYOUT_VERSION
        && storage.getLayoutVersion() != FSConstants.LAYOUT_VERSION) {
      throw new IOException(
          "\nFile system image contains an old layout version " 
          + storage.getLayoutVersion() + ".\nAn upgrade to version "
File
FSImage.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Chunk
Conflicting content
          + storage.getLayoutVersion() + ".\nAn upgrade to version "
          + FSConstants.LAYOUT_VERSION + " is required.\n"
          + "Please restart NameNode with -upgrade option.");
<<<<<<< HEAD
=======
    }
    
    // Upgrade to federation requires -upgrade -clusterid  option
    if (startOpt == StartupOption.UPGRADE
        && storage.getLayoutVersion() > Storage.LAST_PRE_FEDERATION_LAYOUT_VERSION) {
      if (startOpt.getClusterId() == null) {
        throw new IOException(
            "\nFile system image contains an old layout version "
                + storage.getLayoutVersion() + ".\nAn upgrade to version "
                + FSConstants.LAYOUT_VERSION
                + " is required.\nPlease restart NameNode with "
                + "-upgrade -clusterid  option.");
      }
      storage.setClusterID(startOpt.getClusterId());
      
      // Create new block pool Id
      storage.setBlockPoolID(storage.newBlockPoolID());
    }
    
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    // check whether distributed upgrade is reguired and/or should be continued
    storage.verifyDistributedUpgradeProgress(startOpt);
Solution content
          + storage.getLayoutVersion() + ".\nAn upgrade to version "
          + FSConstants.LAYOUT_VERSION + " is required.\n"
          + "Please restart NameNode with -upgrade option.");
    }
    
    // Upgrade to federation requires -upgrade -clusterid  option
    if (startOpt == StartupOption.UPGRADE
        && storage.getLayoutVersion() > Storage.LAST_PRE_FEDERATION_LAYOUT_VERSION) {
      if (startOpt.getClusterId() == null) {
        throw new IOException(
            "\nFile system image contains an old layout version "
                + storage.getLayoutVersion() + ".\nAn upgrade to version "
                + FSConstants.LAYOUT_VERSION
                + " is required.\nPlease restart NameNode with "
                + "-upgrade -clusterid  option.");
      }
      storage.setClusterID(startOpt.getClusterId());
      
      // Create new block pool Id
      storage.setBlockPoolID(storage.newBlockPoolID());
    }
    
    // check whether distributed upgrade is reguired and/or should be continued
    storage.verifyDistributedUpgradeProgress(startOpt);
File
FSImage.java
Developer's decision
Version 2
Kind of conflict
Comment
If statement
Chunk
Conflicting content
    FSImageFormat.Loader loader = new FSImageFormat.Loader(
        conf, getFSNamesystem());
    loader.load(curFile);
<<<<<<< HEAD

=======
    namesystem.setBlockPoolId(this.getBlockPoolID());
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0

    // Check that the image digest we loaded matches up with what
    // we expected
Solution content
    FSImageFormat.Loader loader = new FSImageFormat.Loader(
        conf, getFSNamesystem());
    loader.load(curFile);
    namesystem.setBlockPoolId(this.getBlockPoolID());

    // Check that the image digest we loaded matches up with what
    // we expected
File
FSImage.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Chunk
Conflicting content
    getEditLog().close();
    storage.close();
  }
<<<<<<< HEAD
=======

>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0

  /**
   * Retrieve checkpoint dirs from configuration.
Solution content
    getEditLog().close();
    storage.close();
  }


  /**
   * Retrieve checkpoint dirs from configuration.
File
FSImage.java
Developer's decision
Version 1
Kind of conflict
Blank
Chunk
Conflicting content
               + " has been successfully formatted.");
    }
  };
<<<<<<< HEAD

  @Override // NNStorageListener
  public void directoryAvailable(StorageDirectory sd) throws IOException {
    // do nothing
=======

  @Override // NNStorageListener
  public void directoryAvailable(StorageDirectory sd) throws IOException {
    // do nothing
  }

  public int getLayoutVersion() {
    return storage.getLayoutVersion();
  }
  
  public int getNamespaceID() {
    return storage.getNamespaceID();
  }
  
  public String getClusterID() {
    return storage.getClusterID();
  }
  
  public String getBlockPoolID() {
    return storage.getBlockPoolID();
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
  }
}
Solution content
               + " has been successfully formatted.");
    }
  };

  @Override // NNStorageListener
  public void directoryAvailable(StorageDirectory sd) throws IOException {
    // do nothing
  }

  public int getLayoutVersion() {
    return storage.getLayoutVersion();
  }
  
  public int getNamespaceID() {
    return storage.getNamespaceID();
  }
  
  public String getClusterID() {
    return storage.getClusterID();
  }
  
  public String getBlockPoolID() {
    return storage.getBlockPoolID();
  }
}
File
FSImage.java
Developer's decision
Version 2
Kind of conflict
Annotation
Comment
Method declaration
Method invocation
Method signature
Return statement
Chunk
Conflicting content
  
  NamespaceInfo getNamespaceInfo() {
    return new NamespaceInfo(dir.fsImage.getStorage().getNamespaceID(),
<<<<<<< HEAD
=======
                             getClusterId(),
                             getBlockPoolId(),
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
                             dir.fsImage.getStorage().getCTime(),
                             getDistributedUpgradeVersion());
  }
Solution content
  
  NamespaceInfo getNamespaceInfo() {
    return new NamespaceInfo(dir.fsImage.getStorage().getNamespaceID(),
                             getClusterId(),
                             getBlockPoolId(),
                             dir.fsImage.getStorage().getCTime(),
                             getDistributedUpgradeVersion());
  }
File
FSNamesystem.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Chunk
Conflicting content
    return b;
  }

<<<<<<< HEAD
  /** @see NameNode#getAdditionalDatanode(String, Block, DatanodeInfo[], DatanodeInfo[], int, String) */
  LocatedBlock getAdditionalDatanode(final String src, final Block blk,
=======
  /** @see NameNode#getAdditionalDatanode(String, ExtendedBlock, DatanodeInfo[], DatanodeInfo[], int, String) */
  LocatedBlock getAdditionalDatanode(final String src, final ExtendedBlock blk,
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      final DatanodeInfo[] existings,  final HashMap excludes,
      final int numAdditionalNodes, final String clientName
      ) throws IOException {
Solution content
    return b;
  }

  /** @see NameNode#getAdditionalDatanode(String, ExtendedBlock, DatanodeInfo[], DatanodeInfo[], int, String) */
  LocatedBlock getAdditionalDatanode(final String src, final ExtendedBlock blk,
      final DatanodeInfo[] existings,  final HashMap excludes,
      final int numAdditionalNodes, final String clientName
      ) throws IOException {
File
FSNamesystem.java
Developer's decision
Version 2
Kind of conflict
Comment
Method signature
Chunk
Conflicting content
    assert(Thread.holdsLock(heartbeats));
    if (isAdded) {
      capacityUsed += node.getDfsUsed();
<<<<<<< HEAD
=======
      blockPoolUsed += node.getBlockPoolUsed();
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      totalLoad += node.getXceiverCount();
      if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
        capacityTotal += node.getCapacity();
Solution content
    assert(Thread.holdsLock(heartbeats));
    if (isAdded) {
      capacityUsed += node.getDfsUsed();
      blockPoolUsed += node.getBlockPoolUsed();
      totalLoad += node.getXceiverCount();
      if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
        capacityTotal += node.getCapacity();
File
FSNamesystem.java
Developer's decision
Version 2
Kind of conflict
Attribute
Method invocation
Chunk
Conflicting content
      }
    } else {
      capacityUsed -= node.getDfsUsed();
<<<<<<< HEAD
=======
      blockPoolUsed -= node.getBlockPoolUsed();
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      totalLoad -= node.getXceiverCount();
      if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
        capacityTotal -= node.getCapacity();
Solution content
      }
    } else {
      capacityUsed -= node.getDfsUsed();
      blockPoolUsed -= node.getBlockPoolUsed();
      totalLoad -= node.getXceiverCount();
      if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
        capacityTotal -= node.getCapacity();
File
FSNamesystem.java
Developer's decision
Version 2
Kind of conflict
Attribute
Method invocation
Chunk
Conflicting content
      } else {
        capacityTotal -= node.getDfsUsed();
      }
<<<<<<< HEAD
    }
  }

  /**
   * Returns whether or not there were available resources at the last check of
   * resources.
   *
   * @return true if there were sufficient resources available, false otherwise.
   */
  private boolean nameNodeHasResourcesAvailable() {
    return hasResourcesAvailable;
  }

  /**
   * Perform resource checks and cache the results.
   * @throws IOException
   */
  private void checkAvailableResources() throws IOException {
    hasResourcesAvailable = nnResourceChecker.hasAvailableDiskSpace();
  }

  /**
   * Periodically calls hasAvailableResources of NameNodeResourceChecker, and if
   * there are found to be insufficient resources available, causes the NN to
   * enter safe mode. If resources are later found to have returned to
   * acceptable levels, this daemon will cause the NN to exit safe mode.
   */
  class NameNodeResourceMonitor implements Runnable  {
    @Override
    public void run () {
      try {
        while (fsRunning) {
          checkAvailableResources();
          if(!nameNodeHasResourcesAvailable()) {
            String lowResourcesMsg = "NameNode low on available disk space. ";
            if (!isInSafeMode()) {
              FSNamesystem.LOG.warn(lowResourcesMsg + "Entering safe mode.");
            } else {
              FSNamesystem.LOG.warn(lowResourcesMsg + "Already in safe mode.");
            }
            enterSafeMode(true);
          }
          try {
            Thread.sleep(resourceRecheckInterval);
          } catch (InterruptedException ie) {
            // Deliberately ignore
          }
        }
      } catch (Exception e) {
        FSNamesystem.LOG.error("Exception in NameNodeResourceMonitor: ", e);
      }
=======
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    }
  }
Solution content
      } else {
        capacityTotal -= node.getDfsUsed();
      }
    }
  }

  /**
   * Returns whether or not there were available resources at the last check of
   * resources.
   *
   * @return true if there were sufficient resources available, false otherwise.
   */
  private boolean nameNodeHasResourcesAvailable() {
    return hasResourcesAvailable;
  }

  /**
   * Perform resource checks and cache the results.
   * @throws IOException
   */
  private void checkAvailableResources() throws IOException {
    hasResourcesAvailable = nnResourceChecker.hasAvailableDiskSpace();
  }

  /**
   * Periodically calls hasAvailableResources of NameNodeResourceChecker, and if
   * there are found to be insufficient resources available, causes the NN to
   * enter safe mode. If resources are later found to have returned to
   * acceptable levels, this daemon will cause the NN to exit safe mode.
   */
  class NameNodeResourceMonitor implements Runnable  {
    @Override
    public void run () {
      try {
        while (fsRunning) {
          checkAvailableResources();
          if(!nameNodeHasResourcesAvailable()) {
            String lowResourcesMsg = "NameNode low on available disk space. ";
            if (!isInSafeMode()) {
              FSNamesystem.LOG.warn(lowResourcesMsg + "Entering safe mode.");
            } else {
              FSNamesystem.LOG.warn(lowResourcesMsg + "Already in safe mode.");
            }
            enterSafeMode(true);
          }
          try {
            Thread.sleep(resourceRecheckInterval);
          } catch (InterruptedException ie) {
            // Deliberately ignore
          }
        }
      } catch (Exception e) {
        FSNamesystem.LOG.error("Exception in NameNodeResourceMonitor: ", e);
      }
    }
  }
File
FSNamesystem.java
Developer's decision
Version 1
Kind of conflict
Annotation
Class signature
Comment
Method declaration
Method signature
Try statement
Chunk
Conflicting content
   * The given node is reporting all its blocks.  Use this info to 
   * update the (machine-->blocklist) and (block-->machinelist) tables.
   */
<<<<<<< HEAD
  public void processReport(DatanodeID nodeID, 
      BlockListAsLongs newReport) throws IOException {
    long startTime, endTime;
=======
  public void processReport(DatanodeID nodeID, String poolId,
      BlockListAsLongs newReport) throws IOException {
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0

    writeLock();
    startTime = now(); //after acquiring write lock
Solution content
   * The given node is reporting all its blocks.  Use this info to 
   * update the (machine-->blocklist) and (block-->machinelist) tables.
   */
  public void processReport(DatanodeID nodeID, String poolId,
      BlockListAsLongs newReport) throws IOException {
    long startTime, endTime;

    writeLock();
    startTime = now(); //after acquiring write lock
File
FSNamesystem.java
Developer's decision
Combination
Kind of conflict
Method signature
Variable
Chunk
Conflicting content
    private long lastStatusReport = 0;
    /** flag indicating whether replication queues have been initialized */
    private boolean initializedReplQueues = false;
<<<<<<< HEAD
    /** Was safemode entered automatically because available resources were low. */
    private boolean resourcesLow = false;
=======
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    
    /**
     * Creates SafeModeInfo when the name node enters
Solution content
    private long lastStatusReport = 0;
    /** flag indicating whether replication queues have been initialized */
    private boolean initializedReplQueues = false;
    /** Was safemode entered automatically because available resources were low. */
    private boolean resourcesLow = false;
    
    /**
     * Creates SafeModeInfo when the name node enters
File
FSNamesystem.java
Developer's decision
Version 1
Kind of conflict
Attribute
Comment
Chunk
Conflicting content
      if (isPopulatingReplQueues()) {
        LOG.warn("Replication queues already initialized.");
      }
<<<<<<< HEAD
      long startTimeMisReplicatedScan = now();
      blockManager.processMisReplicatedBlocks();
      initializedReplQueues = true;
      NameNode.stateChangeLog.info("STATE* Replication Queue initialization "
          + "scan for invalid, over- and under-replicated blocks "
          + "completed in " + (now() - startTimeMisReplicatedScan)
          + " msec");
=======
      blockManager.processMisReplicatedBlocks();
      initializedReplQueues = true;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    }

    /**
Solution content
      if (isPopulatingReplQueues()) {
        LOG.warn("Replication queues already initialized.");
      }
      long startTimeMisReplicatedScan = now();
      blockManager.processMisReplicatedBlocks();
      initializedReplQueues = true;
      NameNode.stateChangeLog.info("STATE* Replication Queue initialization "
          + "scan for invalid, over- and under-replicated blocks "
          + "completed in " + (now() - startTimeMisReplicatedScan)
          + " msec");
    }

    /**
File
FSNamesystem.java
Developer's decision
Version 1
Kind of conflict
Attribute
Method invocation
Variable
Chunk
Conflicting content
     */
    boolean needEnter() {
      return (threshold != 0 && blockSafe < blockThreshold) ||
<<<<<<< HEAD
        (getNumLiveDataNodes() < datanodeThreshold) ||
        (!nameNodeHasResourcesAvailable());
=======
        (getNumLiveDataNodes() < datanodeThreshold);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    }
      
    /**
Solution content
     */
    boolean needEnter() {
      return (threshold != 0 && blockSafe < blockThreshold) ||
        (getNumLiveDataNodes() < datanodeThreshold) ||
        (!nameNodeHasResourcesAvailable());
    }
      
    /**
File
FSNamesystem.java
Developer's decision
Version 1
Kind of conflict
Attribute
Method invocation
Chunk
Conflicting content
  }

  @Override
<<<<<<< HEAD
  public LocatedBlock getAdditionalDatanode(final String src, final Block blk,
=======
  public LocatedBlock getAdditionalDatanode(final String src, final ExtendedBlock blk,
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      final DatanodeInfo[] existings, final DatanodeInfo[] excludes,
      final int numAdditionalNodes, final String clientName
      ) throws IOException {
Solution content
  }

  @Override
  public LocatedBlock getAdditionalDatanode(final String src, final ExtendedBlock blk,
      final DatanodeInfo[] existings, final DatanodeInfo[] excludes,
      final int numAdditionalNodes, final String clientName
      ) throws IOException {
File
NameNode.java
Developer's decision
Version 2
Kind of conflict
Method signature
Chunk
Conflicting content
      }
    }

<<<<<<< HEAD
    FSNamesystem nsys = new FSNamesystem(new FSImage(dirsToFormat,
                                         editDirsToFormat), conf);
    nsys.dir.fsImage.getStorage().format();
=======
    FSImage fsImage = new FSImage(dirsToFormat, editDirsToFormat);
    FSNamesystem nsys = new FSNamesystem(fsImage, conf);
    
    // if clusterID is not provided - see if you can find the current one
    String clusterId = StartupOption.FORMAT.getClusterId();
    if(clusterId == null || clusterId.equals("")) {
      // try to get one from the existing storage
      clusterId = fsImage.getStorage().determineClusterId();
      if (clusterId == null || clusterId.equals("")) {
        throw new IllegalArgumentException("Format must be provided with clusterid");
      }
      if(isConfirmationNeeded) {
        System.err.print("Use existing cluster id=" + clusterId + "? (Y or N)");
        if(System.in.read() != 'Y') {
          throw new IllegalArgumentException("Format must be provided with clusterid");
        }
        while(System.in.read() != '\n'); // discard the enter-key
      }
    }
    nsys.dir.fsImage.getStorage().format(clusterId);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    return false;
  }
Solution content
      }
    }

    FSImage fsImage = new FSImage(dirsToFormat, editDirsToFormat);
    FSNamesystem nsys = new FSNamesystem(fsImage, conf);
    
    // if clusterID is not provided - see if you can find the current one
    String clusterId = StartupOption.FORMAT.getClusterId();
    if(clusterId == null || clusterId.equals("")) {
      // try to get one from the existing storage
      clusterId = fsImage.getStorage().determineClusterId();
      if (clusterId == null || clusterId.equals("")) {
        throw new IllegalArgumentException("Format must be provided with clusterid");
      }
      if(isConfirmationNeeded) {
        System.err.print("Use existing cluster id=" + clusterId + "? (Y or N)");
        if(System.in.read() != 'Y') {
          throw new IllegalArgumentException("Format must be provided with clusterid");
        }
        while(System.in.read() != '\n'); // discard the enter-key
      }
    }
    nsys.dir.fsImage.getStorage().format(clusterId);
    return false;
  }
File
NameNode.java
Developer's decision
Version 2
Kind of conflict
Comment
If statement
Method invocation
Variable
Chunk
Conflicting content
import javax.servlet.jsp.JspWriter;

import org.apache.hadoop.conf.Configuration;
<<<<<<< HEAD
=======
import org.apache.hadoop.hdfs.DFSUtil;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
Solution content
import javax.servlet.jsp.JspWriter;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
File
NamenodeJspHelper.java
Developer's decision
Version 2
Kind of conflict
Import
Chunk
Conflicting content
       * interact with datanodes.
       */

<<<<<<< HEAD
      generateNodeDataHeader(out, d, suffix, alive, nnHttpPort);
=======
      generateNodeDataHeader(out, d, suffix, alive, nnHttpPort, nnaddr);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      if (!alive) {
        out.print("
" + d.isDecommissioned() + "\n");
Solution content
       * interact with datanodes.
       */

      generateNodeDataHeader(out, d, suffix, alive, nnHttpPort, nnaddr);
      if (!alive) {
        out.print("
" + d.isDecommissioned() + "\n");
File
NamenodeJspHelper.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Chunk
Conflicting content
          + percentUsed
          + "
" + ServletUtil.percentageGraph((int) Double.parseDouble(percentUsed),
<<<<<<< HEAD
100) + "" + percentRemaining + "" + d.numBlocks()
=======
100) + "" + percentRemaining + "" + d.numBlocks()+"\n" + "" + StringUtils.limitDecimalTo2(bpUsed * 1.0 / diskBytes) + "" + percentBpUsed
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
+ "" + d.getVolumeFailures() + "\n"); }
Solution content
          + percentUsed
          + "
" + ServletUtil.percentageGraph((int) Double.parseDouble(percentUsed), 100) + "" + percentRemaining + "" + d.numBlocks()+"\n" + "" + StringUtils.limitDecimalTo2(bpUsed * 1.0 / diskBytes) + "" + percentBpUsed + "" + d.getVolumeFailures() + "\n"); }
File
NamenodeJspHelper.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
                + "> Used 
(%)
Used
(%)
Remaining
(%)
Blocks Block Pool
Used (" + diskByteStr + ")
Block Pool
Used (%)"
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
+ "> Blocks
Failed Volumes\n");
Solution content
                + "> Used 
(%)
Used
(%)
Remaining
(%)
Blocks Block Pool
Used (" + diskByteStr + ")
Block Pool
Used (%)" + "> Blocks
Failed Volumes\n");
File
NamenodeJspHelper.java
Developer's decision
Version 2
Kind of conflict
Attribute
Method invocation
Chunk
Conflicting content
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
<<<<<<< HEAD
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
=======
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
import org.apache.hadoop.http.HttpServer;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RemoteException;
Solution content
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.http.HttpServer;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RemoteException;
File
SecondaryNameNode.java
Developer's decision
Version 2
Kind of conflict
Import
Chunk
Conflicting content
      if (loadImage) {
        // to avoid assert in loadFSImage()
        this.getStorage().layoutVersion = -1;
<<<<<<< HEAD
        loadFSImage(getStorage().getStorageFile(sdName, NameNodeFile.IMAGE));
=======
        getStorage();
        loadFSImage(NNStorage.getStorageFile(sdName, NameNodeFile.IMAGE));
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      }
      loadFSEdits(sdEdits);
      storage.setClusterID(sig.getClusterID());
Solution content
      if (loadImage) {
        // to avoid assert in loadFSImage()
        this.getStorage().layoutVersion = -1;
        getStorage();
        loadFSImage(NNStorage.getStorageFile(sdName, NameNodeFile.IMAGE));
      }
      loadFSEdits(sdEdits);
      storage.setClusterID(sig.getClusterID());
File
SecondaryNameNode.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Chunk
Conflicting content
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.PrintStream;
<<<<<<< HEAD
=======
import java.net.InetSocketAddress;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
import java.net.URL;
import java.net.URLConnection;
import java.net.URLEncoder;
Solution content
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.PrintStream;
import java.net.InetSocketAddress;
import java.net.URL;
import java.net.URLConnection;
import java.net.URLEncoder;
File
DFSck.java
Developer's decision
Version 2
Kind of conflict
Import
Chunk
Conflicting content
    try {
      URL url = new URL(buf.toString());
      SecurityUtil.fetchServiceTicket(url);
<<<<<<< HEAD
      connection = (HttpURLConnection) url.openConnection();
=======
      HttpURLConnection connection = (HttpURLConnection) url.openConnection();
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) {
        throw new IOException("Error renewing token: " + 
            connection.getResponseMessage());
Solution content
    try {
      URL url = new URL(buf.toString());
      SecurityUtil.fetchServiceTicket(url);
      connection = (HttpURLConnection) url.openConnection();
      if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) {
        throw new IOException("Error renewing token: " + 
            connection.getResponseMessage());
File
DelegationTokenFetcher.java
Developer's decision
Version 1
Kind of conflict
Cast expression
Variable
Chunk
Conflicting content
class ImageLoaderCurrent implements ImageLoader {
  protected final DateFormat dateFormat = 
                                      new SimpleDateFormat("yyyy-MM-dd HH:mm");
<<<<<<< HEAD
  private static int[] versions = { -16, -17, -18, -19, -20, -21, -22, -23,
      -24, -25, -26, -27, -28, -30, -31, -32, -33, -34 };
=======
  private static int [] versions = 
    {-16, -17, -18, -19, -20, -21, -22, -23, -24, -25, -26, -27, -28, -30, -31};
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
  private int imageVersion = 0;

  /* (non-Javadoc)
Solution content
class ImageLoaderCurrent implements ImageLoader {
  protected final DateFormat dateFormat = 
                                      new SimpleDateFormat("yyyy-MM-dd HH:mm");
  private static int[] versions = { -16, -17, -18, -19, -20, -21, -22, -23,
      -24, -25, -26, -27, -28, -30, -31, -32, -33, -34, -35 };
  private int imageVersion = 0;

  /* (non-Javadoc)
File
ImageLoaderCurrent.java
Developer's decision
Manual
Kind of conflict
Array initializer
Attribute
Chunk
Conflicting content
    final String methodName = FiTestUtil.getMethodName();
    runCallWritePacketToDisk(methodName, 2, new DoosAction(methodName, 2));
  }
<<<<<<< HEAD
}
=======
}
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
Solution content
    final String methodName = FiTestUtil.getMethodName();
    runCallWritePacketToDisk(methodName, 2, new DoosAction(methodName, 2));
  }
}
File
TestFiDataTransferProtocol.java
Developer's decision
Version 1
Kind of conflict
Other
Chunk
Conflicting content
                                    throws IOException, LoginException, URISyntaxException  {
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
<<<<<<< HEAD
    fc = FileContext.getFileContext(cluster.getURI(), conf);
=======
    fc = FileContext.getFileContext(cluster.getURI(0), conf);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + 
        UserGroupInformation.getCurrentUser().getShortUserName()));
    fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
Solution content
                                    throws IOException, LoginException, URISyntaxException  {
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    fc = FileContext.getFileContext(cluster.getURI(0), conf);
    defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + 
        UserGroupInformation.getCurrentUser().getShortUserName()));
    fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
File
TestFcHdfsCreateMkdir.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Chunk
Conflicting content
                                    throws IOException, LoginException, URISyntaxException  {
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
<<<<<<< HEAD
    fc = FileContext.getFileContext(cluster.getURI(), conf);
=======
    fc = FileContext.getFileContext(cluster.getURI(0), conf);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + 
        UserGroupInformation.getCurrentUser().getShortUserName()));
    fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
Solution content
                                    throws IOException, LoginException, URISyntaxException  {
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    fc = FileContext.getFileContext(cluster.getURI(0), conf);
    defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + 
        UserGroupInformation.getCurrentUser().getShortUserName()));
    fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
File
TestFcHdfsPermission.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Chunk
Conflicting content
    conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
    conf.set(FsPermission.UMASK_LABEL, "000");
    cluster = new MiniDFSCluster.Builder(conf).build();
<<<<<<< HEAD
    fc = FileContext.getFileContext(cluster.getURI());
=======
    fc = FileContext.getFileContext(cluster.getURI(0));
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
  }
  
  @AfterClass
Solution content
    conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
    conf.set(FsPermission.UMASK_LABEL, "000");
    cluster = new MiniDFSCluster.Builder(conf).build();
    fc = FileContext.getFileContext(cluster.getURI(0));
  }
  
  @AfterClass
File
TestFcHdfsSymlink.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Chunk
Conflicting content
import java.io.FileInputStream;
import java.io.FileReader;
import java.io.IOException;
<<<<<<< HEAD
import java.io.InputStream;
import java.net.InetSocketAddress;
=======
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
import java.net.Socket;
import java.net.URL;
import java.net.URLConnection;
Solution content
import java.io.FileInputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStream;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.URL;
import java.net.URLConnection;
File
DFSTestUtil.java
Developer's decision
Version 1
Kind of conflict
Import
Chunk
Conflicting content
import org.apache.hadoop.fs.FileSystem.Statistics;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
<<<<<<< HEAD
import org.apache.hadoop.hdfs.protocol.Block;
=======
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
Solution content
import org.apache.hadoop.fs.FileSystem.Statistics;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
File
DFSTestUtil.java
Developer's decision
Version 2
Kind of conflict
Import
Chunk
Conflicting content
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.TestTransferRbw;
<<<<<<< HEAD
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
=======
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
Solution content
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.TestTransferRbw;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
File
DFSTestUtil.java
Developer's decision
Version 1
Kind of conflict
Import
Chunk
Conflicting content
<<<<<<< HEAD

import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import static org.junit.Assert.*;
=======
import org.junit.Assert;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0

/** Utilities for HDFS tests */
public class DFSTestUtil {
Solution content
import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;

import static org.junit.Assert.*;

/** Utilities for HDFS tests */
public class DFSTestUtil {
File
DFSTestUtil.java
Developer's decision
Version 1
Kind of conflict
Import
Chunk
Conflicting content
  }

  /** For {@link TestTransferRbw} */
<<<<<<< HEAD
  public static DataTransferProtocol.Status transferRbw(final Block b, 
      final DFSClient dfsClient, final DatanodeInfo... datanodes) throws IOException {
    assertEquals(2, datanodes.length);
=======
  public static DataTransferProtocol.Status transferRbw(final ExtendedBlock b, 
      final DFSClient dfsClient, final DatanodeInfo... datanodes) throws IOException {
    Assert.assertEquals(2, datanodes.length);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    final Socket s = DFSOutputStream.createSocketForPipeline(datanodes[0],
        datanodes.length, dfsClient);
    final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
Solution content
  }

  /** For {@link TestTransferRbw} */
  public static DataTransferProtocol.Status transferRbw(final ExtendedBlock b, 
      final DFSClient dfsClient, final DatanodeInfo... datanodes) throws IOException {
    assertEquals(2, datanodes.length);
    final Socket s = DFSOutputStream.createSocketForPipeline(datanodes[0],
        datanodes.length, dfsClient);
    final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
File
DFSTestUtil.java
Developer's decision
Combination
Kind of conflict
Method invocation
Method signature
Chunk
Conflicting content
@InterfaceStability.Unstable
public class MiniDFSCluster {

<<<<<<< HEAD
=======
  private static final String NAMESERVICE_ID_PREFIX = "nameserviceId";
  private static final Log LOG = LogFactory.getLog(MiniDFSCluster.class);

>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
  /**
   * Class to construct instances of MiniDFSClusters with specific options.
   */
Solution content
public class MiniDFSCluster {

  private static final String NAMESERVICE_ID_PREFIX = "nameserviceId";
  private static final Log LOG = LogFactory.getLog(MiniDFSCluster.class);

  /**
   * Class to construct instances of MiniDFSClusters with specific options.
   */
File
MiniDFSCluster.java
Developer's decision
Version 2
Kind of conflict
Attribute
Method invocation
Chunk
Conflicting content
  public static class Builder {
    private int nameNodePort = 0;
    private final Configuration conf;
<<<<<<< HEAD
=======
    private int numNameNodes = 1;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    private int numDataNodes = 1;
    private boolean format = true;
    private boolean manageNameDfsDirs = true;
Solution content
  public static class Builder {
    private int nameNodePort = 0;
    private final Configuration conf;
    private int numNameNodes = 1;
    private int numDataNodes = 1;
    private boolean format = true;
    private boolean manageNameDfsDirs = true;
File
MiniDFSCluster.java
Developer's decision
Version 2
Kind of conflict
Attribute
Chunk
Conflicting content
    private String[] racks = null; 
    private String [] hosts = null;
    private long [] simulatedCapacities = null;
<<<<<<< HEAD
    // wait until namenode has left safe mode?
    private boolean waitSafeMode = true;
    private boolean setupHostsFile = false;
=======
    private String clusterId = null;
    private boolean waitSafeMode = true;
    private boolean setupHostsFile = false;
    private boolean federation = false;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    
    public Builder(Configuration conf) {
      this.conf = conf;
Solution content
    private String[] racks = null; 
    private String [] hosts = null;
    private long [] simulatedCapacities = null;
    private String clusterId = null;
    private boolean waitSafeMode = true;
    private boolean setupHostsFile = false;
    private boolean federation = false;
    
    public Builder(Configuration conf) {
      this.conf = conf;
File
MiniDFSCluster.java
Developer's decision
Version 2
Kind of conflict
Attribute
Comment
Chunk
Conflicting content
    }
    
    /**
<<<<<<< HEAD
=======
     * default false - non federated cluster
     * @param val
     * @return Builder object
     */
    public Builder federation (boolean val){
      this.federation = val;
      return this;
    }
    /**
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
     * Default: 0
     */
    public Builder nameNodePort(int val) {
Solution content
    }
    
    /**
     * default false - non federated cluster
     * @param val
     * @return Builder object
     */
    public Builder federation (boolean val){
      this.federation = val;
      return this;
    }
    /**
     * Default: 0
     */
    public Builder nameNodePort(int val) {
File
MiniDFSCluster.java
Developer's decision
Version 2
Kind of conflict
Comment
Method declaration
Chunk
Conflicting content
    }

    /**
     * Default: 1
     */
<<<<<<< HEAD
=======
    public Builder numNameNodes(int val) {
      this.numNameNodes = val;
      return this;
    }

    /**
     * Default: 1
     */
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    public Builder numDataNodes(int val) {
      this.numDataNodes = val;
      return this;
Solution content
    /**
     * Default: 1
     */
    public Builder numNameNodes(int val) {
      this.numNameNodes = val;
      return this;
    }

    /**
     * Default: 1
     */
    public Builder numDataNodes(int val) {
      this.numDataNodes = val;
      return this;
File
MiniDFSCluster.java
Developer's decision
Version 2
Kind of conflict
Comment
Method declaration
Chunk
Conflicting content
    }
    
    /**
<<<<<<< HEAD
=======
     * Default: null
     */
    public Builder clusterId(String cid) {
      this.clusterId = cid;
      return this;
    }

    /**
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
     * Default: false
     * When true the hosts file/include file for the cluster is setup
     */
Solution content
    }
    
    /**
     * Default: null
     */
    public Builder clusterId(String cid) {
      this.clusterId = cid;
      return this;
    }

    /**
     * Default: false
     * When true the hosts file/include file for the cluster is setup
     */
File
MiniDFSCluster.java
Developer's decision
Version 2
Kind of conflict
Comment
Method declaration
Chunk
Conflicting content
   * Used by builder to create and return an instance of MiniDFSCluster
   */
  private MiniDFSCluster(Builder builder) throws IOException {
<<<<<<< HEAD
=======
    LOG.info("starting cluster with " + builder.numNameNodes + " namenodes.");
    nameNodes = new NameNodeInfo[builder.numNameNodes];
    // try to determine if in federation mode
    if(builder.numNameNodes > 1)
      builder.federation = true;
      
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    initMiniDFSCluster(builder.nameNodePort,
                       builder.conf,
                       builder.numDataNodes,
Solution content
   * Used by builder to create and return an instance of MiniDFSCluster
   */
  private MiniDFSCluster(Builder builder) throws IOException {
    LOG.info("starting cluster with " + builder.numNameNodes + " namenodes.");
    nameNodes = new NameNodeInfo[builder.numNameNodes];
    // try to determine if in federation mode
    if(builder.numNameNodes > 1)
      builder.federation = true;
      
    initMiniDFSCluster(builder.nameNodePort,
                       builder.conf,
                       builder.numDataNodes,
File
MiniDFSCluster.java
Developer's decision
Version 2
Kind of conflict
Comment
If statement
Method invocation
Chunk
Conflicting content
                       builder.racks,
                       builder.hosts,
                       builder.simulatedCapacities,
<<<<<<< HEAD
                       builder.waitSafeMode,
                       builder.setupHostsFile);
=======
                       builder.clusterId,
                       builder.waitSafeMode,
                       builder.setupHostsFile,
                       builder.federation);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
  }
  
  public class DataNodeProperties {
Solution content
                       builder.racks,
                       builder.hosts,
                       builder.simulatedCapacities,
                       builder.clusterId,
                       builder.waitSafeMode,
                       builder.setupHostsFile,
                       builder.federation);
  }
  
  public class DataNodeProperties {
File
MiniDFSCluster.java
Developer's decision
Version 2
Kind of conflict
Attribute
Variable
Chunk
Conflicting content
                         new ArrayList();
  private File base_dir;
  private File data_dir;
<<<<<<< HEAD

=======
  private boolean federation = false; 
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
  private boolean waitSafeMode = true;
  
  /**
Solution content
                         new ArrayList();
  private File base_dir;
  private File data_dir;
  private boolean federation = false; 
  private boolean waitSafeMode = true;
  
  /**
File
MiniDFSCluster.java
Developer's decision
Version 2
Kind of conflict
Attribute
Chunk
Conflicting content
                        StartupOption operation,
                        String[] racks, String hosts[],
                        long[] simulatedCapacities) throws IOException {
<<<<<<< HEAD
    initMiniDFSCluster(nameNodePort, conf, numDataNodes, format,
        manageNameDfsDirs, manageDataDfsDirs, operation, racks, hosts,
        simulatedCapacities, true, false);
=======
    this.nameNodes = new NameNodeInfo[1]; // Single namenode in the cluster
    initMiniDFSCluster(nameNodePort, conf, 1, format,
        manageNameDfsDirs, manageDataDfsDirs, operation, racks, hosts,
        simulatedCapacities, null, true, false, false);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
  }

  private void initMiniDFSCluster(int nameNodePort, Configuration conf,
Solution content
                        StartupOption operation,
                        String[] racks, String hosts[],
                        long[] simulatedCapacities) throws IOException {
    this.nameNodes = new NameNodeInfo[1]; // Single namenode in the cluster
    initMiniDFSCluster(nameNodePort, conf, 1, format,
        manageNameDfsDirs, manageDataDfsDirs, operation, racks, hosts,
        simulatedCapacities, null, true, false, false);
  }

  private void initMiniDFSCluster(int nameNodePort, Configuration conf,
File
MiniDFSCluster.java
Developer's decision
Version 2
Kind of conflict
Attribute
Comment
Method invocation
Chunk
Conflicting content
    this.waitSafeMode = waitSafeMode;
    
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
  private void initMiniDFSCluster(int nameNodePort, Configuration conf,
      int numDataNodes, boolean format, boolean manageNameDfsDirs,
      boolean manageDataDfsDirs, StartupOption operation, String[] racks,
<<<<<<< HEAD
      String[] hosts, long[] simulatedCapacities, boolean waitSafeMode,
      boolean setupHostsFile)
    throws IOException {
    this.conf = conf;
    base_dir = new File(getBaseDirectory());
    data_dir = new File(base_dir, "data");

=======
      String[] hosts, long[] simulatedCapacities, String clusterId,
      boolean waitSafeMode, boolean setupHostsFile, boolean federation) 
  throws IOException {
    this.conf = conf;
    base_dir = new File(getBaseDirectory());
    data_dir = new File(base_dir, "data");
    this.federation = federation;
    // use alternate RPC engine if spec'd
Solution content
  private void initMiniDFSCluster(int nameNodePort, Configuration conf,
      int numDataNodes, boolean format, boolean manageNameDfsDirs,
      boolean manageDataDfsDirs, StartupOption operation, String[] racks,
      String[] hosts, long[] simulatedCapacities, String clusterId,
      boolean waitSafeMode, boolean setupHostsFile, boolean federation) 
  throws IOException {
    this.conf = conf;
    base_dir = new File(getBaseDirectory());
    data_dir = new File(base_dir, "data");
    this.federation = federation;
    this.waitSafeMode = waitSafeMode;
    
    // use alternate RPC engine if spec'd
File
MiniDFSCluster.java
Developer's decision
Version 2
Kind of conflict
Attribute
Method invocation
Variable
Chunk
Conflicting content
          fileAsURI(new File(base_dir, "namesecondary" + (2*nnIndex + 2))));
    }
    
<<<<<<< HEAD
    int replication = conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 3); // 3 second
    
=======
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    // Format and clean out DataNode directories
    if (format) {
      GenericTestUtils.formatNamenode(conf);
Solution content
          fileAsURI(new File(base_dir, "namesecondary" + (2*nnIndex + 2))));
    }
    
    // Format and clean out DataNode directories
    if (format) {
      GenericTestUtils.formatNamenode(conf);
File
MiniDFSCluster.java
Developer's decision
Version 2
Kind of conflict
Comment
Method invocation
Variable
Chunk
Conflicting content
                     operation == StartupOption.FORMAT ||
                     operation == StartupOption.REGULAR) ?
      new String[] {} : new String[] {operation.getName()};
<<<<<<< HEAD
    conf.setClass(DFSConfigKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, 
                   StaticMapping.class, DNSToSwitchMapping.class);
    nameNode = NameNode.createNameNode(args, conf);
    
    // Start the DataNodes
    startDataNodes(conf, numDataNodes, manageDataDfsDirs, operation, racks,
        hosts, simulatedCapacities, setupHostsFile);
    waitClusterUp();

    //make sure ProxyUsers uses the latest conf
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
=======
    return NameNode.createNameNode(args, conf);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
  }
  
  private void createFederatedNameNode(int nnIndex, Configuration conf,
Solution content
                     operation == StartupOption.FORMAT ||
                     operation == StartupOption.REGULAR) ?
      new String[] {} : new String[] {operation.getName()};
    return NameNode.createNameNode(args, conf);
  }
  
  private void createFederatedNameNode(int nnIndex, Configuration conf,
File
MiniDFSCluster.java
Developer's decision
Version 2
Kind of conflict
Attribute
Comment
Method invocation
Return statement
Chunk
Conflicting content
    // If minicluster's name node is null assume that the conf has been
    // set with the right address:port of the name node.
    //
<<<<<<< HEAD
    if (nameNode != null) { // set conf from the name node
      InetSocketAddress nnAddr = nameNode.getNameNodeAddress(); 
      int nameNodePort = nnAddr.getPort(); 
      try {
	  myUri = new URI("hdfs://"+ nnAddr.getHostName() + ":" +
			  Integer.toString(nameNodePort));
      } catch (URISyntaxException e) {
	  throw new IOException("Couldn't parse own URI", e);
      }
      FileSystem.setDefaultUri(conf, myUri);
    }

=======
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    if (racks != null && numDataNodes > racks.length ) {
      throw new IllegalArgumentException( "The length of racks [" + racks.length
          + "] is less than the number of datanodes [" + numDataNodes + "].");
Solution content
    // If minicluster's name node is null assume that the conf has been
    // set with the right address:port of the name node.
    //
    if (racks != null && numDataNodes > racks.length ) {
      throw new IllegalArgumentException( "The length of racks [" + racks.length
          + "] is less than the number of datanodes [" + numDataNodes + "].");
File
MiniDFSCluster.java
Developer's decision
Version 2
Kind of conflict
If statement
Chunk
Conflicting content
   * @throws IOException on error accessing the file for the given block
   * @return The contents of the block file, null if none found
   */
<<<<<<< HEAD
  public String readBlockOnDataNode(int i, String blockName) throws IOException {
    assert (i >= 0 && i < dataNodes.size()) : "Invalid datanode "+i;

    // Each datanode has multiple data dirs, check each
    for (int dn = i*2; dn < i*2+2; dn++) {
      File dataDir = new File(getBaseDirectory() + "data");
      File blockFile = new File(dataDir,
          "data" + (dn+1) + FINALIZED_DIR_NAME + blockName);
      if (blockFile.exists()) {
        return DFSTestUtil.readFile(blockFile);
      }
    }
    return null;
  }

  /**
   * Corrupt a block on all datanodes.
   *
   * @param The name of the block
   * @throws IOException on error accessing the given block.
   * @return The number of block files corrupted.
   */  
  public int corruptBlockOnDataNodes(String blockName) throws IOException {
    int blocksCorrupted = 0;
    for (int i=0; i < dataNodes.size(); i++) {
      if (corruptReplica(blockName, i)) {
        blocksCorrupted++;
      }
    }
    return blocksCorrupted;
  }

  /**
   * Corrupt a block on a particular datanode.
   *
   * @param The index of the datanode
   * @param The name of the block
   * @throws IOException on error accessing the given block or if
   * the contents of the block (on the same datanode) differ.
   * @return true if a replica was corrupted, false otherwise
   */
  public boolean corruptReplica(String blockName, int i) throws IOException {
    Random random = new Random();
    assert (i >= 0 && i < dataNodes.size()) : "Invalid datanode "+i;
    int filesCorrupted = 0;

    // Each datanode has multiple data dirs, check each
    for (int dn = i*2; dn < i*2+2; dn++) {
      File dataDir = new File(getBaseDirectory() + "data");
      File blockFile = new File(dataDir,
          "data" + (dn+1) + FINALIZED_DIR_NAME + blockName);

      // Corrupt the replica by writing some bytes into a random offset
      if (blockFile.exists()) { 
        System.out.println("Corrupting " + blockFile);
        RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
        FileChannel channel = raFile.getChannel();
        String badString = "BADBAD";
        int rand = random.nextInt((int)channel.size()/2);
        raFile.seek(rand);
        raFile.write(badString.getBytes());
        raFile.close();
        filesCorrupted++;
      }
    }
    assert filesCorrupted == 0 || filesCorrupted == 1
      : "Unexpected # block files";
    return filesCorrupted == 1;
=======
  void corruptBlockOnDataNodes(ExtendedBlock block) throws Exception{
    File[] blockFiles = getAllBlockFiles(block);
    for (File f : blockFiles) {
      corruptBlock(f);
    }
  }

  /*
   * Corrupt a block on a particular datanode
   * Types: delete, write bad data, truncate
   */
  public static boolean corruptBlockOnDataNode(int i, ExtendedBlock blk)
      throws IOException {
    File blockFile = getBlockFile(i, blk);
    return corruptBlock(blockFile);
  }

  /*
   * Corrupt a block on a particular datanode
   */
  public static boolean corruptBlock(File blockFile) throws IOException {
    if (blockFile == null || !blockFile.exists()) {
      return false;
    }
    // Corrupt replica by writing random bytes into replica
    Random random = new Random();
    RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
    FileChannel channel = raFile.getChannel();
    String badString = "BADBAD";
    int rand = random.nextInt((int)channel.size()/2);
    raFile.seek(rand);
    raFile.write(badString.getBytes());
    raFile.close();
    LOG.warn("Corrupting the block " + blockFile);
    return true;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
  }

  /*
Solution content
   * @throws IOException on error accessing the file for the given block
   */
  int corruptBlockOnDataNodes(ExtendedBlock block) throws IOException{
    int blocksCorrupted = 0;
    File[] blockFiles = getAllBlockFiles(block);
    for (File f : blockFiles) {
      if (corruptBlock(f)) {
        blocksCorrupted++;
      }
    }
    return blocksCorrupted;
  }

  public String readBlockOnDataNode(int i, ExtendedBlock block)
      throws IOException {
    assert (i >= 0 && i < dataNodes.size()) : "Invalid datanode "+i;
    File blockFile = getBlockFile(i, block);
    if (blockFile != null && blockFile.exists()) {
      return DFSTestUtil.readFile(blockFile);
    }
    return null;
  }

  /**
   * Corrupt a block on a particular datanode.
   *
   * @param i index of the datanode
   * @param blk name of the block
   * @throws IOException on error accessing the given block or if
   * the contents of the block (on the same datanode) differ.
   * @return true if a replica was corrupted, false otherwise
   * Types: delete, write bad data, truncate
   */
  public static boolean corruptReplica(int i, ExtendedBlock blk)
      throws IOException {
    File blockFile = getBlockFile(i, blk);
    return corruptBlock(blockFile);
  }

  /*
   * Corrupt a block on a particular datanode
   */
  public static boolean corruptBlock(File blockFile) throws IOException {
    if (blockFile == null || !blockFile.exists()) {
      return false;
    }
    // Corrupt replica by writing random bytes into replica
    Random random = new Random();
    RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
    FileChannel channel = raFile.getChannel();
    String badString = "BADBAD";
    int rand = random.nextInt((int)channel.size()/2);
    raFile.seek(rand);
    raFile.write(badString.getBytes());
    raFile.close();
    LOG.warn("Corrupting the block " + blockFile);
    return true;
  }

  /*
File
MiniDFSCluster.java
Developer's decision
Manual
Kind of conflict
Assert statement
Comment
For statement
If statement
Method declaration
Method invocation
Method signature
Return statement
Variable
Chunk
Conflicting content
  public static String getBaseDirectory() {
    return System.getProperty("test.build.data", "build/test/data") + "/dfs/";
  }
<<<<<<< HEAD
=======

  /**
   * Get a storage directory for a datanode. There are two storage directories
   * per datanode:
   * 
   * <ol>
   * <li>/data/data&lt;2*dnIndex + 1&gt;</li>
   * <li>/data/data&lt;2*dnIndex + 2&gt;</li>
   * </ol>
* * @param dnIndex datanode index (starts from 0) * @param dirIndex directory index (0 or 1). Index 0 provides access to the * first storage directory. Index 1 provides access to the second * storage directory. * @return Storage directory */ public static File getStorageDir(int dnIndex, int dirIndex) { return new File(getBaseDirectory() + "data/data" + (2*dnIndex + 1 + dirIndex)); } /** * Get current directory corresponding to the datanode * @param storageDir * @return current directory */ public static String getDNCurrentDir(File storageDir) { return storageDir + "/" + Storage.STORAGE_DIR_CURRENT + "/"; } /** * Get directory corresponding to block pool directory in the datanode * @param storageDir * @return current directory */ public static String getBPDir(File storageDir, String bpid) { return getDNCurrentDir(storageDir) + bpid + "/"; } /** * Get directory relative to block pool directory in the datanode * @param storageDir * @return current directory */ public static String getBPDir(File storageDir, String bpid, String dirName) { return getBPDir(storageDir, bpid) + dirName + "/"; } /** * Get finalized directory for a block pool * @param storageDir storage directory * @param bpid Block pool Id * @return finalized directory for a block pool */ public static File getRbwDir(File storageDir, String bpid) { return new File(getBPDir(storageDir, bpid, Storage.STORAGE_DIR_CURRENT) + DataStorage.STORAGE_DIR_RBW ); } /** * Get finalized directory for a block pool * @param storageDir storage directory * @param bpid Block pool Id * @return finalized directory for a block pool */ public static File getFinalizedDir(File storageDir, String bpid) { return new File(getBPDir(storageDir, bpid, Storage.STORAGE_DIR_CURRENT) + DataStorage.STORAGE_DIR_FINALIZED ); } /** * Get file correpsonding to a block * @param storageDir storage directory * @param blk block to be corrupted * @return file corresponding to the block */ public static File getBlockFile(File storageDir, ExtendedBlock 
blk) { return new File(getFinalizedDir(storageDir, blk.getBlockPoolId()), blk.getBlockName()); } /** * Get all files related to a block from all the datanodes * @param block block for which corresponding files are needed */ public File[] getAllBlockFiles(ExtendedBlock block) { if (dataNodes.size() == 0) return new File[0]; ArrayList list = new ArrayList(); for (int i=0; i < dataNodes.size(); i++) { File blockFile = getBlockFile(i, block); if (blockFile != null) { list.add(blockFile); } } return list.toArray(new File[list.size()]); } /** * Get files related to a block for a given datanode * @param dnIndex Index of the datanode to get block files for * @param block block for which corresponding files are needed */ public static File getBlockFile(int dnIndex, ExtendedBlock block) { // Check for block file in the two storage directories of the datanode for (int i = 0; i <=1 ; i++) { File storageDir = MiniDFSCluster.getStorageDir(dnIndex, i); File blockFile = getBlockFile(storageDir, block); if (blockFile.exists()) { return blockFile; } } return null; } /** * Throw an exception if the MiniDFSCluster is not started with a single * namenode */ private void checkSingleNameNode() { if (nameNodes.length != 1) { throw new IllegalArgumentException("Namenode index is needed"); } } /** * Add a namenode to a federated cluster and start it. Configuration of * datanodes in the cluster is refreshed to register with the new namenode. 
* * @return newly started namenode */ public NameNode addNameNode(Configuration conf, int namenodePort) throws IOException { if(!federation) throw new IOException("cannot add namenode to non-federated cluster"); int nnIndex = nameNodes.length; int numNameNodes = nameNodes.length + 1; NameNodeInfo[] newlist = new NameNodeInfo[numNameNodes]; System.arraycopy(nameNodes, 0, newlist, 0, nameNodes.length); nameNodes = newlist; String nameserviceId = NAMESERVICE_ID_PREFIX + (nnIndex + 1); String nameserviceIds = conf.get(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES); nameserviceIds += "," + nameserviceId; conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, nameserviceIds); initFederatedNamenodeAddress(conf, nameserviceId, namenodePort); createFederatedNameNode(nnIndex, conf, numDataNodes, true, true, null, null, nameserviceId); // Refresh datanodes with the newly started namenode for (DataNodeProperties dn : dataNodes) { DataNode datanode = dn.datanode; datanode.refreshNamenodes(conf); } // Wait for new namenode to get registrations from all the datanodes waitActive(nnIndex); return nameNodes[nnIndex].nameNode; } >>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0 private int getFreeSocketPort() { int port = 0;
Solution content
  public static String getBaseDirectory() {
    return System.getProperty("test.build.data", "build/test/data") + "/dfs/";
  }

  /**
   * Get a storage directory for a datanode. There are two storage directories
   * per datanode:
   * 
   * <ol>
   * <li>/data/data&lt;2*dnIndex + 1&gt;</li>
   * <li>/data/data&lt;2*dnIndex + 2&gt;</li>
   * </ol>
* * @param dnIndex datanode index (starts from 0) * @param dirIndex directory index (0 or 1). Index 0 provides access to the * first storage directory. Index 1 provides access to the second * storage directory. * @return Storage directory */ public static File getStorageDir(int dnIndex, int dirIndex) { return new File(getBaseDirectory() + "data/data" + (2*dnIndex + 1 + dirIndex)); } /** * Get current directory corresponding to the datanode * @param storageDir * @return current directory */ public static String getDNCurrentDir(File storageDir) { return storageDir + "/" + Storage.STORAGE_DIR_CURRENT + "/"; } /** * Get directory corresponding to block pool directory in the datanode * @param storageDir * @return current directory */ public static String getBPDir(File storageDir, String bpid) { return getDNCurrentDir(storageDir) + bpid + "/"; } /** * Get directory relative to block pool directory in the datanode * @param storageDir * @return current directory */ public static String getBPDir(File storageDir, String bpid, String dirName) { return getBPDir(storageDir, bpid) + dirName + "/"; } /** * Get finalized directory for a block pool * @param storageDir storage directory * @param bpid Block pool Id * @return finalized directory for a block pool */ public static File getRbwDir(File storageDir, String bpid) { return new File(getBPDir(storageDir, bpid, Storage.STORAGE_DIR_CURRENT) + DataStorage.STORAGE_DIR_RBW ); } /** * Get finalized directory for a block pool * @param storageDir storage directory * @param bpid Block pool Id * @return finalized directory for a block pool */ public static File getFinalizedDir(File storageDir, String bpid) { return new File(getBPDir(storageDir, bpid, Storage.STORAGE_DIR_CURRENT) + DataStorage.STORAGE_DIR_FINALIZED ); } /** * Get file correpsonding to a block * @param storageDir storage directory * @param blk block to be corrupted * @return file corresponding to the block */ public static File getBlockFile(File storageDir, ExtendedBlock 
blk) { return new File(getFinalizedDir(storageDir, blk.getBlockPoolId()), blk.getBlockName()); } /** * Get all files related to a block from all the datanodes * @param block block for which corresponding files are needed */ public File[] getAllBlockFiles(ExtendedBlock block) { if (dataNodes.size() == 0) return new File[0]; ArrayList list = new ArrayList(); for (int i=0; i < dataNodes.size(); i++) { File blockFile = getBlockFile(i, block); if (blockFile != null) { list.add(blockFile); } } return list.toArray(new File[list.size()]); } /** * Get files related to a block for a given datanode * @param dnIndex Index of the datanode to get block files for * @param block block for which corresponding files are needed */ public static File getBlockFile(int dnIndex, ExtendedBlock block) { // Check for block file in the two storage directories of the datanode for (int i = 0; i <=1 ; i++) { File storageDir = MiniDFSCluster.getStorageDir(dnIndex, i); File blockFile = getBlockFile(storageDir, block); if (blockFile.exists()) { return blockFile; } } return null; } /** * Throw an exception if the MiniDFSCluster is not started with a single * namenode */ private void checkSingleNameNode() { if (nameNodes.length != 1) { throw new IllegalArgumentException("Namenode index is needed"); } } /** * Add a namenode to a federated cluster and start it. Configuration of * datanodes in the cluster is refreshed to register with the new namenode. 
* * @return newly started namenode */ public NameNode addNameNode(Configuration conf, int namenodePort) throws IOException { if(!federation) throw new IOException("cannot add namenode to non-federated cluster"); int nnIndex = nameNodes.length; int numNameNodes = nameNodes.length + 1; NameNodeInfo[] newlist = new NameNodeInfo[numNameNodes]; System.arraycopy(nameNodes, 0, newlist, 0, nameNodes.length); nameNodes = newlist; String nameserviceId = NAMESERVICE_ID_PREFIX + (nnIndex + 1); String nameserviceIds = conf.get(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES); nameserviceIds += "," + nameserviceId; conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, nameserviceIds); initFederatedNamenodeAddress(conf, nameserviceId, namenodePort); createFederatedNameNode(nnIndex, conf, numDataNodes, true, true, null, null, nameserviceId); // Refresh datanodes with the newly started namenode for (DataNodeProperties dn : dataNodes) { DataNode datanode = dn.datanode; datanode.refreshNamenodes(conf); } // Wait for new namenode to get registrations from all the datanodes waitActive(nnIndex); return nameNodes[nnIndex].nameNode; } private int getFreeSocketPort() { int port = 0;
File
MiniDFSCluster.java
Developer's decision
Version 2
Kind of conflict
Comment
Method declaration
Chunk
Conflicting content
  
  private void addToFile(String p, String address) throws IOException {
    File f = new File(p);
<<<<<<< HEAD
    if (!f.exists()) {
      f.createNewFile();
    }
=======
    f.createNewFile();
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    PrintWriter writer = new PrintWriter(new FileWriter(f, true));
    try {
      writer.println(address);
Solution content
  
  private void addToFile(String p, String address) throws IOException {
    File f = new File(p);
    f.createNewFile();
    PrintWriter writer = new PrintWriter(new FileWriter(f, true));
    try {
      writer.println(address);
File
MiniDFSCluster.java
Developer's decision
Version 2
Kind of conflict
If statement
Method invocation
Chunk
Conflicting content
      DFSTestUtil.createFile(fs, file, fileSize, replFactor, 12345L /*seed*/);
      DFSTestUtil.waitReplication(fs, file, replFactor);

<<<<<<< HEAD
      String block = DFSTestUtil.getFirstBlock(fs, file).getBlockName();
      int blockFilesCorrupted = cluster.corruptBlockOnDataNodes(block);
      assertEquals("All replicas not corrupted", replFactor, blockFilesCorrupted);
=======
      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file);
      cluster.corruptBlockOnDataNodes(block);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0

      try {
        IOUtils.copyBytes(fs.open(file), new IOUtils.NullOutputStream(), conf,
Solution content
      DFSTestUtil.createFile(fs, file, fileSize, replFactor, 12345L /*seed*/);
      DFSTestUtil.waitReplication(fs, file, replFactor);

      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file);
      int blockFilesCorrupted = cluster.corruptBlockOnDataNodes(block);
      assertEquals("All replicas not corrupted", replFactor, blockFilesCorrupted);

      try {
        IOUtils.copyBytes(fs.open(file), new IOUtils.NullOutputStream(), conf,
File
TestCrcCorruption.java
Developer's decision
Combination
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
<<<<<<< HEAD
import org.apache.hadoop.hdfs.protocol.Block;
=======
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
Solution content
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
File
TestDFSClientRetries.java
Developer's decision
Version 2
Kind of conflict
Import
Chunk
Conflicting content
    int bufferSize = 4096;
    
    Configuration conf = new HdfsConfiguration();
<<<<<<< HEAD
    conf.setInt(DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY, xcievers);
=======
    conf.setInt(DFSConfigKeys.DFS_DATANODE_MAX_XCIEVERS_KEY,xcievers);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    conf.setInt(DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY, 
                retries);
    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, timeWin);
Solution content
    int bufferSize = 4096;
    
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY, xcievers);
    conf.setInt(DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY, 
                retries);
    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, timeWin);
File
TestDFSClientRetries.java
Developer's decision
Version 1
Kind of conflict
Method invocation
Chunk
Conflicting content
      String[] dataNodeDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
      
      log("Finalize with existing previous dir", numDirs);
<<<<<<< HEAD
      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
=======
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
      UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
      UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "previous");
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      cluster = new MiniDFSCluster.Builder(conf)
                                  .format(false)
                                  .manageDataDfsDirs(false)
Solution content
      String[] dataNodeDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
      
      log("Finalize with existing previous dir", numDirs);
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
      UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
      UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "previous");
      cluster = new MiniDFSCluster.Builder(conf)
                                  .format(false)
                                  .manageDataDfsDirs(false)
File
TestDFSFinalize.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Chunk
Conflicting content
      String[] dataNodeDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
      
      log("Normal NameNode rollback", numDirs);
<<<<<<< HEAD
      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
=======
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
                                                .format(false)
                                                .manageDataDfsDirs(false)
Solution content
      String[] dataNodeDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
      
      log("Normal NameNode rollback", numDirs);
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
                                                .format(false)
                                                .manageDataDfsDirs(false)
File
TestDFSRollback.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Chunk
Conflicting content
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      
      log("Normal DataNode rollback", numDirs);
<<<<<<< HEAD
      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
=======
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
                                                .format(false)
                                                .manageDataDfsDirs(false)
Solution content
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      
      log("Normal DataNode rollback", numDirs);
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
                                                .format(false)
                                                .manageDataDfsDirs(false)
File
TestDFSRollback.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Chunk
Conflicting content
                                                .manageNameDfsDirs(false)
                                                .startupOption(StartupOption.ROLLBACK)
                                                .build();
<<<<<<< HEAD
      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
=======
      UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
      UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "previous");
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      cluster.startDataNodes(conf, 1, false, StartupOption.ROLLBACK, null);
      checkResult(DATA_NODE, dataNodeDirs);
      cluster.shutdown();
Solution content
                                                .manageNameDfsDirs(false)
                                                .startupOption(StartupOption.ROLLBACK)
                                                .build();
      UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
      UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "previous");
      cluster.startDataNodes(conf, 1, false, StartupOption.ROLLBACK, null);
      checkResult(DATA_NODE, dataNodeDirs);
      cluster.shutdown();
File
TestDFSRollback.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Chunk
Conflicting content
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      
      log("DataNode rollback without existing previous dir", numDirs);
<<<<<<< HEAD
      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
=======
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
                                                .format(false)
                                                .manageDataDfsDirs(false)
Solution content
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      
      log("DataNode rollback without existing previous dir", numDirs);
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
                                                .format(false)
                                                .manageDataDfsDirs(false)
File
TestDFSRollback.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Chunk
Conflicting content
                                                .manageNameDfsDirs(false)
                                                .startupOption(StartupOption.UPGRADE)
                                                .build();
<<<<<<< HEAD
      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
=======
      UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      cluster.startDataNodes(conf, 1, false, StartupOption.ROLLBACK, null);
      cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
Solution content
                                                .manageNameDfsDirs(false)
                                                .startupOption(StartupOption.UPGRADE)
                                                .build();
      UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
      cluster.startDataNodes(conf, 1, false, StartupOption.ROLLBACK, null);
      cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
File
TestDFSRollback.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Chunk
Conflicting content
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);

      log("DataNode rollback with future stored layout version in previous", numDirs);
<<<<<<< HEAD
      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
=======
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
                                                .format(false)
                                                .manageDataDfsDirs(false)
Solution content
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);

      log("DataNode rollback with future stored layout version in previous", numDirs);
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
                                                .format(false)
                                                .manageDataDfsDirs(false)
File
TestDFSRollback.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Chunk
Conflicting content
                                                .manageNameDfsDirs(false)
                                                .startupOption(StartupOption.ROLLBACK)
                                                .build();
<<<<<<< HEAD
      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
      baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
      UpgradeUtilities.createVersionFile(conf, DATA_NODE, baseDirs,
                                         new StorageInfo(Integer.MIN_VALUE,
                                                         UpgradeUtilities.getCurrentNamespaceID(cluster),
                                                         UpgradeUtilities.getCurrentFsscTime(cluster)));
      startDataNodeShouldFail(StartupOption.ROLLBACK);
=======
      UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
      baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "previous");
      storageInfo = new StorageInfo(Integer.MIN_VALUE, 
          UpgradeUtilities.getCurrentNamespaceID(cluster), 
          UpgradeUtilities.getCurrentClusterID(cluster), 
          UpgradeUtilities.getCurrentFsscTime(cluster));
      
      UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo,
          UpgradeUtilities.getCurrentBlockPoolID(cluster));
      
      startBlockPoolShouldFail(StartupOption.ROLLBACK, 
          cluster.getNamesystem().getBlockPoolId());
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
Solution content
                                                .manageNameDfsDirs(false)
                                                .startupOption(StartupOption.ROLLBACK)
                                                .build();
      UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
      baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "previous");
      storageInfo = new StorageInfo(Integer.MIN_VALUE, 
          UpgradeUtilities.getCurrentNamespaceID(cluster), 
          UpgradeUtilities.getCurrentClusterID(cluster), 
          UpgradeUtilities.getCurrentFsscTime(cluster));
      
      UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo,
          UpgradeUtilities.getCurrentBlockPoolID(cluster));
      
      startBlockPoolShouldFail(StartupOption.ROLLBACK, 
          cluster.getNamesystem().getBlockPoolId());
      cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
File
TestDFSRollback.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
      
      log("DataNode rollback with newer fsscTime in previous", numDirs);
<<<<<<< HEAD
      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
=======
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
                                                .format(false)
                                                .manageDataDfsDirs(false)
Solution content
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
      
      log("DataNode rollback with newer fsscTime in previous", numDirs);
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
                                                .format(false)
                                                .manageDataDfsDirs(false)
File
TestDFSRollback.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Chunk
Conflicting content
                                                .manageNameDfsDirs(false)
                                                .startupOption(StartupOption.ROLLBACK)
                                                .build();
<<<<<<< HEAD
      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
      baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
      UpgradeUtilities.createVersionFile(conf, DATA_NODE, baseDirs,
                                         new StorageInfo(UpgradeUtilities.getCurrentLayoutVersion(),
                                                         UpgradeUtilities.getCurrentNamespaceID(cluster),
                                                         Long.MAX_VALUE));
      startDataNodeShouldFail(StartupOption.ROLLBACK);
=======
      
      UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
      baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "previous");
      storageInfo = new StorageInfo(UpgradeUtilities.getCurrentLayoutVersion(), 
            UpgradeUtilities.getCurrentNamespaceID(cluster), 
            UpgradeUtilities.getCurrentClusterID(cluster), Long.MAX_VALUE);
      
      UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo,
          UpgradeUtilities.getCurrentBlockPoolID(cluster));
      
      startBlockPoolShouldFail(StartupOption.ROLLBACK, 
          cluster.getNamesystem().getBlockPoolId());
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
Solution content
                                                .manageNameDfsDirs(false)
                                                .startupOption(StartupOption.ROLLBACK)
                                                .build();
      
      UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
      baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "previous");
      storageInfo = new StorageInfo(UpgradeUtilities.getCurrentLayoutVersion(), 
            UpgradeUtilities.getCurrentNamespaceID(cluster), 
            UpgradeUtilities.getCurrentClusterID(cluster), Long.MAX_VALUE);
      
      UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo,
          UpgradeUtilities.getCurrentBlockPoolID(cluster));
      
      startBlockPoolShouldFail(StartupOption.ROLLBACK, 
          cluster.getNamesystem().getBlockPoolId());
      cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
File
TestDFSRollback.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      
      log("NameNode rollback with old layout version in previous", numDirs);
<<<<<<< HEAD
      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
      baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
      UpgradeUtilities.createVersionFile(conf, NAME_NODE, baseDirs,
                                         new StorageInfo(1,
                                                         UpgradeUtilities.getCurrentNamespaceID(null),
                                                         UpgradeUtilities.getCurrentFsscTime(null)));
=======
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
      storageInfo = new StorageInfo(1, 
          UpgradeUtilities.getCurrentNamespaceID(null),
          UpgradeUtilities.getCurrentClusterID(null),
          UpgradeUtilities.getCurrentFsscTime(null));
      
      UpgradeUtilities.createNameNodeVersionFile(conf, baseDirs,
          storageInfo, UpgradeUtilities.getCurrentBlockPoolID(cluster));
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      startNameNodeShouldFail(StartupOption.UPGRADE);
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
    } // end numDir loop
Solution content
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      
      log("NameNode rollback with old layout version in previous", numDirs);
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
      storageInfo = new StorageInfo(1, 
          UpgradeUtilities.getCurrentNamespaceID(null),
          UpgradeUtilities.getCurrentClusterID(null),
          UpgradeUtilities.getCurrentFsscTime(null));
      
      UpgradeUtilities.createNameNodeVersionFile(conf, baseDirs,
          storageInfo, UpgradeUtilities.getCurrentBlockPoolID(cluster));
      startNameNodeShouldFail(StartupOption.UPGRADE);
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
    } // end numDir loop
File
TestDFSRollback.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
    UpgradeUtilities.initialize();
    Configuration conf = UpgradeUtilities.initializeStorageStateConf(1, 
                                                      new HdfsConfiguration());
<<<<<<< HEAD
    StorageInfo[] versions = initializeVersions();
    UpgradeUtilities.createStorageDirs(
                                       NAME_NODE, conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY), "current");
=======
    StorageData[] versions = initializeVersions();
    UpgradeUtilities.createNameNodeStorageDirs(
        conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY), "current");
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
                                              .format(false)
                                              .manageDataDfsDirs(false)
Solution content
    UpgradeUtilities.initialize();
    Configuration conf = UpgradeUtilities.initializeStorageStateConf(1, 
                                                      new HdfsConfiguration());
    StorageData[] versions = initializeVersions();
    UpgradeUtilities.createNameNodeStorageDirs(
        conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY), "current");
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
                                              .format(false)
                                              .manageDataDfsDirs(false)
File
TestDFSStartupVersions.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
                                              .manageNameDfsDirs(false)
                                              .startupOption(StartupOption.REGULAR)
                                              .build();
<<<<<<< HEAD
    StorageInfo nameNodeVersion = new StorageInfo(
                                                  UpgradeUtilities.getCurrentLayoutVersion(),
                                                  UpgradeUtilities.getCurrentNamespaceID(cluster),
                                                  UpgradeUtilities.getCurrentFsscTime(cluster));
=======
    StorageData nameNodeVersion = new StorageData(
        UpgradeUtilities.getCurrentLayoutVersion(),
        UpgradeUtilities.getCurrentNamespaceID(cluster),
        UpgradeUtilities.getCurrentClusterID(cluster),
        UpgradeUtilities.getCurrentFsscTime(cluster),
        UpgradeUtilities.getCurrentBlockPoolID(cluster));
    
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    log("NameNode version info", NAME_NODE, null, nameNodeVersion);
    String bpid = UpgradeUtilities.getCurrentBlockPoolID(cluster);
    for (int i = 0; i < versions.length; i++) {
Solution content
                                              .manageNameDfsDirs(false)
                                              .startupOption(StartupOption.REGULAR)
                                              .build();
    StorageData nameNodeVersion = new StorageData(
        UpgradeUtilities.getCurrentLayoutVersion(),
        UpgradeUtilities.getCurrentNamespaceID(cluster),
        UpgradeUtilities.getCurrentClusterID(cluster),
        UpgradeUtilities.getCurrentFsscTime(cluster),
        UpgradeUtilities.getCurrentBlockPoolID(cluster));
    
    log("NameNode version info", NAME_NODE, null, nameNodeVersion);
    String bpid = UpgradeUtilities.getCurrentBlockPoolID(cluster);
    for (int i = 0; i < versions.length; i++) {
File
TestDFSStartupVersions.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
      File[] storage = UpgradeUtilities.createDataNodeStorageDirs(
          conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY), "current");
      log("DataNode version info", DATA_NODE, i, versions[i]);
<<<<<<< HEAD
      UpgradeUtilities.createVersionFile(conf, DATA_NODE, storage, versions[i]);
=======
      UpgradeUtilities.createDataNodeVersionFile(storage,
          versions[i].storageInfo, bpid, versions[i].blockPoolId);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      try {
        cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
      } catch (Exception ignore) {
Solution content
      File[] storage = UpgradeUtilities.createDataNodeStorageDirs(
          conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY), "current");
      log("DataNode version info", DATA_NODE, i, versions[i]);
      UpgradeUtilities.createDataNodeVersionFile(storage,
          versions[i].storageInfo, bpid, versions[i].blockPoolId);
      try {
        cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
      } catch (Exception ignore) {
File
TestDFSStartupVersions.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Chunk
Conflicting content
        baseDirs = createNameNodeStorageState(testCase);
        if (shouldRecover) {
          cluster = createCluster(conf);
<<<<<<< HEAD
          checkResult(NAME_NODE, baseDirs, curAfterRecover, prevAfterRecover);
=======
          checkResultNameNode(baseDirs, curAfterRecover, prevAfterRecover);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
          cluster.shutdown();
        } else {
          try {
Solution content
        baseDirs = createNameNodeStorageState(testCase);
        if (shouldRecover) {
          cluster = createCluster(conf);
          checkResultNameNode(baseDirs, curAfterRecover, prevAfterRecover);
          cluster.shutdown();
        } else {
          try {
File
TestDFSStorageStateRecovery.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Chunk
Conflicting content
        boolean prevAfterRecover = testCase[7];

        log("DATA_NODE recovery", numDirs, i, testCase);
<<<<<<< HEAD
        createStorageState(NAME_NODE,
                           new boolean[] {true, true, false, false, false});
        cluster = createCluster(conf);
        baseDirs = createStorageState(DATA_NODE, testCase);
=======
        createNameNodeStorageState(new boolean[] { true, true, false, false,
            false });
        cluster = createCluster(conf);
        baseDirs = createDataNodeStorageState(testCase);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
        if (!testCase[0] && !testCase[1] && !testCase[2] && !testCase[3]) {
          // DataNode will create and format current if no directories exist
          cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
Solution content
        boolean prevAfterRecover = testCase[7];

        log("DATA_NODE recovery", numDirs, i, testCase);
        createNameNodeStorageState(new boolean[] { true, true, false, false,
            false });
        cluster = createCluster(conf);
        baseDirs = createDataNodeStorageState(testCase);
        if (!testCase[0] && !testCase[1] && !testCase[2] && !testCase[3]) {
          // DataNode will create and format current if no directories exist
          cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
File
TestDFSStorageStateRecovery.java
Developer's decision
Version 2
Kind of conflict
Attribute
Method invocation
Variable
Chunk
Conflicting content
      String[] dataNodeDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
      
      log("Normal NameNode upgrade", numDirs);
<<<<<<< HEAD
      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
      cluster = createCluster();
      checkResult(NAME_NODE, nameNodeDirs);
=======
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      cluster = createCluster();
      checkNameNode(nameNodeDirs);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      
Solution content
      String[] dataNodeDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
      
      log("Normal NameNode upgrade", numDirs);
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      cluster = createCluster();
      checkNameNode(nameNodeDirs);
      cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      
File
TestDFSUpgrade.java
Developer's decision
Version 2
Kind of conflict
Attribute
Method invocation
Chunk
Conflicting content
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      
      log("Normal DataNode upgrade", numDirs);
<<<<<<< HEAD
      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
      cluster = createCluster();
      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
=======
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      cluster = createCluster();
      UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
      checkDataNode(dataNodeDirs, UpgradeUtilities.getCurrentBlockPoolID(null));
      cluster.shutdown();
Solution content
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      
      log("Normal DataNode upgrade", numDirs);
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      cluster = createCluster();
      UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
      cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
      checkDataNode(dataNodeDirs, UpgradeUtilities.getCurrentBlockPoolID(null));
      cluster.shutdown();
File
TestDFSUpgrade.java
Developer's decision
Version 2
Kind of conflict
Attribute
Method invocation
Chunk
Conflicting content
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      
      log("DataNode upgrade with existing previous dir", numDirs);
<<<<<<< HEAD
      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
      cluster = createCluster();
      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
=======
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      cluster = createCluster();
      UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
      UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "previous");
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
      checkDataNode(dataNodeDirs, UpgradeUtilities.getCurrentBlockPoolID(null));
      cluster.shutdown();
Solution content
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      
      log("DataNode upgrade with existing previous dir", numDirs);
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      cluster = createCluster();
      UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
      UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "previous");
      cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
      checkDataNode(dataNodeDirs, UpgradeUtilities.getCurrentBlockPoolID(null));
      cluster.shutdown();
File
TestDFSUpgrade.java
Developer's decision
Version 2
Kind of conflict
Attribute
Method invocation
Chunk
Conflicting content
      
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);

      log("DataNode upgrade with future stored layout version in current", numDirs);
<<<<<<< HEAD
      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
      cluster = createCluster();
      baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
      UpgradeUtilities.createVersionFile(conf, DATA_NODE, baseDirs,
                                         new StorageInfo(Integer.MIN_VALUE,
                                                         UpgradeUtilities.getCurrentNamespaceID(cluster),
                                                         UpgradeUtilities.getCurrentFsscTime(cluster)));
      startDataNodeShouldFail(StartupOption.REGULAR);
=======
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      cluster = createCluster();
      baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
      storageInfo = new StorageInfo(Integer.MIN_VALUE, 
          UpgradeUtilities.getCurrentNamespaceID(cluster),
          UpgradeUtilities.getCurrentClusterID(cluster),
          UpgradeUtilities.getCurrentFsscTime(cluster));
      
      UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo,
          UpgradeUtilities.getCurrentBlockPoolID(cluster));
      startBlockPoolShouldFail(StartupOption.REGULAR, UpgradeUtilities
          .getCurrentBlockPoolID(null));
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
Solution content
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);

      log("DataNode upgrade with future stored layout version in current", numDirs);
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      cluster = createCluster();
      baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
      storageInfo = new StorageInfo(Integer.MIN_VALUE, 
          UpgradeUtilities.getCurrentNamespaceID(cluster),
          UpgradeUtilities.getCurrentClusterID(cluster),
          UpgradeUtilities.getCurrentFsscTime(cluster));
      
      UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo,
          UpgradeUtilities.getCurrentBlockPoolID(cluster));
      
      startBlockPoolShouldFail(StartupOption.REGULAR, UpgradeUtilities
          .getCurrentBlockPoolID(null));
      cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
File
TestDFSUpgrade.java
Developer's decision
Version 2
Kind of conflict
Attribute
Method invocation
Variable
Chunk
Conflicting content
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
      
      log("DataNode upgrade with newer fsscTime in current", numDirs);
<<<<<<< HEAD
      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
      cluster = createCluster();
      baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
      UpgradeUtilities.createVersionFile(conf, DATA_NODE, baseDirs,
                                         new StorageInfo(UpgradeUtilities.getCurrentLayoutVersion(),
                                                         UpgradeUtilities.getCurrentNamespaceID(cluster),
                                                         Long.MAX_VALUE));
      startDataNodeShouldFail(StartupOption.REGULAR);
=======
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      cluster = createCluster();
      baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
      storageInfo = new StorageInfo(UpgradeUtilities.getCurrentLayoutVersion(), 
          UpgradeUtilities.getCurrentNamespaceID(cluster),
          UpgradeUtilities.getCurrentClusterID(cluster), Long.MAX_VALUE);
          
      UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo, 
          UpgradeUtilities.getCurrentBlockPoolID(cluster));
      // Ensure corresponding block pool failed to initialized
      startBlockPoolShouldFail(StartupOption.REGULAR, UpgradeUtilities
          .getCurrentBlockPoolID(null));
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
Solution content
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
      
      log("DataNode upgrade with newer fsscTime in current", numDirs);
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      cluster = createCluster();
      baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
      storageInfo = new StorageInfo(UpgradeUtilities.getCurrentLayoutVersion(), 
          UpgradeUtilities.getCurrentNamespaceID(cluster),
          UpgradeUtilities.getCurrentClusterID(cluster), Long.MAX_VALUE);
          
      UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo, 
          UpgradeUtilities.getCurrentBlockPoolID(cluster));
      // Ensure corresponding block pool failed to initialized
      startBlockPoolShouldFail(StartupOption.REGULAR, UpgradeUtilities
          .getCurrentBlockPoolID(null));
      cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
File
TestDFSUpgrade.java
Developer's decision
Version 2
Kind of conflict
Attribute
Comment
Method invocation
Variable
Chunk
Conflicting content
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      
      log("NameNode upgrade with old layout version in current", numDirs);
<<<<<<< HEAD
      baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
      UpgradeUtilities.createVersionFile(conf, NAME_NODE, baseDirs,
                                         new StorageInfo(Storage.LAST_UPGRADABLE_LAYOUT_VERSION + 1,
                                                         UpgradeUtilities.getCurrentNamespaceID(null),
                                                         UpgradeUtilities.getCurrentFsscTime(null)));
=======
      baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      storageInfo = new StorageInfo(Storage.LAST_UPGRADABLE_LAYOUT_VERSION + 1, 
          UpgradeUtilities.getCurrentNamespaceID(null),
          UpgradeUtilities.getCurrentClusterID(null),
          UpgradeUtilities.getCurrentFsscTime(null));
      
      UpgradeUtilities.createNameNodeVersionFile(conf, baseDirs, storageInfo,
          UpgradeUtilities.getCurrentBlockPoolID(cluster));
      
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      startNameNodeShouldFail(StartupOption.UPGRADE);
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      
Solution content
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      
      log("NameNode upgrade with old layout version in current", numDirs);
      baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      storageInfo = new StorageInfo(Storage.LAST_UPGRADABLE_LAYOUT_VERSION + 1, 
          UpgradeUtilities.getCurrentNamespaceID(null),
          UpgradeUtilities.getCurrentClusterID(null),
          UpgradeUtilities.getCurrentFsscTime(null));
      
      UpgradeUtilities.createNameNodeVersionFile(conf, baseDirs, storageInfo,
          UpgradeUtilities.getCurrentBlockPoolID(cluster));
      
      startNameNodeShouldFail(StartupOption.UPGRADE);
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      
File
TestDFSUpgrade.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      
      log("NameNode upgrade with future layout version in current", numDirs);
<<<<<<< HEAD
      baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
      UpgradeUtilities.createVersionFile(conf, NAME_NODE, baseDirs,
                                         new StorageInfo(Integer.MIN_VALUE,
                                                         UpgradeUtilities.getCurrentNamespaceID(null),
                                                         UpgradeUtilities.getCurrentFsscTime(null)));
=======
      baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      storageInfo = new StorageInfo(Integer.MIN_VALUE, 
          UpgradeUtilities.getCurrentNamespaceID(null),
          UpgradeUtilities.getCurrentClusterID(null),
          UpgradeUtilities.getCurrentFsscTime(null));
      
      UpgradeUtilities.createNameNodeVersionFile(conf, baseDirs, storageInfo,
          UpgradeUtilities.getCurrentBlockPoolID(cluster));
      
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      startNameNodeShouldFail(StartupOption.UPGRADE);
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
    } // end numDir loop
Solution content
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      
      log("NameNode upgrade with future layout version in current", numDirs);
      baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      storageInfo = new StorageInfo(Integer.MIN_VALUE, 
          UpgradeUtilities.getCurrentNamespaceID(null),
          UpgradeUtilities.getCurrentClusterID(null),
          UpgradeUtilities.getCurrentFsscTime(null));
      
      UpgradeUtilities.createNameNodeVersionFile(conf, baseDirs, storageInfo,
          UpgradeUtilities.getCurrentBlockPoolID(cluster));
      
      startNameNodeShouldFail(StartupOption.UPGRADE);
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
    } // end numDir loop
File
TestDFSUpgrade.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
                                  .numDataNodes(numDataNodes)
                                  .format(false)
                                  .startupOption(StartupOption.UPGRADE)
<<<<<<< HEAD
=======
                                  .clusterId("testClusterId")
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
                                  .build();
      cluster.waitActive();
      DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
Solution content
                                  .numDataNodes(numDataNodes)
                                  .format(false)
                                  .startupOption(StartupOption.UPGRADE)
                                  .clusterId("testClusterId")
                                  .build();
      cluster.waitActive();
      DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
File
TestDFSUpgradeFromImage.java
Developer's decision
Version 2
Kind of conflict
Other
Chunk
Conflicting content
=======
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
<<<<<<< HEAD
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
Solution content
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
File
TestDatanodeBlockScanner.java
Developer's decision
Combination
Kind of conflict
Import
Chunk
Conflicting content
     */
    DFSTestUtil.createFile(fs, file1, 10, (short)1, 0);
    cluster.shutdown();
<<<<<<< HEAD

=======
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    cluster = new MiniDFSCluster.Builder(conf)
                                .numDataNodes(1)
                                .format(false).build();
Solution content
     */
    DFSTestUtil.createFile(fs, file1, 10, (short)1, 0);
    cluster.shutdown();

    cluster = new MiniDFSCluster.Builder(conf)
                                .numDataNodes(1)
                                .format(false).build();
File
TestDatanodeBlockScanner.java
Developer's decision
Version 1
Kind of conflict
Blank
Chunk
Conflicting content
    cluster.shutdown();
  }

<<<<<<< HEAD
  public void testBlockCorruptionPolicy() throws Exception {
=======
  public static boolean corruptReplica(ExtendedBlock blk, int replica) throws IOException {
    return MiniDFSCluster.corruptBlockOnDataNode(replica, blk);
  }

  public void testBlockCorruptionPolicy() throws IOException {
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
    Random random = new Random();
Solution content
    Random random = new Random();
    cluster.shutdown();
  }

  public static boolean corruptReplica(ExtendedBlock blk, int replica) throws IOException {
    return MiniDFSCluster.corruptReplica(replica, blk);
  }

  public void testBlockCorruptionPolicy() throws IOException {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
File
TestDatanodeBlockScanner.java
Developer's decision
Manual
Kind of conflict
Method declaration
Method signature
Chunk
Conflicting content
                                             throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 30L);
<<<<<<< HEAD
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 3);
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 3L);
=======
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 30);
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 30L);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false);

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
Solution content
                                             throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 30L);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 3);
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 3L);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false);

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
File
TestDatanodeBlockScanner.java
Developer's decision
Version 1
Kind of conflict
Method invocation
Chunk
Conflicting content
    FileSystem fs = cluster.getFileSystem();
    Path file1 = new Path("/tmp/testBlockCorruptRecovery/file");
    DFSTestUtil.createFile(fs, file1, 1024, numReplicas, 0);
<<<<<<< HEAD
    Block blk = DFSTestUtil.getFirstBlock(fs, file1);
    String block = blk.getBlockName();
=======
    ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, file1);
    
    dfsClient = new DFSClient(new InetSocketAddress("localhost", 
                                        cluster.getNameNodePort()), conf);
    blocks = dfsClient.getNamenode().
               getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
    replicaCount = blocks.get(0).getLocations().length;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0

    // Wait until block is replicated to numReplicas
    DFSTestUtil.waitReplication(fs, file1, numReplicas);
Solution content
    FileSystem fs = cluster.getFileSystem();
    Path file1 = new Path("/tmp/testBlockCorruptRecovery/file");
    DFSTestUtil.createFile(fs, file1, 1024, numReplicas, 0);
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file1);

    // Wait until block is replicated to numReplicas
    DFSTestUtil.waitReplication(fs, file1, numReplicas);
File
TestDatanodeBlockScanner.java
Developer's decision
Manual
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
    // Corrupt numCorruptReplicas replicas of block 
    int[] corruptReplicasDNIDs = new int[numCorruptReplicas];
    for (int i=0, j=0; (j != numCorruptReplicas) && (i < numDataNodes); i++) {
<<<<<<< HEAD
      if (cluster.corruptReplica(block, i)) {
=======
      if (corruptReplica(blk, i)) 
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
        corruptReplicasDNIDs[j++] = i;
        LOG.info("successfully corrupted block " + block + " on node " 
                 + i + " " + cluster.getDataNodes().get(i).getSelfAddr());
Solution content
    // Corrupt numCorruptReplicas replicas of block 
    int[] corruptReplicasDNIDs = new int[numCorruptReplicas];
    for (int i=0, j=0; (j != numCorruptReplicas) && (i < numDataNodes); i++) {
      if (corruptReplica(block, i)) {
        corruptReplicasDNIDs[j++] = i;
        LOG.info("successfully corrupted block " + block + " on node " 
                 + i + " " + cluster.getDataNodes().get(i).getSelfAddr());
File
TestDatanodeBlockScanner.java
Developer's decision
Manual
Kind of conflict
If statement
Chunk
Conflicting content
    }

    // Loop until all corrupt replicas are reported
<<<<<<< HEAD
    DFSTestUtil.waitCorruptReplicas(fs, cluster.getNamesystem(), file1, 
        blk, numCorruptReplicas);
=======
    int corruptReplicaSize = cluster.getNamesystem().
                              numCorruptReplicas(blk.getLocalBlock());
    while (corruptReplicaSize != numCorruptReplicas) {
      try {
        IOUtils.copyBytes(fs.open(file1), new IOUtils.NullOutputStream(), 
                          conf, true);
      } catch (IOException e) {
      }
      try {
        LOG.info("Looping until expected " + numCorruptReplicas + " are " +
                 "reported. Current reported " + corruptReplicaSize);
        Thread.sleep(1000);
      } catch (InterruptedException ignore) {
      }
      corruptReplicaSize = cluster.getNamesystem().
                              numCorruptReplicas(blk.getLocalBlock());
    }
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    
    // Loop until the block recovers after replication
    DFSTestUtil.waitReplication(fs, file1, numReplicas);
Solution content
    }

    // Loop until all corrupt replicas are reported
    DFSTestUtil.waitCorruptReplicas(fs, cluster.getNamesystem(), file1, 
        block, numCorruptReplicas);
    
    // Loop until the block recovers after replication
    DFSTestUtil.waitReplication(fs, file1, numReplicas);
File
TestDatanodeBlockScanner.java
Developer's decision
Manual
Kind of conflict
Method invocation
Variable
While statement
Chunk
Conflicting content
    // Make sure the corrupt replica is invalidated and removed from
    // corruptReplicasMap
<<<<<<< HEAD
    DFSTestUtil.waitCorruptReplicas(fs, cluster.getNamesystem(), file1, 
        blk, 0);
=======
    corruptReplicaSize = cluster.getNamesystem().
                          numCorruptReplicas(blk.getLocalBlock());
    while (corruptReplicaSize != 0 || replicaCount != numReplicas) {
      try {
        LOG.info("Looping until corrupt replica is invalidated");
        Thread.sleep(1000);
      } catch (InterruptedException ignore) {
      }
      corruptReplicaSize = cluster.getNamesystem().
                            numCorruptReplicas(blk.getLocalBlock());
      blocks = dfsClient.getNamenode().
                 getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
      replicaCount = blocks.get(0).getLocations().length;
    }
    // Make sure block is healthy 
    assertTrue(corruptReplicaSize == 0);
    assertTrue(replicaCount == numReplicas);
    assertTrue(blocks.get(0).isCorrupt() == false);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    cluster.shutdown();
  }
  
Solution content
    // Make sure the corrupt replica is invalidated and removed from
    // corruptReplicasMap
    DFSTestUtil.waitCorruptReplicas(fs, cluster.getNamesystem(), file1, 
        block, 0);
    cluster.shutdown();
  }
  
File
TestDatanodeBlockScanner.java
Developer's decision
Manual
Kind of conflict
Comment
Method invocation
Variable
While statement
Chunk
Conflicting content
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 3L);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false);

<<<<<<< HEAD
    long startTime = System.currentTimeMillis();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                                               .numDataNodes(REPLICATION_FACTOR)
                                               .build();
=======
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION_FACTOR).build();
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    cluster.waitActive();
    
    try {
Solution content
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 3L);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false);

    long startTime = System.currentTimeMillis();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                                               .numDataNodes(REPLICATION_FACTOR)
                                               .build();
    cluster.waitActive();
    
    ExtendedBlock block;
    try {
File
TestDatanodeBlockScanner.java
Developer's decision
Manual
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
      cluster.shutdown();
    }

<<<<<<< HEAD
    // Restart cluster and confirm block is verified on datanode 0,
    // then truncate it on datanode 0.
    cluster = new MiniDFSCluster.Builder(conf)
                                .numDataNodes(REPLICATION_FACTOR)
                                .format(false)
                                .build();
    cluster.waitActive();
    try {
      FileSystem fs = cluster.getFileSystem();
      DatanodeInfo dn = new DatanodeInfo(cluster.getDataNodes().get(0).dnRegistration);
      assertTrue(waitForVerification(dn, fs, fileName, 1, startTime, TIMEOUT) >= startTime);
      
=======
      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);

>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      // Truncate replica of block
      if (!changeReplicaLength(block, 0, -1)) {
        throw new IOException(
Solution content
      cluster.shutdown();
    }

    // Restart cluster and confirm block is verified on datanode 0,
    // then truncate it on datanode 0.
    cluster = new MiniDFSCluster.Builder(conf)
                                .numDataNodes(REPLICATION_FACTOR)
                                .format(false)
                                .build();
    cluster.waitActive();
    try {
      FileSystem fs = cluster.getFileSystem();
      int infoPort = cluster.getDataNodes().get(0).getInfoPort();
      assertTrue(waitForVerification(infoPort, fs, fileName, 1, startTime, TIMEOUT) >= startTime);
      
      // Truncate replica of block
      if (!changeReplicaLength(block, 0, -1)) {
        throw new IOException(
File
TestDatanodeBlockScanner.java
Developer's decision
Manual
Kind of conflict
Comment
Method invocation
Try statement
Variable
Chunk
Conflicting content
                                  .numDataNodes(REPLICATION_FACTOR)
      cluster.shutdown();
    }

<<<<<<< HEAD
    // Restart the cluster, add a node, and check that the truncated block is 
    // handled correctly
    cluster = new MiniDFSCluster.Builder(conf)
                                .numDataNodes(REPLICATION_FACTOR)
                                .format(false)
                                .build();
    cluster.startDataNodes(conf, 1, true, null, null);
    cluster.waitActive();  // now we have 3 datanodes
=======
      // restart the cluster
      cluster = new MiniDFSCluster.Builder(conf)
                                  .format(false)
                                  .build();
      cluster.startDataNodes(conf, 1, true, null, null);
      cluster.waitActive();  // now we have 3 datanodes
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0

    // Assure the cluster has left safe mode.
    cluster.waitClusterUp();
Solution content
      cluster.shutdown();
    }

    // Restart the cluster, add a node, and check that the truncated block is 
    // handled correctly
    cluster = new MiniDFSCluster.Builder(conf)
                                .numDataNodes(REPLICATION_FACTOR)
                                .format(false)
                                .build();
    cluster.startDataNodes(conf, 1, true, null, null);
    cluster.waitActive();  // now we have 3 datanodes

    // Assure the cluster has left safe mode.
    cluster.waitClusterUp();
File
TestDatanodeBlockScanner.java
Developer's decision
Version 1
Kind of conflict
Comment
Method invocation
Variable
Chunk
Conflicting content
  /**
   * Change the length of a block at datanode dnIndex
   */
<<<<<<< HEAD
  static boolean changeReplicaLength(String blockName, int dnIndex, int lenDelta) throws IOException {
    File baseDir = new File(MiniDFSCluster.getBaseDirectory(), "data");
    for (int i=dnIndex*2; i<dnIndex*2+2; i++) {
      File blockFile = new File(baseDir, "data" + (i+1) +
          MiniDFSCluster.FINALIZED_DIR_NAME + blockName);
      if (blockFile.exists()) {
        RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
        raFile.setLength(raFile.length()+lenDelta);
        raFile.close();
        return true;
      }
    }
=======
  static boolean changeReplicaLength(ExtendedBlock blk, int dnIndex,
      int lenDelta) throws IOException {
    File blockFile = MiniDFSCluster.getBlockFile(dnIndex, blk);
    if (blockFile != null && blockFile.exists()) {
      RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
      raFile.setLength(raFile.length()+lenDelta);
      raFile.close();
      return true;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    }
    LOG.info("failed to change length of block " + blockName);
    return false;
Solution content
  /**
   * Change the length of a block at datanode dnIndex
   */
  static boolean changeReplicaLength(ExtendedBlock blk, int dnIndex,
      int lenDelta) throws IOException {
    File blockFile = MiniDFSCluster.getBlockFile(dnIndex, blk);
    if (blockFile != null && blockFile.exists()) {
      RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
      raFile.setLength(raFile.length()+lenDelta);
      raFile.close();
      return true;
    }
    LOG.info("failed to change length of block " + blk);
    return false;
File
TestDatanodeBlockScanner.java
Developer's decision
Version 2
Kind of conflict
For statement
If statement
Method invocation
Method signature
Return statement
Variable
Chunk
Conflicting content
    return false;
  }
  
<<<<<<< HEAD
  private static void waitForBlockDeleted(String blockName, int dnIndex,
      long timeout) 
  throws IOException, TimeoutException, InterruptedException {
    File baseDir = new File(MiniDFSCluster.getBaseDirectory(), "data");
    File blockFile1 = new File(baseDir, "data" + (2*dnIndex+1) + 
        MiniDFSCluster.FINALIZED_DIR_NAME + blockName);
    File blockFile2 = new File(baseDir, "data" + (2*dnIndex+2) + 
        MiniDFSCluster.FINALIZED_DIR_NAME + blockName);
    long failtime = System.currentTimeMillis() 
                    + ((timeout > 0) ? timeout : Long.MAX_VALUE);
    while (blockFile1.exists() || blockFile2.exists()) {
      if (failtime < System.currentTimeMillis()) {
        throw new TimeoutException("waited too long for blocks to be deleted: "
            + blockFile1.getPath() + (blockFile1.exists() ? " still exists; " : " is absent; ")
            + blockFile2.getPath() + (blockFile2.exists() ? " still exists." : " is absent."));
      }
=======
  private static void waitForBlockDeleted(ExtendedBlock blk, int dnIndex) 
  throws IOException, InterruptedException {
    File blockFile = MiniDFSCluster.getBlockFile(dnIndex, blk);
    while (blockFile != null) {
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      Thread.sleep(100);
      blockFile = MiniDFSCluster.getBlockFile(dnIndex, blk);
    }
Solution content
    return false;
  }
  
  private static void waitForBlockDeleted(ExtendedBlock blk, int dnIndex,
      long timeout) throws IOException, TimeoutException, InterruptedException {
    File blockFile = MiniDFSCluster.getBlockFile(dnIndex, blk);
    long failtime = System.currentTimeMillis() 
                    + ((timeout > 0) ? timeout : Long.MAX_VALUE);
    while (blockFile != null && blockFile.exists()) {
      if (failtime < System.currentTimeMillis()) {
        throw new TimeoutException("waited too long for blocks to be deleted: "
            + blockFile.getPath() + (blockFile.exists() ? " still exists; " : " is absent; "));
      }
      Thread.sleep(100);
      blockFile = MiniDFSCluster.getBlockFile(dnIndex, blk);
    }
File
TestDatanodeBlockScanner.java
Developer's decision
Manual
Kind of conflict
If statement
Method invocation
Method signature
Variable
While statement
Chunk
Conflicting content
    MiniDFSCluster cluster = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).build();
<<<<<<< HEAD
      fs = cluster.getFileSystem();

=======
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      InetSocketAddress addr = new InetSocketAddress(
        "localhost",
        cluster.getNameNodePort());
Solution content
    MiniDFSCluster cluster = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).build();
      InetSocketAddress addr = new InetSocketAddress(
        "localhost",
        cluster.getNameNodePort());
File
TestDatanodeRegistration.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
    LOG.info("Created file " + name + " with " + repl + " replicas.");
  }
  
<<<<<<< HEAD
  private void printFileLocations(FileSystem fileSys, Path name)
  throws IOException {
    BlockLocation[] locations = fileSys.getFileBlockLocations(
        fileSys.getFileStatus(name), 0, fileSize);
    for (int idx = 0; idx < locations.length; idx++) {
      String[] loc = locations[idx].getHosts();
      StringBuilder buf = new StringBuilder("Block[" + idx + "] : ");
      for (int j = 0; j < loc.length; j++) {
        buf.append(loc[j] + " ");
      }
      LOG.info(buf.toString());
    }
  }

=======
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
  /**
   * For blocks that reside on the nodes that are down, verify that their
   * replication factor is 1 more than the specified one.
Solution content
    LOG.info("Created file " + name + " with " + repl + " replicas.");
  }
  
  /**
   * For blocks that reside on the nodes that are down, verify that their
   * replication factor is 1 more than the specified one.
File
TestDecommission.java
Developer's decision
Version 2
Kind of conflict
Method declaration
Chunk
Conflicting content
      }
      LOG.info("Block " + blk.getBlock() + " has " + hasdown
          + " decommissioned replica.");
<<<<<<< HEAD
      assertEquals("Number of replicas for block" + blk.getBlock(),
=======
      assertEquals("Number of replicas for block " + blk.getBlock(),
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
                   Math.min(numDatanodes, repl+hasdown), nodes.length);  
    }
  }
Solution content
      }
      LOG.info("Block " + blk.getBlock() + " has " + hasdown
          + " decommissioned replica.");
      assertEquals("Number of replicas for block " + blk.getBlock(),
                   Math.min(numDatanodes, repl+hasdown), nodes.length);  
    }
  }
File
TestDecommission.java
Developer's decision
Version 1
Kind of conflict
Method invocation
Chunk
Conflicting content
    assertTrue(!fileSys.exists(name));
  }

<<<<<<< HEAD
  private void printDatanodeReport(DatanodeInfo[] info) {
    LOG.info("-------------------------------------------------");
    for (int i = 0; i < info.length; i++) {
      LOG.info(info[i].getDatanodeReport());
      LOG.info("");
    }
  }

=======
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
  /*
   * decommission one random node and wait for each to reach the
   * given {@code waitForState}.
Solution content
    assertTrue(!fileSys.exists(name));
  }

  /*
   * decommission one random node and wait for each to reach the
   * given {@code waitForState}.
File
TestDecommission.java
Developer's decision
Version 2
Kind of conflict
Method declaration
Chunk
Conflicting content
   * decommission one random node and wait for each to reach the
   * given {@code waitForState}.
   */
<<<<<<< HEAD
  private DatanodeInfo decommissionNode(FSNamesystem namesystem,
                                  ArrayList<String> decommissionedNodes,
                                  AdminStates waitForState)
    throws IOException {
    DFSClient client = getDfsClient(cluster, conf);
=======
  private DatanodeInfo decommissionNode(int nnIndex,
                                  ArrayList<DatanodeInfo> decommissionedNodes,
                                  AdminStates waitForState)
    throws IOException {
    DFSClient client = getDfsClient(cluster.getNameNode(nnIndex), conf);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);

    //
Solution content
   * decommission one random node and wait for each to reach the
   * given {@code waitForState}.
   */
  private DatanodeInfo decommissionNode(int nnIndex,
                                  ArrayList<DatanodeInfo> decommissionedNodes,
                                  AdminStates waitForState)
    throws IOException {
    DFSClient client = getDfsClient(cluster.getNameNode(nnIndex), conf);
    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);

    //
File
TestDecommission.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Method signature
Variable
Chunk
Conflicting content
    LOG.info("Decommissioning node: " + nodename);

    // write nodename into the exclude file.
<<<<<<< HEAD
    ArrayList<String> nodes = new ArrayList<String>(decommissionedNodes);
    nodes.add(nodename);
    writeConfigFile(excludeFile, nodes);
    namesystem.refreshNodes(conf);
    DatanodeInfo ret = namesystem.getDatanode(info[index]);
=======
    ArrayList<String> nodes = new ArrayList<String>();
    if (decommissionedNodes != null) {
      for (DatanodeInfo dn : decommissionedNodes) {
        nodes.add(dn.getName());
      }
    }
    nodes.add(nodename);
    writeConfigFile(excludeFile, nodes);
    cluster.getNamesystem(nnIndex).refreshNodes(conf);
    DatanodeInfo ret = cluster.getNamesystem(nnIndex).getDatanode(info[index]);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    waitNodeState(ret, waitForState);
    return ret;
  }
Solution content
    LOG.info("Decommissioning node: " + nodename);

    // write nodename into the exclude file.
    ArrayList<String> nodes = new ArrayList<String>();
    if (decommissionedNodes != null) {
      for (DatanodeInfo dn : decommissionedNodes) {
        nodes.add(dn.getName());
      }
    }
    nodes.add(nodename);
    writeConfigFile(excludeFile, nodes);
    cluster.getNamesystem(nnIndex).refreshNodes(conf);
    DatanodeInfo ret = cluster.getNamesystem(nnIndex).getDatanode(info[index]);
    waitNodeState(ret, waitForState);
    return ret;
  }
File
TestDecommission.java
Developer's decision
Version 2
Kind of conflict
If statement
Method invocation
Variable
Chunk
Conflicting content
  }
  
  /* Get DFSClient to the namenode */
<<<<<<< HEAD
  private static DFSClient getDfsClient(MiniDFSCluster cluster,
      Configuration conf) throws IOException {
    InetSocketAddress addr = new InetSocketAddress("localhost", 
                                                   cluster.getNameNodePort());
    return new DFSClient(addr, conf);
=======
  private static DFSClient getDfsClient(NameNode nn,
      Configuration conf) throws IOException {
    return new DFSClient(nn.getNameNodeAddress(), conf);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
  }
  
  /* Validate cluster has expected number of datanodes */
Solution content
  }
  
  /* Get DFSClient to the namenode */
  private static DFSClient getDfsClient(NameNode nn,
      Configuration conf) throws IOException {
    return new DFSClient(nn.getNameNodeAddress(), conf);
  }
  
  /* Validate cluster has expected number of datanodes */
File
TestDecommission.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Method signature
Return statement
Variable
Chunk
Conflicting content
  
  /** Start a MiniDFSCluster 
   * @throws IOException */
<<<<<<< HEAD
  private void startCluster(int numDatanodes, Configuration conf)
      throws IOException {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
        .build();
    cluster.waitActive();
    DFSClient client = getDfsClient(cluster, conf);
    validateCluster(client, numDatanodes);
=======
  private void startCluster(int numNameNodes, int numDatanodes,
      Configuration conf) throws IOException {
    cluster = new MiniDFSCluster.Builder(conf).numNameNodes(numNameNodes)
        .numDataNodes(numDatanodes).build();
    cluster.waitActive();
    for (int i = 0; i < numNameNodes; i++) {
      DFSClient client = getDfsClient(cluster.getNameNode(i), conf);
      validateCluster(client, numDatanodes);
    }
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
  }
  
  private void verifyStats(NameNode namenode, FSNamesystem fsn,
Solution content
    }
  }
  
  
  /** Start a MiniDFSCluster 
   * @throws IOException */
  private void startCluster(int numNameNodes, int numDatanodes,
      Configuration conf) throws IOException {
    cluster = new MiniDFSCluster.Builder(conf).numNameNodes(numNameNodes)
        .numDataNodes(numDatanodes).build();
    cluster.waitActive();
    for (int i = 0; i < numNameNodes; i++) {
      DFSClient client = getDfsClient(cluster.getNameNode(i), conf);
      validateCluster(client, numDatanodes);
  private void verifyStats(NameNode namenode, FSNamesystem fsn,
File
TestDecommission.java
Developer's decision
Version 2
Kind of conflict
Attribute
For statement
Method invocation
Method signature
Variable
Chunk
Conflicting content
  @Test
  }

  /**
<<<<<<< HEAD
   * Tests Decommission in DFS.
   */
  @Test
  public void testDecommission() throws IOException {
    LOG.info("Starting test testDecommission");
    int numDatanodes = 6;
    startCluster(numDatanodes, conf);
    try {
      DFSClient client = getDfsClient(cluster, conf);
      FileSystem fileSys = cluster.getFileSystem();
      FSNamesystem fsn = cluster.getNamesystem();
      ArrayList<String> decommissionedNodes = new ArrayList<String>(numDatanodes);
      for (int iteration = 0; iteration < numDatanodes - 1; iteration++) {
        int replicas = numDatanodes - iteration - 1;
        
        // Decommission one node. Verify that node is decommissioned.
        Path file1 = new Path("testDecommission.dat");
        writeFile(fileSys, file1, replicas);
        LOG.info("Created file decommission.dat with " + replicas
            + " replicas.");
        printFileLocations(fileSys, file1);
        DatanodeInfo downnode = decommissionNode(fsn, decommissionedNodes,
            AdminStates.DECOMMISSIONED);
        decommissionedNodes.add(downnode.getName());
        
        // Ensure decommissioned datanode is not automatically shutdown
        assertEquals("All datanodes must be alive", numDatanodes, 
            client.datanodeReport(DatanodeReportType.LIVE).length);
        
        checkFile(fileSys, file1, replicas, downnode.getName(), numDatanodes);
        cleanupFile(fileSys, file1);
      }
      
      // Restart the cluster and ensure decommissioned datanodes
      // are allowed to register with the namenode
      cluster.shutdown();
      startCluster(numDatanodes, conf);
    } catch (IOException e) {
      DFSClient client = getDfsClient(cluster, conf);
      DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.ALL);
      printDatanodeReport(info);
      throw e;
=======
   * Tests decommission for non federated cluster
   */
  @Test
  public void testDecommission() throws IOException {
    testDecommission(1, 6);
  }
  
  /**
   * Test decommission for federeated cluster
   */
  @Test
  public void testDecommissionFederation() throws IOException {
    testDecommission(2, 2);
  }
  
  private void testDecommission(int numNamenodes, int numDatanodes)
      throws IOException {
    LOG.info("Starting test testDecommission");
    startCluster(numNamenodes, numDatanodes, conf);
    
    ArrayList<ArrayList<DatanodeInfo>> namenodeDecomList = 
      new ArrayList<ArrayList<DatanodeInfo>>(numNamenodes);
    for(int i = 0; i < numNamenodes; i++) {
      namenodeDecomList.add(i, new ArrayList<DatanodeInfo>(numDatanodes));
    }
    Path file1 = new Path("testDecommission.dat");
    for (int iteration = 0; iteration < numDatanodes - 1; iteration++) {
      int replicas = numDatanodes - iteration - 1;
      
      // Start decommissioning one namenode at a time
      for (int i = 0; i < numNamenodes; i++) {
        ArrayList<DatanodeInfo> decommissionedNodes = namenodeDecomList.get(i);
        FileSystem fileSys = cluster.getFileSystem(i);
        writeFile(fileSys, file1, replicas);
        
        // Decommission one node. Verify that node is decommissioned.
        DatanodeInfo decomNode = decommissionNode(i, decommissionedNodes,
            AdminStates.DECOMMISSIONED);
        decommissionedNodes.add(decomNode);
        
        // Ensure decommissioned datanode is not automatically shutdown
        DFSClient client = getDfsClient(cluster.getNameNode(i), conf);
        assertEquals("All datanodes must be alive", numDatanodes, 
            client.datanodeReport(DatanodeReportType.LIVE).length);
        checkFile(fileSys, file1, replicas, decomNode.getName(), numDatanodes);
        cleanupFile(fileSys, file1);
      }
    }
    
    // Restart the cluster and ensure decommissioned datanodes
    // are allowed to register with the namenode
    cluster.shutdown();
    startCluster(numNamenodes, numDatanodes, conf);
  }
  
  /**
   * Tests cluster storage statistics during decommissioning for non
   * federated cluster
   */
  @Test
  public void testClusterStats() throws Exception {
    testClusterStats(1);
  }
  
  /**
   * Tests cluster storage statistics during decommissioning for
   * federated cluster
   */
  @Test
  public void testClusterStatsFederation() throws Exception {
    testClusterStats(3);
  }
  
  public void testClusterStats(int numNameNodes) throws IOException,
      InterruptedException {
    LOG.info("Starting test testClusterStats");
    int numDatanodes = 1;
    startCluster(numNameNodes, numDatanodes, conf);
    
    for (int i = 0; i < numNameNodes; i++) {
      FileSystem fileSys = cluster.getFileSystem(i);
      Path file = new Path("testClusterStats.dat");
      writeFile(fileSys, file, 1);
      
      FSNamesystem fsn = cluster.getNamesystem(i);
      NameNode namenode = cluster.getNameNode(i);
      DatanodeInfo downnode = decommissionNode(i, null,
          AdminStates.DECOMMISSION_INPROGRESS);
      // Check namenode stats for multiple datanode heartbeats
      verifyStats(namenode, fsn, downnode, true);
      
      // Stop decommissioning and verify stats
      writeConfigFile(excludeFile, null);
      fsn.refreshNodes(conf);
      DatanodeInfo ret = fsn.getDatanode(downnode);
      waitNodeState(ret, AdminStates.NORMAL);
      verifyStats(namenode, fsn, ret, false);
    }
  }
  
  /**
   * Test host/include file functionality. Only datanodes
   * in the include file are allowed to connect to the namenode in a non
   * federated cluster.
   */
  @Test
  public void testHostsFile() throws IOException, InterruptedException {
    // Test for a single namenode cluster
    testHostsFile(1);
  }
  
  /**
   * Test host/include file functionality. Only datanodes
   * in the include file are allowed to connect to the namenode in a 
   * federated cluster.
   */
  public void testHostsFileFederation() throws IOException, InterruptedException {
    // Test for 3 namenode federated cluster
    testHostsFile(3);
  }
  
  public void testHostsFile(int numNameNodes) throws IOException,
      InterruptedException {
    conf.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
    int numDatanodes = 1;
    cluster = new MiniDFSCluster.Builder(conf).numNameNodes(numNameNodes)
        .numDataNodes(numDatanodes).setupHostsFile(true).build();
    cluster.waitActive();
    
    // Now empty hosts file and ensure the datanode is disallowed
    // from talking to namenode, resulting in it's shutdown.
    ArrayList<String> list = new ArrayList<String>();
    list.add("invalidhost");
    writeConfigFile(hostsFile, list);
    
    for (int j = 0; j < numNameNodes; j++) {
      cluster.getNamesystem(j).refreshNodes(conf);
      
      DFSClient client = getDfsClient(cluster.getNameNode(j), conf);
      DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
      for (int i = 0 ; i < 5 && info.length != 0; i++) {
        LOG.info("Waiting for datanode to be marked dead");
        Thread.sleep(HEARTBEAT_INTERVAL * 1000);
        info = client.datanodeReport(DatanodeReportType.LIVE);
      }
      assertEquals("Number of live nodes should be 0", 0, info.length);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    }
  }
  
Solution content
  }

  /**
   * Tests decommission for non federated cluster
   */
  @Test
  public void testDecommission() throws IOException {
    testDecommission(1, 6);
  }
  
  /**
   * Test decommission for federeated cluster
   */
  @Test
  public void testDecommissionFederation() throws IOException {
    testDecommission(2, 2);
  }
  
  private void testDecommission(int numNamenodes, int numDatanodes)
      throws IOException {
    LOG.info("Starting test testDecommission");
    startCluster(numNamenodes, numDatanodes, conf);
    
    ArrayList<ArrayList<DatanodeInfo>> namenodeDecomList = 
      new ArrayList<ArrayList<DatanodeInfo>>(numNamenodes);
    for(int i = 0; i < numNamenodes; i++) {
      namenodeDecomList.add(i, new ArrayList<DatanodeInfo>(numDatanodes));
    }
    Path file1 = new Path("testDecommission.dat");
    for (int iteration = 0; iteration < numDatanodes - 1; iteration++) {
      int replicas = numDatanodes - iteration - 1;
      
      // Start decommissioning one namenode at a time
      for (int i = 0; i < numNamenodes; i++) {
        ArrayList<DatanodeInfo> decommissionedNodes = namenodeDecomList.get(i);
        FileSystem fileSys = cluster.getFileSystem(i);
        writeFile(fileSys, file1, replicas);
        
        // Decommission one node. Verify that node is decommissioned.
        DatanodeInfo decomNode = decommissionNode(i, decommissionedNodes,
            AdminStates.DECOMMISSIONED);
        decommissionedNodes.add(decomNode);
        
        // Ensure decommissioned datanode is not automatically shutdown
        DFSClient client = getDfsClient(cluster.getNameNode(i), conf);
        assertEquals("All datanodes must be alive", numDatanodes, 
            client.datanodeReport(DatanodeReportType.LIVE).length);
        checkFile(fileSys, file1, replicas, decomNode.getName(), numDatanodes);
        cleanupFile(fileSys, file1);
      }
    }
    
    // Restart the cluster and ensure decommissioned datanodes
    // are allowed to register with the namenode
    cluster.shutdown();
    startCluster(numNamenodes, numDatanodes, conf);
  }
  
  /**
   * Tests cluster storage statistics during decommissioning for non
   * federated cluster
   */
  @Test
  public void testClusterStats() throws Exception {
    testClusterStats(1);
  }
  
  /**
   * Tests cluster storage statistics during decommissioning for
   * federated cluster
   */
  @Test
  public void testClusterStatsFederation() throws Exception {
    testClusterStats(3);
  }
  
  public void testClusterStats(int numNameNodes) throws IOException,
      InterruptedException {
    LOG.info("Starting test testClusterStats");
    int numDatanodes = 1;
    startCluster(numNameNodes, numDatanodes, conf);
    
    for (int i = 0; i < numNameNodes; i++) {
      FileSystem fileSys = cluster.getFileSystem(i);
      Path file = new Path("testClusterStats.dat");
      writeFile(fileSys, file, 1);
      
      FSNamesystem fsn = cluster.getNamesystem(i);
      NameNode namenode = cluster.getNameNode(i);
      DatanodeInfo downnode = decommissionNode(i, null,
          AdminStates.DECOMMISSION_INPROGRESS);
      // Check namenode stats for multiple datanode heartbeats
      verifyStats(namenode, fsn, downnode, true);
      
      // Stop decommissioning and verify stats
      writeConfigFile(excludeFile, null);
      fsn.refreshNodes(conf);
      DatanodeInfo ret = fsn.getDatanode(downnode);
      waitNodeState(ret, AdminStates.NORMAL);
      verifyStats(namenode, fsn, ret, false);
    }
  }
  
  /**
   * Test host/include file functionality. Only datanodes
   * in the include file are allowed to connect to the namenode in a non
   * federated cluster.
   */
  @Test
  public void testHostsFile() throws IOException, InterruptedException {
    // Test for a single namenode cluster
    testHostsFile(1);
  }
  
  /**
   * Test host/include file functionality. Only datanodes
   * in the include file are allowed to connect to the namenode in a 
   * federated cluster.
   */
  @Test
  public void testHostsFileFederation() throws IOException, InterruptedException {
    // Test for 3 namenode federated cluster
    testHostsFile(3);
  }
  
  public void testHostsFile(int numNameNodes) throws IOException,
      InterruptedException {
    conf.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
    int numDatanodes = 1;
    cluster = new MiniDFSCluster.Builder(conf).numNameNodes(numNameNodes)
        .numDataNodes(numDatanodes).setupHostsFile(true).build();
    cluster.waitActive();
    
    // Now empty hosts file and ensure the datanode is disallowed
    // from talking to namenode, resulting in it's shutdown.
    ArrayList<String> list = new ArrayList<String>();
    list.add("invalidhost");
    writeConfigFile(hostsFile, list);
    
    for (int j = 0; j < numNameNodes; j++) {
      cluster.getNamesystem(j).refreshNodes(conf);
      
      DFSClient client = getDfsClient(cluster.getNameNode(j), conf);
      DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
      for (int i = 0 ; i < 5 && info.length != 0; i++) {
        LOG.info("Waiting for datanode to be marked dead");
        Thread.sleep(HEARTBEAT_INTERVAL * 1000);
        info = client.datanodeReport(DatanodeReportType.LIVE);
      }
      assertEquals("Number of live nodes should be 0", 0, info.length);
    }
  }
}
File
TestDecommission.java
Developer's decision
Version 2
Kind of conflict
Annotation
Attribute
Catch clause
Comment
For statement
Method declaration
Method invocation
Method signature
Throw statement
Try statement
Variable
Chunk
Conflicting content
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.HardLink;
<<<<<<< HEAD
import org.apache.hadoop.hdfs.protocol.Block;
=======
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
Solution content
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.HardLink;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
File
TestFileAppend.java
Developer's decision
Version 2
Kind of conflict
Import
Chunk
Conflicting content
    if (withService) {
      NameNode.setServiceAddress(config, THIS_HOST);      
    }
<<<<<<< HEAD
    config.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, THIS_HOST);
    NameNode.format(config);
=======
    config.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, NAME_NODE_HTTP_HOST + "0");
    GenericTestUtils.formatNamenode(config);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0

    String[] args = new String[] {};
    // NameNode will modify config with the ports it bound to
Solution content
    if (withService) {
      NameNode.setServiceAddress(config, THIS_HOST);      
    }
    config.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, THIS_HOST);
    GenericTestUtils.formatNamenode(config);

    String[] args = new String[] {};
    // NameNode will modify config with the ports it bound to
File
TestHDFSServerPorts.java
Developer's decision
Combination
Kind of conflict
Method invocation
Chunk
Conflicting content
  @BeforeClass
  public static void testSetUp() throws Exception {
    cluster = new MiniDFSCluster.Builder(conf).build();
<<<<<<< HEAD
    fc = FileContext.getFileContext(cluster.getConfiguration());
=======
    fc = FileContext.getFileContext(cluster.getConfiguration(0));
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    fc.delete(TEST_DIR, true);
  }
  
Solution content
  @BeforeClass
  public static void testSetUp() throws Exception {
    cluster = new MiniDFSCluster.Builder(conf).build();
    fc = FileContext.getFileContext(cluster.getConfiguration(0));
    fc.delete(TEST_DIR, true);
  }
  
File
TestListFilesInFileContext.java
Developer's decision
Version 2
Kind of conflict
Attribute
Method invocation
Chunk
Conflicting content

      // Corrupt the block
<<<<<<< HEAD
      String block = DFSTestUtil.getFirstBlock(dfs, corruptFile).getBlockName();
      assertTrue(cluster.corruptReplica(block, 0));
=======
      ExtendedBlock block = DFSTestUtil.getFirstBlock(dfs, corruptFile);
      assertTrue(TestDatanodeBlockScanner.corruptReplica(block, 0));
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0

      // read the file so that the corrupt block is reported to NN
      FSDataInputStream in = dfs.open(corruptFile); 
Solution content

      // Corrupt the block
      ExtendedBlock block = DFSTestUtil.getFirstBlock(dfs, corruptFile);
      assertTrue(TestDatanodeBlockScanner.corruptReplica(block, 0));

      // read the file so that the corrupt block is reported to NN
      FSDataInputStream in = dfs.open(corruptFile); 
File
TestMissingBlocksAlert.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
=======
    List<LocatedBlock> lb = cluster.getNameNode().getBlockLocations(
      filePath.toString(), FILE_SIZE - 1, FILE_SIZE).getLocatedBlocks();

<<<<<<< HEAD
    for (DataNode dn : cluster.getDataNodes()) {
      Replica r = DataNodeAdapter.fetchReplicaInfo(
        dn, lb.get(0).getBlock().getBlockId());

      assertTrue("Replica on DN " + dn + " shouldn't be null", r != null);
      assertEquals(
        "Should be RBW replica on " + dn + " after sequence of calls " +
        "append()/write()/hflush()",
        HdfsConstants.ReplicaState.RBW, r.getState());
    String bpid = cluster.getNamesystem().getBlockPoolId();
    for (DataNode dn : cluster.getDataNodes()) {
      Replica r = DataNodeAdapter.fetchReplicaInfo(dn, bpid, lb.get(0)
          .getBlock().getBlockId());

      assertTrue("Replica on DN " + dn + " shouldn't be null", r != null);
      assertEquals("Should be RBW replica on " + dn
          + " after sequence of calls append()/write()/hflush()",
          HdfsConstants.ReplicaState.RBW, r.getState());
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    }
    ofs.close();
  }
Solution content
    List<LocatedBlock> lb = cluster.getNameNode().getBlockLocations(
      filePath.toString(), FILE_SIZE - 1, FILE_SIZE).getLocatedBlocks();

    String bpid = cluster.getNamesystem().getBlockPoolId();
    for (DataNode dn : cluster.getDataNodes()) {
      Replica r = DataNodeAdapter.fetchReplicaInfo(dn, bpid, lb.get(0)
          .getBlock().getBlockId());

      assertTrue("Replica on DN " + dn + " shouldn't be null", r != null);
      assertEquals("Should be RBW replica on " + dn
          + " after sequence of calls append()/write()/hflush()",
          HdfsConstants.ReplicaState.RBW, r.getState());
    }
    ofs.close();
  }
File
TestPipelines.java
Developer's decision
Version 2
Kind of conflict
For statement
Method invocation
Variable
Chunk
Conflicting content
    DFSClient dfsClient = null;
    LocatedBlocks blocks = null;
    int replicaCount = 0;
<<<<<<< HEAD
    short replFactor = 1;
=======
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
Solution content
    DFSClient dfsClient = null;
    LocatedBlocks blocks = null;
    int replicaCount = 0;
    short replFactor = 1;
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
File
TestReplication.java
Developer's decision
Version 1
Kind of conflict
Variable
Chunk
Conflicting content
    DFSTestUtil.waitReplication(fs, file1, replFactor);
  
    // Corrupt the block belonging to the created file
<<<<<<< HEAD
    String block = DFSTestUtil.getFirstBlock(fs, file1).getBlockName();

    int blockFilesCorrupted = cluster.corruptBlockOnDataNodes(block);
    assertEquals("Corrupted too few blocks", replFactor, blockFilesCorrupted); 

=======
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file1);
    cluster.corruptBlockOnDataNodes(block);
  
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    // Increase replication factor, this should invoke transfer request
    // Receiving datanode fails on checksum and reports it to namenode
    replFactor = 2;
Solution content
    DFSTestUtil.waitReplication(fs, file1, replFactor);
  
    // Corrupt the block belonging to the created file
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file1);

    int blockFilesCorrupted = cluster.corruptBlockOnDataNodes(block);
    assertEquals("Corrupted too few blocks", replFactor, blockFilesCorrupted); 

    // Increase replication factor, this should invoke transfer request
    // Receiving datanode fails on checksum and reports it to namenode
    replFactor = 2;
File
TestReplication.java
Developer's decision
Combination
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
      createEmptyDirs(new String[] {datanodeStorage.toString()});
      
      // format and start NameNode and start DataNode
<<<<<<< HEAD
      NameNode.format(config); 
=======
      GenericTestUtils.formatNamenode(config);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      cluster =  new MiniDFSCluster.Builder(config)
                                   .numDataNodes(1)
                                   .startupOption(StartupOption.REGULAR)
Solution content
      createEmptyDirs(new String[] {datanodeStorage.toString()});
      
      // format and start NameNode and start DataNode
      GenericTestUtils.formatNamenode(config);
      cluster =  new MiniDFSCluster.Builder(config)
                                   .numDataNodes(1)
                                   .startupOption(StartupOption.REGULAR)
File
UpgradeUtilities.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Chunk
Conflicting content
                                   .manageDataDfsDirs(false)
                                   .manageNameDfsDirs(false)
                                   .build();
<<<<<<< HEAD
=======
        
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      NameNode namenode = cluster.getNameNode();
      namenodeStorageNamespaceID = namenode.versionRequest().getNamespaceID();
      namenodeStorageFsscTime = namenode.versionRequest().getCTime();
Solution content
                                   .manageDataDfsDirs(false)
                                   .manageNameDfsDirs(false)
                                   .build();
        
      NameNode namenode = cluster.getNameNode();
      namenodeStorageNamespaceID = namenode.versionRequest().getNamespaceID();
      namenodeStorageFsscTime = namenode.versionRequest().getCTime();
File
UpgradeUtilities.java
Developer's decision
Version 1
Kind of conflict
Blank
Chunk
Conflicting content
      File newDir = new File(parents[i], dirName);
      createEmptyDirs(new String[] {newDir.toString()});
      LocalFileSystem localFS = FileSystem.getLocal(new HdfsConfiguration());
<<<<<<< HEAD
      switch (nodeType) {
      case NAME_NODE:
        localFS.copyToLocalFile(new Path(namenodeStorage.toString(), "current"),
                                new Path(newDir.toString()),
                                false);
        break;
      case DATA_NODE:
        localFS.copyToLocalFile(new Path(datanodeStorage.toString(), "current"),
                                new Path(newDir.toString()),
                                false);
        break;
      }
   * with the content of datanode storage directory that comes from a singleton
=======
      localFS.copyToLocalFile(new Path(namenodeStorage.toString(), "current"),
                              new Path(newDir.toString()),
                              false);
      retVal[i] = newDir;
    }
    return retVal;
  }  
  
  /**
   * Simulate the dfs.data.dir of a populated DFS filesystem.
   * This method populates for each parent directory, parent/dirName
   * datanode master (that contains version and block files). If the destination
   * directory does not exist, it will be created.  If the directory already 
   * exists, it will first be deleted.
   * 
   * @param parents parent directory where {@code dirName} is created
   * @param dirName directory under which storage directory is created
   * @return the array of created directories
   */
  public static File[] createDataNodeStorageDirs(String[] parents,
      String dirName) throws Exception {
    File[] retVal = new File[parents.length];
    for (int i = 0; i < parents.length; i++) {
      File newDir = new File(parents[i], dirName);
      createEmptyDirs(new String[] {newDir.toString()});
      LocalFileSystem localFS = FileSystem.getLocal(new HdfsConfiguration());
      localFS.copyToLocalFile(new Path(datanodeStorage.toString(), "current"),
                              new Path(newDir.toString()),
                              false);
      retVal[i] = newDir;
    }
    return retVal;
  }
  
  /**
   * Simulate the dfs.data.dir of a populated DFS filesystem.
   * This method populates for each parent directory, parent/dirName
   * with the content of block pool storage directory that comes from a singleton
   * datanode master (that contains version and block files). If the destination
   * directory does not exist, it will be created.  If the directory already 
   * exists, it will first be deleted.
   * 
   * @param parents parent directory where {@code dirName} is created
   * @param dirName directory under which storage directory is created
   * @param bpid block pool id for which the storage directory is created.
   * @return the array of created directories
   */
  public static File[] createBlockPoolStorageDirs(String[] parents,
      String dirName, String bpid) throws Exception {
    File[] retVal = new File[parents.length];
    Path bpCurDir = new Path(MiniDFSCluster.getBPDir(datanodeStorage,
        bpid, Storage.STORAGE_DIR_CURRENT));
    for (int i = 0; i < parents.length; i++) {
      File newDir = new File(parents[i] + "/current/" + bpid, dirName);
      createEmptyDirs(new String[] {newDir.toString()});
      LocalFileSystem localFS = FileSystem.getLocal(new HdfsConfiguration());
      localFS.copyToLocalFile(bpCurDir,
                              new Path(newDir.toString()),
                              false);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      retVal[i] = newDir;
    }
    return retVal;
Solution content
      File newDir = new File(parents[i], dirName);
      createEmptyDirs(new String[] {newDir.toString()});
      LocalFileSystem localFS = FileSystem.getLocal(new HdfsConfiguration());
      localFS.copyToLocalFile(new Path(namenodeStorage.toString(), "current"),
                              new Path(newDir.toString()),
                              false);
      retVal[i] = newDir;
    }
    return retVal;
  }  
  
  /**
   * Simulate the dfs.data.dir of a populated DFS filesystem.
   * This method populates for each parent directory, parent/dirName
   * with the content of datanode storage directory that comes from a singleton
   * datanode master (that contains version and block files). If the destination
   * directory does not exist, it will be created.  If the directory already 
   * exists, it will first be deleted.
   * 
   * @param parents parent directory where {@code dirName} is created
   * @param dirName directory under which storage directory is created
   * @return the array of created directories
   */
  public static File[] createDataNodeStorageDirs(String[] parents,
      String dirName) throws Exception {
    File[] retVal = new File[parents.length];
    for (int i = 0; i < parents.length; i++) {
      File newDir = new File(parents[i], dirName);
      createEmptyDirs(new String[] {newDir.toString()});
      LocalFileSystem localFS = FileSystem.getLocal(new HdfsConfiguration());
      localFS.copyToLocalFile(new Path(datanodeStorage.toString(), "current"),
                              new Path(newDir.toString()),
                              false);
      retVal[i] = newDir;
    }
    return retVal;
  }
  
  /**
   * Simulate the dfs.data.dir of a populated DFS filesystem.
   * This method populates for each parent directory, parent/dirName
   * with the content of block pool storage directory that comes from a singleton
   * datanode master (that contains version and block files). If the destination
   * directory does not exist, it will be created.  If the directory already 
   * exists, it will first be deleted.
   * 
   * @param parents parent directory where {@code dirName} is created
   * @param dirName directory under which storage directory is created
   * @param bpid block pool id for which the storage directory is created.
   * @return the array of created directories
   */
  public static File[] createBlockPoolStorageDirs(String[] parents,
      String dirName, String bpid) throws Exception {
    File[] retVal = new File[parents.length];
    Path bpCurDir = new Path(MiniDFSCluster.getBPDir(datanodeStorage,
        bpid, Storage.STORAGE_DIR_CURRENT));
    for (int i = 0; i < parents.length; i++) {
      File newDir = new File(parents[i] + "/current/" + bpid, dirName);
      createEmptyDirs(new String[] {newDir.toString()});
      LocalFileSystem localFS = FileSystem.getLocal(new HdfsConfiguration());
      localFS.copyToLocalFile(bpCurDir,
                              new Path(newDir.toString()),
                              false);
      retVal[i] = newDir;
    }
    return retVal;
File
UpgradeUtilities.java
Developer's decision
Version 2
Kind of conflict
Array access
Comment
For statement
Method declaration
Method invocation
Method signature
Return statement
Switch statement
Variable
Chunk
Conflicting content
   *
   * @return the created version file
   */
<<<<<<< HEAD
  public static File[] createVersionFile(Configuration conf, NodeType nodeType, File[] parent,
                                         StorageInfo version) throws IOException 
  {
=======
  public static File[] createNameNodeVersionFile(Configuration conf,
      File[] parent, StorageInfo version, String bpid) throws IOException {
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    Storage storage = null;
    File[] versionFiles = new File[parent.length];
    for (int i = 0; i < parent.length; i++) {
Solution content
   *
   * @return the created version file
   */
  public static File[] createNameNodeVersionFile(Configuration conf,
      File[] parent, StorageInfo version, String bpid) throws IOException {
    Storage storage = null;
    File[] versionFiles = new File[parent.length];
    for (int i = 0; i < parent.length; i++) {
File
UpgradeUtilities.java
Developer's decision
Version 2
Kind of conflict
Method signature
Chunk
Conflicting content
    for (int i = 0; i < parent.length; i++) {
      File versionFile = new File(parent[i], "VERSION");
      FileUtil.fullyDelete(versionFile);
<<<<<<< HEAD
      switch (nodeType) {
      case NAME_NODE:
        storage = new NNStorage(conf);
        storage.setStorageInfo(version);
        break;
      case DATA_NODE:
        storage = new DataStorage(version, "doNotCare");
        break;
      }
=======
      storage = new NNStorage(conf);
      storage.setStorageInfo(version);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      StorageDirectory sd = storage.new StorageDirectory(parent[i].getParentFile());
      sd.write(versionFile);
      versionFiles[i] = versionFile;
Solution content
    for (int i = 0; i < parent.length; i++) {
      File versionFile = new File(parent[i], "VERSION");
      FileUtil.fullyDelete(versionFile);
      storage = new NNStorage(conf);
      storage.setStorageInfo(version);
      StorageDirectory sd = storage.new StorageDirectory(parent[i].getParentFile());
      sd.write(versionFile);
      versionFiles[i] = versionFile;
File
UpgradeUtilities.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Switch statement
Variable
Chunk
Conflicting content
import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
<<<<<<< HEAD
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.mortbay.log.Log;

import junit.framework.TestCase;
=======
import org.apache.hadoop.hdfs.server.namenode.NameNode;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
/**
 * This class tests if a balancer schedules tasks correctly.
 */
Solution content
import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

/**
 * This class tests if a balancer schedules tasks correctly.
 */
File
TestBalancer.java
Developer's decision
Version 2
Kind of conflict
Import
Chunk
Conflicting content
  /* fill up a cluster with numNodes datanodes 
   * whose used space to be size
   */
<<<<<<< HEAD
  private Block[] generateBlocks(Configuration conf, long size, short numNodes) throws IOException {
=======
  private ExtendedBlock[] generateBlocks(Configuration conf, long size,
      short numNodes) throws IOException {
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numNodes).build();
    try {
      cluster.waitActive();
Solution content
  /* fill up a cluster with numNodes datanodes 
   * whose used space to be size
   */
  private ExtendedBlock[] generateBlocks(Configuration conf, long size,
      short numNodes) throws IOException {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numNodes).build();
    try {
      cluster.waitActive();
File
TestBalancer.java
Developer's decision
Version 2
Kind of conflict
Method signature
Chunk
Conflicting content
      long newCapacity, String newRack) throws Exception {
    assertEquals(capacities.length, racks.length);
    int numOfDatanodes = capacities.length;
<<<<<<< HEAD
=======
    assertEquals(numOfDatanodes, racks.length);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    cluster = new MiniDFSCluster.Builder(conf)
                                .numDataNodes(capacities.length)
                                .racks(racks)
Solution content
      long newCapacity, String newRack) throws Exception {
    assertEquals(capacities.length, racks.length);
    int numOfDatanodes = capacities.length;
    cluster = new MiniDFSCluster.Builder(conf)
                                .numDataNodes(capacities.length)
                                .racks(racks)
File
TestBalancer.java
Developer's decision
Version 1
Kind of conflict
Method invocation
Chunk
Conflicting content
    }
  }

<<<<<<< HEAD
  /* Start balancer and check if the cluster is balanced after the run */
  private void runBalancer(Configuration conf, long totalUsedSpace, long totalCapacity )
  throws Exception {
    waitForHeartBeat(totalUsedSpace, totalCapacity);

    // start rebalancing
    balancer = new Balancer(conf);
    balancer.run(new String[0]);

    waitForHeartBeat(totalUsedSpace, totalCapacity);
    Log.info("Rebalancing.");
    waitForBalancer(totalUsedSpace, totalCapacity);
  }

  private void runBalancerDefaultConstructor(Configuration conf,
=======
  private void runBalancer(Configuration conf,
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      long totalUsedSpace, long totalCapacity) throws Exception {
    waitForHeartBeat(totalUsedSpace, totalCapacity);
Solution content
    }
  }

  private void runBalancer(Configuration conf,
      long totalUsedSpace, long totalCapacity) throws Exception {
    waitForHeartBeat(totalUsedSpace, totalCapacity);
File
TestBalancer.java
Developer's decision
Version 2
Kind of conflict
Comment
Method declaration
Method signature
Chunk
Conflicting content
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
<<<<<<< HEAD

import static org.apache.hadoop.hdfs.protocol.FSConstants.LAYOUT_VERSION;

=======
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
Solution content
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
File
TestDistributedUpgrade.java
Developer's decision
Version 2
Kind of conflict
Import
Chunk
Conflicting content
      // nn dirs set to name1 and name2
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
                                              .format(false)
<<<<<<< HEAD
                                              .startupOption(operation)
                                              .build(); // should fail
      throw new AssertionError("Jakob was here. NameNode should have failed to start");
=======
                                              .clusterId(clusterId)
                                              .startupOption(operation)
                                              .build(); // should fail
      throw new AssertionError("NameNode should have failed to start");
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    } catch (Exception expected) {
      expected = null;
      // expected
Solution content
      // nn dirs set to name1 and name2
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
                                              .format(false)
                                              .clusterId(clusterId)
                                              .startupOption(operation)
                                              .build(); // should fail
      throw new AssertionError("NameNode should have failed to start");
    } catch (Exception expected) {
      expected = null;
      // expected
File
TestDistributedUpgrade.java
Developer's decision
Version 2
Kind of conflict
Attribute
Comment
Throw statement
Variable
Chunk
Conflicting content
    // .startupOption(StartupOption.UPGRADE).build();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
                                              .format(false)
<<<<<<< HEAD
=======
                                              .clusterId(clusterId)
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
                                              .startupOption(StartupOption.UPGRADE)
                                              .build();
    cluster.shutdown();
Solution content
    // .startupOption(StartupOption.UPGRADE).build();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
                                              .format(false)
                                              .clusterId(clusterId)
                                              .startupOption(StartupOption.UPGRADE)
                                              .build();
    cluster.shutdown();
File
TestDistributedUpgrade.java
Developer's decision
Version 2
Kind of conflict
Attribute
Chunk
Conflicting content
    cluster = new MiniDFSCluster.Builder(conf)
                                .numDataNodes(numDNs)
                                .format(false)
<<<<<<< HEAD
=======
                                .clusterId(clusterId)
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
                                .startupOption(StartupOption.UPGRADE)
                                .build();
    DFSAdmin dfsAdmin = new DFSAdmin();
Solution content
    cluster = new MiniDFSCluster.Builder(conf)
                                .numDataNodes(numDNs)
                                .format(false)
                                .clusterId(clusterId)
                                .startupOption(StartupOption.UPGRADE)
                                .build();
    DFSAdmin dfsAdmin = new DFSAdmin();
File
TestDistributedUpgrade.java
Developer's decision
Version 2
Kind of conflict
Attribute
Chunk
Conflicting content
    log("NameCluster regular startup after the upgrade", numDirs);
    cluster = new MiniDFSCluster.Builder(conf)
                                .numDataNodes(numDNs)
<<<<<<< HEAD
=======
                                .clusterId(clusterId)
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
                                .format(false)
                                .startupOption(StartupOption.REGULAR)
                                .build();
Solution content
    log("NameCluster regular startup after the upgrade", numDirs);
    cluster = new MiniDFSCluster.Builder(conf)
                                .numDataNodes(numDNs)
                                .clusterId(clusterId)
                                .format(false)
                                .startupOption(StartupOption.REGULAR)
                                .build();
File
TestDistributedUpgrade.java
Developer's decision
Version 2
Kind of conflict
Attribute
Chunk
Conflicting content
    binfo.finalizeBlock(b.getBlockPoolId(), b.getNumBytes());
  }

<<<<<<< HEAD
  @Override
  public synchronized void unfinalizeBlock(Block b) throws IOException {
    if (isValidRbw(b)) {
      blockMap.remove(b);
=======
  @Override // FSDatasetInterface
  public synchronized void unfinalizeBlock(ExtendedBlock b) throws IOException {
    if (isValidRbw(b)) {
      blockMap.remove(b.getLocalBlock());
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    }
  }
Solution content
    binfo.finalizeBlock(b.getBlockPoolId(), b.getNumBytes());
  }

  @Override // FSDatasetInterface
  public synchronized void unfinalizeBlock(ExtendedBlock b) throws IOException {
    if (isValidRbw(b)) {
      blockMap.remove(b.getLocalBlock());
    }
  }
File
SimulatedFSDataset.java
Developer's decision
Version 2
Kind of conflict
Annotation
Comment
If statement
Method invocation
Method signature
Chunk
Conflicting content
  }

  @Override 
<<<<<<< HEAD
  public synchronized String getReplicaString(long blockId) {
    final Replica r = blockMap.get(new Block(blockId));
    return r == null? "null": r.toString();
  }

  @Override
  public Block getStoredBlock(long blkid) throws IOException {
    Block b = new Block(blkid);
    BInfo binfo = blockMap.get(b);
    if (binfo == null) {
      return null;
=======
  public synchronized String getReplicaString(String bpid, long blockId) {
    Replica r = null;
    final Map map = blockMap.get(bpid);
    if (map != null) {
      r = map.get(new Block(blockId));
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    }
    return r == null? "null": r.toString();
  }
Solution content
  }

  @Override 
  public synchronized String getReplicaString(String bpid, long blockId) {
    Replica r = null;
    final Map map = blockMap.get(bpid);
    if (map != null) {
      r = map.get(new Block(blockId));
    }
    return r == null? "null": r.toString();
  }
File
SimulatedFSDataset.java
Developer's decision
Version 2
Kind of conflict
Annotation
If statement
Method declaration
Method invocation
Method signature
Return statement
Variable
Chunk
Conflicting content
    return binfo.isFinalized();
  }

<<<<<<< HEAD
  @Override
  public synchronized boolean isValidRbw(Block b) {
    BInfo binfo = blockMap.get(b);
=======
  /* check if a block is created but not finalized */
  @Override
  public synchronized boolean isValidRbw(ExtendedBlock b) {
    final Map map = blockMap.get(b.getBlockPoolId());
    if (map == null) {
      return false;
    }
    BInfo binfo = map.get(b.getLocalBlock());
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    if (binfo == null) {
      return false;
    }
Solution content
    return binfo.isFinalized();
  }

  /* check if a block is created but not finalized */
  @Override
  public synchronized boolean isValidRbw(ExtendedBlock b) {
    final Map map = blockMap.get(b.getBlockPoolId());
    if (map == null) {
      return false;
    }
    BInfo binfo = map.get(b.getLocalBlock());
    if (binfo == null) {
      return false;
    }
File
SimulatedFSDataset.java
Developer's decision
Version 2
Kind of conflict
Annotation
Comment
If statement
Method invocation
Method signature
Variable
Chunk
Conflicting content
    return block.getNumBytes();
  }

<<<<<<< HEAD
  @Override
  public ReplicaInPipelineInterface convertTemporaryToRbw(Block temporary)
      throws IOException {
    final BInfo r = blockMap.get(temporary);
=======
  @Override // FSDatasetInterface
  public void addBlockPool(String bpid, Configuration conf) {
    Map map = new HashMap();
    blockMap.put(bpid, map);
    storage.addBlockPool(bpid);
  }
  
  @Override // FSDatasetInterface
  public void shutdownBlockPool(String bpid) {
    blockMap.remove(bpid);
    storage.removeBlockPool(bpid);
  }
  
  @Override // FSDatasetInterface
  public void deleteBlockPool(String bpid, boolean force) {
     return;
  }

  @Override
  public ReplicaInPipelineInterface convertTemporaryToRbw(ExtendedBlock temporary)
      throws IOException {
    final Map map = blockMap.get(temporary.getBlockPoolId());
    if (map == null) {
      throw new IOException("Block pool not found, temporary=" + temporary);
    }
    final BInfo r = map.get(temporary.getLocalBlock());
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    if (r == null) {
      throw new IOException("Block not found, temporary=" + temporary);
    } else if (r.isFinalized()) {
Solution content
    return block.getNumBytes();
  }

  @Override // FSDatasetInterface
  public void addBlockPool(String bpid, Configuration conf) {
    Map map = new HashMap();
    blockMap.put(bpid, map);
    storage.addBlockPool(bpid);
  }
  
  @Override // FSDatasetInterface
  public void shutdownBlockPool(String bpid) {
    blockMap.remove(bpid);
    storage.removeBlockPool(bpid);
  }
  
  @Override // FSDatasetInterface
  public void deleteBlockPool(String bpid, boolean force) {
     return;
  }

  @Override
  public ReplicaInPipelineInterface convertTemporaryToRbw(ExtendedBlock temporary)
      throws IOException {
    final Map map = blockMap.get(temporary.getBlockPoolId());
    if (map == null) {
      throw new IOException("Block pool not found, temporary=" + temporary);
    }
    final BInfo r = map.get(temporary.getLocalBlock());
    if (r == null) {
      throw new IOException("Block not found, temporary=" + temporary);
    } else if (r.isFinalized()) {
File
SimulatedFSDataset.java
Developer's decision
Version 2
Kind of conflict
Annotation
Comment
If statement
Method declaration
Method invocation
Method signature
Variable
Chunk
Conflicting content
    
    for (LocatedBlock lb : locatedBlocks) {
      DatanodeInfo dinfo = lb.getLocations()[1];
<<<<<<< HEAD
      Block b = lb.getBlock();
=======
      ExtendedBlock b = lb.getBlock();
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      try {
        accessBlock(dinfo, lb);
      } catch (IOException e) {
Solution content
    
    for (LocatedBlock lb : locatedBlocks) {
      DatanodeInfo dinfo = lb.getLocations()[1];
      ExtendedBlock b = lb.getBlock();
      try {
        accessBlock(dinfo, lb);
      } catch (IOException e) {
File
TestDataNodeVolumeFailure.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
  private FileSystem fs;
  private MiniDFSCluster cluster;
  private Configuration conf;
<<<<<<< HEAD
  private String dataDir;
=======
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0

  @Before
  public void setUp() throws Exception {
Solution content
  private FileSystem fs;
  private MiniDFSCluster cluster;
  private Configuration conf;

  @Before
  public void setUp() throws Exception {
File
TestDiskError.java
Developer's decision
Version 2
Kind of conflict
Attribute
Chunk
Conflicting content
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
<<<<<<< HEAD
    dataDir = cluster.getDataDirectory();
=======
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
  }

  @After
Solution content
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
  }

  @After
File
TestDiskError.java
Developer's decision
Version 2
Kind of conflict
Attribute
Method invocation
Chunk
Conflicting content
    cluster.startDataNodes(conf, 2, true, null, null);
    cluster.waitActive();
    final int dnIndex = 0;
<<<<<<< HEAD
    File dir1 = new File(new File(dataDir, "data"+(2*dnIndex+1)), "current/rbw");
    File dir2 = new File(new File(dataDir, "data"+(2*dnIndex+2)), "current/rbw");
=======
    String bpid = cluster.getNamesystem().getBlockPoolId();
    File storageDir = MiniDFSCluster.getStorageDir(dnIndex, 0);
    File dir1 = MiniDFSCluster.getRbwDir(storageDir, bpid);
    storageDir = MiniDFSCluster.getStorageDir(dnIndex, 1);
    File dir2 = MiniDFSCluster.getRbwDir(storageDir, bpid);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    try {
      // make the data directory of the first datanode to be readonly
      assertTrue("Couldn't chmod local vol", dir1.setReadOnly());
Solution content
    cluster.startDataNodes(conf, 2, true, null, null);
    cluster.waitActive();
    final int dnIndex = 0;
    String bpid = cluster.getNamesystem().getBlockPoolId();
    File storageDir = MiniDFSCluster.getStorageDir(dnIndex, 0);
    File dir1 = MiniDFSCluster.getRbwDir(storageDir, bpid);
    storageDir = MiniDFSCluster.getStorageDir(dnIndex, 1);
    File dir2 = MiniDFSCluster.getRbwDir(storageDir, bpid);
    try {
      // make the data directory of the first datanode to be readonly
      assertTrue("Couldn't chmod local vol", dir1.setReadOnly());
File
TestDiskError.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
    out.close();

    // the temporary block & meta files should be deleted
<<<<<<< HEAD
    String dataDir = cluster.getDataDirectory();
    File dir1 = new File(new File(dataDir, "data"+(2*sndNode+1)), "current/rbw");
    File dir2 = new File(new File(dataDir, "data"+(2*sndNode+2)), "current/rbw");
=======
    String bpid = cluster.getNamesystem().getBlockPoolId();
    File storageDir = MiniDFSCluster.getStorageDir(sndNode, 0);
    File dir1 = MiniDFSCluster.getRbwDir(storageDir, bpid);
    storageDir = MiniDFSCluster.getStorageDir(sndNode, 1);
    File dir2 = MiniDFSCluster.getRbwDir(storageDir, bpid);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    while (dir1.listFiles().length != 0 || dir2.listFiles().length != 0) {
      Thread.sleep(100);
    }
Solution content
    out.close();

    // the temporary block & meta files should be deleted
    String bpid = cluster.getNamesystem().getBlockPoolId();
    File storageDir = MiniDFSCluster.getStorageDir(sndNode, 0);
    File dir1 = MiniDFSCluster.getRbwDir(storageDir, bpid);
    storageDir = MiniDFSCluster.getStorageDir(sndNode, 1);
    File dir2 = MiniDFSCluster.getRbwDir(storageDir, bpid);
    while (dir1.listFiles().length != 0 || dir2.listFiles().length != 0) {
      Thread.sleep(100);
    }
File
TestDiskError.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
  @Test
  public void testClose() throws Exception {
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
<<<<<<< HEAD
=======
    
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    try {
      cluster.waitActive();
      DataNode dn = cluster.getDataNodes().get(0);
Solution content
  @Test
  public void testClose() throws Exception {
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
    
    try {
      cluster.waitActive();
      DataNode dn = cluster.getDataNodes().get(0);
File
TestWriteToReplica.java
Developer's decision
Version 1
Kind of conflict
Blank
Chunk
Conflicting content
    try {
      // start name-node and backup node 1
      cluster = new MiniDFSCluster.Builder(conf1).numDataNodes(0).build();
<<<<<<< HEAD
      conf1.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY,
                THIS_HOST + ":7771");
      conf1.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
                THIS_HOST + ":7775");
=======
      conf1.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, "0.0.0.0:7771");
      conf1.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, "0.0.0.0:7775");
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      backup1 = startBackupNode(conf1, StartupOption.BACKUP, 1);
      // try to start backup node 2
      conf2 = new HdfsConfiguration(conf1);
Solution content
    try {
      // start name-node and backup node 1
      cluster = new MiniDFSCluster.Builder(conf1).numDataNodes(0).build();
      conf1.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, "0.0.0.0:7771");
      conf1.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, "0.0.0.0:7775");
      backup1 = startBackupNode(conf1, StartupOption.BACKUP, 1);
      // try to start backup node 2
      conf2 = new HdfsConfiguration(conf1);
File
TestBackupNode.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Chunk
Conflicting content
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
<<<<<<< HEAD
import org.apache.hadoop.hdfs.server.datanode.DataNode;
=======
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.log4j.Level;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
Solution content
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
File
TestBlocksWithNotEnoughRacks.java
Developer's decision
Manual
Kind of conflict
Import
Chunk
Conflicting content
   */
  private Configuration getConf() {
    Configuration conf = new HdfsConfiguration();
<<<<<<< HEAD

    // Lower the heart beat interval so the NN quickly learns of dead
    // or decommissioned DNs and the NN issues replication and invalidation
    // commands quickly (as replies to heartbeats)
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);

    // Have the NN ReplicationMonitor compute the replication and
    // invalidation commands to send DNs every second.
=======
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);

    // Have the NN check for pending replications every second so it
Solution content
   */
  private Configuration getConf() {
    Configuration conf = new HdfsConfiguration();

    // Lower the heart beat interval so the NN quickly learns of dead
    // or decommissioned DNs and the NN issues replication and invalidation
    // commands quickly (as replies to heartbeats)
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);

    // Have the NN ReplicationMonitor compute the replication and
    // invalidation commands to send DNs every second.
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);

    // Have the NN check for pending replications every second so it
File
TestBlocksWithNotEnoughRacks.java
Developer's decision
Version 1
Kind of conflict
Comment
Method invocation
Chunk
Conflicting content
  public void testSufficientlyReplBlocksUsesNewRack() throws Exception {
    Configuration conf = getConf();
    final short REPLICATION_FACTOR = 3;
<<<<<<< HEAD
    final Path filePath = new Path("/testFile");
    // All datanodes are on the same rack
    String racks[] = {"/rack1", "/rack1", "/rack1"};
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();

    try {
      // Create a file with one block with a replication factor of 3
      final FileSystem fs = cluster.getFileSystem();
      DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
      Block b = DFSTestUtil.getFirstBlock(fs, filePath);
      DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 1);

      // Add a new datanode on a different rack
      String newRacks[] = {"/rack2"};
      cluster.startDataNodes(conf, 1, true, null, newRacks);
      cluster.waitActive();

      DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
    } finally {
      cluster.shutdown();
    }
  }

  /*
   * Like the previous test but the block starts with a single replica,
   * and therefore unlike the previous test the block does not start
   * off needing replicas.
   */
  @Test
  public void testSufficientlySingleReplBlockUsesNewRack() throws Exception {
    Configuration conf = getConf();
    short REPLICATION_FACTOR = 1;
    final Path filePath = new Path("/testFile");

    String racks[] = {"/rack1", "/rack1", "/rack1", "/rack2"};
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();

    try {
      // Create a file with one block with a replication factor of 1
      final FileSystem fs = cluster.getFileSystem();
      DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
      Block b = DFSTestUtil.getFirstBlock(fs, filePath);
      DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 0);

      REPLICATION_FACTOR = 2;
      ns.setReplication("/testFile", REPLICATION_FACTOR);
      DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
    } finally {
      cluster.shutdown();
    }
  }

  /*
   * Creates a block with all datanodes on the same rack. Add additional
   * datanodes on a different rack and increase the replication factor, 
   * making sure there are enough replicas across racks. If the previous
   * test passes this one should too, however this test may pass when
   * the previous one fails because the replication code is explicitly
   * triggered by setting the replication factor.
   */
  @Test
  public void testUnderReplicatedUsesNewRacks() throws Exception {
    Configuration conf = getConf();
    short REPLICATION_FACTOR = 3;
    final Path filePath = new Path("/testFile");
    // All datanodes are on the same rack
    String racks[] = {"/rack1", "/rack1", "/rack1", "/rack1", "/rack1"};
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();

=======
    final String FILE_NAME = "/testFile";
    final Path FILE_PATH = new Path(FILE_NAME);
    //All datanodes are on the same rack
    String racks[] = {"/rack1","/rack1","/rack1",} ;
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION_FACTOR).racks(racks).build();
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    try {
      // Create a file with one block
      final FileSystem fs = cluster.getFileSystem();
Solution content
  public void testSufficientlyReplBlocksUsesNewRack() throws Exception {
    Configuration conf = getConf();
    final short REPLICATION_FACTOR = 3;
    final Path filePath = new Path("/testFile");
    // All datanodes are on the same rack
    String racks[] = {"/rack1", "/rack1", "/rack1"};
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();

    try {
      // Create a file with one block with a replication factor of 3
      final FileSystem fs = cluster.getFileSystem();
      DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
      ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
      DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 1);

      // Add a new datanode on a different rack
      String newRacks[] = {"/rack2"};
      cluster.startDataNodes(conf, 1, true, null, newRacks);
      cluster.waitActive();

      DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
    } finally {
      cluster.shutdown();
    }
  }

  /*
   * Like the previous test but the block starts with a single replica,
   * and therefore unlike the previous test the block does not start
   * off needing replicas.
   */
  @Test
  public void testSufficientlySingleReplBlockUsesNewRack() throws Exception {
    Configuration conf = getConf();
    short REPLICATION_FACTOR = 1;
    final Path filePath = new Path("/testFile");

    String racks[] = {"/rack1", "/rack1", "/rack1", "/rack2"};
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();

    try {
      // Create a file with one block with a replication factor of 1
      final FileSystem fs = cluster.getFileSystem();
      DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
      ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
      DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 0);

      REPLICATION_FACTOR = 2;
      ns.setReplication("/testFile", REPLICATION_FACTOR);
      DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
    } finally {
      cluster.shutdown();
    }
  }

  /*
   * Creates a block with all datanodes on the same rack. Add additional
   * datanodes on a different rack and increase the replication factor, 
   * making sure there are enough replicas across racks. If the previous
   * test passes this one should too, however this test may pass when
   * the previous one fails because the replication code is explicitly
   * triggered by setting the replication factor.
   */
  @Test
  public void testUnderReplicatedUsesNewRacks() throws Exception {
    Configuration conf = getConf();
    short REPLICATION_FACTOR = 3;
    final Path filePath = new Path("/testFile");
    // All datanodes are on the same rack
    String racks[] = {"/rack1", "/rack1", "/rack1", "/rack1", "/rack1"};
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();

    try {
      // Create a file with one block
      final FileSystem fs = cluster.getFileSystem();
File
TestBlocksWithNotEnoughRacks.java
Developer's decision
Manual
Kind of conflict
Annotation
Array initializer
Comment
Method declaration
Method invocation
Method signature
Try statement
Variable
Chunk
Conflicting content
    try {
      Block b = DFSTestUtil.getFirstBlock(fs, filePath);
      DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 1);
      
<<<<<<< HEAD
      // Add new datanodes on a different rack and increase the
      // replication factor so the block is underreplicated and make
      // sure at least one of the hosts on the new rack is used. 
      String newRacks[] = {"/rack2", "/rack2"};
      cluster.startDataNodes(conf, 2, true, null, newRacks);
      REPLICATION_FACTOR = 5;
      ns.setReplication("/testFile", REPLICATION_FACTOR);

      DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
    } finally {
      cluster.shutdown();
    }
  }

  /*
   * Test that a block that is re-replicated because one of its replicas
   * is found to be corrupt and is re-replicated across racks.
   */
  @Test
  public void testCorruptBlockRereplicatedAcrossRacks() throws Exception {
    Configuration conf = getConf();
    short REPLICATION_FACTOR = 2;
    int fileLen = 512;
    final Path filePath = new Path("/testFile");
    // Datanodes are spread across two racks
    String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();

      // Create a file with one block with a replication factor of 2
      final FileSystem fs = cluster.getFileSystem();
=======
      ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
      final FSNamesystem namesystem = cluster.getNamesystem();
      int numRacks = namesystem.blockManager.getNumberOfRacks(b.getLocalBlock());
      NumberReplicas number = namesystem.blockManager.countNodes(b.getLocalBlock());
      int curReplicas = number.liveReplicas();
      int neededReplicationSize = 
                           namesystem.blockManager.neededReplications.size();
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      
      DFSTestUtil.createFile(fs, filePath, fileLen, REPLICATION_FACTOR, 1L);
      final String fileContent = DFSTestUtil.readFile(fs, filePath);
Solution content
      DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 1);
      
      // Add new datanodes on a different rack and increase the
      // replication factor so the block is underreplicated and make
      // sure at least one of the hosts on the new rack is used. 
      String newRacks[] = {"/rack2", "/rack2"};
      cluster.startDataNodes(conf, 2, true, null, newRacks);
      REPLICATION_FACTOR = 5;
      ns.setReplication("/testFile", REPLICATION_FACTOR);

      DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
    } finally {
      cluster.shutdown();
    }
  }

  /*
   * Test that a block that is re-replicated because one of its replicas
   * is found to be corrupt and is re-replicated across racks.
   */
  @Test
  public void testCorruptBlockRereplicatedAcrossRacks() throws Exception {
    Configuration conf = getConf();
    short REPLICATION_FACTOR = 2;
    int fileLen = 512;
    final Path filePath = new Path("/testFile");
    // Datanodes are spread across two racks
    String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();

    try {
      // Create a file with one block with a replication factor of 2
      final FileSystem fs = cluster.getFileSystem();
      
      DFSTestUtil.createFile(fs, filePath, fileLen, REPLICATION_FACTOR, 1L);
      final String fileContent = DFSTestUtil.readFile(fs, filePath);
File
TestBlocksWithNotEnoughRacks.java
Developer's decision
Version 1
Kind of conflict
Annotation
Array initializer
Comment
Method invocation
Method signature
Try statement
Variable
Chunk
Conflicting content
      // Wait for the namenode to notice the corrupt replica
      DFSTestUtil.waitCorruptReplicas(fs, ns, filePath, b, 1);

<<<<<<< HEAD
      // The rack policy is still respected
      DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

      // Ensure all replicas are valid (the corrupt replica may not
      // have been cleaned up yet).
      for (int i = 0; i < racks.length; i++) {
        String blockContent = cluster.readBlockOnDataNode(i, b.getBlockName());
        if (blockContent != null && i != dnToCorrupt) {
          assertEquals("Corrupt replica", fileContent, blockContent);
        }
=======
      while ( (numRacks < 2) || (curReplicas != REPLICATION_FACTOR) ||
              (neededReplicationSize > 0) ) {
        LOG.info("Waiting for replication");
        Thread.sleep(600);
        numRacks = namesystem.blockManager.getNumberOfRacks(b.getLocalBlock());
        number = namesystem.blockManager.countNodes(b.getLocalBlock());
        curReplicas = number.liveReplicas();
        neededReplicationSize = 
                           namesystem.blockManager.neededReplications.size();
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      }
    } finally {
      cluster.shutdown();
Solution content
      // Wait for the namenode to notice the corrupt replica
      DFSTestUtil.waitCorruptReplicas(fs, ns, filePath, b, 1);

      // The rack policy is still respected
      DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
      // Ensure all replicas are valid (the corrupt replica may not
      // have been cleaned up yet).
      for (int i = 0; i < racks.length; i++) {
        String blockContent = cluster.readBlockOnDataNode(i, b);
        if (blockContent != null && i != dnToCorrupt) {
          assertEquals("Corrupt replica", fileContent, blockContent);
        }
      }
    } finally {
      cluster.shutdown();
File
TestBlocksWithNotEnoughRacks.java
Developer's decision
Manual
Kind of conflict
Comment
For statement
If statement
Method invocation
Variable
While statement
Chunk
Conflicting content
    }
  }

<<<<<<< HEAD
  /*
   * Test that when a block is replicated because a replica is lost due
   * to host failure the the rack policy is preserved.
   */
  @Test
  public void testReplDueToNodeFailRespectsRackPolicy() throws Exception {
    Configuration conf = getConf();
    short REPLICATION_FACTOR = 3;
    final Path filePath = new Path("/testFile");
    // Last datanode is on a different rack
    String racks[] = {"/rack1", "/rack1", "/rack1", "/rack2", "/rack2"};
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();

=======
  public void testUnderReplicatedNotEnoughRacks() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);
    conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY, "xyz");
    short REPLICATION_FACTOR = 3;
    final String FILE_NAME = "/testFile";
    final Path FILE_PATH = new Path(FILE_NAME);
    //All datanodes are on the same rack
    String racks[] = {"/rack1","/rack1","/rack1",} ;
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION_FACTOR).racks(racks).build();
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    try {
      // Create a file with one block with a replication factor of 2
      final FileSystem fs = cluster.getFileSystem();
Solution content
    }
  }

  /*
   * Test that when a block is replicated because a replica is lost due
   * to host failure the the rack policy is preserved.
   */
  @Test
  public void testReplDueToNodeFailRespectsRackPolicy() throws Exception {
    Configuration conf = getConf();
    short REPLICATION_FACTOR = 3;
    final Path filePath = new Path("/testFile");
    // Last datanode is on a different rack
    String racks[] = {"/rack1", "/rack1", "/rack1", "/rack2", "/rack2"};
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();

    try {
      // Create a file with one block with a replication factor of 2
      final FileSystem fs = cluster.getFileSystem();
File
TestBlocksWithNotEnoughRacks.java
Developer's decision
Version 1
Kind of conflict
Annotation
Array initializer
Comment
Method invocation
Method signature
Variable
Chunk
Conflicting content
      // it should have been replicated within the same rack.
      DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
      
<<<<<<< HEAD
      // Fail the last datanode again, it's also on rack2 so there is
      // only 1 rack for all the replicas
      datanodes = cluster.getDataNodes();
      idx = datanodes.size() - 1;
      dataNode = datanodes.get(idx);
      cluster.stopDataNode(idx);
      ns.removeDatanode(dataNode.dnRegistration);

      // Make sure we have enough live replicas even though we are
      // short one rack and therefore need one replica
      DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 1);
    } finally {
      cluster.shutdown();
    }
  }
  
  /*
   * Test that when the excess replicas of a block are reduced due to
   * a node re-joining the cluster the rack policy is not violated.
   */
  @Test
  public void testReduceReplFactorDueToRejoinRespectsRackPolicy() 
      throws Exception {
    Configuration conf = getConf();
    short REPLICATION_FACTOR = 2;
    final Path filePath = new Path("/testFile");
    // Last datanode is on a different rack
    String racks[] = {"/rack1", "/rack1", "/rack2"};
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();

    try {
      // Create a file with one block
      final FileSystem fs = cluster.getFileSystem();
      DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
      Block b = DFSTestUtil.getFirstBlock(fs, filePath);
      DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

      // Make the last (cross rack) datanode look like it failed
      // to heartbeat by stopping it and calling removeDatanode.
      ArrayList datanodes = cluster.getDataNodes();
      assertEquals(3, datanodes.size());
      DataNode dataNode = datanodes.get(2);
      cluster.stopDataNode(2);
      ns.removeDatanode(dataNode.dnRegistration);

      // The block gets re-replicated to another datanode so it has a 
      // sufficient # replicas, but not across racks, so there should
      // be 1 rack, and 1 needed replica (even though there are 2 hosts 
      // available and only 2 replicas required).
      DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 1);

      // Start the "failed" datanode, which has a replica so the block is
      // now over-replicated and therefore a replica should be removed but
      // not on the restarted datanode as that would violate the rack policy.
      String rack2[] = {"/rack2"};
      cluster.startDataNodes(conf, 1, true, null, rack2);
      cluster.waitActive();      
      
      // The block now has sufficient # replicas, across racks
      DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
    } finally {
      cluster.shutdown();
    }
  }

  /*
   * Test that rack policy is still respected when blocks are replicated
   * due to node decommissioning.
   */
  @Test
  public void testNodeDecomissionRespectsRackPolicy() throws Exception {
    Configuration conf = getConf();
    short REPLICATION_FACTOR = 2;
    final Path filePath = new Path("/testFile");

    // Configure an excludes file
    FileSystem localFileSys = FileSystem.getLocal(conf);
    Path workingDir = localFileSys.getWorkingDirectory();
    Path dir = new Path(workingDir, "build/test/data/temp/decommission");
    Path excludeFile = new Path(dir, "exclude");
    assertTrue(localFileSys.mkdirs(dir));
    DFSTestUtil.writeFile(localFileSys, excludeFile, "");
    conf.set("dfs.hosts.exclude", excludeFile.toUri().getPath());

    // Two blocks and four racks
    String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();

    try {
      // Create a file with one block
      final FileSystem fs = cluster.getFileSystem();
      DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
      Block b = DFSTestUtil.getFirstBlock(fs, filePath);
      DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

      // Decommission one of the hosts with the block, this should cause 
      // the block to get replicated to another host on the same rack,
      // otherwise the rack policy is violated.
      BlockLocation locs[] = fs.getFileBlockLocations(
          fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
      String name = locs[0].getNames()[0];
      DFSTestUtil.writeFile(localFileSys, excludeFile, name);
      ns.refreshNodes(conf);
      DFSTestUtil.waitForDecommission(fs, name);
=======
      ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
      final FSNamesystem namesystem = cluster.getNamesystem();
      int numRacks = namesystem.blockManager.getNumberOfRacks(b.getLocalBlock());
      NumberReplicas number = namesystem.blockManager.countNodes(b.getLocalBlock());
      int curReplicas = number.liveReplicas();
      int neededReplicationSize = 
                           namesystem.blockManager.neededReplications.size();
      
      //Add a new datanode on a different rack
      String newRacks[] = {"/rack2","/rack2","/rack2"} ;
      cluster.startDataNodes(conf, 3, true, null, newRacks);
      REPLICATION_FACTOR = 5;
      namesystem.setReplication(FILE_NAME, REPLICATION_FACTOR); 

      while ( (numRacks < 2) || (curReplicas < REPLICATION_FACTOR) ||
              (neededReplicationSize > 0) ) {
        LOG.info("Waiting for replication");
        Thread.sleep(600);
        numRacks = namesystem.blockManager.getNumberOfRacks(b.getLocalBlock());
        number = namesystem.blockManager.countNodes(b.getLocalBlock());
        curReplicas = number.liveReplicas();
        neededReplicationSize = 
                           namesystem.blockManager.neededReplications.size();
      }
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0

      // Check the block still has sufficient # replicas across racks
      DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
Solution content
      // it should have been replicated within the same rack.
      DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
      
      // Fail the last datanode again, it's also on rack2 so there is
      // only 1 rack for all the replicas
      datanodes = cluster.getDataNodes();
      idx = datanodes.size() - 1;
      dataNode = datanodes.get(idx);
      dnId = dataNode.getDatanodeId();
      cluster.stopDataNode(idx);
      ns.removeDatanode(dnId);

      // Make sure we have enough live replicas even though we are
      // short one rack and therefore need one replica
      DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 1);
    } finally {
      cluster.shutdown();
    }
  }
  
  /*
   * Test that when the excess replicas of a block are reduced due to
   * a node re-joining the cluster the rack policy is not violated.
   */
  @Test
  public void testReduceReplFactorDueToRejoinRespectsRackPolicy() 
      throws Exception {
    Configuration conf = getConf();
    short REPLICATION_FACTOR = 2;
    final Path filePath = new Path("/testFile");
    // Last datanode is on a different rack
    String racks[] = {"/rack1", "/rack1", "/rack2"};
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();

    try {
      // Create a file with one block
      final FileSystem fs = cluster.getFileSystem();
      DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
      ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
      DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

      // Make the last (cross rack) datanode look like it failed
      // to heartbeat by stopping it and calling removeDatanode.
      ArrayList datanodes = cluster.getDataNodes();
      assertEquals(3, datanodes.size());
      DataNode dataNode = datanodes.get(2);
      DatanodeID dnId = dataNode.getDatanodeId();
      cluster.stopDataNode(2);
      ns.removeDatanode(dnId);

      // The block gets re-replicated to another datanode so it has a 
      // sufficient # replicas, but not across racks, so there should
      // be 1 rack, and 1 needed replica (even though there are 2 hosts 
      // available and only 2 replicas required).
      DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 1);

      // Start the "failed" datanode, which has a replica so the block is
      // now over-replicated and therefore a replica should be removed but
      // not on the restarted datanode as that would violate the rack policy.
      String rack2[] = {"/rack2"};
      cluster.startDataNodes(conf, 1, true, null, rack2);
      cluster.waitActive();      
      
      // The block now has sufficient # replicas, across racks
      DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
    } finally {
      cluster.shutdown();
    }
  }

  /*
   * Test that rack policy is still respected when blocks are replicated
   * due to node decommissioning.
   */
  @Test
  public void testNodeDecomissionRespectsRackPolicy() throws Exception {
    Configuration conf = getConf();
    short REPLICATION_FACTOR = 2;
    final Path filePath = new Path("/testFile");

    // Configure an excludes file
    FileSystem localFileSys = FileSystem.getLocal(conf);
    Path workingDir = localFileSys.getWorkingDirectory();
    Path dir = new Path(workingDir, "build/test/data/temp/decommission");
    Path excludeFile = new Path(dir, "exclude");
    assertTrue(localFileSys.mkdirs(dir));
    DFSTestUtil.writeFile(localFileSys, excludeFile, "");
    conf.set("dfs.hosts.exclude", excludeFile.toUri().getPath());

    // Two blocks and four racks
    String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();

    try {
      // Create a file with one block
      final FileSystem fs = cluster.getFileSystem();
      DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
      ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
      DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

      // Decommission one of the hosts with the block, this should cause 
      // the block to get replicated to another host on the same rack,
      // otherwise the rack policy is violated.
      BlockLocation locs[] = fs.getFileBlockLocations(
          fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
      String name = locs[0].getNames()[0];
      DFSTestUtil.writeFile(localFileSys, excludeFile, name);
      ns.refreshNodes(conf);
      DFSTestUtil.waitForDecommission(fs, name);

      // Check the block still has sufficient # replicas across racks
      DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
File
TestBlocksWithNotEnoughRacks.java
Developer's decision
Manual
Kind of conflict
Annotation
Array access
Array initializer
Comment
Method declaration
Method invocation
Method signature
Try statement
Variable
While statement
Chunk
Conflicting content
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
      cluster.waitActive();
      fs = (DistributedFileSystem)(cluster.getFileSystem());
<<<<<<< HEAD
      fc = FileContext.getFileContext(cluster.getURI());
=======
      fc = FileContext.getFileContext(cluster.getURI(0));
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0

      // Saving image without safe mode should fail
      DFSAdmin admin = new DFSAdmin(conf);
Solution content
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
      cluster.waitActive();
      fs = (DistributedFileSystem)(cluster.getFileSystem());
      fc = FileContext.getFileContext(cluster.getURI(0));

      // Saving image without safe mode should fail
      DFSAdmin admin = new DFSAdmin(conf);
File
TestCheckpoint.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
      cluster.waitActive();
      fs = (DistributedFileSystem)(cluster.getFileSystem());
      checkFile(fs, file, replication);
<<<<<<< HEAD
      fc = FileContext.getFileContext(cluster.getURI());
=======
      fc = FileContext.getFileContext(cluster.getURI(0));
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      assertTrue(fc.getFileLinkStatus(symlink).isSymlink());
    } finally {
      if(fs != null) fs.close();
Solution content
      cluster.waitActive();
      fs = (DistributedFileSystem)(cluster.getFileSystem());
      checkFile(fs, file, replication);
      fc = FileContext.getFileContext(cluster.getURI(0));
      assertTrue(fc.getFileLinkStatus(symlink).isSymlink());
    } finally {
      if(fs != null) fs.close();
File
TestCheckpoint.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
    }
  }
  
<<<<<<< HEAD
=======
  /* Test case to test CheckpointSignature */
  @SuppressWarnings("deprecation")
  public void testCheckpointSignature() throws IOException {

    MiniDFSCluster cluster = null;
    Configuration conf = new HdfsConfiguration();

    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
        .format(false).build();
    NameNode nn = cluster.getNameNode();

    SecondaryNameNode secondary = startSecondaryNameNode(conf);
    // prepare checkpoint image
    secondary.doCheckpoint();
    CheckpointSignature sig = nn.rollEditLog();
    // manipulate the CheckpointSignature fields
    sig.setBlockpoolID("somerandomebpid");
    sig.clusterID = "somerandomcid";
    try {
      sig.validateStorageInfo(nn.getFSImage()); // this should fail
      assertTrue("This test is expected to fail.", false);
    } catch (Exception ignored) {
    }

    secondary.shutdown();
    cluster.shutdown();
  }
  
  /**
   * Starts two namenodes and two secondary namenodes, verifies that secondary
   * namenodes are configured correctly to talk to their respective namenodes
   * and can do the checkpoint.
   * 
   * @throws IOException
   */
  @SuppressWarnings("deprecation")
  public void testMultipleSecondaryNamenodes() throws IOException {
    Configuration conf = new HdfsConfiguration();
    String nameserviceId1 = "ns1";
    String nameserviceId2 = "ns2";
    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, nameserviceId1
        + "," + nameserviceId2);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numNameNodes(2)
        .nameNodePort(9928).build();
    Configuration snConf1 = new HdfsConfiguration(cluster.getConfiguration(0));
    Configuration snConf2 = new HdfsConfiguration(cluster.getConfiguration(1));
    InetSocketAddress nn1RpcAddress = cluster.getNameNode(0).rpcAddress;
    InetSocketAddress nn2RpcAddress = cluster.getNameNode(1).rpcAddress;
    String nn1 = nn1RpcAddress.getHostName() + ":" + nn1RpcAddress.getPort();
    String nn2 = nn2RpcAddress.getHostName() + ":" + nn2RpcAddress.getPort();

    // Set the Service Rpc address to empty to make sure the node specific
    // setting works
    snConf1.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "");
    snConf2.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "");

    // Set the nameserviceIds
    snConf1.set(DFSUtil.getNameServiceIdKey(
        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nameserviceId1), nn1);
    snConf2.set(DFSUtil.getNameServiceIdKey(
        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nameserviceId2), nn2);

    SecondaryNameNode secondary1 = startSecondaryNameNode(snConf1);
    SecondaryNameNode secondary2 = startSecondaryNameNode(snConf2);

    // make sure the two secondary namenodes are talking to correct namenodes.
    assertEquals(secondary1.getNameNodeAddress().getPort(), nn1RpcAddress.getPort());
    assertEquals(secondary2.getNameNodeAddress().getPort(), nn2RpcAddress.getPort());
    assertTrue(secondary1.getNameNodeAddress().getPort() != secondary2
        .getNameNodeAddress().getPort());

    // both should checkpoint.
    secondary1.doCheckpoint();
    secondary2.doCheckpoint();
    secondary1.shutdown();
    secondary2.shutdown();
    cluster.shutdown();
  }
  
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
  /**
   * Simulate a secondary node failure to transfer image
   * back to the name-node.
Solution content
    }
  }
  
  /* Test case to test CheckpointSignature */
  @SuppressWarnings("deprecation")
  public void testCheckpointSignature() throws IOException {

    MiniDFSCluster cluster = null;
    Configuration conf = new HdfsConfiguration();

    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
        .format(false).build();
    NameNode nn = cluster.getNameNode();

    SecondaryNameNode secondary = startSecondaryNameNode(conf);
    // prepare checkpoint image
    secondary.doCheckpoint();
    CheckpointSignature sig = nn.rollEditLog();
    // manipulate the CheckpointSignature fields
    sig.setBlockpoolID("somerandomebpid");
    sig.clusterID = "somerandomcid";
    try {
      sig.validateStorageInfo(nn.getFSImage()); // this should fail
      assertTrue("This test is expected to fail.", false);
    } catch (Exception ignored) {
    }

    secondary.shutdown();
    cluster.shutdown();
  }
  
  /**
   * Starts two namenodes and two secondary namenodes, verifies that secondary
   * namenodes are configured correctly to talk to their respective namenodes
   * and can do the checkpoint.
   * 
   * @throws IOException
   */
  @SuppressWarnings("deprecation")
  public void testMultipleSecondaryNamenodes() throws IOException {
    Configuration conf = new HdfsConfiguration();
    String nameserviceId1 = "ns1";
    String nameserviceId2 = "ns2";
    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, nameserviceId1
        + "," + nameserviceId2);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numNameNodes(2)
        .nameNodePort(9928).build();
    Configuration snConf1 = new HdfsConfiguration(cluster.getConfiguration(0));
    Configuration snConf2 = new HdfsConfiguration(cluster.getConfiguration(1));
    InetSocketAddress nn1RpcAddress = cluster.getNameNode(0).rpcAddress;
    InetSocketAddress nn2RpcAddress = cluster.getNameNode(1).rpcAddress;
    String nn1 = nn1RpcAddress.getHostName() + ":" + nn1RpcAddress.getPort();
    String nn2 = nn2RpcAddress.getHostName() + ":" + nn2RpcAddress.getPort();

    // Set the Service Rpc address to empty to make sure the node specific
    // setting works
    snConf1.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "");
    snConf2.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "");

    // Set the nameserviceIds
    snConf1.set(DFSUtil.getNameServiceIdKey(
        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nameserviceId1), nn1);
    snConf2.set(DFSUtil.getNameServiceIdKey(
        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nameserviceId2), nn2);

    SecondaryNameNode secondary1 = startSecondaryNameNode(snConf1);
    SecondaryNameNode secondary2 = startSecondaryNameNode(snConf2);

    // make sure the two secondary namenodes are talking to correct namenodes.
    assertEquals(secondary1.getNameNodeAddress().getPort(), nn1RpcAddress.getPort());
    assertEquals(secondary2.getNameNodeAddress().getPort(), nn2RpcAddress.getPort());
    assertTrue(secondary1.getNameNodeAddress().getPort() != secondary2
        .getNameNodeAddress().getPort());

    // both should checkpoint.
    secondary1.doCheckpoint();
    secondary2.doCheckpoint();
    secondary1.shutdown();
    secondary2.shutdown();
    cluster.shutdown();
  }
  
  /**
   * Simulate a secondary node failure to transfer image
   * back to the name-node.
File
TestCheckpoint.java
Developer's decision
Version 2
Kind of conflict
Annotation
Comment
Method declaration
Chunk
Conflicting content
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
<<<<<<< HEAD
=======
import org.apache.hadoop.hdfs.TestDatanodeBlockScanner;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
import org.junit.Test;

/** A JUnit test for corrupt_files.jsp */
Solution content
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.TestDatanodeBlockScanner;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.junit.Test;

/** A JUnit test for corrupt_files.jsp */
File
TestCorruptFilesJsp.java
Developer's decision
Version 2
Kind of conflict
Import
Chunk
Conflicting content
      // Now corrupt all the files except for the last one
      for (int idx = 0; idx < filepaths.length - 1; idx++) {
<<<<<<< HEAD
        String blockName = DFSTestUtil.getFirstBlock(fs, filepaths[idx])
            .getBlockName();
        assertTrue(cluster.corruptReplica(blockName, 0));
=======
        ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, filepaths[idx]);
        TestDatanodeBlockScanner.corruptReplica(blk, 0);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0

        // read the file so that the corrupt block is reported to NN
        FSDataInputStream in = fs.open(filepaths[idx]);
Solution content
      // Now corrupt all the files except for the last one
      for (int idx = 0; idx < filepaths.length - 1; idx++) {
        ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, filepaths[idx]);
        assertTrue(TestDatanodeBlockScanner.corruptReplica(blk, 0));

        // read the file so that the corrupt block is reported to NN
        FSDataInputStream in = fs.open(filepaths[idx]);
File
TestCorruptFilesJsp.java
Developer's decision
Manual
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
<<<<<<< HEAD
=======
import org.apache.hadoop.test.GenericTestUtils;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0

import static org.junit.Assert.*;
import org.junit.Test;
Solution content
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
import org.apache.hadoop.test.GenericTestUtils;

import static org.junit.Assert.*;
import org.junit.Test;
File
TestEditLogRace.java
Developer's decision
Version 2
Kind of conflict
Import
Chunk
Conflicting content
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
<<<<<<< HEAD
=======
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.tools.DFSck;
Solution content
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.tools.DFSck;
File
TestFsck.java
Developer's decision
Version 2
Kind of conflict
Import
Chunk
Conflicting content
      DatanodeDescriptor dd = namesystem.getDatanode(nodeReg);
      
      final int REMAINING_BLOCKS = 1;
<<<<<<< HEAD
      final int MAX_REPLICATE_LIMIT = conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 2);
=======
      final int MAX_REPLICATE_LIMIT = 
        conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 2);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      final int MAX_INVALIDATE_LIMIT = DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT;
      final int MAX_INVALIDATE_BLOCKS = 2*MAX_INVALIDATE_LIMIT+REMAINING_BLOCKS;
      final int MAX_REPLICATE_BLOCKS = 2*MAX_REPLICATE_LIMIT+REMAINING_BLOCKS;
Solution content
      DatanodeDescriptor dd = namesystem.getDatanode(nodeReg);
      
      final int REMAINING_BLOCKS = 1;
      final int MAX_REPLICATE_LIMIT = 
        conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 2);
      final int MAX_INVALIDATE_LIMIT = DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT;
      final int MAX_INVALIDATE_BLOCKS = 2*MAX_INVALIDATE_LIMIT+REMAINING_BLOCKS;
      final int MAX_REPLICATE_BLOCKS = 2*MAX_REPLICATE_LIMIT+REMAINING_BLOCKS;
File
TestHeartbeatHandling.java
Developer's decision
Version 1
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
            dd.addBlockToBeReplicated(
                new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP), ONE_TARGET);
          }
<<<<<<< HEAD

          DatanodeCommand[] cmds = namesystem.handleHeartbeat(
              nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), 0, 0);
          assertEquals(1, cmds.length);
          assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
          assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand)cmds[0]).getBlocks().length);

=======
          DatanodeCommand[]cmds = sendHeartBeat(nodeReg, dd, namesystem);
          assertEquals(1, cmds.length);
          assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
          assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand)cmds[0]).getBlocks().length);
          
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
          ArrayList<Block> blockList = new ArrayList<Block>(MAX_INVALIDATE_BLOCKS);
          for (int i=0; i<MAX_INVALIDATE_BLOCKS; i++) {
Solution content
            dd.addBlockToBeReplicated(
                new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP), ONE_TARGET);
          }
          DatanodeCommand[]cmds = sendHeartBeat(nodeReg, dd, namesystem);
          assertEquals(1, cmds.length);
          assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
          assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand)cmds[0]).getBlocks().length);
          
          ArrayList<Block> blockList = new ArrayList<Block>(MAX_INVALIDATE_BLOCKS);
          for (int i=0; i<MAX_INVALIDATE_BLOCKS; i++) {
File
TestHeartbeatHandling.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
            blockList.add(new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP));
          }
          dd.addBlocksToBeInvalidated(blockList);
<<<<<<< HEAD

          cmds = namesystem.handleHeartbeat(
              nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), 0, 0);
=======
               
          cmds = sendHeartBeat(nodeReg, dd, namesystem);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
          assertEquals(2, cmds.length);
          assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
          assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand)cmds[0]).getBlocks().length);
Solution content
            blockList.add(new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP));
          }
          dd.addBlocksToBeInvalidated(blockList);
               
          cmds = sendHeartBeat(nodeReg, dd, namesystem);
          assertEquals(2, cmds.length);
          assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
          assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand)cmds[0]).getBlocks().length);
File
TestHeartbeatHandling.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
          assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand)cmds[0]).getBlocks().length);
          assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
          assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand)cmds[1]).getBlocks().length);
<<<<<<< HEAD

          cmds = namesystem.handleHeartbeat(
              nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), 0, 0);
=======
          
          cmds = sendHeartBeat(nodeReg, dd, namesystem);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
          assertEquals(2, cmds.length);
          assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
          assertEquals(REMAINING_BLOCKS, ((BlockCommand)cmds[0]).getBlocks().length);
Solution content
          assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand)cmds[0]).getBlocks().length);
          assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
          assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand)cmds[1]).getBlocks().length);
          
          cmds = sendHeartBeat(nodeReg, dd, namesystem);
          assertEquals(2, cmds.length);
          assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
          assertEquals(REMAINING_BLOCKS, ((BlockCommand)cmds[0]).getBlocks().length);
File
TestHeartbeatHandling.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
          assertEquals(REMAINING_BLOCKS, ((BlockCommand)cmds[0]).getBlocks().length);
          assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
          assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand)cmds[1]).getBlocks().length);
<<<<<<< HEAD

          cmds = namesystem.handleHeartbeat(
              nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), 0, 0);
=======
          
          cmds = sendHeartBeat(nodeReg, dd, namesystem);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
          assertEquals(1, cmds.length);
          assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[0].getAction());
          assertEquals(REMAINING_BLOCKS, ((BlockCommand)cmds[0]).getBlocks().length);
Solution content
          assertEquals(REMAINING_BLOCKS, ((BlockCommand)cmds[0]).getBlocks().length);
          assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
          assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand)cmds[1]).getBlocks().length);
          
          cmds = sendHeartBeat(nodeReg, dd, namesystem);
          assertEquals(1, cmds.length);
          assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[0].getAction());
          assertEquals(REMAINING_BLOCKS, ((BlockCommand)cmds[0]).getBlocks().length);
File
TestHeartbeatHandling.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
          assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[0].getAction());
          assertEquals(REMAINING_BLOCKS, ((BlockCommand)cmds[0]).getBlocks().length);

<<<<<<< HEAD
          cmds = namesystem.handleHeartbeat(
              nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), 0, 0);
=======
          cmds = sendHeartBeat(nodeReg, dd, namesystem);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
          assertEquals(null, cmds);
        }
      } finally {
Solution content
          assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[0].getAction());
          assertEquals(REMAINING_BLOCKS, ((BlockCommand)cmds[0]).getBlocks().length);

          cmds = sendHeartBeat(nodeReg, dd, namesystem);
          assertEquals(null, cmds);
        }
      } finally {
File
TestHeartbeatHandling.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
          + " corrupt files. Expecting None.", badFiles.size() == 0);

      // Now deliberately corrupt one block
<<<<<<< HEAD
      File data_dir = new File(System.getProperty("test.build.data"),
      "dfs/data/data1/current/finalized");
=======
      File storageDir = MiniDFSCluster.getStorageDir(0, 0);
      File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, 
          cluster.getNamesystem().getBlockPoolId());
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      assertTrue("data directory does not exist", data_dir.exists());
      File[] blocks = data_dir.listFiles();
      assertTrue("Blocks do not exist in data-dir", (blocks != null) &&
Solution content
          + " corrupt files. Expecting None.", badFiles.size() == 0);

      // Now deliberately corrupt one block
      File storageDir = MiniDFSCluster.getStorageDir(0, 0);
      File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, 
          cluster.getNamesystem().getBlockPoolId());
      assertTrue("data directory does not exist", data_dir.exists());
      File[] blocks = data_dir.listFiles();
      assertTrue("Blocks do not exist in data-dir", (blocks != null) &&
File
TestListCorruptFileBlocks.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
          badFiles.size() == 1);
 
      // restart namenode
<<<<<<< HEAD
      cluster.restartNameNode();
=======
      cluster.restartNameNode(0);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      fs = cluster.getFileSystem();

      // wait until replication queues have been initialized
Solution content
          badFiles.size() == 1);
 
      // restart namenode
      cluster.restartNameNode(0);
      fs = cluster.getFileSystem();

      // wait until replication queues have been initialized
File
TestListCorruptFileBlocks.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Chunk
Conflicting content
      DFSTestUtil util = new DFSTestUtil("testGetCorruptFiles", 3, 1, 1024);
      util.createFiles(fs, "/corruptData");

<<<<<<< HEAD
      final NameNode namenode = cluster.getNameNode();
=======
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      RemoteIterator<Path> corruptFileBlocks = 
        dfs.listCorruptFileBlocks(new Path("/corruptData"));
      int numCorrupt = countPaths(corruptFileBlocks);
Solution content
      DFSTestUtil util = new DFSTestUtil("testGetCorruptFiles", 3, 1, 1024);
      util.createFiles(fs, "/corruptData");

      RemoteIterator<Path> corruptFileBlocks = 
        dfs.listCorruptFileBlocks(new Path("/corruptData"));
      int numCorrupt = countPaths(corruptFileBlocks);
File
TestListCorruptFileBlocks.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
      int numCorrupt = countPaths(corruptFileBlocks);
      assertTrue(numCorrupt == 0);
      // delete the blocks
<<<<<<< HEAD
      File baseDir = new File(System.getProperty("test.build.data",
          "build/test/data"), "dfs/data");
      for (int i = 0; i < 8; i++) {
        File data_dir = new File(baseDir, "data" + (i + 1)
            + MiniDFSCluster.FINALIZED_DIR_NAME);
=======
      String bpid = cluster.getNamesystem().getBlockPoolId();
      // For loop through number of datadirectories per datanode (2)
      for (int i = 0; i < 2; i++) {
        File storageDir = MiniDFSCluster.getStorageDir(0, i);
        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
        File[] blocks = data_dir.listFiles();
        if (blocks == null)
          continue;
Solution content
      int numCorrupt = countPaths(corruptFileBlocks);
      assertTrue(numCorrupt == 0);
      // delete the blocks
      String bpid = cluster.getNamesystem().getBlockPoolId();
      // For loop through number of datadirectories per datanode (2)
      for (int i = 0; i < 2; i++) {
        File storageDir = MiniDFSCluster.getStorageDir(0, i);
        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        File[] blocks = data_dir.listFiles();
        if (blocks == null)
          continue;
File
TestListCorruptFileBlocks.java
Developer's decision
Version 2
Kind of conflict
Comment
For statement
Method invocation
Variable
Chunk
Conflicting content
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DF;
<<<<<<< HEAD
=======
import org.apache.hadoop.hdfs.DFSUtil;
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
Solution content
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DF;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
File
TestNamenodeCapacityReport.java
Developer's decision
Version 2
Kind of conflict
Import
Chunk
Conflicting content
      cluster.restartDataNode(dnprop);
      cluster.waitActive();
      
<<<<<<< HEAD
      // check if excessive replica is detected (transient)
      initializeTimeout(TIMEOUT);
      while (countNodes(block, namesystem).excessReplicas() == 0) {
        checkTimeout("excess replicas not detected");
      }
=======
      // check if excessive replica is detected
      NumberReplicas num = null;
      do {
       synchronized (namesystem) {
         num = namesystem.blockManager.countNodes(block.getLocalBlock());
       }
      } while (num.excessReplicas() == 0);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      
      // find out a non-excess node
      Iterator<DatanodeDescriptor> iter = namesystem.blockManager.blocksMap
Solution content
      cluster.restartDataNode(dnprop);
      cluster.waitActive();
      
      // check if excessive replica is detected (transient)
      initializeTimeout(TIMEOUT);
      while (countNodes(block.getLocalBlock(), namesystem).excessReplicas() == 0) {
        checkTimeout("excess replicas not detected");
      }
      
      // find out a non-excess node
      Iterator<DatanodeDescriptor> iter = namesystem.blockManager.blocksMap
File
TestNodeCount.java
Developer's decision
Manual
Kind of conflict
Comment
Do statement
Method invocation
Variable
While statement
Chunk
Conflicting content
      }

      // The block should be replicated
<<<<<<< HEAD
      initializeTimeout(TIMEOUT);
      while (countNodes(block, namesystem).liveReplicas() != REPLICATION_FACTOR) {
        checkTimeout("live replica count not correct", 1000);
      }
=======
      do {
        num = namesystem.blockManager.countNodes(block.getLocalBlock());
      } while (num.liveReplicas() != REPLICATION_FACTOR);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0

      // restart the first datanode
      cluster.restartDataNode(dnprop);
Solution content
      }

      // The block should be replicated
      initializeTimeout(TIMEOUT);
      while (countNodes(block.getLocalBlock(), namesystem).liveReplicas() != REPLICATION_FACTOR) {
        checkTimeout("live replica count not correct", 1000);
      }

      // restart the first datanode
      cluster.restartDataNode(dnprop);
File
TestNodeCount.java
Developer's decision
Manual
Kind of conflict
Do statement
Method invocation
While statement
Chunk
Conflicting content
      cluster.restartDataNode(dnprop);
      cluster.waitActive();

<<<<<<< HEAD
      // check if excessive replica is detected (transient)
      initializeTimeout(TIMEOUT);
      while (countNodes(block, namesystem).excessReplicas() != 2) {
        checkTimeout("excess replica count not equal to 2");
      }

=======
      // check if excessive replica is detected
      do {
       num = namesystem.blockManager.countNodes(block.getLocalBlock());
      } while (num.excessReplicas() != 2);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    } finally {
      cluster.shutdown();
    }
Solution content
      cluster.restartDataNode(dnprop);
      cluster.waitActive();

      // check if excessive replica is detected (transient)
      initializeTimeout(TIMEOUT);
      while (countNodes(block.getLocalBlock(), namesystem).excessReplicas() != 2) {
        checkTimeout("excess replica count not equal to 2");
      }

    } finally {
      cluster.shutdown();
    }
File
TestNodeCount.java
Developer's decision
Manual
Kind of conflict
Comment
Do statement
Method invocation
While statement
Chunk
Conflicting content
  public void testProcesOverReplicateBlock() throws IOException {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
<<<<<<< HEAD
    conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
=======
    conf.set(
        DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
        Integer.toString(2));
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    FileSystem fs = cluster.getFileSystem();
Solution content
  public void testProcesOverReplicateBlock() throws IOException {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
    conf.set(
        DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
        Integer.toString(2));
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    FileSystem fs = cluster.getFileSystem();
File
TestOverReplicatedBlocks.java
Developer's decision
Version 1
Kind of conflict
Method invocation
Chunk
Conflicting content
      DFSTestUtil.waitReplication(fs, fileName, (short)3);
      
      // corrupt the block on datanode 0
<<<<<<< HEAD
      Block block = DFSTestUtil.getFirstBlock(fs, fileName);
      assertTrue(cluster.corruptReplica(block.getBlockName(), 0));
=======
      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
      TestDatanodeBlockScanner.corruptReplica(block, 0);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
      DataNodeProperties dnProps = cluster.stopDataNode(0);
      // remove block scanner log to trigger block scanning
      File scanLog = new File(MiniDFSCluster.getFinalizedDir(
Solution content
      DFSTestUtil.waitReplication(fs, fileName, (short)3);
      
      // corrupt the block on datanode 0
      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
      assertTrue(TestDatanodeBlockScanner.corruptReplica(block, 0));
      DataNodeProperties dnProps = cluster.stopDataNode(0);
      // remove block scanner log to trigger block scanning
      File scanLog = new File(MiniDFSCluster.getFinalizedDir(
File
TestOverReplicatedBlocks.java
Developer's decision
Manual
Kind of conflict
Method invocation
Variable
Chunk
Conflicting content
        synchronized (namesystem.heartbeats) {
          // set live datanode's remaining space to be 0 
          // so they will be chosen to be deleted when over-replication occurs
<<<<<<< HEAD
          for (DatanodeDescriptor datanode : namesystem.heartbeats) {
            if (!corruptDataNode.equals(datanode)) {
              datanode.updateHeartbeat(100L, 100L, 0L, 0);
=======
          String corruptMachineName = corruptDataNode.getName();
          for (DatanodeDescriptor datanode : namesystem.heartbeats) {
            if (!corruptMachineName.equals(datanode.getName())) {
              datanode.updateHeartbeat(100L, 100L, 0L, 100L, 0);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
            }
          }
Solution content
        synchronized (namesystem.heartbeats) {
          // set live datanode's remaining space to be 0 
          // so they will be chosen to be deleted when over-replication occurs
          String corruptMachineName = corruptDataNode.getName();
          for (DatanodeDescriptor datanode : namesystem.heartbeats) {
            if (!corruptMachineName.equals(datanode.getName())) {
              datanode.updateHeartbeat(100L, 100L, 0L, 100L, 0);
            }
          }
File
TestOverReplicatedBlocks.java
Developer's decision
Version 2
Kind of conflict
For statement
If statement
Method invocation
Variable
Chunk
Conflicting content
          // corrupt one won't be chosen to be excess one
          // without 4910 the number of live replicas would be 0: block gets lost
<<<<<<< HEAD
          assertEquals(1, namesystem.blockManager.countNodes(block).liveReplicas());
=======
          assertEquals(1, namesystem.blockManager.countNodes(block.getLocalBlock())
              .liveReplicas());
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
        }
      } finally {
        namesystem.writeUnlock();
Solution content
          // corrupt one won't be chosen to be excess one
          // without 4910 the number of live replicas would be 0: block gets lost
          assertEquals(1, namesystem.blockManager.countNodes(block.getLocalBlock())
              .liveReplicas());
        }
      } finally {
        namesystem.writeUnlock();
File
TestOverReplicatedBlocks.java
Developer's decision
Version 2
Kind of conflict
Method invocation
Chunk
Conflicting content
      sd = it.next();

      if(sd.getStorageDirType().isOfType(NameNodeDirType.IMAGE)) {
<<<<<<< HEAD
        File imf = img.getStorage().getStorageFile(sd, NameNodeFile.IMAGE);
        LOG.info("--image file " + imf.getAbsolutePath() + "; len = " + imf.length() + "; expected = " + expectedImgSize);
        assertEquals(expectedImgSize, imf.length());	
      } else if(sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) {
        File edf = img.getStorage().getStorageFile(sd, NameNodeFile.EDITS);
=======
        img.getStorage();
        File imf = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE);
        LOG.info("--image file " + imf.getAbsolutePath() + "; len = " + imf.length() + "; expected = " + expectedImgSize);
        assertEquals(expectedImgSize, imf.length());	
      } else if(sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) {
        img.getStorage();
        File edf = NNStorage.getStorageFile(sd, NameNodeFile.EDITS);
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
        LOG.info("-- edits file " + edf.getAbsolutePath() + "; len = " + edf.length()  + "; expected = " + expectedEditsSize);
        assertEquals(expectedEditsSize, edf.length());	
      } else {
Solution content
      sd = it.next();

      if(sd.getStorageDirType().isOfType(NameNodeDirType.IMAGE)) {
        img.getStorage();
        File imf = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE);
        LOG.info("--image file " + imf.getAbsolutePath() + "; len = " + imf.length() + "; expected = " + expectedImgSize);
        assertEquals(expectedImgSize, imf.length());	
      } else if(sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) {
        img.getStorage();
        File edf = NNStorage.getStorageFile(sd, NameNodeFile.EDITS);
        LOG.info("-- edits file " + edf.getAbsolutePath() + "; len = " + edf.length()  + "; expected = " + expectedEditsSize);
        assertEquals(expectedEditsSize, edf.length());	
      } else {
File
TestStartup.java
Developer's decision
Version 2
Kind of conflict
If statement
Method invocation
Variable
Chunk
Conflicting content
    syncList.add(record1);
    syncList.add(record2);
    
<<<<<<< HEAD
    when(dn1.updateReplicaUnderRecovery((Block)anyObject(), anyLong(), 
        anyLong())).thenReturn(new Block(block.getBlockId(), 
            expectLen, block.getGenerationStamp()));
    when(dn2.updateReplicaUnderRecovery((Block)anyObject(), anyLong(), 
        anyLong())).thenReturn(new Block(block.getBlockId(), 
            expectLen, block.getGenerationStamp()));
=======
    when(dn1.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(), 
        anyLong())).thenReturn(new ExtendedBlock(block.getBlockPoolId(), 
            block.getBlockId(), expectLen, block.getGenerationStamp()));
    when(dn2.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(), 
        anyLong())).thenReturn(new ExtendedBlock(block.getBlockPoolId(), 
            block.getBlockId(), expectLen, block.getGenerationStamp()));
>>>>>>> f529dfe74535d09556f82a0d9da0e1137410e8a0
    dn.syncBlock(rBlock, syncList);
  }
  
Solution content
    syncList.add(record1);
    syncList.add(record2);
    
    when(dn1.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(), 
        anyLong())).thenReturn(new ExtendedBlock(block.getBlockPoolId(), 
            block.getBlockId(), expectLen, block.getGenerationStamp()));
    when(dn2.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(), 
        anyLong())).thenReturn(new ExtendedBlock(block.getBlockPoolId(), 
            block.getBlockId(), expectLen, block.getGenerationStamp()));
    dn.syncBlock(rBlock, syncList);
  }
  
File
TestBlockRecovery.java
Developer's decision
Version 2
Kind of conflict
Method invocation