Java Code Examples for org.apache.hadoop.hdfs.DFSClient#createNamenode()
The following examples show how to use org.apache.hadoop.hdfs.DFSClient#createNamenode().
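For orientation, here is a minimal, self-contained sketch of the two createNamenode overloads that appear in the examples below. This is a sketch rather than code from any of the listed projects: it assumes a 0.20-era Hadoop (or an RDFS/hadoop-gpu fork) on the classpath, and the namenode host and file path are placeholders.

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.net.NetUtils;

public class CreateNamenodeSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // Overload 1: resolve the namenode address from the configuration
    // (fs.default.name) and return an RPC proxy implementing ClientProtocol.
    ClientProtocol namenode = DFSClient.createNamenode(conf);

    // Overload 2: connect to an explicitly given namenode address.
    // "namenode.example.com:9000" is a placeholder, not a real host.
    InetSocketAddress nnAddr =
        NetUtils.createSocketAddr("namenode.example.com:9000");
    ClientProtocol namenode2 = DFSClient.createNamenode(nnAddr, conf);

    // The proxy exposes namenode RPCs directly, e.g. block lookups;
    // "/user/test/file" is a placeholder path.
    System.out.println(
        namenode.getBlockLocations("/user/test/file", 0L, 1L));
  }
}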
Example 1
Source File: TestBalancer.java From RDFS with Apache License 2.0
private Block[] generateBlocks(long size, short numNodes) throws IOException {
  cluster = new MiniDFSCluster(CONF, numNodes, true, null);
  try {
    cluster.waitActive();
    client = DFSClient.createNamenode(CONF);

    short replicationFactor = (short) (numNodes - 1);
    long fileLen = size / replicationFactor;
    createFile(fileLen, replicationFactor);

    List<LocatedBlock> locatedBlocks = client
        .getBlockLocations(fileName, 0, fileLen).getLocatedBlocks();

    int numOfBlocks = locatedBlocks.size();
    Block[] blocks = new Block[numOfBlocks];
    for (int i = 0; i < numOfBlocks; i++) {
      Block b = locatedBlocks.get(i).getBlock();
      blocks[i] = new Block(b.getBlockId(), b.getNumBytes(),
          b.getGenerationStamp());
    }

    return blocks;
  } finally {
    cluster.shutdown();
  }
}
Example 2
Source File: Balancer.java From RDFS with Apache License 2.0
private void init(InetSocketAddress namenodeAddress) throws IOException {
  this.namenodeAddress = namenodeAddress;
  this.namenode = createNamenode(namenodeAddress, conf);
  this.client = DFSClient.createNamenode(namenodeAddress, conf);
  this.fs = FileSystem.get(NameNode.getUri(namenodeAddress), conf);

  this.moverExecutor = Executors.newFixedThreadPool(moveThreads);
  int dispatchThreads = (int) Math.max(1, moveThreads / maxConcurrentMoves);
  this.dispatcherExecutor = Executors.newFixedThreadPool(dispatchThreads);

  /* Check if there is another balancer running.
   * Exit if there is another one running.
   */
  this.out = checkAndMarkRunningBalancer();
  if (out == null) {
    throw new IOException("Another balancer is running");
  }

  // get namespace id
  LocatedBlocksWithMetaInfo locations =
      client.openAndFetchMetaInfo(BALANCER_ID_PATH.toString(), 0L, 1L);
  this.namespaceId = locations.getNamespaceID();
}
Example 3
Source File: TestBalancer.java From hadoop-gpu with Apache License 2.0
private Block[] generateBlocks(long size, short numNodes) throws IOException {
  cluster = new MiniDFSCluster(CONF, numNodes, true, null);
  try {
    cluster.waitActive();
    client = DFSClient.createNamenode(CONF);

    short replicationFactor = (short) (numNodes - 1);
    long fileLen = size / replicationFactor;
    createFile(fileLen, replicationFactor);

    List<LocatedBlock> locatedBlocks = client
        .getBlockLocations(fileName, 0, fileLen).getLocatedBlocks();

    int numOfBlocks = locatedBlocks.size();
    Block[] blocks = new Block[numOfBlocks];
    for (int i = 0; i < numOfBlocks; i++) {
      Block b = locatedBlocks.get(i).getBlock();
      blocks[i] = new Block(b.getBlockId(), b.getNumBytes(),
          b.getGenerationStamp());
    }

    return blocks;
  } finally {
    cluster.shutdown();
  }
}
Example 4
Source File: FileChecksumServlets.java From hadoop-gpu with Apache License 2.0
/** {@inheritDoc} */
public void doGet(HttpServletRequest request, HttpServletResponse response)
    throws ServletException, IOException {
  final UnixUserGroupInformation ugi = getUGI(request);
  final PrintWriter out = response.getWriter();
  final String filename = getFilename(request, response);
  final XMLOutputter xml = new XMLOutputter(out, "UTF-8");
  xml.declaration();

  final Configuration conf =
      new Configuration(DataNode.getDataNode().getConf());
  final int socketTimeout =
      conf.getInt("dfs.socket.timeout", HdfsConstants.READ_TIMEOUT);
  final SocketFactory socketFactory =
      NetUtils.getSocketFactory(conf, ClientProtocol.class);
  UnixUserGroupInformation.saveToConf(conf,
      UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
  final ClientProtocol nnproxy = DFSClient.createNamenode(conf);

  try {
    final MD5MD5CRC32FileChecksum checksum = DFSClient.getFileChecksum(
        filename, nnproxy, socketFactory, socketTimeout);
    MD5MD5CRC32FileChecksum.write(xml, checksum);
  } catch (IOException ioe) {
    new RemoteException(ioe.getClass().getName(), ioe.getMessage())
        .writeXml(filename, xml);
  }
  xml.endDocument();
}
Example 5
Source File: TestBalancer.java From RDFS with Apache License 2.0
private void testUnevenDistribution(long distribution[],
    long capacities[], String[] racks) throws Exception {
  int numDatanodes = distribution.length;
  if (capacities.length != numDatanodes || racks.length != numDatanodes) {
    throw new IllegalArgumentException("Array length is not the same");
  }

  // calculate the total space that needs to be filled
  long totalUsedSpace = 0L;
  for (int i = 0; i < distribution.length; i++) {
    totalUsedSpace += distribution[i];
  }

  // fill the cluster
  Block[] blocks = generateBlocks(totalUsedSpace, (short) numDatanodes);

  // redistribute blocks
  Block[][] blocksDN = distributeBlocks(
      blocks, (short) (numDatanodes - 1), distribution);

  // restart the cluster: do NOT format the cluster
  CONF.set("dfs.safemode.threshold.pct", "0.0f");
  cluster = new MiniDFSCluster(0, CONF, numDatanodes,
      false, true, null, racks, capacities);
  cluster.waitActive();
  client = DFSClient.createNamenode(CONF);

  cluster.injectBlocks(blocksDN);

  long totalCapacity = 0L;
  for (long capacity : capacities) {
    totalCapacity += capacity;
  }
  runBalancer(CONF, totalUsedSpace, totalCapacity);
  cluster.shutdown();
}
Example 6
Source File: TestBalancer.java From RDFS with Apache License 2.0
private void test(long[] capacities, String[] racks,
    long newCapacity, String newRack) throws Exception {
  int numOfDatanodes = capacities.length;
  assertEquals(numOfDatanodes, racks.length);
  cluster = new MiniDFSCluster(0, CONF, capacities.length,
      true, true, null, racks, capacities);
  try {
    cluster.waitActive();
    client = DFSClient.createNamenode(CONF);

    long totalCapacity = 0L;
    for (long capacity : capacities) {
      totalCapacity += capacity;
    }

    // fill up the cluster to be 30% full
    long totalUsedSpace = totalCapacity * 3 / 10;
    createFile(totalUsedSpace / numOfDatanodes, (short) numOfDatanodes);

    // start up an empty node with the same capacity and on the same rack
    cluster.startDataNodes(CONF, 1, true, null,
        new String[]{newRack}, new long[]{newCapacity});
    totalCapacity += newCapacity;

    // run balancer and validate results
    runBalancer(CONF, totalUsedSpace, totalCapacity);
  } finally {
    cluster.shutdown();
  }
}
Example 7
Source File: TestBalancer.java From RDFS with Apache License 2.0
private void testBalancerDefaultConstructor(Configuration conf,
    long[] capacities, String[] racks, long newCapacity, String newRack)
    throws Exception {
  int numOfDatanodes = capacities.length;
  assertEquals(numOfDatanodes, racks.length);
  cluster = new MiniDFSCluster(0, conf, capacities.length,
      true, true, null, racks, capacities);
  try {
    cluster.waitActive();
    client = DFSClient.createNamenode(conf);

    long totalCapacity = 0L;
    for (long capacity : capacities) {
      totalCapacity += capacity;
    }

    // fill up the cluster to be 30% full
    long totalUsedSpace = totalCapacity * 3 / 10;
    createFile(totalUsedSpace / numOfDatanodes, (short) numOfDatanodes);

    // start up an empty node with the same capacity and on the same rack
    cluster.startDataNodes(conf, 1, true, null,
        new String[] { newRack }, new long[] { newCapacity });
    totalCapacity += newCapacity;

    // run balancer and validate results
    runBalancer(conf, totalUsedSpace, totalCapacity);
  } finally {
    cluster.shutdown();
  }
}
Example 8
Source File: DfsServlet.java From RDFS with Apache License 2.0
/**
 * Create a {@link NameNode} proxy from the current {@link ServletContext}.
 */
protected ClientProtocol createNameNodeProxy(UnixUserGroupInformation ugi)
    throws IOException {
  ServletContext context = getServletContext();
  InetSocketAddress nnAddr =
      (InetSocketAddress) context.getAttribute("name.node.address");
  if (nnAddr == null) {
    throw new IOException("The namenode is not out of safemode yet");
  }
  Configuration conf = new Configuration(
      (Configuration) context.getAttribute("name.conf"));
  UnixUserGroupInformation.saveToConf(conf,
      UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
  return DFSClient.createNamenode(nnAddr, conf);
}
Example 9
Source File: TestBalancer.java From hadoop-gpu with Apache License 2.0
private void testUnevenDistribution(long distribution[],
    long capacities[], String[] racks) throws Exception {
  int numDatanodes = distribution.length;
  if (capacities.length != numDatanodes || racks.length != numDatanodes) {
    throw new IllegalArgumentException("Array length is not the same");
  }

  // calculate the total space that needs to be filled
  long totalUsedSpace = 0L;
  for (int i = 0; i < distribution.length; i++) {
    totalUsedSpace += distribution[i];
  }

  // fill the cluster
  Block[] blocks = generateBlocks(totalUsedSpace, (short) numDatanodes);

  // redistribute blocks
  Block[][] blocksDN = distributeBlocks(
      blocks, (short) (numDatanodes - 1), distribution);

  // restart the cluster: do NOT format the cluster
  CONF.set("dfs.safemode.threshold.pct", "0.0f");
  cluster = new MiniDFSCluster(0, CONF, numDatanodes,
      false, true, null, racks, capacities);
  cluster.waitActive();
  client = DFSClient.createNamenode(CONF);

  cluster.injectBlocks(blocksDN);

  long totalCapacity = 0L;
  for (long capacity : capacities) {
    totalCapacity += capacity;
  }
  runBalancer(totalUsedSpace, totalCapacity);
}
Example 10
Source File: TestBalancer.java From hadoop-gpu with Apache License 2.0
private void test(long[] capacities, String[] racks,
    long newCapacity, String newRack) throws Exception {
  int numOfDatanodes = capacities.length;
  assertEquals(numOfDatanodes, racks.length);
  cluster = new MiniDFSCluster(0, CONF, capacities.length,
      true, true, null, racks, capacities);
  try {
    cluster.waitActive();
    client = DFSClient.createNamenode(CONF);

    long totalCapacity = 0L;
    for (long capacity : capacities) {
      totalCapacity += capacity;
    }

    // fill up the cluster to be 30% full
    long totalUsedSpace = totalCapacity * 3 / 10;
    createFile(totalUsedSpace / numOfDatanodes, (short) numOfDatanodes);

    // start up an empty node with the same capacity and on the same rack
    cluster.startDataNodes(CONF, 1, true, null,
        new String[]{newRack}, new long[]{newCapacity});
    totalCapacity += newCapacity;

    // run balancer and validate results
    runBalancer(totalUsedSpace, totalCapacity);
  } finally {
    cluster.shutdown();
  }
}
Example 11
Source File: DfsServlet.java From hadoop-gpu with Apache License 2.0
/**
 * Create a {@link NameNode} proxy from the current {@link ServletContext}.
 */
protected ClientProtocol createNameNodeProxy(UnixUserGroupInformation ugi)
    throws IOException {
  ServletContext context = getServletContext();
  InetSocketAddress nnAddr =
      (InetSocketAddress) context.getAttribute("name.node.address");
  Configuration conf = new Configuration(
      (Configuration) context.getAttribute("name.conf"));
  UnixUserGroupInformation.saveToConf(conf,
      UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
  return DFSClient.createNamenode(nnAddr, conf);
}
Example 12
Source File: Balancer.java From hadoop-gpu with Apache License 2.0
private void init(double threshold) throws IOException {
  this.threshold = threshold;
  this.namenode = createNamenode(conf);
  this.client = DFSClient.createNamenode(conf);
  this.fs = FileSystem.get(conf);
}
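A note on the two overloads used above: Examples 2, 8, and 11 pass an explicit InetSocketAddress to createNamenode(InetSocketAddress, Configuration) to target a specific namenode, while the remaining examples call createNamenode(Configuration), which resolves the namenode address from the configuration.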