Java Code Examples for org.apache.hadoop.ipc.RemoteException#unwrapRemoteException()
The following examples show how to use
org.apache.hadoop.ipc.RemoteException#unwrapRemoteException().
Each example is drawn from an open source project; its source file, originating project, and license are noted above the code.
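Most of the examples below follow the same pattern: make an RPC call, catch the RemoteException that the IPC layer throws, and rethrow it unwrapped into the specific exception types callers expect. Below is a minimal sketch of that pattern, assuming a wrapper class around a ClientProtocol proxy; the class name UnwrapPattern and the field namenode are illustrative, not taken from any of the projects below.

import java.io.FileNotFoundException;
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.AccessControlException;

public class UnwrapPattern {
  // Hypothetical NameNode RPC proxy, injected by the caller.
  private final ClientProtocol namenode;

  public UnwrapPattern(ClientProtocol namenode) {
    this.namenode = namenode;
  }

  public boolean delete(String src, boolean recursive) throws IOException {
    try {
      return namenode.delete(src, recursive);
    } catch (RemoteException re) {
      // Only the listed classes are unwrapped into their local types;
      // any other wrapped exception is rethrown as the RemoteException.
      throw re.unwrapRemoteException(AccessControlException.class,
          FileNotFoundException.class);
    }
  }
}

When the exception wrapped by the RemoteException matches one of the classes passed to unwrapRemoteException, the corresponding local exception is reconstructed and thrown; otherwise the original RemoteException comes back unchanged.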
Example 1
Source File: DFSClient.java From hadoop with Apache License 2.0
/**
 * Rename file or directory.
 * @see ClientProtocol#rename(String, String)
 * @deprecated Use {@link #rename(String, String, Options.Rename...)} instead.
 */
@Deprecated
public boolean rename(String src, String dst) throws IOException {
  checkOpen();
  TraceScope scope = getSrcDstTraceScope("rename", src, dst);
  try {
    return namenode.rename(src, dst);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   NSQuotaExceededException.class,
                                   DSQuotaExceededException.class,
                                   UnresolvedPathException.class,
                                   SnapshotAccessControlException.class);
  } finally {
    scope.close();
  }
}
Example 2
Source File: DFSClient.java From RDFS with Apache License 2.0
/**
 * Get the data transfer protocol version supported in the cluster,
 * assuming all the datanodes have the same version.
 *
 * @return the data transfer protocol version supported in the cluster
 */
int getDataTransferProtocolVersion() throws IOException {
  synchronized (dataTransferVersion) {
    if (dataTransferVersion == -1) {
      // Get the version number from NN
      try {
        int remoteDataTransferVersion =
            namenode.getDataTransferProtocolVersion();
        updateDataTransferProtocolVersionIfNeeded(remoteDataTransferVersion);
      } catch (RemoteException re) {
        IOException ioe = re.unwrapRemoteException(IOException.class);
        if (ioe.getMessage().startsWith(IOException.class.getName() + ": " +
            NoSuchMethodException.class.getName())) {
          dataTransferVersion = 14; // last version not supporting this RPC
        } else {
          throw ioe;
        }
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug("Data Transfer Protocol Version is " + dataTransferVersion);
      }
    }
    return dataTransferVersion;
  }
}
Example 3
Source File: DFSClient.java From big-c with Apache License 2.0
public void setXAttr(String src, String name, byte[] value,
    EnumSet<XAttrSetFlag> flag) throws IOException {
  checkOpen();
  TraceScope scope = getPathTraceScope("setXAttr", src);
  try {
    namenode.setXAttr(src, XAttrHelper.buildXAttr(name, value), flag);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   FileNotFoundException.class,
                                   NSQuotaExceededException.class,
                                   SafeModeException.class,
                                   SnapshotAccessControlException.class,
                                   UnresolvedPathException.class);
  } finally {
    scope.close();
  }
}
Example 4
Source File: DFSClient.java From hadoop with Apache License 2.0
/**
 * Set storage policy for an existing file/directory
 * @param src file/directory name
 * @param policyName name of the storage policy
 */
public void setStoragePolicy(String src, String policyName)
    throws IOException {
  TraceScope scope = getPathTraceScope("setStoragePolicy", src);
  try {
    namenode.setStoragePolicy(src, policyName);
  } catch (RemoteException e) {
    throw e.unwrapRemoteException(AccessControlException.class,
                                  FileNotFoundException.class,
                                  SafeModeException.class,
                                  NSQuotaExceededException.class,
                                  UnresolvedPathException.class,
                                  SnapshotAccessControlException.class);
  } finally {
    scope.close();
  }
}
Example 5
Source File: DFSClient.java From big-c with Apache License 2.0
/** Method to get stream returned by append call */
private DFSOutputStream callAppend(String src, int buffersize,
    EnumSet<CreateFlag> flag, Progressable progress, String[] favoredNodes)
    throws IOException {
  CreateFlag.validateForAppend(flag);
  try {
    LastBlockWithStatus blkWithStatus = namenode.append(src, clientName,
        new EnumSetWritable<>(flag, CreateFlag.class));
    return DFSOutputStream.newStreamForAppend(this, src, flag, buffersize,
        progress, blkWithStatus.getLastBlock(), blkWithStatus.getFileStatus(),
        dfsClientConf.createChecksum(), favoredNodes);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   FileNotFoundException.class,
                                   SafeModeException.class,
                                   DSQuotaExceededException.class,
                                   UnsupportedOperationException.class,
                                   UnresolvedPathException.class,
                                   SnapshotAccessControlException.class);
  }
}
Example 6
Source File: SpliceFailFastInterceptor.java From spliceengine with GNU Affero General Public License v3.0
@Override
public void handleFailure(RetryingCallerInterceptorContext context,
    Throwable t) throws IOException {
  if (t instanceof UndeclaredThrowableException) {
    t = t.getCause();
  }
  if (t instanceof RemoteException) {
    RemoteException re = (RemoteException) t;
    t = re.unwrapRemoteException();
  }
  if (t instanceof DoNotRetryIOException) {
    throw (DoNotRetryIOException) t;
  }
  if (t instanceof IOException) {
    throw (IOException) t;
  }
  throw new IOException(t);
}
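Example 6 uses the no-argument overload of unwrapRemoteException(). Unlike the typed overloads in the other examples, it attempts to unwrap whatever exception class is wrapped, reconstructing it reflectively from the class name and message; if the wrapped class cannot be instantiated locally, the RemoteException itself is returned. A minimal sketch of that behavior follows; the wrapped FileNotFoundException is constructed by hand here purely for illustration.

import java.io.IOException;
import org.apache.hadoop.ipc.RemoteException;

public class NoArgUnwrapDemo {
  public static void main(String[] args) {
    // Simulate what the IPC layer produces: the server-side exception
    // travels across the wire as a class name plus a message.
    RemoteException re = new RemoteException(
        "java.io.FileNotFoundException", "/tmp/missing: no such file");
    // The no-arg overload reconstructs the wrapped type reflectively.
    IOException unwrapped = re.unwrapRemoteException();
    System.out.println(unwrapped.getClass().getName());
    // Expected output: java.io.FileNotFoundException
  }
}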
Example 7
Source File: DFSClient.java From RDFS with Apache License 2.0
/**
 * Sets or resets quotas for a directory.
 * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setQuota(String, long, long)
 */
void setQuota(String src, long namespaceQuota, long diskspaceQuota)
    throws IOException {
  // sanity check
  if ((namespaceQuota <= 0 &&
       namespaceQuota != FSConstants.QUOTA_DONT_SET &&
       namespaceQuota != FSConstants.QUOTA_RESET) ||
      (diskspaceQuota <= 0 &&
       diskspaceQuota != FSConstants.QUOTA_DONT_SET &&
       diskspaceQuota != FSConstants.QUOTA_RESET)) {
    throw new IllegalArgumentException("Invalid values for quota : " +
        namespaceQuota + " and " + diskspaceQuota);
  }
  try {
    namenode.setQuota(src, namespaceQuota, diskspaceQuota);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   FileNotFoundException.class,
                                   NSQuotaExceededException.class,
                                   DSQuotaExceededException.class);
  }
}
Example 8
Source File: DFSClient.java From hadoop with Apache License 2.0
/**
 * Recover a file's lease
 * @param src a file's path
 * @return true if the file is already closed
 * @throws IOException
 */
boolean recoverLease(String src) throws IOException {
  checkOpen();
  TraceScope scope = getPathTraceScope("recoverLease", src);
  try {
    return namenode.recoverLease(src, clientName);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(FileNotFoundException.class,
                                   AccessControlException.class,
                                   UnresolvedPathException.class);
  } finally {
    scope.close();
  }
}
Example 9
Source File: DFSClient.java From hadoop with Apache License 2.0
/**
 * Get the difference between two snapshots, or between a snapshot and the
 * current tree of a directory.
 * @see ClientProtocol#getSnapshotDiffReport(String, String, String)
 */
public SnapshotDiffReport getSnapshotDiffReport(String snapshotDir,
    String fromSnapshot, String toSnapshot) throws IOException {
  checkOpen();
  TraceScope scope = Trace.startSpan("getSnapshotDiffReport", traceSampler);
  try {
    return namenode.getSnapshotDiffReport(snapshotDir, fromSnapshot,
        toSnapshot);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException();
  } finally {
    scope.close();
  }
}
Example 10
Source File: DFSClient.java From hadoop with Apache License 2.0
/**
 * Rolls the edit log on the active NameNode.
 * @return the txid of the new log segment
 *
 * @see ClientProtocol#rollEdits()
 */
long rollEdits() throws AccessControlException, IOException {
  TraceScope scope = Trace.startSpan("rollEdits", traceSampler);
  try {
    return namenode.rollEdits();
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class);
  } finally {
    scope.close();
  }
}
Example 11
Source File: DFSClient.java From RDFS with Apache License 2.0
/**
 * Fetch the list of files that have been open longer than a
 * specified amount of time.
 * @param prefix path prefix specifying subset of files to examine
 * @param millis select files that have been open longer than this
 * @param start where to start searching when there are large numbers of
 *   files returned. Pass null the first time, then pass the last
 *   value returned by the previous call for subsequent calls.
 * @return array of OpenFileInfo objects
 * @throws IOException
 */
public OpenFileInfo[] iterativeGetOpenFiles(
    Path prefix, int millis, String start) throws IOException {
  checkOpen();
  try {
    return namenode.iterativeGetOpenFiles(prefix.toString(), millis, start);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   NSQuotaExceededException.class,
                                   DSQuotaExceededException.class);
  }
}
Example 12
Source File: DFSClient.java From hadoop with Apache License 2.0
public Map<String, byte[]> getXAttrs(String src, List<String> names)
    throws IOException {
  checkOpen();
  TraceScope scope = getPathTraceScope("getXAttrs", src);
  try {
    return XAttrHelper.buildXAttrMap(namenode.getXAttrs(
        src, XAttrHelper.buildXAttrs(names)));
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   FileNotFoundException.class,
                                   UnresolvedPathException.class);
  } finally {
    scope.close();
  }
}
Example 13
Source File: DFSClient.java From big-c with Apache License 2.0
/**
 * Get the file info for a specific file or directory.
 * @param src The string representation of the path to the file
 * @return object containing information regarding the file
 *         or null if file not found
 *
 * @see ClientProtocol#getFileInfo(String) for description of exceptions
 */
public HdfsFileStatus getFileInfo(String src) throws IOException {
  checkOpen();
  TraceScope scope = getPathTraceScope("getFileInfo", src);
  try {
    return namenode.getFileInfo(src);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   FileNotFoundException.class,
                                   UnresolvedPathException.class);
  } finally {
    scope.close();
  }
}
Example 14
Source File: DFSClient.java From hadoop with Apache License 2.0
/**
 * Get all the current snapshottable directories.
 * @return All the current snapshottable directories
 * @throws IOException
 * @see ClientProtocol#getSnapshottableDirListing()
 */
public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
    throws IOException {
  checkOpen();
  TraceScope scope = Trace.startSpan("getSnapshottableDirListing",
      traceSampler);
  try {
    return namenode.getSnapshottableDirListing();
  } catch (RemoteException re) {
    throw re.unwrapRemoteException();
  } finally {
    scope.close();
  }
}
Example 15
Source File: DFSClient.java From big-c with Apache License 2.0
/**
 * Resolve the *first* symlink, if any, in the path.
 *
 * @see ClientProtocol#getLinkTarget(String)
 */
public String getLinkTarget(String path) throws IOException {
  checkOpen();
  TraceScope scope = getPathTraceScope("getLinkTarget", path);
  try {
    return namenode.getLinkTarget(path);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   FileNotFoundException.class);
  } finally {
    scope.close();
  }
}
Example 16
Source File: DFSClient.java From big-c with Apache License 2.0
/**
 * Close status of a file
 * @return true if file is already closed
 */
public boolean isFileClosed(String src) throws IOException {
  checkOpen();
  TraceScope scope = getPathTraceScope("isFileClosed", src);
  try {
    return namenode.isFileClosed(src);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   FileNotFoundException.class,
                                   UnresolvedPathException.class);
  } finally {
    scope.close();
  }
}
Example 17
Source File: DFSClient.java From RDFS with Apache License 2.0
public FileStatus getFileInfo(String src) throws IOException {
  checkOpen();
  try {
    if (namenodeProtocolProxy == null) {
      return versionBasedGetFileInfo(src);
    }
    return methodBasedGetFileInfo(src);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class);
  }
}
Example 18
Source File: TestShadeSaslAuthenticationProvider.java From hbase with Apache License 2.0
void validateRootCause(Throwable rootCause) {
  LOG.info("Root cause was", rootCause);
  if (rootCause instanceof RemoteException) {
    RemoteException re = (RemoteException) rootCause;
    IOException actualException = re.unwrapRemoteException();
    assertEquals(InvalidToken.class, actualException.getClass());
  } else {
    StringWriter writer = new StringWriter();
    rootCause.printStackTrace(new PrintWriter(writer));
    String text = writer.toString();
    assertTrue("Message did not contain expected text",
        text.contains(InvalidToken.class.getName()));
  }
}
Example 19
Source File: BlockReaderFactory.java From hadoop with Apache License 2.0
/**
 * Get {@link BlockReaderLocalLegacy} for short circuited local reads.
 * This block reader implements the path-based style of local reads
 * first introduced in HDFS-2246.
 */
private BlockReader getLegacyBlockReaderLocal() throws IOException {
  if (LOG.isTraceEnabled()) {
    LOG.trace(this + ": trying to construct BlockReaderLocalLegacy");
  }
  if (!DFSClient.isLocalAddress(inetSocketAddress)) {
    if (LOG.isTraceEnabled()) {
      LOG.trace(this + ": can't construct BlockReaderLocalLegacy because " +
          "the address " + inetSocketAddress + " is not local");
    }
    return null;
  }
  if (clientContext.getDisableLegacyBlockReaderLocal()) {
    PerformanceAdvisory.LOG.debug(this + ": can't construct " +
        "BlockReaderLocalLegacy because " +
        "disableLegacyBlockReaderLocal is set.");
    return null;
  }
  IOException ioe = null;
  try {
    return BlockReaderLocalLegacy.newBlockReader(conf,
        userGroupInformation, configuration, fileName, block, token,
        datanode, startOffset, length, storageType);
  } catch (RemoteException remoteException) {
    ioe = remoteException.unwrapRemoteException(
        InvalidToken.class, AccessControlException.class);
  } catch (IOException e) {
    ioe = e;
  }
  if ((!(ioe instanceof AccessControlException)) && isSecurityException(ioe)) {
    // Handle security exceptions.
    // We do not handle AccessControlException here, since
    // BlockReaderLocalLegacy#newBlockReader uses that exception to indicate
    // that the user is not in dfs.block.local-path-access.user, a condition
    // which requires us to disable legacy SCR.
    throw ioe;
  }
  LOG.warn(this + ": error creating legacy BlockReaderLocal. " +
      "Disabling legacy local reads.", ioe);
  clientContext.setDisableLegacyBlockReaderLocal();
  return null;
}
Example 20
Source File: BlockReaderFactory.java From big-c with Apache License 2.0
/**
 * Get {@link BlockReaderLocalLegacy} for short circuited local reads.
 * This block reader implements the path-based style of local reads
 * first introduced in HDFS-2246.
 */
private BlockReader getLegacyBlockReaderLocal() throws IOException {
  if (LOG.isTraceEnabled()) {
    LOG.trace(this + ": trying to construct BlockReaderLocalLegacy");
  }
  if (!DFSClient.isLocalAddress(inetSocketAddress)) {
    if (LOG.isTraceEnabled()) {
      LOG.trace(this + ": can't construct BlockReaderLocalLegacy because " +
          "the address " + inetSocketAddress + " is not local");
    }
    return null;
  }
  if (clientContext.getDisableLegacyBlockReaderLocal()) {
    PerformanceAdvisory.LOG.debug(this + ": can't construct " +
        "BlockReaderLocalLegacy because " +
        "disableLegacyBlockReaderLocal is set.");
    return null;
  }
  IOException ioe = null;
  try {
    return BlockReaderLocalLegacy.newBlockReader(conf,
        userGroupInformation, configuration, fileName, block, token,
        datanode, startOffset, length, storageType);
  } catch (RemoteException remoteException) {
    ioe = remoteException.unwrapRemoteException(
        InvalidToken.class, AccessControlException.class);
  } catch (IOException e) {
    ioe = e;
  }
  if ((!(ioe instanceof AccessControlException)) && isSecurityException(ioe)) {
    // Handle security exceptions.
    // We do not handle AccessControlException here, since
    // BlockReaderLocalLegacy#newBlockReader uses that exception to indicate
    // that the user is not in dfs.block.local-path-access.user, a condition
    // which requires us to disable legacy SCR.
    throw ioe;
  }
  LOG.warn(this + ": error creating legacy BlockReaderLocal. " +
      "Disabling legacy local reads.", ioe);
  clientContext.setDisableLegacyBlockReaderLocal();
  return null;
}