org.apache.hadoop.hive.shims
Class Hadoop20SShims
java.lang.Object
org.apache.hadoop.hive.shims.HadoopShimsSecure
org.apache.hadoop.hive.shims.Hadoop20SShims
- All Implemented Interfaces:
- HadoopShims
public class Hadoop20SShims
- extends HadoopShimsSecure
Implementation of shims against Hadoop 0.20 with Security.
|
Method Summary |
org.apache.hadoop.fs.FileSystem |
createProxyFileSystem(org.apache.hadoop.fs.FileSystem fs,
URI uri)
|
org.apache.hadoop.conf.Configuration |
getConfiguration(org.apache.hadoop.mapreduce.JobContext context)
|
long |
getDefaultBlockSize(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path path)
|
short |
getDefaultReplication(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path path)
|
HadoopShims.DirectDecompressorShim |
getDirectDecompressor(HadoopShims.DirectCompressionType codec)
|
Map<String,String> |
getHadoopConfNames()
|
HadoopShims.HCatHadoopShims |
getHCatShim()
|
String |
getJobLauncherHttpAddress(org.apache.hadoop.conf.Configuration conf)
|
String |
getJobLauncherRpcAddress(org.apache.hadoop.conf.Configuration conf)
|
HadoopShims.JobTrackerState |
getJobTrackerState(org.apache.hadoop.mapred.ClusterStatus clusterStatus)
|
org.apache.hadoop.fs.BlockLocation[] |
getLocations(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.FileStatus status)
|
Comparator<org.apache.hadoop.io.LongWritable> |
getLongComparator()
|
HadoopShims.MiniDFSShim |
getMiniDfs(org.apache.hadoop.conf.Configuration conf,
int numDataNodes,
boolean format,
String[] racks)
|
Hadoop20SShims.MiniMrShim |
getMiniMrCluster(org.apache.hadoop.conf.Configuration conf,
int numberOfTaskTrackers,
String nameNode,
int numDir)
Returns a shim to wrap MiniMrCluster |
org.apache.hadoop.fs.FileSystem |
getNonCachedFileSystem(URI uri,
org.apache.hadoop.conf.Configuration conf)
|
String |
getTaskAttemptLogUrl(org.apache.hadoop.mapred.JobConf conf,
String taskTrackerHttpAddress,
String taskAttemptId)
|
HadoopShims.WebHCatJTShim |
getWebHCatShim(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.security.UserGroupInformation ugi)
|
HadoopShims.ZeroCopyReaderShim |
getZeroCopyReader(org.apache.hadoop.fs.FSDataInputStream in,
HadoopShims.ByteBufferPoolShim pool)
|
void |
hflush(org.apache.hadoop.fs.FSDataOutputStream stream)
|
boolean |
isLocalMode(org.apache.hadoop.conf.Configuration conf)
|
List<org.apache.hadoop.fs.FileStatus> |
listLocatedStatus(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path path,
org.apache.hadoop.fs.PathFilter filter)
|
boolean |
moveToAppropriateTrash(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path path,
org.apache.hadoop.conf.Configuration conf)
|
org.apache.hadoop.mapreduce.JobContext |
newJobContext(org.apache.hadoop.mapreduce.Job job)
|
org.apache.hadoop.mapreduce.TaskAttemptContext |
newTaskAttemptContext(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.util.Progressable progressable)
|
org.apache.hadoop.mapreduce.TaskAttemptID |
newTaskAttemptID(org.apache.hadoop.mapreduce.JobID jobId,
boolean isMap,
int taskId,
int id)
|
void |
setJobLauncherRpcAddress(org.apache.hadoop.conf.Configuration conf,
String val)
|
void |
setTotalOrderPartitionFile(org.apache.hadoop.mapred.JobConf jobConf,
org.apache.hadoop.fs.Path partitionFile)
|
| Methods inherited from class org.apache.hadoop.hive.shims.HadoopShimsSecure |
addServiceToToken, authorizeProxyAccess, closeAllForUGI, createDelegationTokenFile, createHadoopArchive, createProxyUser, createRemoteUser, doAs, getCombineFileInputFormat, getHarUri, getInputFormatClassName, getShortUserName, getTokenFileLocEnvName, getTokenStrForm, getUGIForConf, isLoginKeytabBased, isSecureShimImpl, isSecurityEnabled, loginUserFromKeytab, loginUserFromKeytabAndReturnUGI, prepareJobOutput, reLoginUserFromKeytab, setTokenStr, unquoteHtmlChars |
| Methods inherited from class java.lang.Object |
clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait |
Hadoop20SShims
public Hadoop20SShims()
getTaskAttemptLogUrl
public String getTaskAttemptLogUrl(org.apache.hadoop.mapred.JobConf conf,
String taskTrackerHttpAddress,
String taskAttemptId)
throws MalformedURLException
- Throws:
MalformedURLException
getJobTrackerState
public HadoopShims.JobTrackerState getJobTrackerState(org.apache.hadoop.mapred.ClusterStatus clusterStatus)
throws Exception
- Specified by:
getJobTrackerState in interface HadoopShims
- Specified by:
getJobTrackerState in class HadoopShimsSecure
- Throws:
Exception
newTaskAttemptContext
public org.apache.hadoop.mapreduce.TaskAttemptContext newTaskAttemptContext(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.util.Progressable progressable)
- Specified by:
newTaskAttemptContext in interface HadoopShims
- Specified by:
newTaskAttemptContext in class HadoopShimsSecure
newTaskAttemptID
public org.apache.hadoop.mapreduce.TaskAttemptID newTaskAttemptID(org.apache.hadoop.mapreduce.JobID jobId,
boolean isMap,
int taskId,
int id)
newJobContext
public org.apache.hadoop.mapreduce.JobContext newJobContext(org.apache.hadoop.mapreduce.Job job)
- Specified by:
newJobContext in interface HadoopShims
- Specified by:
newJobContext in class HadoopShimsSecure
isLocalMode
public boolean isLocalMode(org.apache.hadoop.conf.Configuration conf)
- Specified by:
isLocalMode in interface HadoopShims
- Specified by:
isLocalMode in class HadoopShimsSecure
getJobLauncherRpcAddress
public String getJobLauncherRpcAddress(org.apache.hadoop.conf.Configuration conf)
- Specified by:
getJobLauncherRpcAddress in interface HadoopShims
- Specified by:
getJobLauncherRpcAddress in class HadoopShimsSecure
setJobLauncherRpcAddress
public void setJobLauncherRpcAddress(org.apache.hadoop.conf.Configuration conf,
String val)
- Specified by:
setJobLauncherRpcAddress in interface HadoopShims
- Specified by:
setJobLauncherRpcAddress in class HadoopShimsSecure
getJobLauncherHttpAddress
public String getJobLauncherHttpAddress(org.apache.hadoop.conf.Configuration conf)
- Specified by:
getJobLauncherHttpAddress in interface HadoopShims
- Specified by:
getJobLauncherHttpAddress in class HadoopShimsSecure
moveToAppropriateTrash
public boolean moveToAppropriateTrash(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path path,
org.apache.hadoop.conf.Configuration conf)
throws IOException
- Specified by:
moveToAppropriateTrash in interface HadoopShims
- Specified by:
moveToAppropriateTrash in class HadoopShimsSecure
- Throws:
IOException
getDefaultBlockSize
public long getDefaultBlockSize(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path path)
- Specified by:
getDefaultBlockSize in interface HadoopShims
- Specified by:
getDefaultBlockSize in class HadoopShimsSecure
getDefaultReplication
public short getDefaultReplication(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path path)
- Specified by:
getDefaultReplication in interface HadoopShims
- Specified by:
getDefaultReplication in class HadoopShimsSecure
setTotalOrderPartitionFile
public void setTotalOrderPartitionFile(org.apache.hadoop.mapred.JobConf jobConf,
org.apache.hadoop.fs.Path partitionFile)
getLongComparator
public Comparator<org.apache.hadoop.io.LongWritable> getLongComparator()
getMiniMrCluster
public Hadoop20SShims.MiniMrShim getMiniMrCluster(org.apache.hadoop.conf.Configuration conf,
int numberOfTaskTrackers,
String nameNode,
int numDir)
throws IOException
- Returns a shim to wrap MiniMrCluster
- Throws:
IOException
getMiniDfs
public HadoopShims.MiniDFSShim getMiniDfs(org.apache.hadoop.conf.Configuration conf,
int numDataNodes,
boolean format,
String[] racks)
throws IOException
- Throws:
IOException
getHCatShim
public HadoopShims.HCatHadoopShims getHCatShim()
getWebHCatShim
public HadoopShims.WebHCatJTShim getWebHCatShim(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.security.UserGroupInformation ugi)
throws IOException
- Throws:
IOException
listLocatedStatus
public List<org.apache.hadoop.fs.FileStatus> listLocatedStatus(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path path,
org.apache.hadoop.fs.PathFilter filter)
throws IOException
- Throws:
IOException
getLocations
public org.apache.hadoop.fs.BlockLocation[] getLocations(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.FileStatus status)
throws IOException
- Throws:
IOException
hflush
public void hflush(org.apache.hadoop.fs.FSDataOutputStream stream)
throws IOException
- Throws:
IOException
createProxyFileSystem
public org.apache.hadoop.fs.FileSystem createProxyFileSystem(org.apache.hadoop.fs.FileSystem fs,
URI uri)
- Specified by:
createProxyFileSystem in interface HadoopShims
- Specified by:
createProxyFileSystem in class HadoopShimsSecure
getHadoopConfNames
public Map<String,String> getHadoopConfNames()
getZeroCopyReader
public HadoopShims.ZeroCopyReaderShim getZeroCopyReader(org.apache.hadoop.fs.FSDataInputStream in,
HadoopShims.ByteBufferPoolShim pool)
throws IOException
- Throws:
IOException
getDirectDecompressor
public HadoopShims.DirectDecompressorShim getDirectDecompressor(HadoopShims.DirectCompressionType codec)
getConfiguration
public org.apache.hadoop.conf.Configuration getConfiguration(org.apache.hadoop.mapreduce.JobContext context)
getNonCachedFileSystem
public org.apache.hadoop.fs.FileSystem getNonCachedFileSystem(URI uri,
org.apache.hadoop.conf.Configuration conf)
throws IOException
- Specified by:
getNonCachedFileSystem in interface HadoopShims
- Specified by:
getNonCachedFileSystem in class HadoopShimsSecure
- Throws:
IOException
Copyright © 2014 The Apache Software Foundation. All rights reserved.