Deprecated API


Contents
Deprecated Interfaces
org.apache.hadoop.io.Closeable
          Use java.io.Closeable instead. 
 

Deprecated Classes
org.apache.hadoop.fs.InMemoryFileSystem
           
org.apache.hadoop.mapred.OutputFormatBase
          Use FileOutputFormat 
org.apache.hadoop.fs.ShellCommand
          Use Shell instead. 
org.apache.hadoop.io.UTF8
          replaced by Text 
 

Deprecated Methods
org.apache.hadoop.mapred.JobConf.addInputPath(Path)
          Use FileInputFormat.addInputPath(JobConf, Path) or FileInputFormat.addInputPaths(JobConf, String) 
org.apache.hadoop.dfs.DataNode.createSocketAddr(String)
           
org.apache.hadoop.fs.RawLocalFileSystem.delete(Path)
           
org.apache.hadoop.fs.FilterFileSystem.delete(Path)
           
org.apache.hadoop.fs.FileSystem.delete(Path)
          Use delete(Path, boolean) instead 
org.apache.hadoop.fs.ftp.FTPFileSystem.delete(Path)
          Use delete(Path, boolean) instead 
org.apache.hadoop.fs.kfs.KosmosFileSystem.delete(Path)
           
org.apache.hadoop.fs.s3.S3FileSystem.delete(Path)
           
org.apache.hadoop.fs.s3native.NativeS3FileSystem.delete(Path)
           
org.apache.hadoop.dfs.HftpFileSystem.delete(Path)
           
org.apache.hadoop.dfs.DistributedFileSystem.delete(Path)
           
org.apache.hadoop.dfs.NameNode.delete(String)
           
org.apache.hadoop.mapred.TaskTracker.done(String, boolean)
           
org.apache.hadoop.mapred.Counters.findCounter(String, int, String)
            
org.apache.hadoop.mapred.TaskTracker.fsError(String, String)
           
org.apache.hadoop.mapred.JobTracker.getAssignedTracker(String)
           
org.apache.hadoop.fs.FileSystem.getBlockSize(Path)
          Use getFileStatus() instead 
org.apache.hadoop.mapred.JobConf.getCombineOnceOnly()
           
org.apache.hadoop.io.SequenceFile.getCompressionType(Configuration)
          Use JobConf.getMapOutputCompressionType() to get SequenceFile.CompressionType for intermediate map-outputs or SequenceFileOutputFormat.getOutputCompressionType(org.apache.hadoop.mapred.JobConf) to get SequenceFile.CompressionType for job-outputs. 
org.apache.hadoop.mapred.Counters.Group.getCounter(int, String)
          use Counters.Group.getCounter(String) instead 
org.apache.hadoop.fs.FileSystem.getFileBlockLocations(Path, long, long)
          use FileSystem.getFileBlockLocations(FileStatus, long, long) 
org.apache.hadoop.mapred.JobConf.getInputPaths()
          Use FileInputFormat.getInputPaths(JobConf) 
org.apache.hadoop.mapred.JobClient.getJob(String)
          Applications should rather use JobClient.getJob(JobID). 
org.apache.hadoop.mapred.JobTracker.getJob(String)
           
org.apache.hadoop.mapred.JobTracker.getJobCounters(String)
           
org.apache.hadoop.mapred.JobProfile.getJobId()
          use getJobID() instead 
org.apache.hadoop.mapred.JobStatus.getJobId()
          use getJobID instead 
org.apache.hadoop.mapred.RunningJob.getJobID()
          This method is deprecated and will be removed. Applications should rather use RunningJob.getID(). 
org.apache.hadoop.mapred.JobTracker.getJobProfile(String)
           
org.apache.hadoop.mapred.JobTracker.getJobStatus(String)
           
org.apache.hadoop.fs.FileSystem.getLength(Path)
          Use getFileStatus() instead 
org.apache.hadoop.fs.kfs.KosmosFileSystem.getLength(Path)
           
org.apache.hadoop.mapred.JobHistory.JobInfo.getLocalJobFilePath(String)
           
org.apache.hadoop.mapred.JobTracker.getLocalJobFilePath(String)
           
org.apache.hadoop.mapred.TaskTracker.getMapCompletionEvents(String, int, int)
           
org.apache.hadoop.mapred.JobConf.getMapOutputCompressionType()
          SequenceFile.CompressionType is no longer valid for intermediate map-outputs. 
org.apache.hadoop.mapred.jobcontrol.Job.getMapredJobID()
          use Job.getAssignedJobID() instead 
org.apache.hadoop.mapred.JobClient.getMapTaskReports(String)
          Applications should rather use JobClient.getMapTaskReports(JobID) 
org.apache.hadoop.mapred.JobTracker.getMapTaskReports(String)
           
org.apache.hadoop.fs.RawLocalFileSystem.getName()
            
org.apache.hadoop.fs.FilterFileSystem.getName()
          Call getUri() instead. 
org.apache.hadoop.fs.FileSystem.getName()
          Call getUri() instead. 
org.apache.hadoop.fs.kfs.KosmosFileSystem.getName()
           
org.apache.hadoop.dfs.DistributedFileSystem.getName()
            
org.apache.hadoop.fs.FileSystem.getNamed(String, Configuration)
          Call get(URI, Configuration) instead. 
org.apache.hadoop.dfs.namenode.metrics.NameNodeStatisticsMBean.getNumFilesListed()
          Use getNumGetListingOps() instead 
org.apache.hadoop.dfs.namenode.metrics.NameNodeStatistics.getNumFilesListed()
          call getNumGetListingOps() instead 
org.apache.hadoop.mapred.JobConf.getOutputPath()
          Use FileOutputFormat.getOutputPath(JobConf) or FileOutputFormat.getWorkOutputPath(JobConf) instead. Gets the Path to the output directory for the map-reduce job. 
org.apache.hadoop.mapred.JobClient.getReduceTaskReports(String)
          Applications should rather use JobClient.getReduceTaskReports(JobID) 
org.apache.hadoop.mapred.JobTracker.getReduceTaskReports(String)
           
org.apache.hadoop.fs.FileSystem.getReplication(Path)
          Use getFileStatus() instead 
org.apache.hadoop.fs.kfs.KosmosFileSystem.getReplication(Path)
           
org.apache.hadoop.net.NetUtils.getServerAddress(Configuration, String, String, String)
           
org.apache.hadoop.mapred.JobConf.getSystemDir()
          Use JobClient.getSystemDir() instead. Get the system directory where job-specific files are to be placed. 
org.apache.hadoop.mapred.TaskTracker.getTask(String)
           
org.apache.hadoop.mapred.JobTracker.getTaskCompletionEvents(String, int, int)
           
org.apache.hadoop.mapred.JobTracker.getTaskDiagnostics(String, String, String)
           
org.apache.hadoop.mapred.TaskCompletionEvent.getTaskId()
          use TaskCompletionEvent.getTaskAttemptId() instead. 
org.apache.hadoop.mapred.TaskReport.getTaskId()
          use TaskReport.getTaskID() instead 
org.apache.hadoop.mapred.TaskLog.getTaskLogFile(String, TaskLog.LogName)
           
org.apache.hadoop.mapred.JobClient.getTaskOutputFilter()
           
org.apache.hadoop.fs.FileSystem.isDirectory(Path)
          Use getFileStatus() instead 
org.apache.hadoop.fs.kfs.KosmosFileSystem.isDirectory(Path)
           
org.apache.hadoop.fs.kfs.KosmosFileSystem.isFile(Path)
           
org.apache.hadoop.mapred.JobTracker.killJob(String)
           
org.apache.hadoop.mapred.RunningJob.killTask(String, boolean)
          Applications should rather use RunningJob.killTask(TaskAttemptID, boolean) 
org.apache.hadoop.mapred.JobTracker.killTask(String, boolean)
           
org.apache.hadoop.mapred.FileInputFormat.listPaths(JobConf)
          Use FileInputFormat.listStatus(JobConf) instead. 
org.apache.hadoop.fs.RawLocalFileSystem.lock(Path, boolean)
            
org.apache.hadoop.fs.kfs.KosmosFileSystem.lock(Path, boolean)
           
org.apache.hadoop.mapred.JobHistory.JobInfo.logFailed(String, long, int, int)
           
org.apache.hadoop.mapred.JobHistory.Task.logFailed(String, String, String, long, String)
           
org.apache.hadoop.mapred.JobHistory.MapAttempt.logFailed(String, String, String, long, String, String)
           
org.apache.hadoop.mapred.JobHistory.ReduceAttempt.logFailed(String, String, String, long, String, String)
           
org.apache.hadoop.mapred.JobHistory.JobInfo.logFinished(String, long, int, int, int, int, Counters)
           
org.apache.hadoop.mapred.JobHistory.Task.logFinished(String, String, String, long, Counters)
           
org.apache.hadoop.mapred.JobHistory.ReduceAttempt.logFinished(String, String, String, long, long, long, String)
           
org.apache.hadoop.mapred.JobHistory.MapAttempt.logFinished(String, String, String, long, String)
           
org.apache.hadoop.mapred.JobHistory.MapAttempt.logKilled(String, String, String, long, String, String)
           
org.apache.hadoop.mapred.JobHistory.ReduceAttempt.logKilled(String, String, String, long, String, String)
           
org.apache.hadoop.mapred.JobHistory.JobInfo.logStarted(String, long, int, int)
           
org.apache.hadoop.mapred.JobHistory.Task.logStarted(String, String, String, long)
           
org.apache.hadoop.mapred.JobHistory.MapAttempt.logStarted(String, String, String, long, String)
           
org.apache.hadoop.mapred.JobHistory.ReduceAttempt.logStarted(String, String, String, long, String)
           
org.apache.hadoop.mapred.JobHistory.JobInfo.logSubmitted(String, JobConf, String, long)
           
org.apache.hadoop.mapred.TaskTracker.mapOutputLost(String, String)
           
org.apache.hadoop.io.SequenceFile.Reader.next(DataOutputBuffer)
          Call SequenceFile.Reader.nextRaw(DataOutputBuffer,SequenceFile.ValueBytes). 
org.apache.hadoop.mapred.TaskTracker.ping(String)
           
org.apache.hadoop.fs.RawLocalFileSystem.release(Path)
            
org.apache.hadoop.fs.kfs.KosmosFileSystem.release(Path)
           
org.apache.hadoop.mapred.TaskTracker.reportDiagnosticInfo(String, String)
           
org.apache.hadoop.mapred.JobConf.setCombineOnceOnly(boolean)
           
org.apache.hadoop.io.SequenceFile.setCompressionType(Configuration, SequenceFile.CompressionType)
          Use one of the many SequenceFile.createWriter methods to specify the SequenceFile.CompressionType while creating the SequenceFile, or JobConf.setMapOutputCompressionType(org.apache.hadoop.io.SequenceFile.CompressionType) to specify the SequenceFile.CompressionType for intermediate map-outputs, or SequenceFileOutputFormat.setOutputCompressionType(org.apache.hadoop.mapred.JobConf, org.apache.hadoop.io.SequenceFile.CompressionType) to specify the SequenceFile.CompressionType for job-outputs. 
org.apache.hadoop.mapred.JobConf.setInputPath(Path)
          Use FileInputFormat.setInputPaths(JobConf, Path...) or FileInputFormat.setInputPaths(JobConf, String) 
org.apache.hadoop.mapred.JobConf.setMapOutputCompressionType(SequenceFile.CompressionType)
          SequenceFile.CompressionType is no longer valid for intermediate map-outputs. 
org.apache.hadoop.mapred.jobcontrol.Job.setMapredJobID(String)
          use Job.setAssignedJobID(JobID) instead 
org.apache.hadoop.mapred.JobConf.setOutputPath(Path)
          Use FileOutputFormat.setOutputPath(JobConf, Path) instead. Sets the Path of the output directory for the map-reduce job. (Note: the original note's body is missing from this extraction — see the FileOutputFormat Javadoc for the full text.) 
org.apache.hadoop.mapred.TaskCompletionEvent.setTaskId(String)
          use TaskCompletionEvent.setTaskID(TaskAttemptID) instead. 
org.apache.hadoop.mapred.JobClient.setTaskOutputFilter(JobClient.TaskStatusFilter)
           
org.apache.hadoop.ipc.Server.setTimeout(int)
           
org.apache.hadoop.mapred.TaskTracker.shuffleError(String, String)
           
org.apache.hadoop.mapred.TaskTracker.statusUpdate(String, TaskStatus)
           
org.apache.hadoop.mapred.JobTracker.submitJob(String)
           
org.apache.hadoop.mapred.FileInputFormat.validateInput(JobConf)
           
org.apache.hadoop.mapred.InputFormat.validateInput(JobConf)
          getSplits is called in the client and can perform any necessary validation of the input 
 

Deprecated Constructors
org.apache.hadoop.dfs.ChecksumDistributedFileSystem(InetSocketAddress, Configuration)
            
org.apache.hadoop.dfs.DistributedFileSystem(InetSocketAddress, Configuration)
            
org.apache.hadoop.mapred.FileSplit(Path, long, long, JobConf)
            
org.apache.hadoop.fs.FSDataOutputStream(OutputStream)
           
org.apache.hadoop.mapred.JobProfile(String, String, String, String, String)
          use JobProfile(String, JobID, String, String, String) instead 
org.apache.hadoop.mapred.JobStatus(String, float, float, int)
           
org.apache.hadoop.mapred.LineRecordReader(InputStream, long, long)
           
org.apache.hadoop.io.SetFile.Writer(FileSystem, String, Class)
          pass a Configuration too 
org.apache.hadoop.mapred.TaskCompletionEvent(int, String, int, boolean, TaskCompletionEvent.Status, String)
           
 



Copyright © 2008 The Apache Software Foundation