
Customizing Hive's InputFormat and OutputFormat

2013-05-09
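
Hive reads and writes table data through the old-style org.apache.hadoop.mapred interfaces, so supporting a custom file or record layout usually starts from the stock Hadoop classes and adapts them. Collected below as templates are TextInputFormat, the RecordReader contract, LineRecordReader, TextOutputFormat, the RecordWriter contract, and FileOutputFormat, with short customization sketches after the pieces most worth overriding.
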
package org.apache.hadoop.mapred;

import java.io.*;

import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.*;

/** An {@link InputFormat} for plain text files. Files are broken into lines.
 * Either linefeed or carriage-return is used to signal end of line. Keys are
 * the position in the file, and values are the line of text.
 */
public class TextInputFormat extends FileInputFormat<LongWritable, Text>
    implements JobConfigurable {

  private CompressionCodecFactory compressionCodecs = null;

  public void configure(JobConf conf) {
    compressionCodecs = new CompressionCodecFactory(conf);
  }

  protected boolean isSplitable(FileSystem fs, Path file) {
    // A compressed file cannot be split: a reader must decompress
    // from the beginning of the stream.
    return compressionCodecs.getCodec(file) == null;
  }

  public RecordReader<LongWritable, Text> getRecordReader(
      InputSplit genericSplit, JobConf job, Reporter reporter)
      throws IOException {
    reporter.setStatus(genericSplit.toString());
    return new LineRecordReader(job, (FileSplit) genericSplit);
  }
}
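
To customize how Hive reads a table, the common pattern is to keep TextInputFormat's split handling and wrap the stock LineRecordReader with a reader that rewrites each line before the SerDe sees it. Below is a minimal sketch of that pattern for a hypothetical "||"-delimited file; the package com.example.hive, the class names, and the delimiter are illustrative, not part of the sources in this post.

package com.example.hive; // hypothetical package

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.LineRecordReader;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;

/** Reuses TextInputFormat's splitting but rewrites "||" to \001,
 *  Hive's default field separator, so LazySimpleSerDe can parse the row. */
public class DoublePipeInputFormat extends TextInputFormat {

  @Override
  public RecordReader<LongWritable, Text> getRecordReader(
      InputSplit split, JobConf job, Reporter reporter) throws IOException {
    reporter.setStatus(split.toString());
    return new DoublePipeRecordReader(new LineRecordReader(job, (FileSplit) split));
  }

  /** Decorates the stock LineRecordReader and rewrites each line. */
  public static class DoublePipeRecordReader
      implements RecordReader<LongWritable, Text> {

    private final LineRecordReader reader;

    public DoublePipeRecordReader(LineRecordReader reader) {
      this.reader = reader;
    }

    public boolean next(LongWritable key, Text value) throws IOException {
      if (!reader.next(key, value)) {
        return false;
      }
      // Replace the custom delimiter with \001 before Hive parses the row.
      value.set(value.toString().replace("||", "\001"));
      return true;
    }

    public LongWritable createKey() { return reader.createKey(); }
    public Text createValue() { return reader.createValue(); }
    public long getPos() throws IOException { return reader.getPos(); }
    public float getProgress() throws IOException { return reader.getProgress(); }
    public void close() throws IOException { reader.close(); }
  }
}

A table would then reference the class in its DDL, e.g. STORED AS INPUTFORMAT 'com.example.hive.DoublePipeInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat', with the jar on Hive's classpath (e.g. via ADD JAR).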


package org.apache.hadoop.mapred;

import java.io.IOException;
import java.io.DataInput;

/**
 * <code>RecordReader</code> reads &lt;key, value&gt; pairs from an
 * {@link InputSplit}.
 *
 * <p><code>RecordReader</code>, typically, converts the byte-oriented view of
 * the input, provided by the <code>InputSplit</code>, and presents a
 * record-oriented view for the {@link Mapper} &amp; {@link Reducer} tasks for
 * processing. It thus assumes the responsibility of processing record
 * boundaries and presenting the tasks with keys and values.</p>
 *
 * @see InputSplit
 * @see InputFormat
 */
public interface RecordReader<K, V> {
  /**
   * Reads the next key/value pair from the input for processing.
   *
   * @param key the key to read data into
   * @param value the value to read data into
   * @return true iff a key/value was read, false if at EOF
   */
  boolean next(K key, V value) throws IOException;

  /**
   * Create an object of the appropriate type to be used as a key.
   *
   * @return a new key object.
   */
  K createKey();

  /**
   * Create an object of the appropriate type to be used as a value.
   *
   * @return a new value object.
   */
  V createValue();

  /**
   * Returns the current position in the input.
   *
   * @return the current position in the input.
   * @throws IOException
   */
  long getPos() throws IOException;

  /**
   * Close this {@link RecordReader} to future operations.
   *
   * @throws IOException
   */
  void close() throws IOException;

  /**
   * How much of the input has the {@link RecordReader} consumed,
   * i.e. how much has been processed?
   *
   * @return progress from <code>0.0</code> to <code>1.0</code>.
   * @throws IOException
   */
  float getProgress() throws IOException;
}
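
The stock LineRecordReader below is the reference implementation of this contract. Note how it handles split boundaries and compressed input; that is exactly the logic a custom reader avoids re-doing by delegating to it, as in the sketch above.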


package org.apache.hadoop.mapred;

import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;

/**
 * Treats keys as offset in file and value as line.
 */
public class LineRecordReader implements RecordReader<LongWritable, Text> {
  private static final Log LOG
    = LogFactory.getLog(LineRecordReader.class.getName());

  private CompressionCodecFactory compressionCodecs = null;
  private long start;
  private long pos;
  private long end;
  private LineReader in;
  int maxLineLength;

  /**
   * A class that provides a line reader from an input stream.
   * @deprecated Use {@link org.apache.hadoop.util.LineReader} instead.
   */
  @Deprecated
  public static class LineReader extends org.apache.hadoop.util.LineReader {
    LineReader(InputStream in) {
      super(in);
    }
    LineReader(InputStream in, int bufferSize) {
      super(in, bufferSize);
    }
    public LineReader(InputStream in, Configuration conf) throws IOException {
      super(in, conf);
    }
  }

  public LineRecordReader(Configuration job,
                          FileSplit split) throws IOException {
    this.maxLineLength = job.getInt("mapred.linerecordreader.maxlength",
                                    Integer.MAX_VALUE);
    start = split.getStart();
    end = start + split.getLength();
    final Path file = split.getPath();
    compressionCodecs = new CompressionCodecFactory(job);
    final CompressionCodec codec = compressionCodecs.getCodec(file);

    // open the file and seek to the start of the split
    FileSystem fs = file.getFileSystem(job);
    FSDataInputStream fileIn = fs.open(split.getPath());
    boolean skipFirstLine = false;
    if (codec != null) {
      // Compressed input is not splittable: this split covers the whole
      // file, so read to the end of the stream.
      in = new LineReader(codec.createInputStream(fileIn), job);
      end = Long.MAX_VALUE;
    } else {
      if (start != 0) {
        // A split that starts mid-file almost certainly starts mid-line;
        // back up one byte and discard the partial line (the previous
        // split's reader consumes it in full).
        skipFirstLine = true;
        --start;
        fileIn.seek(start);
      }
      in = new LineReader(fileIn, job);
    }
    if (skipFirstLine) {  // skip first line and re-establish "start".
      start += in.readLine(new Text(), 0,
                           (int) Math.min((long) Integer.MAX_VALUE, end - start));
    }
    this.pos = start;
  }

  public LineRecordReader(InputStream in, long offset, long endOffset,
                          int maxLineLength) {
    this.maxLineLength = maxLineLength;
    this.in = new LineReader(in);
    this.start = offset;
    this.pos = offset;
    this.end = endOffset;
  }

  public LineRecordReader(InputStream in, long offset, long endOffset,
                          Configuration job)
    throws IOException {
    this.maxLineLength = job.getInt("mapred.linerecordreader.maxlength",
                                    Integer.MAX_VALUE);
    this.in = new LineReader(in, job);
    this.start = offset;
    this.pos = offset;
    this.end = endOffset;
  }

  public LongWritable createKey() {
    return new LongWritable();
  }

  public Text createValue() {
    return new Text();
  }

  /** Read a line. */
  public synchronized boolean next(LongWritable key, Text value)
    throws IOException {

    while (pos < end) {
      key.set(pos);

      int newSize = in.readLine(value, maxLineLength,
                                Math.max((int) Math.min(Integer.MAX_VALUE, end - pos),
                                         maxLineLength));
      if (newSize == 0) {
        return false;
      }
      pos += newSize;
      if (newSize < maxLineLength) {
        return true;
      }

      // line too long. try again
      LOG.info("Skipped line of size " + newSize + " at pos " + (pos - newSize));
    }

    return false;
  }

  /**
   * Get the progress within the split
   */
  public float getProgress() {
    if (start == end) {
      return 0.0f;
    } else {
      return Math.min(1.0f, (pos - start) / (float) (end - start));
    }
  }

  public synchronized long getPos() throws IOException {
    return pos;
  }

  public synchronized void close() throws IOException {
    if (in != null) {
      in.close();
    }
  }
}
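
One knob worth knowing when reusing this reader: next() silently drops any line longer than mapred.linerecordreader.maxlength, so corrupt, unterminated input cannot balloon a record. A minimal sketch of setting it (the 1 MB value is illustrative):

import org.apache.hadoop.mapred.JobConf;

public class MaxLineLengthExample {
  public static void main(String[] args) {
    JobConf conf = new JobConf();
    // Cap records at 1 MB; LineRecordReader.next() skips longer lines
    // and logs "Skipped line of size ...".
    conf.setInt("mapred.linerecordreader.maxlength", 1024 * 1024);
    System.out.println(conf.getInt("mapred.linerecordreader.maxlength", -1));
  }
}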


/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.mapred;

import java.io.DataOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FSDataOutputStream;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.util.*;

/** An {@link OutputFormat} that writes plain text files.
 */
public class TextOutputFormat<K, V> extends FileOutputFormat<K, V> {

  protected static class LineRecordWriter<K, V>
      implements RecordWriter<K, V> {
    private static final String utf8 = "UTF-8";
    private static final byte[] newline;
    static {
      try {
        newline = "\n".getBytes(utf8);
      } catch (UnsupportedEncodingException uee) {
        throw new IllegalArgumentException("can't find " + utf8 + " encoding");
      }
    }

    protected DataOutputStream out;
    private final byte[] keyValueSeparator;

    public LineRecordWriter(DataOutputStream out, String keyValueSeparator) {
      this.out = out;
      try {
        this.keyValueSeparator = keyValueSeparator.getBytes(utf8);
      } catch (UnsupportedEncodingException uee) {
        throw new IllegalArgumentException("can't find " + utf8 + " encoding");
      }
    }

    public LineRecordWriter(DataOutputStream out) {
      this(out, "\t");
    }

    /**
     * Write the object to the byte stream, handling Text as a special
     * case.
     * @param o the object to print
     * @throws IOException if the write throws, we pass it on
     */
    private void writeObject(Object o) throws IOException {
      if (o instanceof Text) {
        Text to = (Text) o;
        out.write(to.getBytes(), 0, to.getLength());
      } else {
        out.write(o.toString().getBytes(utf8));
      }
    }

    public synchronized void write(K key, V value)
        throws IOException {

      // A null or NullWritable key/value is simply omitted from the line.
      boolean nullKey = key == null || key instanceof NullWritable;
      boolean nullValue = value == null || value instanceof NullWritable;
      if (nullKey && nullValue) {
        return;
      }
      if (!nullKey) {
        writeObject(key);
      }
      if (!(nullKey || nullValue)) {
        out.write(keyValueSeparator);
      }
      if (!nullValue) {
        writeObject(value);
      }
      out.write(newline);
    }

    public synchronized void close(Reporter reporter) throws IOException {
      out.close();
    }
  }

  public RecordWriter<K, V> getRecordWriter(FileSystem ignored,
                                            JobConf job,
                                            String name,
                                            Progressable progress)
      throws IOException {
    boolean isCompressed = getCompressOutput(job);
    String keyValueSeparator = job.get("mapred.textoutputformat.separator",
                                       "\t");
    if (!isCompressed) {
      Path file = FileOutputFormat.getTaskOutputPath(job, name);
      FileSystem fs = file.getFileSystem(job);
      FSDataOutputStream fileOut = fs.create(file, progress);
      return new LineRecordWriter<K, V>(fileOut, keyValueSeparator);
    } else {
      Class<? extends CompressionCodec> codecClass =
        getOutputCompressorClass(job, GzipCodec.class);
      // create the named codec
      CompressionCodec codec = ReflectionUtils.newInstance(codecClass, job);
      // build the filename including the extension
      Path file =
        FileOutputFormat.getTaskOutputPath(job,
                                           name + codec.getDefaultExtension());
      FileSystem fs = file.getFileSystem(job);
      FSDataOutputStream fileOut = fs.create(file, progress);
      return new LineRecordWriter<K, V>(new DataOutputStream
                                        (codec.createOutputStream(fileOut)),
                                        keyValueSeparator);
    }
  }
}
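
For output, the same decorator trick applies. Hive itself writes values only (its HiveIgnoreKeyTextOutputFormat drops the key), and a minimal sketch of that idea on top of the class above looks like this; the package and class name are illustrative:

package com.example.hive; // hypothetical package

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordWriter;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.util.Progressable;

/** Delegates to TextOutputFormat but always writes a null key, so each
 *  row lands in the file as a bare line with no key or separator. */
public class ValueOnlyTextOutputFormat<K, V> extends TextOutputFormat<K, V> {

  @Override
  public RecordWriter<K, V> getRecordWriter(FileSystem ignored, JobConf job,
      String name, Progressable progress) throws IOException {
    final RecordWriter<K, V> writer =
        super.getRecordWriter(ignored, job, name, progress);
    return new RecordWriter<K, V>() {
      public void write(K key, V value) throws IOException {
        // LineRecordWriter treats a null key as absent (see write() above).
        writer.write(null, value);
      }
      public void close(Reporter reporter) throws IOException {
        writer.close(reporter);
      }
    };
  }
}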


/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.mapred;

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;

/**
 * <code>RecordWriter</code> writes the output &lt;key, value&gt; pairs
 * to an output file.
 *
 * <p><code>RecordWriter</code> implementations write the job outputs to the
 * {@link FileSystem}.
 *
 * @see OutputFormat
 */
public interface RecordWriter<K, V> {
  /**
   * Writes a key/value pair.
   *
   * @param key the key to write.
   * @param value the value to write.
   * @throws IOException
   */
  void write(K key, V value) throws IOException;

  /**
   * Close this <code>RecordWriter</code> to future operations.
   *
   * @param reporter facility to report progress.
   * @throws IOException
   */
  void close(Reporter reporter) throws IOException;
}
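
FileOutputFormat, shown last, is the base class that supplies the output-path and compression plumbing TextOutputFormat relies on; its Javadoc on task side-effect files is worth reading before writing a custom format that creates extra output files.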


/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.mapred;

import java.io.IOException;
import java.text.NumberFormat;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.util.Progressable;

/** A base class for {@link OutputFormat}. */
public abstract class FileOutputFormat<K, V> implements OutputFormat<K, V> {

  public static enum Counter {
    BYTES_WRITTEN
  }

  /**
   * Set whether the output of the job is compressed.
   * @param conf the {@link JobConf} to modify
   * @param compress should the output of the job be compressed?
   */
  public static void setCompressOutput(JobConf conf, boolean compress) {
    conf.setBoolean("mapred.output.compress", compress);
  }

  /**
   * Is the job output compressed?
   * @param conf the {@link JobConf} to look in
   * @return <code>true</code> if the job output should be compressed,
   *         <code>false</code> otherwise
   */
  public static boolean getCompressOutput(JobConf conf) {
    return conf.getBoolean("mapred.output.compress", false);
  }

  /**
   * Set the {@link CompressionCodec} to be used to compress job outputs.
   * @param conf the {@link JobConf} to modify
   * @param codecClass the {@link CompressionCodec} to be used to
   *                   compress the job outputs
   */
  public static void
  setOutputCompressorClass(JobConf conf,
                           Class<? extends CompressionCodec> codecClass) {
    setCompressOutput(conf, true);
    conf.setClass("mapred.output.compression.codec", codecClass,
                  CompressionCodec.class);
  }

  /**
   * Get the {@link CompressionCodec} for compressing the job outputs.
   * @param conf the {@link JobConf} to look in
   * @param defaultValue the {@link CompressionCodec} to return if not set
   * @return the {@link CompressionCodec} to be used to compress the
   *         job outputs
   * @throws IllegalArgumentException if the class was specified, but not found
   */
  public static Class<? extends CompressionCodec>
  getOutputCompressorClass(JobConf conf,
                           Class<? extends CompressionCodec> defaultValue) {
    Class<? extends CompressionCodec> codecClass = defaultValue;

    String name = conf.get("mapred.output.compression.codec");
    if (name != null) {
      try {
        codecClass =
          conf.getClassByName(name).asSubclass(CompressionCodec.class);
      } catch (ClassNotFoundException e) {
        throw new IllegalArgumentException("Compression codec " + name +
                                           " was not found.", e);
      }
    }
    return codecClass;
  }

  /**
   * A {@link RecordWriter} decorator that drops the key and writes only the
   * value, as the IgnoreKeyWriter inside Hive's HiveIgnoreKeyTextOutputFormat
   * does.
   */
  protected static class IgnoreKeyWriter<K, V> implements RecordWriter<K, V> {

    private final RecordWriter<K, V> mWriter;

    public IgnoreKeyWriter(RecordWriter<K, V> writer) {
      this.mWriter = writer;
    }

    public synchronized void write(K key, V value) throws IOException {
      this.mWriter.write(null, value);
    }

    public void close(Reporter reporter) throws IOException {
      this.mWriter.close(reporter);
    }
  }

  public abstract RecordWriter<K, V> getRecordWriter(FileSystem ignored,
                                                     JobConf job, String name,
                                                     Progressable progress)
    throws IOException;

  public void checkOutputSpecs(FileSystem ignored, JobConf job)
    throws FileAlreadyExistsException,
           InvalidJobConfException, IOException {
    // Ensure that the output directory is set and not already there
    Path outDir = getOutputPath(job);
    if (outDir == null && job.getNumReduceTasks() != 0) {
      throw new InvalidJobConfException("Output directory not set in JobConf.");
    }
    if (outDir != null) {
      FileSystem fs = outDir.getFileSystem(job);
      // normalize the output directory
      outDir = fs.makeQualified(outDir);
      setOutputPath(job, outDir);

      // get delegation token for the outDir's file system
      TokenCache.obtainTokensForNamenodes(job.getCredentials(),
                                          new Path[] {outDir}, job);

      // check its existence
      if (fs.exists(outDir)) {
        throw new FileAlreadyExistsException("Output directory " + outDir +
                                             " already exists");
      }
    }
  }

  /**
   * Set the {@link Path} of the output directory for the map-reduce job.
   *
   * @param conf The configuration of the job.
   * @param outputDir the {@link Path} of the output directory for
   * the map-reduce job.
   */
  public static void setOutputPath(JobConf conf, Path outputDir) {
    outputDir = new Path(conf.getWorkingDirectory(), outputDir);
    conf.set("mapred.output.dir", outputDir.toString());
  }

  /**
   * Set the {@link Path} of the task's temporary output directory
   * for the map-reduce job.
   *
   * <p><i>Note</i>: Task output path is set by the framework.
   * </p>
   * @param conf The configuration of the job.
   * @param outputDir the {@link Path} of the output directory
   * for the map-reduce job.
   */
  static void setWorkOutputPath(JobConf conf, Path outputDir) {
    outputDir = new Path(conf.getWorkingDirectory(), outputDir);
    conf.set("mapred.work.output.dir", outputDir.toString());
  }

  /**
   * Get the {@link Path} to the output directory for the map-reduce job.
   *
   * @return the {@link Path} to the output directory for the map-reduce job.
   * @see FileOutputFormat#getWorkOutputPath(JobConf)
   */
  public static Path getOutputPath(JobConf conf) {
    String name = conf.get("mapred.output.dir");
    return name == null ? null : new Path(name);
  }

  /**
   * Get the {@link Path} to the task's temporary output directory
   * for the map-reduce job.
   *
   * <h4 id="SideEffectFiles">Tasks' Side-Effect Files</h4>
   *
   * <p><i>Note:</i> The following is valid only if the {@link OutputCommitter}
   * is {@link FileOutputCommitter}. If <code>OutputCommitter</code> is not
   * a <code>FileOutputCommitter</code>, the task's temporary output
   * directory is same as {@link #getOutputPath(JobConf)} i.e.
   * <tt>${mapred.output.dir}</tt></p>
   *
   * <p>Some applications need to create/write-to side-files, which differ from
   * the actual job-outputs.
   *
   * <p>In such cases there could be issues with 2 instances of the same TIP
   * (running simultaneously e.g. speculative tasks) trying to open/write-to the
   * same file (path) on HDFS. Hence the application-writer will have to pick
   * unique names per task-attempt (e.g. using the attemptid, say
   * <tt>attempt_200709221812_0001_m_000000_0</tt>), not just per TIP.</p>
   *
   * <p>To get around this the Map-Reduce framework helps the application-writer
   * out by maintaining a special
   * <tt>${mapred.output.dir}/_temporary/_${taskid}</tt>
   * sub-directory for each task-attempt on HDFS where the output of the
   * task-attempt goes. On successful completion of the task-attempt the files
   * in the <tt>${mapred.output.dir}/_temporary/_${taskid}</tt> (only)
   * are <i>promoted</i> to <tt>${mapred.output.dir}</tt>. Of course, the
   * framework discards the sub-directory of unsuccessful task-attempts. This
   * is completely transparent to the application.</p>
   *
   * <p>The application-writer can take advantage of this by creating any
   * side-files required in <tt>${mapred.work.output.dir}</tt> during execution
   * of a reduce-task, i.e. via {@link #getWorkOutputPath(JobConf)}, and the
   * framework will move them out similarly - thus the writer doesn't have to
   * pick unique paths per task-attempt.</p>
   *
   * <p><i>Note</i>: the value of <tt>${mapred.work.output.dir}</tt> during
   * execution of a particular task-attempt is actually
   * <tt>${mapred.output.dir}/_temporary/_${taskid}</tt>, and this value is
   * set by the map-reduce framework. So, just create any side-files in the
   * path returned by {@link #getWorkOutputPath(JobConf)} from map/reduce
   * task to take advantage of this feature.</p>
   *
   * <p>The entire discussion holds true for maps of jobs with
   * reducer=NONE (i.e. 0 reduces) since output of the map, in that case,
   * goes directly to HDFS.</p>
   *
   * @return the {@link Path} to the task's temporary output directory
   * for the map-reduce job.
   */
  public static Path getWorkOutputPath(JobConf conf) {
    String name = conf.get("mapred.work.output.dir");
    return name == null ? null : new Path(name);
  }

  /**
   * Helper function to create the task's temporary output directory and
   * return the path to the task's output file.
   *
   * @param conf job-configuration
   * @param name temporary task-output filename
   * @return path to the task's temporary output file
   * @throws IOException
   */
  public static Path getTaskOutputPath(JobConf conf, String name)
    throws IOException {
    // ${mapred.out.dir}
    Path outputPath = getOutputPath(conf);
    if (outputPath == null) {
      throw new IOException("Undefined job output-path");
    }

    OutputCommitter committer = conf.getOutputCommitter();
    Path workPath = outputPath;
    TaskAttemptContext context = new TaskAttemptContext(conf,
      TaskAttemptID.forName(conf.get("mapred.task.id")));
    if (committer instanceof FileOutputCommitter) {
      workPath = ((FileOutputCommitter) committer).getWorkPath(context,
                                                               outputPath);
    }

    // ${mapred.out.dir}/_temporary/_${taskid}/${name}
    return new Path(workPath, name);
  }

  /**
   * Helper function to generate a name that is unique for the task.
   *
   * <p>The generated name can be used to create custom files from within the
   * different tasks for the job; the names for different tasks will not
   * collide with each other.</p>
   *
   * <p>The given name is postfixed with the task type, 'm' for maps, 'r' for
   * reduces, and the task partition number. For example, given a name 'test'
   * running on the first map of the job, the generated name will be
   * 'test-m-00000'.</p>
   *
   * @param conf the configuration for the job.
   * @param name the name to make unique.
   * @return a unique name across all tasks of the job.
   */
  public static String getUniqueName(JobConf conf, String name) {
    int partition = conf.getInt("mapred.task.partition", -1);
    if (partition == -1) {
      throw new IllegalArgumentException(
        "This method can only be called from within a Job");
    }

    String taskType = (conf.getBoolean("mapred.task.is.map", true)) ? "m" : "r";

    NumberFormat numberFormat = NumberFormat.getInstance();
    numberFormat.setMinimumIntegerDigits(5);
    numberFormat.setGroupingUsed(false);

    return name + "-" + taskType + "-" + numberFormat.format(partition);
  }

  /**
   * Helper function to generate a {@link Path} for a file that is unique for
   * the task within the job output directory.
   *
   * <p>The path can be used to create custom files from within the map and
   * reduce tasks. The path name will be unique for each task. The path parent
   * will be the job output directory.</p>
   *
   * <p>This method uses the {@link #getUniqueName} method to make the file name
   * unique for the task.</p>
   *
   * @param conf the configuration for the job.
   * @param name the name for the file.
   * @return a unique path across all tasks of the job.
   */
  public static Path getPathForCustomFile(JobConf conf, String name) {
    return new Path(getWorkOutputPath(conf), getUniqueName(conf, name));
  }
}
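
As a closing usage note, the compression switches above are plain JobConf settings, so a custom format inheriting from FileOutputFormat gets them for free. A minimal sketch (the codec choice is illustrative):

import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;

public class CompressedOutputExample {
  public static void main(String[] args) {
    JobConf conf = new JobConf();
    // setOutputCompressorClass() also flips mapred.output.compress to true;
    // TextOutputFormat.getRecordWriter() then wraps the stream in the codec
    // and appends its default extension (".gz") to the filename.
    FileOutputFormat.setCompressOutput(conf, true);
    FileOutputFormat.setOutputCompressorClass(conf, GzipCodec.class);
  }
}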