Map数量的修改-computeSplitSize
2014-09-11 16:50
106 查看
Class JobSubmitter
submitJobInternal(Job job, Cluster cluster)
int maps = writeSplits(job, submitJobDir)
private
int writeSplits(org.apache.hadoop.mapreduce.JobContextjob,
Path jobSubmitDir) throws IOException,
InterruptedException, ClassNotFoundException{
JobConf jConf =(JobConf)job.getConfiguration();
int maps;
if (jConf.getUseNewMapper()) {
maps = writeNewSplits(job, jobSubmitDir);
} else {
maps = writeOldSplits(jConf,jobSubmitDir);
}
return maps;
}
/**
 * Generates splits with the job's configured {@code InputFormat}, sorts
 * them by size so the biggest splits (longest map tasks) are scheduled
 * first, and writes the split files into the submit directory.
 *
 * @param job          context of the job being submitted
 * @param jobSubmitDir staging directory receiving the split files
 * @return the number of splits, i.e. the number of map tasks
 */
@SuppressWarnings("unchecked")
private <T extends InputSplit>
int writeNewSplits(JobContext job, Path jobSubmitDir)
    throws IOException, InterruptedException, ClassNotFoundException {
  Configuration conf = job.getConfiguration();
  InputFormat<?, ?> input =
      ReflectionUtils.newInstance(job.getInputFormatClass(), conf);

  List<InputSplit> splits = input.getSplits(job);
  T[] array = (T[]) splits.toArray(new InputSplit[splits.size()]);

  // Sort the splits into order based on size, so that the biggest
  // go first.
  Arrays.sort(array, new SplitComparator());
  JobSplitWriter.createSplitFiles(jobSubmitDir, conf,
      jobSubmitDir.getFileSystem(conf), array);
  return array.length;
}
public List<InputSplit> getSplits(JobContext job)
throws IOException {
Stopwatch sw = new Stopwatch().start();
//protected
long getFormatMinSplitSize() {
return 1; }
//public
static long
getMinSplitSize (JobContextjob) {
return .getConfiguration().getLong
//(SPLIT_MINSIZE, 1L);}
// public
static long
getMaxSplitSize(JobContext context) {
return //context.getConfiguration().getLong(SPLIT_MAXSIZE, Long.MAX_VALUE);}
long minSize = Math.max(getFormatMinSplitSize(),getMinSplitSize(job));//1
long maxSize =
getMaxSplitSize(job);// Long.MAX_VALUE
// generate splits
List<InputSplit> splits =
new ArrayList<InputSplit>();
//listStatus列出输入目录,其中FileStatus是文件的客户端信息
List<FileStatus> files = listStatus(job);//
for (FileStatus file: files) {
Path path = file.getPath();
long length = file.getLen();
if (length != 0) {
BlockLocation[] blkLocations; //表示一个块的网络位置,包含块复制的主机的信息和其//他块元数据(比如与块相关联的文件偏移量、文件大小,是否是坏文件等)
if (file
instanceof LocatedFileStatus) {
blkLocations = ((LocatedFileStatus)file).getBlockLocations();
} else {
FileSystem fs =path.getFileSystem(job.getConfiguration());
blkLocations =fs.getFileBlockLocations(file, 0, length);
}
//protected boolean
isSplitable(JobContextcontext, Path filename) {return true;}
if (isSplitable(job, path)) {
long blockSize = file.getBlockSize();
// protected
long
computeSplitSize(long blockSize,
long minSize,long maxSize) {
// return Math.max(minSize, Math.min(maxSize,blockSize)); }
long splitSize = computeSplitSize(blockSize,minSize, maxSize);
long bytesRemaining = length;
while (((double) bytesRemaining)/splitSize >
SPLIT_SLOP) {
int blkIndex = getBlockIndex(blkLocations,length-bytesRemaining);
splits.add(makeSplit(path, length-bytesRemaining,splitSize,
blkLocations[blkIndex].getHosts()));
bytesRemaining -= splitSize;
}
if (bytesRemaining != 0) {
int blkIndex = getBlockIndex(blkLocations,length-bytesRemaining);
splits.add(makeSplit(path, length-bytesRemaining,bytesRemaining,
blkLocations[blkIndex].getHosts()));
}
} else {
// not splitable
splits.add(makeSplit(path, 0, length,blkLocations[0].getHosts()));
}
} else {
//Create empty hosts array forzero length files
splits.add(makeSplit(path, 0, length,
new String[0]));
}
}
// Save the number of input files for metrics/loadgen
job.getConfiguration().setLong(NUM_INPUT_FILES, files.size());
sw.stop();
if (LOG.isDebugEnabled()) {
LOG.debug("Total # of splits generated bygetSplits: " +
splits.size()
+ ", TimeTaken: " + sw.elapsedMillis());
}
return
splits;
}
submitJobInternal(Job job, Cluster cluster)
int maps = writeSplits(job, submitJobDir)
private
int writeSplits(org.apache.hadoop.mapreduce.JobContextjob,
Path jobSubmitDir) throws IOException,
InterruptedException, ClassNotFoundException{
JobConf jConf =(JobConf)job.getConfiguration();
int maps;
if (jConf.getUseNewMapper()) {
maps = writeNewSplits(job, jobSubmitDir);
} else {
maps = writeOldSplits(jConf,jobSubmitDir);
}
return maps;
}
/**
 * Generates splits with the job's configured {@code InputFormat}, sorts
 * them by size so the biggest splits (longest map tasks) are scheduled
 * first, and writes the split files into the submit directory.
 *
 * @param job          context of the job being submitted
 * @param jobSubmitDir staging directory receiving the split files
 * @return the number of splits, i.e. the number of map tasks
 */
@SuppressWarnings("unchecked")
private <T extends InputSplit>
int writeNewSplits(JobContext job, Path jobSubmitDir)
    throws IOException, InterruptedException, ClassNotFoundException {
  Configuration conf = job.getConfiguration();
  InputFormat<?, ?> input =
      ReflectionUtils.newInstance(job.getInputFormatClass(), conf);

  List<InputSplit> splits = input.getSplits(job);
  T[] array = (T[]) splits.toArray(new InputSplit[splits.size()]);

  // Sort the splits into order based on size, so that the biggest
  // go first.
  Arrays.sort(array, new SplitComparator());
  JobSplitWriter.createSplitFiles(jobSubmitDir, conf,
      jobSubmitDir.getFileSystem(conf), array);
  return array.length;
}
public List<InputSplit> getSplits(JobContext job)
throws IOException {
Stopwatch sw = new Stopwatch().start();
//protected
long getFormatMinSplitSize() {
return 1; }
//public
static long
getMinSplitSize (JobContextjob) {
return .getConfiguration().getLong
//(SPLIT_MINSIZE, 1L);}
// public
static long
getMaxSplitSize(JobContext context) {
return //context.getConfiguration().getLong(SPLIT_MAXSIZE, Long.MAX_VALUE);}
long minSize = Math.max(getFormatMinSplitSize(),getMinSplitSize(job));//1
long maxSize =
getMaxSplitSize(job);// Long.MAX_VALUE
// generate splits
List<InputSplit> splits =
new ArrayList<InputSplit>();
//listStatus列出输入目录,其中FileStatus是文件的客户端信息
List<FileStatus> files = listStatus(job);//
for (FileStatus file: files) {
Path path = file.getPath();
long length = file.getLen();
if (length != 0) {
BlockLocation[] blkLocations; //表示一个块的网络位置,包含块复制的主机的信息和其//他块元数据(比如与块相关联的文件偏移量、文件大小,是否是坏文件等)
if (file
instanceof LocatedFileStatus) {
blkLocations = ((LocatedFileStatus)file).getBlockLocations();
} else {
FileSystem fs =path.getFileSystem(job.getConfiguration());
blkLocations =fs.getFileBlockLocations(file, 0, length);
}
//protected boolean
isSplitable(JobContextcontext, Path filename) {return true;}
if (isSplitable(job, path)) {
long blockSize = file.getBlockSize();
// protected
long
computeSplitSize(long blockSize,
long minSize,long maxSize) {
// return Math.max(minSize, Math.min(maxSize,blockSize)); }
long splitSize = computeSplitSize(blockSize,minSize, maxSize);
long bytesRemaining = length;
while (((double) bytesRemaining)/splitSize >
SPLIT_SLOP) {
int blkIndex = getBlockIndex(blkLocations,length-bytesRemaining);
splits.add(makeSplit(path, length-bytesRemaining,splitSize,
blkLocations[blkIndex].getHosts()));
bytesRemaining -= splitSize;
}
if (bytesRemaining != 0) {
int blkIndex = getBlockIndex(blkLocations,length-bytesRemaining);
splits.add(makeSplit(path, length-bytesRemaining,bytesRemaining,
blkLocations[blkIndex].getHosts()));
}
} else {
// not splitable
splits.add(makeSplit(path, 0, length,blkLocations[0].getHosts()));
}
} else {
//Create empty hosts array forzero length files
splits.add(makeSplit(path, 0, length,
new String[0]));
}
}
// Save the number of input files for metrics/loadgen
job.getConfiguration().setLong(NUM_INPUT_FILES, files.size());
sw.stop();
if (LOG.isDebugEnabled()) {
LOG.debug("Total # of splits generated bygetSplits: " +
splits.size()
+ ", TimeTaken: " + sw.elapsedMillis());
}
return
splits;
}
相关文章推荐
- map常用成员(关联容器)----创建map<主键,主键对应的值>,增insert,删erase,改查找find,然后修改second,遍历iterator,获取元素个数size,判空empty
- PRJ: Split a nodes-map into some triangles
- 修改wordpress列表页默认显示文章的数量和排序方式
- 修改mysql最大连接数量
- C++学习笔记之map键值对的修改
- 通过inputSplit分片size控制map数目
- 使easyui中combobox限制显示的数量以提升效率(修改源码)
- 如何确定 Hadoop map和reduce的个数--map和reduce数量之间的关系是什么
- EasyUI DataGrid 修改每页显示数量的最大值&&导出Grid到Excel
- 深度分析如何在Hadoop中控制Map的数量(摘抄)
- STL_map——map::size
- java实现从文件中读取信息保存在Map中,修改Map中的值,将Map 4000 更新到文件中
- ATM简易模拟更新1.0 控制台输出 Map存储账号密码 可注册 余额尚未研究出怎么修改存储
- hadoop中map和reduce的数量设置问题
- iOS 疑难杂症 — — 在 Storyboard 里 Add Size Class Customization 后再从代码里无法修改的问题
- 如何修改远程登录计算机的连接数量
- 修改购物车产品数量
- 修改map中的值
- 三句话告诉你 mapreduce 中MAP进程的数量怎么控制?