public void configure(JobConf job) {
  // 'key' == sortInput for sort-input; key == sortOutput for sort-output
  key = deduceInputFile(job);
  if (key == sortOutput) {
    partitioner = new HashPartitioner<WritableComparable, Writable>();
    // Figure the 'current' partition and no. of reduces of the 'sort'
    try {
      URI inputURI = new URI(job.get(JobContext.MAP_INPUT_FILE));
      String inputFile = inputURI.getPath();
      // part file is of the form part-r-xxxxx
      partition = Integer.valueOf(inputFile.substring(
          inputFile.lastIndexOf("part") + 7)).intValue();
      noSortReducers = job.getInt(SORT_REDUCES, -1);
    } catch (Exception e) {
      System.err.println("Caught: " + e);
      System.exit(-1);
    }
  }
}
These snippets are Java usage examples of the org.apache.hadoop.io.WritableComparable class, collected from several open-source projects. The snippet above is from SortValidator.java (project: hadoop).
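Once partition, noSortReducers and partitioner are initialized as above, the record-level check is conceptually simple: every key read from part-r-xxxxx must hash back to that same partition. A minimal sketch of that idea (checkKeyBelongsHere is a hypothetical helper, not the actual SortValidator code):

// Hypothetical helper: verify a record read from part-r-xxxxx really belongs
// to the partition deduced in configure() above.
void checkKeyBelongsHere(WritableComparable key, Writable value) throws IOException {
  int expected = partitioner.getPartition(key, value, noSortReducers);
  if (expected != partition) {
    throw new IOException("Key " + key + " hashes to partition " + expected
        + " but was read from partition " + partition);
  }
}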
UtilsForTests.java (project: hadoop)
/** The waiting function. The map exits once it gets a signal. Here the
 * signal is the file existence.
 */
public void map(WritableComparable key, Writable val,
    OutputCollector<WritableComparable, Writable> output,
    Reporter reporter)
    throws IOException {
  if (shouldWait(id)) {
    if (fs != null) {
      while (!fs.exists(getSignalFile(id))) {
        try {
          reporter.progress();
          synchronized (this) {
            this.wait(1000); // wait for 1 sec
          }
        } catch (InterruptedException ie) {
          System.out.println("Interrupted while the map was waiting for"
              + " the signal.");
          break;
        }
      }
    } else {
      throw new IOException("Could not get the DFS!!");
    }
  }
}
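For context, a sketch of the other side of this handshake: a test driver releases the waiting mappers simply by creating the signal file. The path below is hypothetical; the real tests derive it from the job configuration.

// Sketch, assuming 'fs' is the same FileSystem the tasks poll and
// 'signalFile' matches what getSignalFile(id) returns:
Path signalFile = new Path("/tmp/signal");   // hypothetical location
fs.create(signalFile).close();               // file existence is the signal
// ... later, to make subsequent runs wait again:
fs.delete(signalFile, true);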
UtilsForTests.java (project: hadoop)
/** The waiting function. The reduce exits once it gets a signal. Here the
 * signal is the file existence.
 */
public void reduce(WritableComparable key, Iterator<Writable> val,
    OutputCollector<WritableComparable, Writable> output,
    Reporter reporter)
    throws IOException {
  if (fs != null) {
    while (!fs.exists(signal)) {
      try {
        reporter.progress();
        synchronized (this) {
          this.wait(1000); // wait for 1 sec
        }
      } catch (InterruptedException ie) {
        System.out.println("Interrupted while the reduce was waiting for the"
            + " signal.");
        break;
      }
    }
  } else {
    throw new IOException("Could not get the DFS!!");
  }
}
TestMapRed.java (project: hadoop)
public void reduce(WritableComparable key, Iterator values,
                   OutputCollector output, Reporter reporter
                   ) throws IOException {
  if (first) {
    first = false;
    MapOutputFile mapOutputFile = new MROutputFiles();
    mapOutputFile.setConf(conf);
    Path input = mapOutputFile.getInputFile(0);
    FileSystem fs = FileSystem.get(conf);
    assertTrue("reduce input exists " + input, fs.exists(input));
    SequenceFile.Reader rdr =
        new SequenceFile.Reader(fs, input, conf);
    assertEquals("is reduce input compressed " + input,
        compressInput,
        rdr.isCompressed());
    rdr.close();
  }
}
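The assertion compares rdr.isCompressed() against an expected flag, which is driven by the job's map-output compression settings. A sketch of the old-API knobs that control it (DefaultCodec is just one possible codec):

// Sketch of the mapred-API settings that decide whether the reduce-side
// input files checked above are compressed:
JobConf jobConf = new JobConf();
jobConf.setCompressMapOutput(true);                       // compress map outputs
jobConf.setMapOutputCompressorClass(DefaultCodec.class);  // org.apache.hadoop.io.compress
// With these set, the merged reduce input should then be written compressed.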
LargeSorter.java (project: hadoop)
/**
 * Given an output filename, write a bunch of random records to it.
 */
public void map(WritableComparable key,
                Writable value,
                Context context) throws IOException, InterruptedException {
  int itemCount = 0;
  while (numBytesToWrite > 0) {
    int keyLength = minKeySize +
        (keySizeRange != 0 ? random.nextInt(keySizeRange) : 0);
    randomKey.setSize(keyLength);
    randomizeBytes(randomKey.getBytes(), 0, randomKey.getLength());
    int valueLength = minValueSize +
        (valueSizeRange != 0 ? random.nextInt(valueSizeRange) : 0);
    randomValue.setSize(valueLength);
    randomizeBytes(randomValue.getBytes(), 0, randomValue.getLength());
    context.write(randomKey, randomValue);
    numBytesToWrite -= keyLength + valueLength;
    context.getCounter(Counters.BYTES_WRITTEN).increment(keyLength + valueLength);
    context.getCounter(Counters.RECORDS_WRITTEN).increment(1);
    if (++itemCount % 200 == 0) {
      context.setStatus("wrote record " + itemCount + ". " +
          numBytesToWrite + " bytes left.");
    }
  }
  context.setStatus("done with " + itemCount + " records.");
}
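The mapper above ignores its input and just emits random BytesWritable pairs, so a driver only has to point it at an output directory. A minimal, illustrative driver using the mapreduce API might look like the following; RandomMapper and the output path are placeholders, and the real LargeSorter/RandomWriter drivers configure more (custom input formats, per-map byte budgets).

// Illustrative driver sketch, not the actual LargeSorter/RandomWriter driver:
Configuration conf = new Configuration();
Job job = Job.getInstance(conf, "random-writer-sketch");
job.setJarByClass(RandomMapper.class);            // hypothetical mapper class
job.setMapperClass(RandomMapper.class);
job.setNumReduceTasks(0);                         // map-only variant
job.setOutputKeyClass(BytesWritable.class);
job.setOutputValueClass(BytesWritable.class);
job.setOutputFormatClass(SequenceFileOutputFormat.class);
FileOutputFormat.setOutputPath(job, new Path("/tmp/random-out"));
System.exit(job.waitForCompletion(true) ? 0 : 1);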
TestTotalOrderPartitioner.java (project: hadoop)
private static <T extends WritableComparable<?>> Path writePartitionFile(
    String testname, Configuration conf, T[] splits) throws IOException {
  final FileSystem fs = FileSystem.getLocal(conf);
  final Path testdir = new Path(System.getProperty("test.build.data", "/tmp")
      ).makeQualified(fs);
  Path p = new Path(testdir, testname + "/_partition.lst");
  TotalOrderPartitioner.setPartitionFile(conf, p);
  conf.setInt(MRJobConfig.NUM_REDUCES, splits.length + 1);
  SequenceFile.Writer w = null;
  try {
    w = SequenceFile.createWriter(fs, conf, p,
        splits[0].getClass(), NullWritable.class,
        SequenceFile.CompressionType.NONE);
    for (int i = 0; i < splits.length; ++i) {
      w.append(splits[i], NullWritable.get());
    }
  } finally {
    if (null != w)
      w.close();
  }
  return p;
}
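The partition file written here is consumed by TotalOrderPartitioner at run time. A sketch of that consumption, assuming Text split points and reusing the conf prepared above:

// (uses org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner)
conf.setClass(MRJobConfig.MAP_OUTPUT_KEY_CLASS, Text.class, Object.class); // key type of the splits
TotalOrderPartitioner<Text, NullWritable> partitioner =
    new TotalOrderPartitioner<Text, NullWritable>();
partitioner.setConf(conf);   // loads the _partition.lst registered via setPartitionFile
int numReduces = splits.length + 1;
int r = partitioner.getPartition(new Text("some key"), NullWritable.get(), numReduces);
// 0 <= r < numReduces; keys below the first split point land in partition 0, and so on.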
RandomWriter.java (project: hadoop)
/**
 * Given an output filename, write a bunch of random records to it.
 */
public void map(WritableComparable key,
                Writable value,
                Context context) throws IOException, InterruptedException {
  int itemCount = 0;
  while (numBytesToWrite > 0) {
    int keyLength = minKeySize +
        (keySizeRange != 0 ? random.nextInt(keySizeRange) : 0);
    randomKey.setSize(keyLength);
    randomizeBytes(randomKey.getBytes(), 0, randomKey.getLength());
    int valueLength = minValueSize +
        (valueSizeRange != 0 ? random.nextInt(valueSizeRange) : 0);
    randomValue.setSize(valueLength);
    randomizeBytes(randomValue.getBytes(), 0, randomValue.getLength());
    context.write(randomKey, randomValue);
    numBytesToWrite -= keyLength + valueLength;
    context.getCounter(Counters.BYTES_WRITTEN).increment(keyLength + valueLength);
    context.getCounter(Counters.RECORDS_WRITTEN).increment(1);
    if (++itemCount % 200 == 0) {
      context.setStatus("wrote record " + itemCount + ". " +
          numBytesToWrite + " bytes left.");
    }
  }
  context.setStatus("done with " + itemCount + " records.");
}
RandomWriter.java (project: hadoop)
/**
 * Given an output filename, write a bunch of random records to it.
 */
public void map(WritableComparable key,
                Writable value,
                Context context) throws IOException, InterruptedException {
  int itemCount = 0;
  while (numBytesToWrite > 0) {
    int keyLength = minKeySize +
        (keySizeRange != 0 ? random.nextInt(keySizeRange) : 0);
    randomKey.setSize(keyLength);
    randomizeBytes(randomKey.getBytes(), 0, randomKey.getLength());
    int valueLength = minValueSize +
        (valueSizeRange != 0 ? random.nextInt(valueSizeRange) : 0);
    randomValue.setSize(valueLength);
    randomizeBytes(randomValue.getBytes(), 0, randomValue.getLength());
    context.write(randomKey, randomValue);
    numBytesToWrite -= keyLength + valueLength;
    context.getCounter(Counters.BYTES_WRITTEN).increment(keyLength + valueLength);
    context.getCounter(Counters.RECORDS_WRITTEN).increment(1);
    if (++itemCount % 200 == 0) {
      context.setStatus("wrote record " + itemCount + ". " +
          numBytesToWrite + " bytes left.");
    }
  }
  context.setStatus("done with " + itemCount + " records.");
}
DistCh.java (project: hadoop)
/** Run a FileOperation */
public void map(Text key, FileOperation value,
    OutputCollector<WritableComparable<?>, Text> out, Reporter reporter
    ) throws IOException {
  try {
    value.run(jobconf);
    ++succeedcount;
    reporter.incrCounter(Counter.SUCCEED, 1);
  } catch (IOException e) {
    ++failcount;
    reporter.incrCounter(Counter.FAIL, 1);
    String s = "FAIL: " + value + ", " + StringUtils.stringifyException(e);
    out.collect(null, new Text(s));
    LOG.info(s);
  } finally {
    reporter.setStatus(getCountString());
  }
}
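Since the mapper only reports outcomes through the SUCCEED/FAIL counters and the null-keyed error lines, a caller would presumably inspect the counters after the job finishes. A sketch with the old mapred API, assuming the same Counter enum and jobconf:

// (uses org.apache.hadoop.mapred.JobClient / RunningJob / Counters)
RunningJob running = JobClient.runJob(jobconf);
Counters counters = running.getCounters();
long succeeded = counters.getCounter(Counter.SUCCEED);
long failed = counters.getCounter(Counter.FAIL);
LOG.info("changed " + succeeded + " paths, failed on " + failed);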
SortValidator.java (project: aliyun-oss-hadoop-fs)
public void configure(JobConf job) {
  // 'key' == sortInput for sort-input; key == sortOutput for sort-output
  key = deduceInputFile(job);
  if (key == sortOutput) {
    partitioner = new HashPartitioner<WritableComparable, Writable>();
    // Figure the 'current' partition and no. of reduces of the 'sort'
    try {
      URI inputURI = new URI(job.get(JobContext.MAP_INPUT_FILE));
      String inputFile = inputURI.getPath();
      // part file is of the form part-r-xxxxx
      partition = Integer.valueOf(inputFile.substring(
          inputFile.lastIndexOf("part") + 7)).intValue();
      noSortReducers = job.getInt(SORT_REDUCES, -1);
    } catch (Exception e) {
      System.err.println("Caught: " + e);
      System.exit(-1);
    }
  }
}
UtilsForTests.java (project: aliyun-oss-hadoop-fs)
/** The waiting function. The reduce exits once it gets a signal. Here the
 * signal is the file existence.
 */
public void reduce(WritableComparable key, Iterator<Writable> val,
    OutputCollector<WritableComparable, Writable> output,
    Reporter reporter)
    throws IOException {
  if (fs != null) {
    while (!fs.exists(signal)) {
      try {
        reporter.progress();
        synchronized (this) {
          this.wait(1000); // wait for 1 sec
        }
      } catch (InterruptedException ie) {
        System.out.println("Interrupted while the reduce was waiting for the"
            + " signal.");
        break;
      }
    }
  } else {
    throw new IOException("Could not get the DFS!!");
  }
}
TestMapRed.java (project: aliyun-oss-hadoop-fs)
public void reduce(WritableComparable key, Iterator values,
                   OutputCollector output, Reporter reporter
                   ) throws IOException {
  if (first) {
    first = false;
    MapOutputFile mapOutputFile = new MROutputFiles();
    mapOutputFile.setConf(conf);
    Path input = mapOutputFile.getInputFile(0);
    FileSystem fs = FileSystem.get(conf);
    assertTrue("reduce input exists " + input, fs.exists(input));
    SequenceFile.Reader rdr =
        new SequenceFile.Reader(fs, input, conf);
    assertEquals("is reduce input compressed " + input,
        compressInput,
        rdr.isCompressed());
    rdr.close();
  }
}
RandomWriter.java (project: aliyun-oss-hadoop-fs)
/**
 * Given an output filename, write a bunch of random records to it.
 */
public void map(WritableComparable key,
                Writable value,
                Context context) throws IOException, InterruptedException {
  int itemCount = 0;
  while (numBytesToWrite > 0) {
    int keyLength = minKeySize +
        (keySizeRange != 0 ? random.nextInt(keySizeRange) : 0);
    randomKey.setSize(keyLength);
    randomizeBytes(randomKey.getBytes(), 0, randomKey.getLength());
    int valueLength = minValueSize +
        (valueSizeRange != 0 ? random.nextInt(valueSizeRange) : 0);
    randomValue.setSize(valueLength);
    randomizeBytes(randomValue.getBytes(), 0, randomValue.getLength());
    context.write(randomKey, randomValue);
    numBytesToWrite -= keyLength + valueLength;
    context.getCounter(Counters.BYTES_WRITTEN).increment(keyLength + valueLength);
    context.getCounter(Counters.RECORDS_WRITTEN).increment(1);
    if (++itemCount % 200 == 0) {
      context.setStatus("wrote record " + itemCount + ". " +
          numBytesToWrite + " bytes left.");
    }
  }
  context.setStatus("done with " + itemCount + " records.");
}
MapFileReader.java (project: DataVec)
/**
 * Get a single record from the map file for the given index
 *
 * @param index Index, between 0 and numRecords()-1
 * @return Value from the MapFile
 * @throws IOException If an error occurs during reading
 */
public V getRecord(long index) throws IOException {
  //First: determine which reader to read from...
  int readerIdx = -1;
  for (int i = 0; i < recordIndexesEachReader.size(); i++) {
    Pair<Long, Long> p = recordIndexesEachReader.get(i);
    if (index >= p.getFirst() && index <= p.getSecond()) {
      readerIdx = i;
      break;
    }
  }
  if (readerIdx == -1) {
    throw new IllegalStateException("Index not found in any reader: " + index);
  }
  WritableComparable key = indexToKey.getKeyForIndex(index);
  Writable value = ReflectionUtils.newInstance(recordClass, null);
  V v = (V) readers[readerIdx].get(key, value);
  return v;
}
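Under the hood this delegates to Hadoop's MapFile.Reader.get(). A standalone sketch of that call, assuming the Hadoop 2.x reader constructor, LongWritable keys and Text values (the path is illustrative):

// (uses org.apache.hadoop.io.MapFile)
Configuration conf = new Configuration();
MapFile.Reader reader = new MapFile.Reader(new Path("/data/mapfile"), conf);
LongWritable key = new LongWritable(42);    // assuming LongWritable keys
Text value = new Text();                    // assuming Text values
Writable found = reader.get(key, value);    // returns null if the key is absent
reader.close();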
RandomWriter.java (project: big-c)
/**
 * Given an output filename, write a bunch of random records to it.
 */
public void map(WritableComparable key,
                Writable value,
                Context context) throws IOException, InterruptedException {
  int itemCount = 0;
  while (numBytesToWrite > 0) {
    int keyLength = minKeySize +
        (keySizeRange != 0 ? random.nextInt(keySizeRange) : 0);
    randomKey.setSize(keyLength);
    randomizeBytes(randomKey.getBytes(), 0, randomKey.getLength());
    int valueLength = minValueSize +
        (valueSizeRange != 0 ? random.nextInt(valueSizeRange) : 0);
    randomValue.setSize(valueLength);
    randomizeBytes(randomValue.getBytes(), 0, randomValue.getLength());
    context.write(randomKey, randomValue);
    numBytesToWrite -= keyLength + valueLength;
    context.getCounter(Counters.BYTES_WRITTEN).increment(keyLength + valueLength);
    context.getCounter(Counters.RECORDS_WRITTEN).increment(1);
    if (++itemCount % 200 == 0) {
      context.setStatus("wrote record " + itemCount + ". " +
          numBytesToWrite + " bytes left.");
    }
  }
  context.setStatus("done with " + itemCount + " records.");
}
RandomWriter.java (project: big-c)
/**
 * Given an output filename, write a bunch of random records to it.
 */
public void map(WritableComparable key,
                Writable value,
                Context context) throws IOException, InterruptedException {
  int itemCount = 0;
  while (numBytesToWrite > 0) {
    int keyLength = minKeySize +
        (keySizeRange != 0 ? random.nextInt(keySizeRange) : 0);
    randomKey.setSize(keyLength);
    randomizeBytes(randomKey.getBytes(), 0, randomKey.getLength());
    int valueLength = minValueSize +
        (valueSizeRange != 0 ? random.nextInt(valueSizeRange) : 0);
    randomValue.setSize(valueLength);
    randomizeBytes(randomValue.getBytes(), 0, randomValue.getLength());
    context.write(randomKey, randomValue);
    numBytesToWrite -= keyLength + valueLength;
    context.getCounter(Counters.BYTES_WRITTEN).increment(keyLength + valueLength);
    context.getCounter(Counters.RECORDS_WRITTEN).increment(1);
    if (++itemCount % 200 == 0) {
      context.setStatus("wrote record " + itemCount + ". " +
          numBytesToWrite + " bytes left.");
    }
  }
  context.setStatus("done with " + itemCount + " records.");
}
DistCh.java (project: aliyun-oss-hadoop-fs)
/** Run a FileOperation */
public void map(Text key, FileOperation value,
    OutputCollector<WritableComparable<?>, Text> out, Reporter reporter
    ) throws IOException {
  try {
    value.run(jobconf);
    ++succeedcount;
    reporter.incrCounter(Counter.SUCCEED, 1);
  } catch (IOException e) {
    ++failcount;
    reporter.incrCounter(Counter.FAIL, 1);
    String s = "FAIL: " + value + ", " + StringUtils.stringifyException(e);
    out.collect(null, new Text(s));
    LOG.info(s);
  } finally {
    reporter.setStatus(getCountString());
  }
}
LargeSorter.java (project: big-c)
/**
 * Given an output filename, write a bunch of random records to it.
 */
public void map(WritableComparable key,
                Writable value,
                Context context) throws IOException, InterruptedException {
  int itemCount = 0;
  while (numBytesToWrite > 0) {
    int keyLength = minKeySize +
        (keySizeRange != 0 ? random.nextInt(keySizeRange) : 0);
    randomKey.setSize(keyLength);
    randomizeBytes(randomKey.getBytes(), 0, randomKey.getLength());
    int valueLength = minValueSize +
        (valueSizeRange != 0 ? random.nextInt(valueSizeRange) : 0);
    randomValue.setSize(valueLength);
    randomizeBytes(randomValue.getBytes(), 0, randomValue.getLength());
    context.write(randomKey, randomValue);
    numBytesToWrite -= keyLength + valueLength;
    context.getCounter(Counters.BYTES_WRITTEN).increment(keyLength + valueLength);
    context.getCounter(Counters.RECORDS_WRITTEN).increment(1);
    if (++itemCount % 200 == 0) {
      context.setStatus("wrote record " + itemCount + ". " +
          numBytesToWrite + " bytes left.");
    }
  }
  context.setStatus("done with " + itemCount + " records.");
}
SegmentReader.java (project: GeoCrawler)
public RecordWriter<WritableComparable<?>, Writable> getRecordWriter(
    final FileSystem fs, JobConf job, String name,
    final Progressable progress) throws IOException {
  final Path segmentDumpFile = new Path(
      FileOutputFormat.getOutputPath(job), name);
  // Get the old copy out of the way
  if (fs.exists(segmentDumpFile))
    fs.delete(segmentDumpFile, true);
  final PrintStream printStream = new PrintStream(
      fs.create(segmentDumpFile));
  return new RecordWriter<WritableComparable<?>, Writable>() {
    public synchronized void write(WritableComparable<?> key, Writable value)
        throws IOException {
      printStream.println(value);
    }

    public synchronized void close(Reporter reporter) throws IOException {
      printStream.close();
    }
  };
}
ReadKeyGroupingComparator.java (project: mutation-server)
@Override
public int compare(WritableComparable o1, WritableComparable o2) {
  ReadKey rk1 = (ReadKey) o1;
  ReadKey rk2 = (ReadKey) o2;
  if (rk1.sample.equals(rk2.sample)) {
    if (rk1.sequence.equals(rk2.sequence)) {
      return 0;
    } else {
      return rk1.sequence.compareTo(rk2.sequence);
    }
  } else {
    return rk1.sample.compareTo(rk2.sample);
  }
}
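A grouping comparator only matters once it is registered on a job. A sketch of the wiring with the mapreduce API, assuming ReadKeyGroupingComparator extends WritableComparator and ReadKey is the map output key:

Job job = Job.getInstance(new Configuration(), "read-alignment");
job.setMapOutputKeyClass(ReadKey.class);
job.setGroupingComparatorClass(ReadKeyGroupingComparator.class);
// All ReadKeys with the same sample and sequence now reach a single reduce()
// call, regardless of any other fields that take part in the full sort order.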
SortValidator.java (project: big-c)
public void configure(JobConf job) {
  // 'key' == sortInput for sort-input; key == sortOutput for sort-output
  key = deduceInputFile(job);
  if (key == sortOutput) {
    partitioner = new HashPartitioner<WritableComparable, Writable>();
    // Figure the 'current' partition and no. of reduces of the 'sort'
    try {
      URI inputURI = new URI(job.get(JobContext.MAP_INPUT_FILE));
      String inputFile = inputURI.getPath();
      // part file is of the form part-r-xxxxx
      partition = Integer.valueOf(inputFile.substring(
          inputFile.lastIndexOf("part") + 7)).intValue();
      noSortReducers = job.getInt(SORT_REDUCES, -1);
    } catch (Exception e) {
      System.err.println("Caught: " + e);
      System.exit(-1);
    }
  }
}
UtilsForTests.java (project: big-c)
/** The waiting function. The map exits once it gets a signal. Here the
 * signal is the file existence.
 */
public void map(WritableComparable key, Writable val,
    OutputCollector<WritableComparable, Writable> output,
    Reporter reporter)
    throws IOException {
  if (shouldWait(id)) {
    if (fs != null) {
      while (!fs.exists(getSignalFile(id))) {
        try {
          reporter.progress();
          synchronized (this) {
            this.wait(1000); // wait for 1 sec
          }
        } catch (InterruptedException ie) {
          System.out.println("Interrupted while the map was waiting for"
              + " the signal.");
          break;
        }
      }
    } else {
      throw new IOException("Could not get the DFS!!");
    }
  }
}
UtilsForTests.java (project: big-c)
/** The waiting function. The reduce exits once it gets a signal. Here the
 * signal is the file existence.
 */
public void reduce(WritableComparable key, Iterator<Writable> val,
    OutputCollector<WritableComparable, Writable> output,
    Reporter reporter)
    throws IOException {
  if (fs != null) {
    while (!fs.exists(signal)) {
      try {
        reporter.progress();
        synchronized (this) {
          this.wait(1000); // wait for 1 sec
        }
      } catch (InterruptedException ie) {
        System.out.println("Interrupted while the reduce was waiting for the"
            + " signal.");
        break;
      }
    }
  } else {
    throw new IOException("Could not get the DFS!!");
  }
}
TestMapRed.java (project: big-c)
public void reduce(WritableComparable key, Iterator values,
                   OutputCollector output, Reporter reporter
                   ) throws IOException {
  if (first) {
    first = false;
    MapOutputFile mapOutputFile = new MROutputFiles();
    mapOutputFile.setConf(conf);
    Path input = mapOutputFile.getInputFile(0);
    FileSystem fs = FileSystem.get(conf);
    assertTrue("reduce input exists " + input, fs.exists(input));
    SequenceFile.Reader rdr =
        new SequenceFile.Reader(fs, input, conf);
    assertEquals("is reduce input compressed " + input,
        compressInput,
        rdr.isCompressed());
    rdr.close();
  }
}
SqoopHCatExportFormat.java (project: aliyun-maxcompute-data-collectors)
@Override
public RecordReader<WritableComparable, HCatRecord>
    createRecordReader(InputSplit split,
        TaskAttemptContext taskContext)
        throws IOException, InterruptedException {
  LOG.debug("Creating a SqoopHCatRecordReader");
  return new SqoopHCatRecordReader(split, taskContext, this);
}
SqoopHCatExportFormat.java (project: aliyun-maxcompute-data-collectors)
public RecordReader<WritableComparable, HCatRecord>
    createHCatRecordReader(InputSplit split,
        TaskAttemptContext taskContext)
        throws IOException, InterruptedException {
  LOG.debug("Creating a base HCatRecordReader");
  return super.createRecordReader(split, taskContext);
}
HCatalogTestUtils.java (project: aliyun-maxcompute-data-collectors)
@Override
public void map(WritableComparable key, HCatRecord value,
    Context context) throws IOException, InterruptedException {
  try {
    recsRead.add(value);
    readRecordCount++;
  } catch (Exception e) {
    if (LOG.isDebugEnabled()) {
      e.printStackTrace(System.err);
    }
    throw new IOException(e);
  }
}
TestImportJob.java (project: aliyun-maxcompute-data-collectors)
private String[] getContent(Configuration conf, Path path) throws Exception {
  ClassLoader prevClassLoader = ClassLoaderStack.addJarFile(
      new Path(new Path(new SqoopOptions().getJarOutputDir()),
          getTableName() + ".jar").toString(),
      getTableName());
  FileSystem fs = FileSystem.getLocal(conf);
  FileStatus[] stats = fs.listStatus(path);
  Path[] paths = new Path[stats.length];
  for (int i = 0; i < stats.length; i++) {
    paths[i] = stats[i].getPath();
  }
  // Read all the files adding the value lines to the list.
  List<String> strings = new ArrayList<String>();
  for (Path filePath : paths) {
    if (filePath.getName().startsWith("_") || filePath.getName().startsWith(".")) {
      continue;
    }
    // Need to use new configuration object so that it has the proper classloaders.
    SequenceFile.Reader reader = new SequenceFile.Reader(fs, filePath, new Configuration());
    WritableComparable key = (WritableComparable)
        reader.getKeyClass().newInstance();
    Writable value = (Writable) reader.getValueClass().newInstance();
    while (reader.next(key, value)) {
      strings.add(value.toString());
    }
  }
  ClassLoaderStack.setCurrentClassLoader(prevClassLoader);
  return strings.toArray(new String[0]);
}
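The reader construction above uses the deprecated (FileSystem, Path, Configuration) constructor. The same loop body could be written with the option-based API introduced in Hadoop 2.x; a sketch, reusing the filePath, conf and strings names from above:

// Equivalent read loop with the option-based SequenceFile API (Hadoop 2.x):
SequenceFile.Reader reader =
    new SequenceFile.Reader(conf, SequenceFile.Reader.file(filePath));
Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
while (reader.next(key, value)) {
  strings.add(value.toString());
}
reader.close();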
ExternalMapReduce.java (project: hadoop)
public void map(WritableComparable key, Writable value,
    OutputCollector<WritableComparable, IntWritable> output,
    Reporter reporter)
    throws IOException {
  //check for classpath
  String classpath = System.getProperty("java.class.path");
  if (classpath.indexOf("testjob.jar") == -1) {
    throw new IOException("failed to find the library testjob.jar in "
        + classpath);
  }
  if (classpath.indexOf("test.jar") == -1) {
    throw new IOException("failed to find the library test.jar in "
        + classpath);
  }
  //fork off ls to see if the file exists.
  // java file.exists() will not work on
  // Windows since it is a symlink
  String[] argv = new String[7];
  argv[0] = "ls";
  argv[1] = "files_tmp";
  argv[2] = "localfilelink";
  argv[3] = "dfsfilelink";
  argv[4] = "tarlink";
  argv[5] = "ziplink";
  argv[6] = "test.tgz";
  Process p = Runtime.getRuntime().exec(argv);
  int ret = -1;
  try {
    ret = p.waitFor();
  } catch (InterruptedException ie) {
    //do nothing here.
  }
  if (ret != 0) {
    throw new IOException("files_tmp does not exist");
  }
}
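The fork to ls is there because File.exists() does not follow the distributed-cache symlinks on Windows. For reference, a sketch of the same check written with ProcessBuilder (same file names; the error handling choices here are mine, not the original's):

ProcessBuilder pb = new ProcessBuilder(
    "ls", "files_tmp", "localfilelink", "dfsfilelink",
    "tarlink", "ziplink", "test.tgz");
pb.redirectErrorStream(true);        // merge stderr into stdout for logging
Process proc = pb.start();
int exitCode;
try {
  exitCode = proc.waitFor();
} catch (InterruptedException ie) {
  Thread.currentThread().interrupt();
  throw new IOException("interrupted while waiting for ls", ie);
}
if (exitCode != 0) {
  throw new IOException("one of the distributed-cache links is missing");
}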
UtilsForTests.java (project: hadoop)
public void map(WritableComparable key, Writable value,
    OutputCollector<WritableComparable, Writable> out, Reporter reporter)
    throws IOException {
  //NOTE- the next line is required for the TestDebugScript test to succeed
  System.err.println("failing map");
  throw new RuntimeException("failing map");
}