Example source code for the Java class org.apache.hadoop.io.NullWritable

Source file: OverrideRecordReader.java
Project: hadoop

@SuppressWarnings("unchecked") // Explicit check for value class agreement
public V createValue() {
  if (null == valueclass) {
    Class<?> cls = kids[kids.length - 1].createValue().getClass();
    for (int i = kids.length - 1; cls.equals(NullWritable.class); i--) {
      cls = kids[i].createValue().getClass();
    }
    valueclass = cls.asSubclass(Writable.class);
  }
  if (valueclass.equals(NullWritable.class)) {
    return (V) NullWritable.get();
  }
  return (V) ReflectionUtils.newInstance(valueclass, null);
}
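
The composite reader above walks its child readers, skipping any whose value type is NullWritable, and only falls back to the shared NullWritable instance when every child produces one. The standalone sketch below (not part of OverrideRecordReader) illustrates why returning NullWritable.get() instead of reflecting a new object is safe: the class is a stateless singleton.

import org.apache.hadoop.io.NullWritable;

// Standalone sketch: NullWritable is a stateless singleton, so handing out
// NullWritable.get() repeatedly is equivalent to creating "new" values.
public class NullWritableSingletonDemo {
  public static void main(String[] args) {
    NullWritable a = NullWritable.get();
    NullWritable b = NullWritable.get();
    System.out.println(a == b);         // true: always the same instance
    System.out.println(a.compareTo(b)); // 0: all NullWritables compare equal
  }
}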

Source file: HDFSWriter.java
Project: ViraPipe

@Override
public RecordWriter<NullWritable, SAMRecordWritable> getRecordWriter(TaskAttemptContext ctx,
    Path outputPath) throws IOException {
  // the writers require a header in order to create a codec, even if
  // the header isn't being written out
  setSAMHeader(samheader);
  setWriteHeader(writeHeader);
  return super.getRecordWriter(ctx, outputPath);
}

Source file: VPReducer.java
Project: PigSPARQL

@Override
public void reduce(Text key, Iterable<TextPair> values, Context context)
    throws IOException, InterruptedException {
  for (TextPair value : values) {
    // Use key (Predicate) as folder name for Vertical Partitioning
    multipleOutputs.write(NullWritable.get(), new Text(value.getFirst() + "\t" + value.getSecond()),
        Util.generateFileName(key.toString()));
    // Write all parsed triples also to "inputData" for queries where Predicate is not known
    multipleOutputs.write(NullWritable.get(), new Text(value.getFirst() + "\t" + key.toString() + "\t" + value.getSecond()),
        Util.generateFileName("inputData"));
  }
}
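
The multipleOutputs field used above is not shown in this excerpt. As a rough sketch of the usual lifecycle (assuming the standard org.apache.hadoop.mapreduce.lib.output.MultipleOutputs API and plain Text values rather than PigSPARQL's TextPair), it would typically be created in setup() and closed in cleanup():

import java.io.IOException;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.output.MultipleOutputs;

// Hypothetical reducer (not from PigSPARQL): shows where the MultipleOutputs
// instance is created, how records are routed to a per-key base path with a
// NullWritable key, and where it is closed.
public class DynamicPathReducer extends Reducer<Text, Text, NullWritable, Text> {

  private MultipleOutputs<NullWritable, Text> mos;

  @Override
  protected void setup(Context context) {
    mos = new MultipleOutputs<NullWritable, Text>(context);
  }

  @Override
  protected void reduce(Text key, Iterable<Text> values, Context context)
      throws IOException, InterruptedException {
    for (Text value : values) {
      // Route each record to a directory derived from the key; the
      // NullWritable key keeps the key column out of the written files.
      mos.write(NullWritable.get(), value, key.toString() + "/part");
    }
  }

  @Override
  protected void cleanup(Context context) throws IOException, InterruptedException {
    mos.close();
  }
}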

Source file: GenerateData.java
Project: hadoop

@Override
public Job call() throws IOException, InterruptedException,
                         ClassNotFoundException {
  UserGroupInformation ugi = UserGroupInformation.getLoginUser();
  ugi.doAs(new PrivilegedExceptionAction<Job>() {
    public Job run() throws IOException, ClassNotFoundException,
                            InterruptedException {
      // check if compression emulation is enabled
      if (CompressionEmulationUtil
          .isCompressionEmulationEnabled(job.getConfiguration())) {
        CompressionEmulationUtil.configure(job);
      } else {
        configureRandomBytesDataGenerator();
      }
      job.submit();
      return job;
    }

    private void configureRandomBytesDataGenerator() {
      job.setMapperClass(GenDataMapper.class);
      job.setNumReduceTasks(0);
      job.setMapOutputKeyClass(NullWritable.class);
      job.setMapOutputValueClass(BytesWritable.class);
      job.setInputFormatClass(GenDataFormat.class);
      job.setOutputFormatClass(RawBytesOutputFormat.class);
      job.setJarByClass(GenerateData.class);
      try {
        FileInputFormat.addInputPath(job, new Path("ignored"));
      } catch (IOException e) {
        LOG.error("Error while adding input path ", e);
      }
    }
  });
  return job;
}
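
The job configured above is map-only and emits NullWritable keys with BytesWritable payloads. The actual Gridmix GenDataMapper is not reproduced here; the mapper below is only an illustrative stand-in for that pattern, and its input key/value types and buffer size are assumptions made for the sketch.

import java.io.IOException;
import java.util.Random;

import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Mapper;

// Illustrative stand-in (not the actual Gridmix GenDataMapper): the input
// value is assumed to be the number of bytes this task should generate.
public class RandomBytesMapper
    extends Mapper<NullWritable, LongWritable, NullWritable, BytesWritable> {

  private final Random random = new Random();
  private final byte[] buffer = new byte[64 * 1024];
  private final BytesWritable chunk = new BytesWritable();

  @Override
  protected void map(NullWritable key, LongWritable bytesToWrite, Context context)
      throws IOException, InterruptedException {
    long remaining = bytesToWrite.get();
    while (remaining > 0) {
      random.nextBytes(buffer);
      int len = (int) Math.min(buffer.length, remaining);
      chunk.set(buffer, 0, len);                // reuse the Writable between writes
      context.write(NullWritable.get(), chunk); // the key carries no information
      remaining -= len;
    }
  }
}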

Source file: HBaseImportJob.java
Project: aliyun-maxcompute-data-collectors

@Override
protected void configureMapper(Job job, String tableName,
    String tableClassName) throws IOException {
  job.setOutputKeyClass(SqoopRecord.class);
  job.setOutputValueClass(NullWritable.class);
  job.setMapperClass(getMapperClass());
}
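
Here the imported record itself is the map output key and NullWritable fills the value slot, so nothing beyond the record is serialized per row. A minimal hypothetical mapper following that shape (with Text standing in for SqoopRecord) might look like this:

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// Hypothetical illustration: when the record is the output key, NullWritable
// is emitted as the value so no extra payload is written per record.
public class RecordAsKeyMapper extends Mapper<LongWritable, Text, Text, NullWritable> {
  @Override
  protected void map(LongWritable offset, Text row, Context context)
      throws IOException, InterruptedException {
    context.write(row, NullWritable.get());
  }
}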

Source file: TestTableSnapshotInputFormat.java
Project: ditb

public static void doTestWithMapReduce(HBaseTestingUtility util, TableName tableName,
    String snapshotName, byte[] startRow, byte[] endRow, Path tableDir, int numRegions,
    int expectedNumSplits, boolean shutdownCluster) throws Exception {
  // create the table and snapshot
  createTableAndSnapshot(util, tableName, snapshotName, startRow, endRow, numRegions);

  if (shutdownCluster) {
    util.shutdownMiniHBaseCluster();
  }

  try {
    // create the job
    JobConf jobConf = new JobConf(util.getConfiguration());

    jobConf.setJarByClass(util.getClass());
    org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJars(jobConf,
        TestTableSnapshotInputFormat.class);

    TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS,
        TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
        NullWritable.class, jobConf, true, tableDir);

    jobConf.setReducerClass(TestTableSnapshotInputFormat.TestTableSnapshotReducer.class);
    jobConf.setNumReduceTasks(1);
    jobConf.setOutputFormat(NullOutputFormat.class);

    RunningJob job = JobClient.runJob(jobConf);
    Assert.assertTrue(job.isSuccessful());
  } finally {
    if (!shutdownCluster) {
      util.getHBaseAdmin().deleteSnapshot(snapshotName);
      util.deleteTable(tableName);
    }
  }
}

Source file: MainframeDatasetImportMapper.java
Project: aliyun-maxcompute-data-collectors

public void map(LongWritable key, SqoopRecord val, Context context)
    throws IOException, InterruptedException {
  String dataset = inputSplit.getCurrentDataset();
  outkey.set(val.toString());
  numberOfRecords++;
  mos.write(outkey, NullWritable.get(), dataset);
}

Source file: BytesWritableCompactionReducerTest.java
Project: dataSqueeze

@Test
public void testReduceParentKey() throws IOException {
  configuration.set("compactionSourcePath", "/src/path");
  configuration.set("compactionTargetPath", "/target/path");
  values.add(value1);
  values.add(value2);
  reduceDriver.withInput(inputParentKey, values);
  reduceDriver.withPathOutput(NullWritable.get(), value1, "value/target/path");
  reduceDriver.withPathOutput(NullWritable.get(), value2, "value/target/path");
  reduceDriver.runTest();
}

Source file: TestGridMixClasses.java
Project: hadoop

@Override
public DataInputBuffer getValue() throws IOException {
  ByteArrayOutputStream dt = new ByteArrayOutputStream();
  NullWritable key = NullWritable.get();
  key.write(new DataOutputStream(dt));
  DataInputBuffer result = new DataInputBuffer();
  byte[] b = dt.toByteArray();
  result.reset(b, 0, b.length);
  return result;
}
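
Because NullWritable.write() emits no bytes, the DataInputBuffer returned above always wraps an empty array. A quick standalone check of that behaviour (not part of the test class):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.NullWritable;

// Standalone check: serializing NullWritable produces zero bytes.
public class NullWritableSerializationCheck {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    NullWritable.get().write(new DataOutputStream(out));
    System.out.println(out.size()); // prints 0
  }
}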

Source file: AvroOutputFormat.java
Project: aliyun-maxcompute-data-collectors

@Override
public RecordWriter<AvroWrapper<T>, NullWritable> getRecordWriter(
    TaskAttemptContext context) throws IOException, InterruptedException {

  boolean isMapOnly = context.getNumReduceTasks() == 0;
  Schema schema =
      isMapOnly ? AvroJob.getMapOutputSchema(context.getConfiguration())
                : AvroJob.getOutputSchema(context.getConfiguration());

  final DataFileWriter<T> WRITER =
      new DataFileWriter<T>(new ReflectDatumWriter<T>());
  configureDataFileWriter(WRITER, context);

  Path path = getDefaultWorkFile(context, EXT);
  WRITER.create(schema,
      path.getFileSystem(context.getConfiguration()).create(path));

  return new RecordWriter<AvroWrapper<T>, NullWritable>() {
    @Override
    public void write(AvroWrapper<T> wrapper, NullWritable ignore)
        throws IOException {
      WRITER.append(wrapper.datum());
    }

    @Override
    public void close(TaskAttemptContext taskAttemptContext)
        throws IOException, InterruptedException {
      WRITER.close();
    }
  };
}
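
The record writer above ignores its NullWritable value entirely and appends only the Avro datum carried in the key. A hypothetical mapper feeding such a writer could look like the sketch below; the inline schema and the field name "text" are made up for illustration and are not part of the project above.

import java.io.IOException;

import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.mapred.AvroWrapper;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// Hypothetical mapper: wraps each input line in a one-field Avro record and
// emits it as the key, with NullWritable in the value slot the writer ignores.
public class AvroEmittingMapper
    extends Mapper<LongWritable, Text, AvroWrapper<GenericRecord>, NullWritable> {

  private static final Schema SCHEMA = new Schema.Parser().parse(
      "{\"type\":\"record\",\"name\":\"Line\","
      + "\"fields\":[{\"name\":\"text\",\"type\":\"string\"}]}");

  @Override
  protected void map(LongWritable offset, Text line, Context context)
      throws IOException, InterruptedException {
    GenericRecord record = new GenericData.Record(SCHEMA);
    record.put("text", line.toString());
    context.write(new AvroWrapper<GenericRecord>(record), NullWritable.get());
  }
}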