Example usages of the Java class org.apache.hadoop.io.WritableComparator

JobConf.java (project: hadoop)

/**
* Get the {@link RawComparator} comparator used to compare keys.
*
* @return the {@link RawComparator} comparator used to compare keys.
*/
public RawComparator getOutputKeyComparator() {
  // Prefer a user-supplied comparator registered under
  // mapreduce.job.output.key.comparator.class (JobContext.KEY_COMPARATOR).
  Class<? extends RawComparator> theClass = getClass(
      JobContext.KEY_COMPARATOR, null, RawComparator.class);
  if (theClass != null) {
    return ReflectionUtils.newInstance(theClass, this);
  }
  // Fall back to the WritableComparator registered for the map output key class.
  return WritableComparator.get(
      getMapOutputKeyClass().asSubclass(WritableComparable.class), this);
}
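For context, the comparator this getter resolves is normally installed by the job itself; a minimal sketch of both paths, assuming Text map output keys (the setup below is illustrative, not part of the source):

import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;

JobConf conf = new JobConf();
conf.setMapOutputKeyClass(Text.class);

// Explicit path: register a RawComparator class; getOutputKeyComparator()
// will instantiate it reflectively.
conf.setOutputKeyComparatorClass(Text.Comparator.class);

// Fallback path: with no comparator configured, the getter resolves
// WritableComparator.get(Text.class, conf) from Text's static registration.
RawComparator cmp = conf.getOutputKeyComparator();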
BinaryPartitioner.java (project: hadoop)
/**
* Use (the specified slice of the array returned by)
* {@link BinaryComparable#getBytes()} to partition.
*/
@Override
public int getPartition(BinaryComparable key, V value, int numPartitions) {
  int length = key.getLength();
  // Offsets may be negative (counted from the end of the key);
  // adding length and taking the modulus maps them into [0, length).
  int leftIndex = (leftOffset + length) % length;
  int rightIndex = (rightOffset + length) % length;
  // Hash only the configured slice of the serialized key bytes.
  int hash = WritableComparator.hashBytes(key.getBytes(),
      leftIndex, rightIndex - leftIndex + 1);
  // Mask the sign bit so the partition index is non-negative.
  return (hash & Integer.MAX_VALUE) % numPartitions;
}
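The slice boundaries come from configuration; a brief, illustrative sketch of wiring the partitioner into a job (the job name is invented):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.partition.BinaryPartitioner;

// Illustrative: partition on bytes 0..3 of each BinaryComparable key,
// so keys that share a 4-byte prefix go to the same reducer.
Configuration conf = new Configuration();
BinaryPartitioner.setOffsets(conf, 0, 3);

Job job = Job.getInstance(conf, "binary-partition-demo"); // name invented
job.setPartitionerClass(BinaryPartitioner.class);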
WrappedRecordReader.java (project: hadoop)
public void initialize(InputSplit split,
                       TaskAttemptContext context)
    throws IOException, InterruptedException {
  rr.initialize(split, context);
  conf = context.getConfiguration();
  // Read the first record so the key/value classes can be inspected.
  nextKeyValue();
  if (!empty) {
    keyclass = key.getClass().asSubclass(WritableComparable.class);
    valueclass = value.getClass();
    // Resolve a comparator lazily from the observed key class.
    if (cmp == null) {
      cmp = WritableComparator.get(keyclass, conf);
    }
  }
}
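WritableComparator.get, used above, first consults comparators registered via WritableComparator.define; for unregistered key classes it falls back to a generic comparator that deserializes both keys. A small illustrative check:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.WritableComparator;

// LongWritable registers an optimized raw comparator in a static block,
// so get() returns that registration rather than a generic fallback.
WritableComparator cmp =
    WritableComparator.get(LongWritable.class, new Configuration());
int c = cmp.compare(new LongWritable(1L), new LongWritable(2L)); // c < 0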
Parser.java (project: hadoop)
@Override
public void setKeyComparator(Class<? extends WritableComparator> cmpcl) {
  super.setKeyComparator(cmpcl);
  // Propagate the comparator class to every child node of the join expression.
  for (Node n : kids) {
    n.setKeyComparator(cmpcl);
  }
}
CompositeRecordReader.java (project: hadoop)
@SuppressWarnings("unchecked")
public void initialize(InputSplit split, TaskAttemptContext context)
    throws IOException, InterruptedException {
  if (kids != null) {
    for (int i = 0; i < kids.length; ++i) {
      kids[i].initialize(((CompositeInputSplit)split).get(i), context);
      if (kids[i].key() == null) {
        continue;
      }
      // Get the key class from the first child with a key.
      if (keyclass == null) {
        keyclass = kids[i].createKey().getClass().
          asSubclass(WritableComparable.class);
      }
      // Create the priority queue, ordering readers by their current key.
      if (null == q) {
        cmp = WritableComparator.get(keyclass, conf);
        q = new PriorityQueue<ComposableRecordReader<K,?>>(3,
            new Comparator<ComposableRecordReader<K,?>>() {
              public int compare(ComposableRecordReader<K,?> o1,
                                 ComposableRecordReader<K,?> o2) {
                return cmp.compare(o1.key(), o2.key());
              }
            });
      }
      // Explicit check for key class agreement.
      if (!keyclass.equals(kids[i].key().getClass())) {
        throw new ClassCastException("Child key classes fail to agree");
      }
      // Add the child to the priority queue if it has any elements.
      if (kids[i].hasNext()) {
        q.add(kids[i]);
      }
    }
  }
}
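For orientation, the composite reader above is driven by a join expression supplied to CompositeInputFormat; a hedged sketch of a typical job setup (the paths and job name are invented):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.join.CompositeInputFormat;

// Illustrative inner join of two sorted, identically-partitioned inputs;
// every source must use the same WritableComparable key class.
Job job = Job.getInstance(new Configuration(), "composite-join-demo");
job.setInputFormatClass(CompositeInputFormat.class);
job.getConfiguration().set(CompositeInputFormat.JOIN_EXPR,
    CompositeInputFormat.compose("inner", SequenceFileInputFormat.class,
        new Path("/data/left"), new Path("/data/right")));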
TestGridmixRecord.java (project: hadoop)
static void binSortTest(GridmixRecord x, GridmixRecord y, int min,
    int max, WritableComparator cmp) throws Exception {
  final Random r = new Random();
  final long s = r.nextLong();
  r.setSeed(s);
  LOG.info("sort: " + s);
  final DataOutputBuffer out1 = new DataOutputBuffer();
  final DataOutputBuffer out2 = new DataOutputBuffer();
  for (int i = min; i < max; ++i) {
    final long seed1 = r.nextLong();
    setSerialize(x, seed1, i, out1);
    assertEquals(0, x.compareSeed(seed1, Math.max(0, i - x.fixedBytes())));
    final long seed2 = r.nextLong();
    setSerialize(y, seed2, i, out2);
    assertEquals(0, y.compareSeed(seed2, Math.max(0, i - x.fixedBytes())));
    // For equal-sized records, the raw byte comparison must agree with
    // both the object comparison and the registered comparator.
    final int chk = WritableComparator.compareBytes(
        out1.getData(), 0, out1.getLength(),
        out2.getData(), 0, out2.getLength());
    assertEquals(Integer.signum(chk), Integer.signum(x.compareTo(y)));
    assertEquals(Integer.signum(chk), Integer.signum(cmp.compare(
        out1.getData(), 0, out1.getLength(),
        out2.getData(), 0, out2.getLength())));
    // Append a second copy of each record and verify it compares equal
    // to the first copy in the same buffer.
    final int s1 = out1.getLength();
    x.write(out1);
    assertEquals(0, cmp.compare(out1.getData(), 0, s1,
        out1.getData(), s1, out1.getLength() - s1));
    final int s2 = out2.getLength();
    y.write(out2);
    assertEquals(0, cmp.compare(out2.getData(), 0, s2,
        out2.getData(), s2, out2.getLength() - s2));
    assertEquals(Integer.signum(chk), Integer.signum(cmp.compare(
        out1.getData(), 0, s1,
        out2.getData(), s2, out2.getLength() - s2)));
  }
}
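As a point of reference, WritableComparator.compareBytes performs a lexicographic comparison of unsigned byte values; a tiny illustrative check:

// Unsigned semantics: 0x80 (128) sorts after 0x7f (127), even though
// (byte) 0x80 is negative in Java.
byte[] a = { (byte) 0x7f };
byte[] b = { (byte) 0x80 };
assert WritableComparator.compareBytes(a, 0, 1, b, 0, 1) < 0;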
Merge.java (project: ditb)
private boolean notInTable(final TableName tn, final byte [] rn) {
  // A region belongs to a table iff the region name begins with the
  // table name bytes; compare only that leading slice.
  if (WritableComparator.compareBytes(tn.getName(), 0, tn.getName().length,
      rn, 0, tn.getName().length) != 0) {
    LOG.error("Region " + Bytes.toStringBinary(rn) + " does not belong to table " +
        tn);
    return true;
  }
  return false;
}
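For illustration, HBase-style region names begin with the table name, which is what the check above relies on (the values below are invented):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

// Region name layout: "<table>,<startKey>,<timestamp>.<encodedName>."
TableName tn = TableName.valueOf("t1");
byte[] rn = Bytes.toBytes("t1,,1400000000000.abcdef0123456789abcdef0123456789.");
// notInTable(tn, rn) returns false: the leading bytes match the table name.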
LeveldbUtils.java (project: aliyun-oss-hadoop-fs)
/**
* Returns true if the byte array begins with the specified prefix.
*/
public static boolean prefixMatches(byte[] prefix, int prefixlen,
    byte[] b) {
  if (b.length < prefixlen) {
    return false;
  }
  return WritableComparator.compareBytes(prefix, 0, prefixlen, b, 0,
      prefixlen) == 0;
}
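A quick illustrative use of the helper (the key and prefix values are invented):

import java.nio.charset.StandardCharsets;

byte[] prefix = "row-".getBytes(StandardCharsets.UTF_8);
byte[] key = "row-42/attempt_0".getBytes(StandardCharsets.UTF_8);

boolean hit = prefixMatches(prefix, prefix.length, key);  // true
boolean miss = prefixMatches(key, key.length, prefix);    // false: candidate shorter than prefix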
TestComparators.java (project: aliyun-oss-hadoop-fs)
/**
* Test a user comparator that relies on deserializing both arguments
* for each compare.
*/
@Test
public void testBakedUserComparator() throws Exception {
  MyWritable a = new MyWritable(8, 8);
  MyWritable b = new MyWritable(7, 9);
  assertTrue(a.compareTo(b) > 0);
  assertTrue(WritableComparator.get(MyWritable.class).compare(a, b) < 0);
}
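The MyWritable type in this test is defined elsewhere in the file; the usual pattern it follows registers a "baked" comparator via WritableComparator.define in a static block, so that get() returns it instead of the generic fallback. A sketch under that assumption (field names are invented):

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;

// Sketch: a writable whose registered comparator sorts on the second
// field, which is why compare() above disagrees with compareTo().
public class MyWritable implements WritableComparable<MyWritable> {
  int first, second;  // invented field names

  public MyWritable() { }
  public MyWritable(int first, int second) {
    this.first = first;
    this.second = second;
  }

  @Override public void write(DataOutput out) throws IOException {
    out.writeInt(first);
    out.writeInt(second);
  }
  @Override public void readFields(DataInput in) throws IOException {
    first = in.readInt();
    second = in.readInt();
  }
  @Override public int compareTo(MyWritable o) {
    return Integer.compare(first, o.first);  // natural order: first field
  }

  static {
    // Register the baked comparator; createInstances=true lets the raw
    // compare path deserialize both arguments before comparing.
    WritableComparator.define(MyWritable.class,
        new WritableComparator(MyWritable.class, true) {
          @Override public int compare(WritableComparable a, WritableComparable b) {
            return Integer.compare(((MyWritable) a).second, ((MyWritable) b).second);
          }
        });
  }
}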
JobConf.java (project: aliyun-oss-hadoop-fs): its getOutputKeyComparator() is identical to the hadoop JobConf.java example at the top of this page.