Example source code for the Java class org.apache.hadoop.io.ObjectWritable

RPC.java (project: spark_deep)
/**
 * Construct & cache an IPC client with the user-provided SocketFactory
 * if no cached client exists.
 *
 * @param conf Configuration
 * @return an IPC client
 */
private synchronized Client getClient(Configuration conf,
                                         SocketFactory factory) {
  // Construct & cache client.  The configuration is only used for timeout,
  // and Clients have connection pools.  So we can either (a) lose some
  // connection pooling and leak sockets, or (b) use the same timeout for all
  // configurations.  Since the IPC is usually intended globally, not
  // per-job, we choose (b).
  Client client = clients.get(factory);
  if (client == null) {
    client = new Client(ObjectWritable.class, conf, factory);
    clients.put(factory, client);
  } else {
    client.incCount();
  }
  return client;
}
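
The client above is constructed with ObjectWritable.class as its value class, so every RPC response travels as a type-tagged ObjectWritable. A minimal round trip of that mechanism (a hypothetical standalone demo using only the public Hadoop io API, not code from spark_deep):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.ObjectWritable;
import org.apache.hadoop.io.Text;

public class ObjectWritableRoundTrip {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // Serialize: the declared class is written alongside the instance,
    // which is what lets the RPC layer ship values of arbitrary type.
    DataOutputBuffer out = new DataOutputBuffer();
    ObjectWritable.writeObject(out, new Text("hello"), Text.class, conf);

    // Deserialize without knowing the concrete type up front.
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    Object value = ObjectWritable.readObject(in, conf);
    System.out.println(value);  // prints: hello
  }
}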
RPC.java (project: spark_deep)
@Override
public Object invoke(Object proxy, Method method, Object[] args)
    throws Throwable {
  final boolean logDebug = LOG.isDebugEnabled();
  long startTime = 0;
  if (logDebug) {
    startTime = System.currentTimeMillis();
  }

  ObjectWritable value = (ObjectWritable)
      client.call(new Invocation(method, args), remoteId);
  if (logDebug) {
    long callTime = System.currentTimeMillis() - startTime;
    LOG.debug("Call: " + method.getName() + " " + callTime);
  }
  return value.get();
}
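
This invoke() implements java.lang.reflect.InvocationHandler; RPC wires it behind a dynamic proxy so that ordinary interface calls become remote Invocations. A minimal sketch of that pattern, with a hypothetical EchoProtocol interface and the remote call replaced by a local echo:

import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;

public class ProxyDemo {
  // Hypothetical protocol interface, for illustration only.
  interface EchoProtocol {
    String echo(String message);
  }

  public static void main(String[] args) {
    InvocationHandler handler = new InvocationHandler() {
      @Override
      public Object invoke(Object proxy, Method method, Object[] callArgs) {
        // A real Invoker would wrap (method, callArgs) in an Invocation,
        // send it via Client.call(), and unwrap the ObjectWritable reply;
        // here we answer locally.
        return "echo: " + callArgs[0];
      }
    };

    EchoProtocol p = (EchoProtocol) Proxy.newProxyInstance(
        EchoProtocol.class.getClassLoader(),
        new Class<?>[] { EchoProtocol.class },
        handler);
    System.out.println(p.echo("hi"));  // prints: echo: hi
  }
}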
Loops.java (project: anthelion)
/**
 * Wrap values in ObjectWritable.
 */
public void map(Text key, Writable value,
  OutputCollector<Text, ObjectWritable> output, Reporter reporter)
  throws IOException {

  ObjectWritable objWrite = new ObjectWritable();
  Writable cloned = null;
  if (value instanceof LinkDatum) {
    cloned = new Text(((LinkDatum)value).getUrl());
  }
  else {
    cloned = WritableUtils.clone(value, conf);
  }
  objWrite.set(cloned);
  output.collect(key, objWrite);
}
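
The clone matters because MapReduce reuses Writable instances between map() calls; wrapping the live object without copying could let a later input mutate earlier output. WritableUtils.clone makes a deep copy by serializing and deserializing. A small illustration (hypothetical demo, not part of anthelion):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableUtils;

public class CloneDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    Text original = new Text("http://example.com");

    // Deep copy via serialize/deserialize; safe to keep even after the
    // framework reuses and overwrites the original instance.
    Text copy = WritableUtils.clone(original, conf);

    original.set("mutated");
    System.out.println(copy);  // prints: http://example.com
  }
}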
RPC.java (project: hadoop-on-lustre)
/**
 * Construct & cache an IPC client with the user-provided SocketFactory 
 * if no cached client exists.
 * 
 * @param conf Configuration
 * @return an IPC client
 */
private synchronized Client getClient(Configuration conf,
    SocketFactory factory) {
  // Construct & cache client.  The configuration is only used for timeout,
  // and Clients have connection pools.  So we can either (a) lose some
  // connection pooling and leak sockets, or (b) use the same timeout for all
  // configurations.  Since the IPC is usually intended globally, not
  // per-job, we choose (b).
  Client client = clients.get(factory);
  if (client == null) {
    client = new Client(ObjectWritable.class, conf, factory);
    clients.put(factory, client);
  } else {
    client.incCount();
  }
  return client;
}
RPC.java (project: hadoop-on-lustre)
public Object invoke(Object proxy, Method method, Object[] args)
  throws Throwable {
  final boolean logDebug = LOG.isDebugEnabled();
  long startTime = 0;
  if (logDebug) {
    startTime = System.currentTimeMillis();
  }

  ObjectWritable value = (ObjectWritable)
    client.call(new Invocation(method, args), remoteId);
  if (logDebug) {
    long callTime = System.currentTimeMillis() - startTime;
    LOG.debug("Call: " + method.getName() + " " + callTime);
  }
  return value.get();
}
MapFieldValueFilter.java (project: gora)
@Override
public void readFields(DataInput in) throws IOException {
  fieldName = Text.readString(in);
  mapKey = new Utf8(Text.readString(in));
  filterOp = WritableUtils.readEnum(in, FilterOp.class);
  operands.clear();
  int operandsSize = WritableUtils.readVInt(in);
  for (int i = 0; i < operandsSize; i++) {
    Object operand = ObjectWritable.readObject(in, conf);
    if (operand instanceof String) {
      operand = new Utf8((String) operand);
    }
    operands.add(operand);
  }
  filterIfMissing = in.readBoolean();
}
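
readFields() implies a symmetric write() with the fields in the same order. A sketch of that counterpart (assuming the same fieldName, mapKey, filterOp, operands, filterIfMissing, and conf members; not verified against the gora source):

@Override
public void write(DataOutput out) throws IOException {
  Text.writeString(out, fieldName);
  Text.writeString(out, mapKey.toString());
  WritableUtils.writeEnum(out, filterOp);
  WritableUtils.writeVInt(out, operands.size());
  for (Object operand : operands) {
    // Utf8 operands were promoted from String on read, so write them
    // back as String for ObjectWritable's benefit.
    if (operand instanceof Utf8) {
      ObjectWritable.writeObject(out, operand.toString(), String.class, conf);
    } else {
      ObjectWritable.writeObject(out, operand, operand.getClass(), conf);
    }
  }
  out.writeBoolean(filterIfMissing);
}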
RasterTileResizeCombiner.java (project: geowave)
@Override
protected void reduceNativeValues(
        final GeoWaveInputKey key,
        final Iterable<Object> values,
        final ReduceContext<GeoWaveInputKey, ObjectWritable, GeoWaveInputKey, Object> context )
        throws IOException,
        InterruptedException {
    final GridCoverage mergedCoverage = helper.getMergedCoverage(
            key,
            values);
    if (mergedCoverage != null) {
        context.write(
                key,
                mergedCoverage);
    }

}
RasterTileResizeReducer.java (project: geowave)
@Override
protected void reduceNativeValues(
        final GeoWaveInputKey key,
        final Iterable<Object> values,
        final Reducer<GeoWaveInputKey, ObjectWritable, GeoWaveOutputKey, GridCoverage>.Context context )
        throws IOException,
        InterruptedException {
    final GridCoverage mergedCoverage = helper.getMergedCoverage(
            key,
            values);
    if (mergedCoverage != null) {
        context.write(
                helper.getGeoWaveOutputKey(),
                mergedCoverage);
    }
}
ConvexHullMapReduce.java (project: geowave)
@Override
protected void mapWritableValue(
        final GeoWaveInputKey key,
        final ObjectWritable value,
        final Mapper<GeoWaveInputKey, ObjectWritable, GeoWaveInputKey, ObjectWritable>.Context context )
        throws IOException,
        InterruptedException {
    // The value is cached for efficiency since the output is the input
    // object; the de-serialized input object is only used for sampling.
    // For simplicity, allow the de-serialization to occur in all cases,
    // even though some sampling functions do not inspect the input object.
    currentValue = value;
    super.mapWritableValue(
            key,
            value,
            context);
}
ConvexHullMapReduce.java (project: geowave)
@Override
protected void mapNativeValue(
        final GeoWaveInputKey key,
        final Object value,
        final org.apache.hadoop.mapreduce.Mapper<GeoWaveInputKey, ObjectWritable, GeoWaveInputKey, ObjectWritable>.Context context )
        throws IOException,
        InterruptedException {

    @SuppressWarnings("unchecked")
    final AnalyticItemWrapper<T> wrapper = itemWrapperFactory.create((T) value);
    outputKey.setAdapterId(key.getAdapterId());
    outputKey.setDataId(new ByteArrayId(
            StringUtils.stringToBinary(nestedGroupCentroidAssigner.getGroupForLevel(wrapper))));
    outputKey.setInsertionId(key.getInsertionId());
    context.write(
            outputKey,
            currentValue);
}
InputToOutputKeyReducer.java (project: geowave)
@Override
protected void setup(
        final Reducer<GeoWaveInputKey, ObjectWritable, GeoWaveOutputKey, Object>.Context context )
        throws IOException,
        InterruptedException {
    super.setup(context);
    final ScopedJobConfiguration config = new ScopedJobConfiguration(
            context.getConfiguration(),
            InputToOutputKeyReducer.class,
            LOGGER);
    final ByteArrayId indexId = new ByteArrayId(
            config.getString(
                    OutputParameters.Output.INDEX_ID,
                    "na"));
    final List<ByteArrayId> indexIds = new ArrayList<ByteArrayId>();
    indexIds.add(indexId);
    outputKey = new GeoWaveOutputKey(
            new ByteArrayId(
                    "na"),
            indexIds);
}
SimpleFeatureOutputReducer.java (project: geowave)
@Override
protected void reduceNativeValues(
        final GeoWaveInputKey key,
        final Iterable<Object> values,
        final ReduceContext<GeoWaveInputKey, ObjectWritable, GeoWaveInputKey, Object> context )
        throws IOException,
        InterruptedException {
    final Iterator<Object> valIt = values.iterator();
    if (valIt.hasNext()) {
        key.setAdapterId(outputAdapter.getAdapterId());
        final SimpleFeature feature = getSimpleFeature(
                key,
                valIt.next());
        context.write(
                key,
                feature);
    }
}
GeoWaveInputLoadJobRunner.java (project: geowave)
@Override
public void configure(
        final Job job )
        throws Exception {

    job.setMapperClass(Mapper.class);
    job.setReducerClass(InputToOutputKeyReducer.class);
    job.setMapOutputKeyClass(GeoWaveInputKey.class);
    job.setMapOutputValueClass(ObjectWritable.class);
    job.setOutputKeyClass(GeoWaveOutputKey.class);
    job.setOutputValueClass(Object.class);
    job.setSpeculativeExecution(false);

    job.setJobName("GeoWave Input to Output");
    job.setReduceSpeculativeExecution(false);

}
KSamplerMapReduce.java (project: geowave)
@Override
protected void mapWritableValue(
        final GeoWaveInputKey key,
        final ObjectWritable value,
        final Mapper<GeoWaveInputKey, ObjectWritable, GeoWaveInputKey, ObjectWritable>.Context context )
        throws IOException,
        InterruptedException {
    // The value is cached for efficiency since the output is the input
    // object; the de-serialized input object is only used for sampling.
    // For simplicity, allow the de-serialization to occur in all cases,
    // even though some sampling functions do not inspect the input object.
    currentValue = value;
    super.mapWritableValue(
            key,
            value,
            context);
}
KSamplerMapReduce.java (project: geowave)
@Override
protected void mapNativeValue(
        final GeoWaveInputKey key,
        final Object value,
        final org.apache.hadoop.mapreduce.Mapper<GeoWaveInputKey, ObjectWritable, GeoWaveInputKey, ObjectWritable>.Context context )
        throws IOException,
        InterruptedException {
    @SuppressWarnings("unchecked")
    final double rank = samplingFunction.rank(
            sampleSize,
            (T) value);
    if (rank > 0.0000000001) {
        final AnalyticItemWrapper<Object> wrapper = itemWrapperFactory.create(value);
        outputKey.setDataId(new ByteArrayId(
                keyManager.putData(
                        nestedGroupCentroidAssigner.getGroupForLevel(wrapper),
                        1.0 - rank, // sorts in ascending order
                        key.getDataId().getBytes())));
        outputKey.setAdapterId(key.getAdapterId());
        outputKey.setInsertionId(key.getInsertionId());
        context.write(
                outputKey,
                currentValue);
    }
}
KMeansMapReduce.java (project: geowave)
@Override
protected void mapNativeValue(
        final GeoWaveInputKey key,
        final Object value,
        final org.apache.hadoop.mapreduce.Mapper<GeoWaveInputKey, ObjectWritable, GroupIDText, BytesWritable>.Context context )
        throws IOException,
        InterruptedException {
    final AnalyticItemWrapper<Object> item = itemWrapperFactory.create(value);
    nestedGroupCentroidAssigner.findCentroidForLevel(
            item,
            centroidAssociationFn);
    final byte[] outData = association.toBinary();
    outputValWritable.set(
            outData,
            0,
            outData.length);
    context.write(
            outputKeyWritable,
            outputValWritable);
}
UpdateCentroidCostMapReduce.java (project: geowave)
@Override
protected void mapNativeValue(
        final GeoWaveInputKey key,
        final Object value,
        final Mapper<GeoWaveInputKey, ObjectWritable, GroupIDText, CountofDoubleWritable>.Context context )
        throws IOException,
        InterruptedException {
    final AnalyticItemWrapper<Object> wrappedItem = itemWrapperFactory.create(value);
    dw.set(
            nestedGroupCentroidAssigner.findCentroidForLevel(
                    wrappedItem,
                    centroidAssociationFn),
            1.0);

    context.write(
            outputWritable,
            dw);
}
BasicMapReduceIT.java (project: geowave)
@Override
protected void mapNativeValue(
        final GeoWaveInputKey key,
        final Object value,
        final Mapper<GeoWaveInputKey, ObjectWritable, NullWritable, NullWritable>.Context context )
        throws IOException,
        InterruptedException {
    ResultCounterType resultType = ResultCounterType.ERROR;
    if (value instanceof SimpleFeature) {
        final SimpleFeature result = (SimpleFeature) value;
        final Geometry geometry = (Geometry) result.getDefaultGeometry();
        if (!geometry.isEmpty()) {
            resultType = expectedHashedCentroids.contains(TestUtils.hashCentroid(geometry)) ? ResultCounterType.EXPECTED
                    : ResultCounterType.UNEXPECTED;
        }
    }
    context.getCounter(
            resultType).increment(
            1);
}
BasicMapReduceIT.java (project: geowave)
@Override
protected void setup(
        final Mapper<GeoWaveInputKey, ObjectWritable, NullWritable, NullWritable>.Context context )
        throws IOException,
        InterruptedException {
    super.setup(context);
    final Configuration config = GeoWaveConfiguratorBase.getConfiguration(context);
    final String expectedResults = config.get(MapReduceTestUtils.EXPECTED_RESULTS_KEY);
    if (expectedResults != null) {
        expectedHashedCentroids = new HashSet<Long>();
        final byte[] expectedResultsBinary = ByteArrayUtils.byteArrayFromString(expectedResults);
        final ByteBuffer buf = ByteBuffer.wrap(expectedResultsBinary);
        final int count = buf.getInt();
        for (int i = 0; i < count; i++) {
            expectedHashedCentroids.add(buf.getLong());
        }
    }
}
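
The loop above decodes an int count followed by that many long centroid hashes. The encoding side would look like this sketch (hypothetical; the ByteArrayUtils and MapReduceTestUtils names are assumed from the decode side, not taken from the geowave source):

import java.nio.ByteBuffer;

public class CentroidEncodeSketch {
  static byte[] encode(long[] hashedCentroids) {
    // Layout mirrors setup(): an int count, then `count` long hashes.
    ByteBuffer buf = ByteBuffer.allocate(4 + 8 * hashedCentroids.length);
    buf.putInt(hashedCentroids.length);
    for (long hash : hashedCentroids) {
      buf.putLong(hash);
    }
    // The test would then store ByteArrayUtils.byteArrayToString(buf.array())
    // under MapReduceTestUtils.EXPECTED_RESULTS_KEY.
    return buf.array();
  }
}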
GeoWaveReducer.java (project: geowave)
protected void reduceWritableValues(
        final GeoWaveInputKey key,
        final Iterable<ObjectWritable> values,
        final Reducer<GeoWaveInputKey, ObjectWritable, GeoWaveInputKey, ObjectWritable>.Context context )
        throws IOException,
        InterruptedException {
    final HadoopWritableSerializer<?, Writable> serializer = serializationTool
            .getHadoopWritableSerializerForAdapter(key.getAdapterId());
    final Iterable<Object> transformedValues = Iterables.transform(
            values,
            new Function<ObjectWritable, Object>() {
                @Override
                public Object apply(
                        final ObjectWritable writable ) {
                    final Object innerObj = writable.get();
                    return innerObj instanceof Writable ? serializer.fromWritable((Writable) innerObj) : innerObj;
                }
            });
    reduceNativeValues(
            key,
            transformedValues,
            new NativeReduceContext(
                    context,
                    serializationTool));
}
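
Iterables.transform returns a lazy view, so each ObjectWritable is unwrapped only as the reducer actually iterates; nothing is materialized up front. A self-contained illustration of that laziness with plain values (hypothetical demo using the same Guava API):

import com.google.common.base.Function;
import com.google.common.collect.Iterables;
import java.util.Arrays;
import java.util.List;

public class LazyTransformDemo {
  public static void main(String[] args) {
    List<String> raw = Arrays.asList("1", "2", "3");

    // No parsing happens here; apply() runs per element, per iteration,
    // just like the unwrap in reduceWritableValues above.
    Iterable<Integer> parsed = Iterables.transform(
        raw,
        new Function<String, Integer>() {
          @Override
          public Integer apply(String s) {
            return Integer.parseInt(s);
          }
        });

    for (int n : parsed) {
      System.out.println(n);
    }
  }
}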
GeoWaveDedupeCombiner.java (project: geowave)
@Override
protected void reduce(
        final GeoWaveInputKey key,
        final Iterable<ObjectWritable> values,
        final Reducer<GeoWaveInputKey, ObjectWritable, GeoWaveInputKey, ObjectWritable>.Context context )
        throws IOException,
        InterruptedException {
    final Iterator<ObjectWritable> it = values.iterator();
    while (it.hasNext()) {
        final ObjectWritable next = it.next();
        if (next != null) {
            context.write(
                    key,
                    next);
            return;
        }
    }
}
StoreCopyReducer.java (project: geowave)
@Override
protected void reduceNativeValues(
        GeoWaveInputKey key,
        Iterable<Object> values,
        Reducer<GeoWaveInputKey, ObjectWritable, GeoWaveOutputKey, Object>.Context context )
        throws IOException,
        InterruptedException {
    final Iterator<Object> objects = values.iterator();
    while (objects.hasNext()) {
        final AdapterToIndexMapping mapping = store.getIndicesForAdapter(key.getAdapterId());
        context.write(
                new GeoWaveOutputKey<>(
                        mapping.getAdapterId(),
                        Arrays.asList(mapping.getIndexIds())),
                objects.next());
    }
}
GeoWaveWritableInputReducer.java (project: geowave)
protected void reduceWritableValues(
        final GeoWaveInputKey key,
        final Iterable<ObjectWritable> values,
        final Reducer<GeoWaveInputKey, ObjectWritable, KEYOUT, VALUEOUT>.Context context )
        throws IOException,
        InterruptedException {
    final HadoopWritableSerializer<?, Writable> serializer = serializationTool
            .getHadoopWritableSerializerForAdapter(key.getAdapterId());
    final Iterable<Object> transformedValues = Iterables.transform(
            values,
            new Function<ObjectWritable, Object>() {
                @Override
                public Object apply(
                        final ObjectWritable writable ) {
                    final Object innerObj = writable.get();
                    return (innerObj instanceof Writable) ? serializer.fromWritable((Writable) innerObj) : innerObj;
                }
            });

    reduceNativeValues(
            key,
            transformedValues,
            context);

}
RPC.java (project: hortonworks-extension)
/**
 * Construct & cache an IPC client with the user-provided SocketFactory 
 * if no cached client exists.
 * 
 * @param conf Configuration
 * @return an IPC client
 */
private synchronized Client getClient(Configuration conf,
    SocketFactory factory) {
  // Construct & cache client.  The configuration is only used for timeout,
  // and Clients have connection pools.  So we can either (a) lose some
  // connection pooling and leak sockets, or (b) use the same timeout for all
  // configurations.  Since the IPC is usually intended globally, not
  // per-job, we choose (b).
  Client client = clients.get(factory);
  if (client == null) {
    client = new Client(ObjectWritable.class, conf, factory);
    clients.put(factory, client);
  } else {
    client.incCount();
  }
  return client;
}
RPC.java (project: hortonworks-extension)
public Object invoke(Object proxy, Method method, Object[] args)
  throws Throwable {
  final boolean logDebug = LOG.isDebugEnabled();
  long startTime = 0;
  if (logDebug) {
    startTime = System.currentTimeMillis();
  }

  ObjectWritable value = (ObjectWritable)
    client.call(new Invocation(method, args), remoteId);
  if (logDebug) {
    long callTime = System.currentTimeMillis() - startTime;
    LOG.debug("Call: " + method.getName() + " " + callTime);
  }
  return value.get();
}
IndexSpecification.java (project: hbase-secondary-index)
/** {@inheritDoc} */
public void readFields(DataInput in) throws IOException {
  indexId = in.readUTF();
  int numIndexedCols = in.readInt();
  indexedColumns = new byte[numIndexedCols][];
  for (int i = 0; i < numIndexedCols; i++) {
    indexedColumns[i] = Bytes.readByteArray(in);
  }
  int numAdditionalCols = in.readInt();
  additionalColumns = new byte[numAdditionalCols][];
  for (int i = 0; i < numAdditionalCols; i++) {
    additionalColumns[i] = Bytes.readByteArray(in);
  }
  makeAllColumns();
  keyGenerator = (IndexKeyGenerator) ObjectWritable.readObject(in, CONF);

  // FIXME: read (and discard) the deprecated comparator still present in existing data
  ObjectWritable.readObject(in, CONF);
}
RPC.java (project: spark_deep)
public void readFields(DataInput in) throws IOException {
    methodName = UTF8.readString(in);
    parameters = new Object[in.readInt()];
    parameterClasses = new Class[parameters.length];
    ObjectWritable objectWritable = new ObjectWritable();
    for (int i = 0; i < parameters.length; i++) {
        parameters[i] = ObjectWritable.readObject(in, objectWritable, conf);
        parameterClasses[i] = objectWritable.getDeclaredClass();
    }
}
RPC.java (project: spark_deep)
public void write(DataOutput out) throws IOException {
    UTF8.writeString(out, methodName);
    out.writeInt(parameterClasses.length);
    for (int i = 0; i < parameterClasses.length; i++) {
        ObjectWritable.writeObject(out, parameters[i], parameterClasses[i], conf);
    }
}
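
Together, write() and readFields() give Invocation a self-describing wire format: a method name, a parameter count, then one type-tagged value per parameter. A standalone round trip of the same pattern (hypothetical demo, not code from spark_deep):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.ObjectWritable;

public class InvocationParamsDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Object[] params = { "path", 42, true };
    Class<?>[] declared = { String.class, int.class, boolean.class };

    // Write side: mirrors Invocation.write().
    DataOutputBuffer out = new DataOutputBuffer();
    out.writeInt(declared.length);
    for (int i = 0; i < declared.length; i++) {
      ObjectWritable.writeObject(out, params[i], declared[i], conf);
    }

    // Read side: mirrors Invocation.readFields(), recovering the
    // declared class of each parameter from the stream itself.
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    ObjectWritable holder = new ObjectWritable();
    int n = in.readInt();
    for (int i = 0; i < n; i++) {
      Object value = ObjectWritable.readObject(in, holder, conf);
      System.out.println(holder.getDeclaredClass() + " = " + value);
    }
  }
}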