Example source code for the Java class org.apache.hadoop.io.DefaultStringifier
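
DefaultStringifier uses Hadoop's serialization framework to encode an object as a Base64 string so it can travel inside a Configuration, typically from job setup to the map/reduce tasks, and be restored later. The snippets below, collected from several projects, all follow its store/load pattern. As a baseline, here is a minimal self-contained round trip using only standard Hadoop classes (the key name "example.payload" is arbitrary):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DefaultStringifier;
import org.apache.hadoop.io.Text;

public class StringifierRoundTrip {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Serialize the object and store it under an arbitrary config key.
    DefaultStringifier.store(conf, new Text("hello"), "example.payload");
    // Later (e.g. in a task), restore it from the same key.
    Text restored = DefaultStringifier.load(conf, "example.payload", Text.class);
    System.out.println(restored); // prints "hello"
  }
}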

TestJdbcExportJob.java (project: aliyun-maxcompute-data-collectors)
@Test
public void testAvroWithOneColumnSpecified() throws Exception {
  SqoopOptions opts = new SqoopOptions();
  opts.setExportDir("myexportdir");
  String[] columns = { "Gender" };
  opts.setColumns(columns);
  JdbcExportJob jdbcExportJob = stubJdbcExportJob(opts, FileType.AVRO_DATA_FILE);
  Job job = new Job();
  jdbcExportJob.configureInputFormat(job, null, null, null);
  assertEquals(asSetOfText("Gender"),
      DefaultStringifier.load(job.getConfiguration(),
          AvroExportMapper.AVRO_COLUMN_TYPES_MAP, MapWritable.class).keySet());
}
TestJdbcExportJob.java (project: aliyun-maxcompute-data-collectors)
@Test
public void testAvroWithSomeColumnsSpecified() throws Exception {
  SqoopOptions opts = new SqoopOptions();
  opts.setExportDir("myexportdir");
  String[] columns = { "Age", "Name" };
  opts.setColumns(columns);
  JdbcExportJob jdbcExportJob = stubJdbcExportJob(opts, FileType.AVRO_DATA_FILE);
  Job job = new Job();
  jdbcExportJob.configureInputFormat(job, null, null, null);
  assertEquals(asSetOfText("Age", "Name"),
      DefaultStringifier.load(job.getConfiguration(),
          AvroExportMapper.AVRO_COLUMN_TYPES_MAP, MapWritable.class).keySet());
}
TestJdbcExportJob.java (project: aliyun-maxcompute-data-collectors)
@Test
public void testAvroWithMoreColumnsSpecified() throws Exception {
  SqoopOptions opts = new SqoopOptions();
  opts.setExportDir("myexportdir");
  String[] columns = { "Age", "Name", "Gender", "Address" };
  opts.setColumns(columns);
  JdbcExportJob jdbcExportJob = stubJdbcExportJob(opts, FileType.AVRO_DATA_FILE);
  Job job = new Job();
  jdbcExportJob.configureInputFormat(job, null, null, null);
  assertEquals(asSetOfText("Age", "Name", "Gender"),
      DefaultStringifier.load(job.getConfiguration(),
          AvroExportMapper.AVRO_COLUMN_TYPES_MAP, MapWritable.class).keySet());
}
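
All three tests exercise the load side; the store side runs inside JdbcExportJob.configureInputFormat, which, as the third test shows, keeps only the requested columns that actually appear in the Avro schema ("Address" is dropped). A hedged sketch of what that store looks like, with the schema-driven type lookup elided (columnTypes and the "STRING" value are illustrative):

// Sketch: build and persist the column-name -> Avro-type map for the mapper.
MapWritable columnTypes = new MapWritable();
for (String col : new String[] { "Age", "Name", "Gender" }) {
  columnTypes.put(new Text(col), new Text("STRING")); // real type from schema
}
DefaultStringifier.store(job.getConfiguration(), columnTypes,
    AvroExportMapper.AVRO_COLUMN_TYPES_MAP);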
Chain.java (project: aliyun-oss-hadoop-fs)
protected static void setMapperConf(boolean isMap, Configuration jobConf,
    Class<?> inputKeyClass, Class<?> inputValueClass,
    Class<?> outputKeyClass, Class<?> outputValueClass,
    Configuration mapperConf, int index, String prefix) {
  // if the Mapper does not have a configuration, create an empty one
  if (mapperConf == null) {
    // Use a Configuration without defaults to keep it lightweight; the
    // chain's conf may still carry all defaults, and this conf is overlaid
    // onto the chain configuration.
    mapperConf = new Configuration(false);
  }

  // store the input/output classes of the mapper in the mapper conf
  mapperConf.setClass(MAPPER_INPUT_KEY_CLASS, inputKeyClass, Object.class);
  mapperConf.setClass(MAPPER_INPUT_VALUE_CLASS, inputValueClass, Object.class);
  mapperConf.setClass(MAPPER_OUTPUT_KEY_CLASS, outputKeyClass, Object.class);
  mapperConf.setClass(MAPPER_OUTPUT_VALUE_CLASS, outputValueClass, Object.class);
  // serialize the mapper configuration in the chain configuration.
  Stringifier<Configuration> stringifier =
      new DefaultStringifier<Configuration>(jobConf, Configuration.class);
  try {
    jobConf.set(prefix + CHAIN_MAPPER_CONFIG + index,
        stringifier.toString(new Configuration(mapperConf)));
  } catch (IOException ioEx) {
    throw new RuntimeException(ioEx);
  }

  // increment the chain counter
  jobConf.setInt(prefix + CHAIN_MAPPER_SIZE, index + 1);
}
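
The per-mapper Configuration serialized above is recovered later with the inverse call. A minimal sketch of that read-back, assuming the same prefix and index used above (the real Chain class wraps this in a private helper):

// Sketch: recover the index-th mapper's Configuration from the chain conf.
Stringifier<Configuration> stringifier =
    new DefaultStringifier<Configuration>(jobConf, Configuration.class);
String serialized = jobConf.get(prefix + CHAIN_MAPPER_CONFIG + index);
Configuration mapperConf = stringifier.fromString(serialized);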
Chain.java (project: aliyun-oss-hadoop-fs)
protected static void setReducerConf(Configuration jobConf,
    Class<?> inputKeyClass, Class<?> inputValueClass,
    Class<?> outputKeyClass, Class<?> outputValueClass,
    Configuration reducerConf, String prefix) {
  // if the Reducer does not have a Configuration, create an empty one
  if (reducerConf == null) {
    // Use a Configuration without defaults to keep it lightweight; the
    // chain's conf may still carry all defaults, and this conf is overlaid
    // onto the chain's Configuration.
    reducerConf = new Configuration(false);
  }

  // store the input/output classes of the reducer in the reducer configuration
  reducerConf.setClass(REDUCER_INPUT_KEY_CLASS, inputKeyClass, Object.class);
  reducerConf.setClass(REDUCER_INPUT_VALUE_CLASS, inputValueClass, Object.class);
  reducerConf.setClass(REDUCER_OUTPUT_KEY_CLASS, outputKeyClass, Object.class);
  reducerConf.setClass(REDUCER_OUTPUT_VALUE_CLASS, outputValueClass, Object.class);

  // serialize the reducer configuration in the chain's configuration.
  Stringifier<Configuration> stringifier =
      new DefaultStringifier<Configuration>(jobConf, Configuration.class);
  try {
    jobConf.set(prefix + CHAIN_REDUCER_CONFIG,
        stringifier.toString(new Configuration(reducerConf)));
  } catch (IOException ioEx) {
    throw new RuntimeException(ioEx);
  }
}
TransformOutputFormat.java (project: marklogic-contentpump)
@Override
public void checkOutputSpecs(Configuration conf, ContentSource cs)
    throws IOException {
    super.checkOutputSpecs(conf, cs);

    // store mimetypes map into config system
    DefaultStringifier.store(conf, getMimetypesMap(),
        ConfigConstants.CONF_MIMETYPES);
}
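
The matching load would run on the task side. A hedged sketch, assuming the stored map is a MapWritable (the project's concrete value type may differ):

// Sketch: restore the mimetypes map stored above during checkOutputSpecs.
// MapWritable is an assumption; the actual value type may differ.
MapWritable mimetypes = DefaultStringifier.load(conf,
    ConfigConstants.CONF_MIMETYPES, MapWritable.class);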
RDFInputFormat.java (project: marklogic-contentpump)
protected LinkedMapWritable getRoleMap(TaskAttemptContext context) throws IOException {
    // Restores the object from the configuration.
    Configuration conf = context.getConfiguration();
    LinkedMapWritable fhmap = null;
    if (conf.get(ConfigConstants.CONF_ROLE_MAP) != null) {
        fhmap = DefaultStringifier.load(conf, ConfigConstants.CONF_ROLE_MAP,
            LinkedMapWritable.class);
    }
    return fhmap;
}
RDFInputFormat.java (project: marklogic-contentpump)
protected String getServerVersion(TaskAttemptContext context) throws IOException {
    // Restores the object from the configuration.
    Configuration conf = context.getConfiguration();
    Text version = DefaultStringifier.load(conf, ConfigConstants.CONF_ML_VERSION,
        Text.class);
    return version.toString();
}
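
Both RDFInputFormat helpers read values that must have been stored during job setup. A hedged sketch of that store side; roleMap and serverVersion are illustrative local variables, not names from the project:

// Sketch: persist the role map and server version at job-configuration time.
DefaultStringifier.store(conf, roleMap, ConfigConstants.CONF_ROLE_MAP);
DefaultStringifier.store(conf, new Text(serverVersion),
    ConfigConstants.CONF_ML_VERSION);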
NodeOutputFormat.java (project: marklogic-contentpump)
@Override
public void checkOutputSpecs(Configuration conf, ContentSource cs)
        throws IOException {
    // warn against unsupported configuration
    if (conf.get(BATCH_SIZE) != null) {
        LOG.warn("Config entry for " +
                "\"mapreduce.marklogic.output.batchsize\" is not " +
                "supported for " + this.getClass().getName() +
                " and will be ignored.");
    }
    // store hosts into config system
    DefaultStringifier.store(conf, queryHosts(cs), OUTPUT_FOREST_HOST);
}
MarkLogicOutputFormat.java (project: marklogic-contentpump)
protected TextArrayWritable getHosts(Configuration conf) throws IOException {
    String forestHost = conf.get(OUTPUT_FOREST_HOST);
    if (forestHost != null) {
        // Restores the object from the configuration.
        TextArrayWritable hosts = DefaultStringifier.load(conf,
            OUTPUT_FOREST_HOST, TextArrayWritable.class);
        return hosts;
    } else {
        throw new IOException("Forest host map not found");
    }
}
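
getHosts is the load half of the store in NodeOutputFormat.checkOutputSpecs above, with TextArrayWritable as the container. Worth noting: DefaultStringifier also provides storeArray/loadArray, which handle arrays without a custom ArrayWritable subclass. A minimal sketch with standard Hadoop types (the key name "example.hosts" is arbitrary):

// Sketch: store and load an array directly, no ArrayWritable wrapper needed.
Text[] hosts = { new Text("host1"), new Text("host2") };
DefaultStringifier.storeArray(conf, hosts, "example.hosts");
Text[] restored = DefaultStringifier.loadArray(conf, "example.hosts", Text.class);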

