Example source code for the Java class org.apache.hadoop.io.Text
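
A quick orientation before the project snippets: org.apache.hadoop.io.Text is Hadoop's mutable, UTF-8 backed Writable string, and it appears below as a MapReduce key/value type, a token service name, and a serialization helper. The following minimal sketch of the core API (construction, mutation, and a write/readFields round trip) is illustrative only; the demo class name is made up, but the Text and java.io calls are standard.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.Text;

public class TextQuickStart {
  public static void main(String[] args) throws IOException {
    // Text is mutable; set() reuses the internal byte buffer where possible.
    Text t = new Text("hello");
    t.set("hello hadoop");
    System.out.println(t.getLength() + " bytes: " + t);

    // Round-trip through the Writable interface.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    t.write(new DataOutputStream(bytes));

    Text copy = new Text();
    copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
    System.out.println("round-tripped: " + copy);
  }
}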

TestSpeculativeExecution.java source code (project: hadoop)
public void map(Object key, Text value, Context context)
        throws IOException, InterruptedException {
  // Make one mapper slower for speculative execution
  TaskAttemptID taid = context.getTaskAttemptID();
  long sleepTime = 100;
  Configuration conf = context.getConfiguration();
  boolean test_speculate_map =
          conf.getBoolean(MRJobConfig.MAP_SPECULATIVE, false);

  // IF TESTING MAPPER SPECULATIVE EXECUTION:
  //   Make the "*_m_000000_0" attempt take much longer than the others.
  //   When speculative execution is enabled, this should cause the attempt
  //   to be killed and restarted. At that point, the attempt ID will be
  //   "*_m_000000_1", so sleepTime will still remain 100ms.
  if ( (taid.getTaskType() == TaskType.MAP) && test_speculate_map
        && (taid.getTaskID().getId() == 0) && (taid.getId() == 0)) {
    sleepTime = 10000;
  }
  try{
    Thread.sleep(sleepTime);
  } catch(InterruptedException ie) {
    // Ignore
  }
  context.write(value, new IntWritable(1));
}
TestUserGroupInformation.java source code (project: hadoop-oss)
@SuppressWarnings("unchecked") // from Mockito mocks
@Test (timeout = 30000)
public <T extends TokenIdentifier> void testAddCreds() throws Exception {
  UserGroupInformation ugi = 
      UserGroupInformation.createRemoteUser("someone"); 

  Text service = new Text("service");
  Token<T> t1 = mock(Token.class);
  when(t1.getService()).thenReturn(service);
  Token<T> t2 = mock(Token.class);
  when(t2.getService()).thenReturn(new Text("service2"));
  byte[] secret = new byte[]{};
  Text secretKey = new Text("sshhh");

  // fill credentials
  Credentials creds = new Credentials();
  creds.addToken(t1.getService(), t1);
  creds.addToken(t2.getService(), t2);
  creds.addSecretKey(secretKey, secret);

  // add creds to ugi, and check ugi
  ugi.addCredentials(creds);
  checkTokens(ugi, t1, t2);
  assertSame(secret, ugi.getCredentials().getSecretKey(secretKey));
}
TF.java source code (project: Wikipedia-Index)
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
    String doc = value.toString();

    String text = slice(doc, "<text", "</text>", true);
    if (text.length() < 1) return;

    char txt[] = text.toLowerCase().toCharArray();
    for (int i = 0; i < txt.length; ++i) {
        if (!((txt[i] >= 'a' && txt[i] <= 'z') || (txt[i] >= 'A' && txt[i] <= 'Z')))
            txt[i] = ' ';
    }

    String id = slice(doc, "<id>", "</id>", false);
    if (id.length() < 1) return;
    StringTokenizer itr = new StringTokenizer(String.valueOf(txt));
    int sum = itr.countTokens();
    while (itr.hasMoreTokens()) {
        String s = itr.nextToken();
        word.set(id + '-' + s);
        IntWritable tmp[] = {new IntWritable(sum), new IntWritable(1)};
        IntArrayWritable temp = new IntArrayWritable(tmp);
        context.write(word, temp);
    }
}
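
IntArrayWritable used above is not a stock Hadoop class; the mapper implies a thin wrapper around ArrayWritable. A typical definition it could correspond to is sketched below (an assumption for illustration; the Wikipedia-Index project's actual class may differ):

public static class IntArrayWritable extends ArrayWritable {
  public IntArrayWritable() {
    // No-arg constructor so the framework can instantiate it reflectively.
    super(IntWritable.class);
  }

  public IntArrayWritable(IntWritable[] values) {
    super(IntWritable.class, values);
  }
}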
OraOopDBInputSplit.java source code (project: aliyun-maxcompute-data-collectors)
@Override
/** {@inheritDoc} */
public void write(DataOutput output) throws IOException {

  output.writeInt(splitId);

  if (this.oracleDataChunks == null) {
    output.writeInt(0);
  } else {
    output.writeInt(this.oracleDataChunks.size());
    for (OraOopOracleDataChunk dataChunk : this.oracleDataChunks) {
      Text.writeString(output, dataChunk.getClass().getName());
      dataChunk.write(output);
    }
  }
}
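
The write method above records each chunk's concrete class name with Text.writeString so the split can be rebuilt polymorphically on the read side. A rough sketch of the symmetric readFields, assuming the chunk classes are default-constructible and expose a matching readFields (both assumptions, not taken from the original source):

public void readFields(DataInput input) throws IOException {
  this.splitId = input.readInt();
  int chunkCount = input.readInt();
  this.oracleDataChunks = new ArrayList<OraOopOracleDataChunk>(chunkCount);
  for (int i = 0; i < chunkCount; i++) {
    // Read the class name written by Text.writeString, then let the chunk
    // deserialize its own fields.
    String className = Text.readString(input);
    try {
      OraOopOracleDataChunk chunk =
          (OraOopOracleDataChunk) Class.forName(className).newInstance();
      chunk.readFields(input);
      this.oracleDataChunks.add(chunk);
    } catch (Exception e) {
      throw new IOException("Cannot instantiate data chunk class " + className, e);
    }
  }
}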
ValueAggregatorCombiner.java source code (project: hadoop)
/** Combines values for a given key.  
 * @param key the key is expected to be a Text object, whose prefix indicates
 * the type of aggregation to apply to the values.
 * @param values the values to combine
 * @param context to collect combined values
 */
public void reduce(Text key, Iterable<Text> values, Context context) 
    throws IOException, InterruptedException {
  String keyStr = key.toString();
  int pos = keyStr.indexOf(ValueAggregatorDescriptor.TYPE_SEPARATOR);
  String type = keyStr.substring(0, pos);
  long uniqCount = context.getConfiguration().
    getLong(UniqValueCount.MAX_NUM_UNIQUE_VALUES, Long.MAX_VALUE);
  ValueAggregator aggregator = ValueAggregatorBaseDescriptor
    .generateValueAggregator(type, uniqCount);
  for (Text val : values) {
    aggregator.addNextValue(val);
  }
  Iterator<?> outputs = aggregator.getCombinerOutput().iterator();

  while (outputs.hasNext()) {
    Object v = outputs.next();
    if (v instanceof Text) {
      context.write(key, (Text)v);
    } else {
      context.write(key, new Text(v.toString()));
    }
  }
}
FilteringIT.java source code (project: PACE)
private void fetchColumnTest(String configuration) throws Exception {
  List<String> rows = Arrays.asList("row1", "row2");
  List<String> colFs = Arrays.asList("colF1", "colF2");
  List<String> colQs = Arrays.asList("colQ1", "colQ2");
  List<String> colVs = Collections.singletonList("");
  List<String> values = Collections.singletonList("value");

  clearTable();
  EncryptedBatchWriter writer = getEncryptedWriter(CHARLIE, configuration);
  writeData(writer, rows, colFs, colQs, colVs, values);
  writer.close();

  EncryptedBatchScanner scanner = getEncryptedScanner(CHARLIE, configuration);
  scanner.setRanges(Collections.singletonList(new Range()));
  scanner.fetchColumn(new IteratorSetting.Column(new Text("colF1"), new Text("colQ1")));
  assertThat("contains the filtered data", scanner, hasData(rows, Collections.singletonList("colF1"), Collections.singletonList("colQ1"), colVs, values));
}
TestFileSystemTokens.java source code (project: hadoop-oss)
@Test
public void testFsWithDuplicateChildrenTokenExists() throws Exception {
  Credentials credentials = new Credentials();
  Text service = new Text("singleTokenFs1");
  Token<?> token = mock(Token.class);
  credentials.addToken(service, token);

  MockFileSystem fs = createFileSystemForServiceName(service);
  MockFileSystem multiFs =
      createFileSystemForServiceName(null, fs, new FilterFileSystem(fs));

  multiFs.addDelegationTokens(renewer, credentials);
  verifyTokenFetch(multiFs, false);
  verifyTokenFetch(fs, false);

  assertEquals(1, credentials.numberOfTokens());
  assertSame(token, credentials.getToken(service));
}
RMDelegationTokenSelector.java source code (project: hadoop)
@SuppressWarnings("unchecked")
public Token<RMDelegationTokenIdentifier> selectToken(Text service,
    Collection<Token<? extends TokenIdentifier>> tokens) {
  if (service == null) {
    return null;
  }
  LOG.debug("Looking for a token with service " + service.toString());
  for (Token<? extends TokenIdentifier> token : tokens) {
    LOG.debug("Token kind is " + token.getKind().toString()
        + " and the token's service name is " + token.getService());
    if (RMDelegationTokenIdentifier.KIND_NAME.equals(token.getKind())
        && checkService(service, token)) {
      return (Token<RMDelegationTokenIdentifier>) token;
    }
  }
  return null;
}
LogfileRecordReader.java source code (project: hadoop-logfile-inputformat)
/**
 * Fetches the next line from the file.
 * 
 * The line is stored in {@link #line} for further processing. The fields {@link #pos} and {@link #lastReadPos} are
 * updated accordingly.
 * 
 * If the end of the file has been reached, {@link #line} is set to {@code null} and the flag {@link #eof} is
 * set to {@code true}.
 * 
 * @throws IOException
 */
private void fetchLine() throws IOException {

    if (isSplittable && posInFile() >= end) {
        eos = true;
    }
    Text text = new Text();
    int length = lineReader.readLine(text);
    if (length == 0) {
        eof = true;
        line = null;
        return;
    }
    lastReadPos = pos;
    pos += length;
    line = text.toString();
}
HistoryClientService.java source code (project: hadoop)
@Override
public RenewDelegationTokenResponse renewDelegationToken(
    RenewDelegationTokenRequest request) throws IOException {
    if (!isAllowedDelegationTokenOp()) {
      throw new IOException(
          "Delegation Token can be renewed only with kerberos authentication");
    }

    org.apache.hadoop.yarn.api.records.Token protoToken = request.getDelegationToken();
    Token<MRDelegationTokenIdentifier> token =
        new Token<MRDelegationTokenIdentifier>(
            protoToken.getIdentifier().array(), protoToken.getPassword()
                .array(), new Text(protoToken.getKind()), new Text(
                protoToken.getService()));

    String user = UserGroupInformation.getCurrentUser().getShortUserName();
    long nextExpTime = jhsDTSecretManager.renewToken(token, user);
    RenewDelegationTokenResponse renewResponse = Records
        .newRecord(RenewDelegationTokenResponse.class);
    renewResponse.setNextExpirationTime(nextExpTime);
    return renewResponse;
}
DelegationTokenAuthenticationHandler.java source code (project: hadoop)
@VisibleForTesting
@SuppressWarnings("unchecked")
public void initTokenManager(Properties config) {
  Configuration conf = new Configuration(false);
  for (Map.Entry entry : config.entrySet()) {
    conf.set((String) entry.getKey(), (String) entry.getValue());
  }
  String tokenKind = conf.get(TOKEN_KIND);
  if (tokenKind == null) {
    throw new IllegalArgumentException(
        "The configuration does not define the token kind");
  }
  tokenKind = tokenKind.trim();
  tokenManager = new DelegationTokenManager(conf, new Text(tokenKind));
  tokenManager.init();
}
Employee.java source code (project: DocIT)
/**
 * Read (i.e., deserialize) an employee
 */
@Override
public void readFields(DataInput in) throws IOException {
    name = new Text();
    name.readFields(in);
    address = new Text();
    address.readFields(in);
    company = new Text();
    company.readFields(in);
    salary = new DoubleWritable();
    salary.readFields(in);
    department = new Text();
    department.readFields(in);
    isManager = new BooleanWritable();
    isManager.readFields(in);
}
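
Because readFields restores the six fields in a fixed order, the matching write must emit them in exactly the same order. A minimal sketch inferred from the field list above (not copied from the original Employee class):

@Override
public void write(DataOutput out) throws IOException {
    // Serialize fields in the order readFields consumes them.
    name.write(out);
    address.write(out);
    company.write(out);
    salary.write(out);
    department.write(out);
    isManager.write(out);
}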
TestDelegationToken.java source code (project: hadoop-oss)
@Test 
public void testDelegationTokenNullRenewer() throws Exception {
  TestDelegationTokenSecretManager dtSecretManager = 
    new TestDelegationTokenSecretManager(24*60*60*1000,
      10*1000,1*1000,3600000);
  dtSecretManager.startThreads();
  TestDelegationTokenIdentifier dtId = new TestDelegationTokenIdentifier(new Text(
      "theuser"), null, null);
  Token<TestDelegationTokenIdentifier> token = new Token<TestDelegationTokenIdentifier>(
      dtId, dtSecretManager);
  Assert.assertNotNull(token);
  try {
    dtSecretManager.renewToken(token, "");
    Assert.fail("Renewal must not succeed");
  } catch (IOException e) {
    //PASS
  }
}
TestLocalRunner.java source code (project: hadoop)
public void map(LongWritable key, Text val, Context c)
    throws IOException, InterruptedException {

  // Create a whole bunch of objects.
  List<Integer> lst = new ArrayList<Integer>();
  for (int i = 0; i < 20000; i++) {
    lst.add(new Integer(i));
  }

  // Actually use this list, to ensure that it isn't just optimized away.
  int sum = 0;
  for (int x : lst) {
    sum += x;
  }

  // throw away the list and run a GC.
  lst = null;
  System.gc();

  c.write(new LongWritable(sum), val);
}
UnitMultiplication.java source code (project: mapreduce-samples)
public static void main(String[] args) throws Exception {

        Configuration conf = new Configuration();
        conf.setFloat("beta", Float.parseFloat(args[3]));
        Job job = Job.getInstance(conf);
        job.setJarByClass(UnitMultiplication.class);

        ChainMapper.addMapper(job, TransitionMapper.class, Object.class, Text.class, Text.class, Text.class, conf);
        ChainMapper.addMapper(job, PRMapper.class, Object.class, Text.class, Text.class, Text.class, conf);

        job.setReducerClass(MultiplicationReducer.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        MultipleInputs.addInputPath(job, new Path(args[0]), TextInputFormat.class, TransitionMapper.class);
        MultipleInputs.addInputPath(job, new Path(args[1]), TextInputFormat.class, PRMapper.class);

        FileOutputFormat.setOutputPath(job, new Path(args[2]));
        job.waitForCompletion(true);
}
ValueAggregatorBaseDescriptor.java source code (project: hadoop)
/**
 * Generate 1 or 2 aggregation-id/value pairs for the given key/value pair.
 * The first id will be of type LONG_VALUE_SUM, with "record_count" as
 * its aggregation id. If the input is a file split,
 * the second id of the same type will be generated too, with the file name 
 * as its aggregation id. This achieves the behavior of counting the total 
 * number of records in the input data, and the number of records 
 * in each input file.
 * 
 * @param key
 *          input key
 * @param val
 *          input value
 * @return a list of aggregation id/value pairs. An aggregation id encodes an
 *         aggregation type which is used to guide how the value is aggregated
 *         in the reduce/combiner phase of an Aggregate based job.
 */
public ArrayList<Entry<Text, Text>> generateKeyValPairs(Object key,
                                                        Object val) {
  ArrayList<Entry<Text, Text>> retv = new ArrayList<Entry<Text, Text>>();
  String countType = LONG_VALUE_SUM;
  String id = "record_count";
  Entry<Text, Text> e = generateEntry(countType, id, ONE);
  if (e != null) {
    retv.add(e);
  }
  if (this.inputFile != null) {
    e = generateEntry(countType, this.inputFile, ONE);
    if (e != null) {
      retv.add(e);
    }
  }
  return retv;
}
TestDelegationToken.java source code (project: hadoop-oss)
@Test
public void testGetUserWithOwnerAndReal() {
  Text owner = new Text("owner");
  Text realUser = new Text("realUser");
  TestDelegationTokenIdentifier ident =
      new TestDelegationTokenIdentifier(owner, null, realUser);
  UserGroupInformation ugi = ident.getUser();
  assertNotNull(ugi.getRealUser());
  assertNull(ugi.getRealUser().getRealUser());
  assertEquals("owner", ugi.getUserName());
  assertEquals("realUser", ugi.getRealUser().getUserName());
  assertEquals(AuthenticationMethod.PROXY,
               ugi.getAuthenticationMethod());
  assertEquals(AuthenticationMethod.TOKEN,
               ugi.getRealUser().getAuthenticationMethod());
}
RecordParser.java source code (project: aliyun-maxcompute-data-collectors)
/**
 * Return a list of strings representing the fields of the input line.
 * This list is backed by an internal buffer which is cleared by the
 * next call to parseRecord().
 */
public List<String> parseRecord(Text input)
    throws com.cloudera.sqoop.lib.RecordParser.ParseError {
  if (null == input) {
    throw new com.cloudera.sqoop.lib.RecordParser.ParseError(
        "null input string");
  }

  // TODO(aaron): The parser should be able to handle UTF-8 strings
  // as well, to avoid this transcode operation.
  return parseRecord(input.toString());
}
EncryptedBatchScannerTest.java source code (project: PACE)
@Test
public void setRangesSemanticEncryptionTest() throws Exception {
  when(mockConnector.createBatchScanner(TEST_TABLE, authorizations, 1)).thenReturn(mockScanner);

  EntryEncryptor encryptor = new EntryEncryptor(getConfig("encrypt-key.ini"), KEYS);
  List<Map.Entry<Key,Value>> entries = new ArrayList<>();
  Map.Entry<Key,Value> entry = new SimpleImmutableEntry<>(new Key(new byte[] {1}, new byte[] {2}, new byte[] {3},
      "secret".getBytes(Utils.VISIBILITY_CHARSET), 0, false, false), new Value(new byte[] {4}));
  Map.Entry<Key,Value> entry2 = new SimpleImmutableEntry<>(new Key(new byte[] {5}, new byte[] {6}, new byte[] {7},
      "secret".getBytes(Utils.VISIBILITY_CHARSET), 0, false, false), new Value(new byte[] {8}));
  entries.add(encryptor.encrypt(entry));
  entries.add(encryptor.encrypt(entry2));
  when(mockScanner.iterator()).thenReturn(entries.iterator()).thenReturn(entries.iterator()).thenReturn(entries.iterator());

  BatchScanner scanner = new EncryptedBatchScanner(mockConnector, TEST_TABLE, authorizations, 1, getConfig("encrypt-key.ini"), KEYS);
  assertThat("has correct number of elements", scanner, iterableWithSize(2));

  scanner.setRanges(Collections.singletonList(new Range(new Text(new byte[] {1}))));
  assertThat("has correct number of elements", scanner, iterableWithSize(1));

  scanner.setRanges(Collections.singletonList(new Range(new Text(new byte[] {1}), new Text(new byte[] {5}))));
  assertThat("has correct number of elements", scanner, iterableWithSize(2));

  // Should not have been handled server side.
  verify(mockScanner, times(2)).setRanges(captor.capture());

  for (Collection<Range> ranges : captor.getAllValues()) {
    assertThat("has the infinite range", ranges, hasSize(1));
    assertThat("has the infinite range", ranges.iterator().next(), equalTo(new Range()));
  }
}
RMDelegationTokenSelector.java source code (project: hadoop)
private boolean checkService(Text service,
    Token<? extends TokenIdentifier> token) {
  if (service == null || token.getService() == null) {
    return false;
  }
  return token.getService().toString().contains(service.toString());
}
Cut.java source code (project: DocIT)
protected void map(Text key, Employee value, Context context)
        throws IOException, InterruptedException {
    if (value.getCompany().toString().equals(name))
        value.setSalary(value.getSalary().get() / 2);
    // copy all Employees
    context.write(key, value);
}
AbstractDelegationTokenSelector.java source code (project: hadoop)
@SuppressWarnings("unchecked")
@Override
public Token<TokenIdent> selectToken(Text service,
    Collection<Token<? extends TokenIdentifier>> tokens) {
  if (service == null) {
    return null;
  }
  for (Token<? extends TokenIdentifier> token : tokens) {
    if (kindName.equals(token.getKind())
        && service.equals(token.getService())) {
      return (Token<TokenIdent>) token;
    }
  }
  return null;
}
TestLocalModeWithNewApis.java source code (project: hadoop)
@Test
public void testNewApis() throws Exception {
  Random r = new Random(System.currentTimeMillis());
  Path tmpBaseDir = new Path("/tmp/wc-" + r.nextInt());
  final Path inDir = new Path(tmpBaseDir, "input");
  final Path outDir = new Path(tmpBaseDir, "output");
  String input = "The quick brown fox\nhas many silly\nred fox sox\n";
  FileSystem inFs = inDir.getFileSystem(conf);
  FileSystem outFs = outDir.getFileSystem(conf);
  outFs.delete(outDir, true);
  if (!inFs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  {
    DataOutputStream file = inFs.create(new Path(inDir, "part-0"));
    file.writeBytes(input);
    file.close();
  }

  Job job = Job.getInstance(conf, "word count");
  job.setJarByClass(TestLocalModeWithNewApis.class);
  job.setMapperClass(TokenizerMapper.class);
  job.setCombinerClass(IntSumReducer.class);
  job.setReducerClass(IntSumReducer.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(IntWritable.class);
  FileInputFormat.addInputPath(job, inDir);
  FileOutputFormat.setOutputPath(job, outDir);
  assertEquals(job.waitForCompletion(true), true);

  String output = readOutput(outDir, conf);
  assertEquals("The\t1\nbrown\t1\nfox\t2\nhas\t1\nmany\t1\n" +
               "quick\t1\nred\t1\nsilly\t1\nsox\t1\n", output);

  outFs.delete(tmpBaseDir, true);
}
TestCombineFileInputFormat.java source code (project: hadoop)
@SuppressWarnings("unchecked")
@Override
public RecordReader<Text,Text> createRecordReader(InputSplit split, 
    TaskAttemptContext context) throws IOException {
  return new CombineFileRecordReader((CombineFileSplit) split, context,
      (Class) DummyRecordReader.class);
}
UDFReplace.java source code (project: hive-udf-backports)
public Text evaluate(Text s, Text search, Text replacement) {
  if (s == null || search == null || replacement == null) {
    return null;
  }
  String r = s.toString().replace(search.toString(), replacement.toString());
  result.set(r);
  return result;
}
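
A minimal sketch of driving this UDF directly with Text arguments (the direct instantiation outside Hive is illustrative only). Note that evaluate reuses a single result Text instance across calls:

// Hypothetical direct call, assuming UDFReplace is default-constructible:
UDFReplace udf = new UDFReplace();
Text replaced = udf.evaluate(new Text("foo-bar-baz"), new Text("-"), new Text(" "));
// replaced now holds "foo bar baz". Copy it (new Text(replaced)) if the value
// must survive the next evaluate() call, since the same instance is reused.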
TestUserGroupInformation.java source code (project: hadoop-oss)
@SuppressWarnings("unchecked") // from Mockito mocks
@Test (timeout = 30000)
public <T extends TokenIdentifier> void testAddToken() throws Exception {
  UserGroupInformation ugi = 
      UserGroupInformation.createRemoteUser("someone"); 

  Token<T> t1 = mock(Token.class);
  Token<T> t2 = mock(Token.class);
  Token<T> t3 = mock(Token.class);

  // add token to ugi
  ugi.addToken(t1);
  checkTokens(ugi, t1);

  // replace token t1 with t2 - with same key (null)
  ugi.addToken(t2);
  checkTokens(ugi, t2);

  // change t1 service and add token
  when(t1.getService()).thenReturn(new Text("t1"));
  ugi.addToken(t1);
  checkTokens(ugi, t1, t2);

  // overwrite t1 token with t3 - same key (!null)
  when(t3.getService()).thenReturn(new Text("t1"));
  ugi.addToken(t3);
  checkTokens(ugi, t2, t3);

  // just try to re-add with new name
  when(t1.getService()).thenReturn(new Text("t1.1"));
  ugi.addToken(t1);
  checkTokens(ugi, t1, t2, t3);    

  // just try to re-add with new name again
  ugi.addToken(t1);
  checkTokens(ugi, t1, t2, t3);    
}
JobClient.java source code (project: hadoop)
/**
 * Get a delegation token for the user from the JobTracker.
 * @param renewer the user who can renew the token
 * @return the new token
 * @throws IOException
 */
public Token<DelegationTokenIdentifier> 
  getDelegationToken(final Text renewer) throws IOException, InterruptedException {
  return clientUgi.doAs(new 
      PrivilegedExceptionAction<Token<DelegationTokenIdentifier>>() {
    public Token<DelegationTokenIdentifier> run() throws IOException, 
    InterruptedException {
      return cluster.getDelegationToken(renewer);
    }
  });
}
TupleWritable.java source code (project: hadoop)
/** Writes each Writable to <code>out</code>.
 * TupleWritable format:
 * {@code
 *  <count><type1><type2>...<typen><obj1><obj2>...<objn>
 * }
 */
public void write(DataOutput out) throws IOException {
  WritableUtils.writeVInt(out, values.length);
  writeBitSet(out, values.length, written);
  for (int i = 0; i < values.length; ++i) {
    Text.writeString(out, values[i].getClass().getName());
  }
  for (int i = 0; i < values.length; ++i) {
    if (has(i)) {
      values[i].write(out);
    }
  }
}
JobTokenSelector.java source code (project: hadoop)
@SuppressWarnings("unchecked")
@Override
public Token<JobTokenIdentifier> selectToken(Text service,
    Collection<Token<? extends TokenIdentifier>> tokens) {
  if (service == null) {
    return null;
  }
  for (Token<? extends TokenIdentifier> token : tokens) {
    if (JobTokenIdentifier.KIND_NAME.equals(token.getKind())
        && service.equals(token.getService())) {
      return (Token<JobTokenIdentifier>) token;
    }
  }
  return null;
}
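
A rough usage sketch: the selector scans a credential collection for a token whose kind is JobTokenIdentifier.KIND_NAME and whose service matches exactly. The service string below is illustrative only:

// Look up a job token among the current user's credentials.
Collection<Token<? extends TokenIdentifier>> tokens =
    UserGroupInformation.getCurrentUser().getTokens();
Token<JobTokenIdentifier> jobToken =
    new JobTokenSelector().selectToken(new Text("10.0.0.1:45454"), tokens);
if (jobToken == null) {
  // No job token was registered under that service name.
}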
TestProtocolRecords.java source code (project: hadoop)
@Test
public void testNodeHeartBeatResponse() throws IOException {
  NodeHeartbeatResponse record =
      Records.newRecord(NodeHeartbeatResponse.class);
  Map<ApplicationId, ByteBuffer> appCredentials =
      new HashMap<ApplicationId, ByteBuffer>();
  Credentials app1Cred = new Credentials();

  Token<DelegationTokenIdentifier> token1 =
      new Token<DelegationTokenIdentifier>();
  token1.setKind(new Text("kind1"));
  app1Cred.addToken(new Text("token1"), token1);
  Token<DelegationTokenIdentifier> token2 =
      new Token<DelegationTokenIdentifier>();
  token2.setKind(new Text("kind2"));
  app1Cred.addToken(new Text("token2"), token2);

  DataOutputBuffer dob = new DataOutputBuffer();
  app1Cred.writeTokenStorageToStream(dob);
  ByteBuffer byteBuffer1 = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
  appCredentials.put(ApplicationId.newInstance(1234, 1), byteBuffer1);
  record.setSystemCredentialsForApps(appCredentials);

  NodeHeartbeatResponse proto =
      new NodeHeartbeatResponsePBImpl(
        ((NodeHeartbeatResponsePBImpl) record).getProto());
  Assert.assertEquals(appCredentials, proto.getSystemCredentialsForApps());
}

