Example source code for the Java class org.apache.hadoop.io.SequenceFile.CompressionType

Source file: MapFile.java (project: hadoop-oss)

/** Create the named map using the named key comparator.
 * @deprecated Use Writer(Configuration, Path, Option...) instead.
 */
@Deprecated
public Writer(Configuration conf, FileSystem fs, String dirName,
              WritableComparator comparator, Class valClass,
              SequenceFile.CompressionType compress, CompressionCodec codec,
              Progressable progress) throws IOException {
  this(conf, new Path(dirName), comparator(comparator),
       valueClass(valClass), compression(compress, codec),
       progressable(progress));
}
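The non-deprecated form the javadoc points to passes the same settings as Options; a minimal sketch, mirroring the delegation in the constructor body above (variable names reused from its parameters):

MapFile.Writer writer =
    new MapFile.Writer(conf, new Path(dirName),
        MapFile.Writer.comparator(comparator),
        MapFile.Writer.valueClass(valClass),
        MapFile.Writer.compression(compress, codec),
        MapFile.Writer.progressable(progress));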
Source file: ArrayFile.java (project: hadoop-oss)
/** Create the named file for values of the named class. */
public Writer(Configuration conf, FileSystem fs,
              String file, Class<? extends Writable> valClass,
              CompressionType compress, Progressable progress)
    throws IOException {
  super(conf, new Path(file),
        keyClass(LongWritable.class),
        valueClass(valClass),
        compression(compress),
        progressable(progress));
}
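A hedged round trip using this ArrayFile.Writer (file name and values are illustrative; a null Progressable is assumed to be accepted, as elsewhere in Hadoop):

ArrayFile.Writer writer =
    new ArrayFile.Writer(conf, fs, "example.array", Text.class,
        CompressionType.RECORD, null);  // null: no progress reporting
writer.append(new Text("first"));   // stored under the implicit LongWritable key 0
writer.append(new Text("second"));  // key 1, and so on
writer.close();

ArrayFile.Reader reader = new ArrayFile.Reader(fs, "example.array", conf);
Text value = new Text();
reader.get(0, value);               // reads back "first"
reader.close();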
Source file: TestCodec.java (project: hadoop-oss)
private static void createMapFile(Configuration conf, FileSystem fs, Path path,
    CompressionCodec codec, CompressionType type, int records) throws IOException {
  MapFile.Writer writer =
      new MapFile.Writer(conf, path,
          MapFile.Writer.keyClass(Text.class),
          MapFile.Writer.valueClass(Text.class),
          MapFile.Writer.compression(type, codec));
  Text key = new Text();
  for (int j = 0; j < records; j++) {
    key.set(String.format("%03d", j));
    writer.append(key, key);
  }
  writer.close();
}
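Reading an entry back from the MapFile written above is symmetric; a sketch (the key "005" assumes records > 5):

MapFile.Reader reader = new MapFile.Reader(path, conf);
Text value = new Text();
if (reader.get(new Text("005"), value) != null) {
  // value now holds the Text appended under key "005"
}
reader.close();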
Source file: TestSetFile.java (project: hadoop-oss)
@Test
public void testSetFile() throws Exception {
  FileSystem fs = FileSystem.getLocal(conf);
  try {
    RandomDatum[] data = generate(10000);
    writeTest(fs, data, FILE, CompressionType.NONE);
    readTest(fs, data, FILE);
    writeTest(fs, data, FILE, CompressionType.BLOCK);
    readTest(fs, data, FILE);
  } finally {
    fs.close();
  }
}
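Both passes go through the same readTest path, so the test checks that an uncompressed and a block-compressed SetFile yield identical contents on read-back.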
Source file: TestSetFile.java (project: hadoop-oss)
private static void writeTest(FileSystem fs, RandomDatum[] data,
    String file, CompressionType compress) throws IOException {
  MapFile.delete(fs, file);
  LOG.info("creating with " + data.length + " records");
  SetFile.Writer writer =
      new SetFile.Writer(conf, fs, file,
          WritableComparator.get(RandomDatum.class),
          compress);
  for (int i = 0; i < data.length; i++) {
    writer.append(data[i]);
  }
  writer.close();
}
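Note that SetFile.Writer.append requires keys in increasing order (a SetFile is a MapFile underneath), so generate() is presumably returning the RandomDatum array already sorted.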
Source file: TestSequenceFileAppend.java (project: hadoop-oss)
@Test(timeout = 30000)
public void testAppendRecordCompression() throws Exception {
  GenericTestUtils.assumeInNativeProfile();

  Path file = new Path(ROOT_PATH, "testseqappendblockcompr.seq");
  fs.delete(file, true);

  Option compressOption = Writer.compression(CompressionType.RECORD,
      new GzipCodec());
  Writer writer = SequenceFile.createWriter(conf,
      SequenceFile.Writer.file(file),
      SequenceFile.Writer.keyClass(Long.class),
      SequenceFile.Writer.valueClass(String.class), compressOption);
  writer.append(1L, "one");
  writer.append(2L, "two");
  writer.close();

  verify2Values(file);

  writer = SequenceFile.createWriter(conf, SequenceFile.Writer.file(file),
      SequenceFile.Writer.keyClass(Long.class),
      SequenceFile.Writer.valueClass(String.class),
      SequenceFile.Writer.appendIfExists(true), compressOption);
  writer.append(3L, "three");
  writer.append(4L, "four");
  writer.close();

  verifyAll4Values(file);

  fs.deleteOnExit(file);
}
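The same compressOption is passed to both writers because, when reopening with appendIfExists(true), the writer is expected to match the compression type and codec recorded in the existing file's header.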
Source file: TestSequenceFile.java (project: hadoop-oss)
/**
 * Test that makes sure createWriter succeeds on a file that was
 * already created.
 * @throws IOException
 */
@SuppressWarnings("deprecation")
@Test
public void testCreateWriterOnExistingFile() throws IOException {
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.getLocal(conf);
  Path name = new Path(new Path(System.getProperty("test.build.data", "."),
      "createWriterOnExistingFile"), "file");
  fs.create(name);
  SequenceFile.createWriter(fs, conf, name, RandomDatum.class,
      RandomDatum.class, 512, (short) 1, 4096, false,
      CompressionType.NONE, null, new Metadata());
}
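The call above uses a deprecated createWriter overload (hence the @SuppressWarnings). A rough Option-based equivalent, assuming the corresponding Writer options carry the same buffer size, replication, and block size (the createParent flag has no direct Option counterpart here):

SequenceFile.createWriter(conf,
    SequenceFile.Writer.file(name),
    SequenceFile.Writer.keyClass(RandomDatum.class),
    SequenceFile.Writer.valueClass(RandomDatum.class),
    SequenceFile.Writer.bufferSize(512),
    SequenceFile.Writer.replication((short) 1),
    SequenceFile.Writer.blockSize(4096),
    SequenceFile.Writer.compression(CompressionType.NONE),
    SequenceFile.Writer.metadata(new Metadata()));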
Source file: BucketWriter.java (project: flume-release-1.7.0)
BucketWriter(long rollInterval, long rollSize, long rollCount, long batchSize,
    Context context, String filePath, String fileName, String inUsePrefix,
    String inUseSuffix, String fileSuffix, CompressionCodec codeC,
    CompressionType compType, HDFSWriter writer,
    ScheduledExecutorService timedRollerPool, PrivilegedExecutor proxyUser,
    SinkCounter sinkCounter, int idleTimeout, WriterCallback onCloseCallback,
    String onCloseCallbackPath, long callTimeout,
    ExecutorService callTimeoutPool, long retryInterval,
    int maxCloseTries) {
  this.rollInterval = rollInterval;
  this.rollSize = rollSize;
  this.rollCount = rollCount;
  this.batchSize = batchSize;
  this.filePath = filePath;
  this.fileName = fileName;
  this.inUsePrefix = inUsePrefix;
  this.inUseSuffix = inUseSuffix;
  this.fileSuffix = fileSuffix;
  this.codeC = codeC;
  this.compType = compType;
  this.writer = writer;
  this.timedRollerPool = timedRollerPool;
  this.proxyUser = proxyUser;
  this.sinkCounter = sinkCounter;
  this.idleTimeout = idleTimeout;
  this.onCloseCallback = onCloseCallback;
  this.onCloseCallbackPath = onCloseCallbackPath;
  this.callTimeout = callTimeout;
  this.callTimeoutPool = callTimeoutPool;
  fileExtensionCounter = new AtomicLong(clock.currentTimeMillis());
  this.retryInterval = retryInterval;
  this.maxRenameTries = maxCloseTries;
  isOpen = false;
  isUnderReplicated = false;
  this.writer.configure(context);
}
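The constructor only copies configuration into fields. Two details worth noting: maxCloseTries is stored under the name maxRenameTries, and fileExtensionCounter is seeded with the clock's current time, which the suffix assertion in the test below depends on.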
Source file: TestBucketWriter.java (project: flume-release-1.7.0)
@Test
public void testFileSuffixNotGiven() throws IOException, InterruptedException {
  final int ROLL_INTERVAL = 1000; // seconds; make sure it doesn't change in the course of the test
  final String suffix = null;

  MockHDFSWriter hdfsWriter = new MockHDFSWriter();
  BucketWriter bucketWriter = new BucketWriter(
      ROLL_INTERVAL, 0, 0, 0, ctx, "/tmp", "file", "", ".tmp", suffix, null,
      SequenceFile.CompressionType.NONE, hdfsWriter, timedRollerPool, proxy,
      new SinkCounter("test-bucket-writer-" + System.currentTimeMillis()), 0, null, null, 30000,
      Executors.newSingleThreadExecutor(), 0, 0);

  // Need to override the system time used by the writer so we know what to expect.
  final long testTime = System.currentTimeMillis();
  Clock testClock = new Clock() {
    public long currentTimeMillis() {
      return testTime;
    }
  };
  bucketWriter.setClock(testClock);

  Event e = EventBuilder.withBody("foo", Charsets.UTF_8);
  bucketWriter.append(e);

  Assert.assertTrue("Incorrect suffix", hdfsWriter.getOpenedFilePath().endsWith(
      Long.toString(testTime + 1) + ".tmp"));
}
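The expected suffix is testTime + 1 because fileExtensionCounter is seeded from the clock at construction time and then, presumably, incremented once (incrementAndGet) when the first file is opened; the assertion holds as long as construction and the capture of testTime land on the same millisecond.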