/**
* Test that even a non-idempotent method will properly fail-over if the
* first IPC attempt times out trying to connect. Regression test for
* HDFS-4404.
*/
@Test
public void testFailoverOnConnectTimeout() throws Exception {
conf.setClass(CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
InjectingSocketFactory.class, SocketFactory.class);
// Set up the InjectingSocketFactory to throw a ConnectTimeoutException
// when connecting to the first NN.
InjectingSocketFactory.portToInjectOn = cluster.getNameNodePort(0);
FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
// Make the second NN the active one.
cluster.shutdownNameNode(0);
cluster.transitionToActive(1);
// Call a non-idempotent method, and ensure the failover of the call proceeds
// successfully.
IOUtils.closeStream(fs.create(TEST_FILE));
}
Example source code for the Java class org.apache.hadoop.io.IOUtils
The test above is from TestDFSClientFailover.java (project: hadoop).
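The InjectingSocketFactory used by the failover test above is not shown on this page. A minimal sketch of what such a factory could look like, assuming it extends Hadoop's StandardSocketFactory (the field name portToInjectOn comes from the test; the rest is illustrative):

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.SocketAddress;

import org.apache.hadoop.net.ConnectTimeoutException;
import org.apache.hadoop.net.StandardSocketFactory;

public class InjectingSocketFactory extends StandardSocketFactory {

  /** Connections to this port fail with an injected ConnectTimeoutException. */
  public static volatile int portToInjectOn = -1;

  @Override
  public Socket createSocket() throws IOException {
    return new Socket() {
      @Override
      public void connect(SocketAddress endpoint, int timeout)
          throws IOException {
        InetSocketAddress addr = (InetSocketAddress) endpoint;
        if (addr.getPort() == portToInjectOn) {
          // Simulate an IPC connect timeout so the client fails over.
          throw new ConnectTimeoutException("injected timeout connecting to " + addr);
        }
        super.connect(endpoint, timeout);
      }
    };
  }
}

In Hadoop's StandardSocketFactory the other createSocket overloads are built on the no-argument variant, so overriding that one method is enough for the injection to apply to IPC connections.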
From TestSSLHttpServer.java (project: hadoop-oss):
/**
* Test that verifies that the excluded ciphers (SSL_RSA_WITH_RC4_128_SHA,
* TLS_ECDH_ECDSA_WITH_RC4_128_SHA, TLS_ECDH_RSA_WITH_RC4_128_SHA,
* TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_RC4_128_SHA) are not
* available for negotiation during an SSL connection.
*/
@Test
public void testExcludedCiphers() throws Exception {
URL url = new URL(baseUrl, "/echo?a=b&c=d");
HttpsURLConnection conn = (HttpsURLConnection) url.openConnection();
SSLSocketFactory sslSocketF = clientSslFactory.createSSLSocketFactory();
PrefferedCipherSSLSocketFactory testPreferredCipherSSLSocketF
= new PrefferedCipherSSLSocketFactory(sslSocketF,
excludeCiphers.split(","));
conn.setSSLSocketFactory(testPreferredCipherSSLSocketF);
assertFalse("excludedCipher list is empty", excludeCiphers.isEmpty());
try {
InputStream in = conn.getInputStream();
ByteArrayOutputStream out = new ByteArrayOutputStream();
IOUtils.copyBytes(in, out, 1024);
fail("No Ciphers in common, SSLHandshake must fail.");
} catch (SSLHandshakeException ex) {
LOG.info("No Ciphers in common, expected succesful test result.", ex);
}
}
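The PrefferedCipherSSLSocketFactory helper is likewise not shown on this page. A plausible sketch, assuming a delegating SSLSocketFactory that restricts every socket it creates to the given cipher suites (an illustration, not necessarily the project's actual implementation):

import java.io.IOException;
import java.net.InetAddress;
import java.net.Socket;
import javax.net.ssl.SSLSocket;
import javax.net.ssl.SSLSocketFactory;

class PrefferedCipherSSLSocketFactory extends SSLSocketFactory {
  private final SSLSocketFactory delegate;
  private final String[] ciphers;

  PrefferedCipherSSLSocketFactory(SSLSocketFactory delegate, String[] ciphers) {
    this.delegate = delegate;
    this.ciphers = ciphers.clone();
  }

  /** Force the created socket to offer only the preferred cipher suites. */
  private Socket restrict(Socket socket) {
    ((SSLSocket) socket).setEnabledCipherSuites(ciphers);
    return socket;
  }

  @Override public String[] getDefaultCipherSuites() { return ciphers.clone(); }
  @Override public String[] getSupportedCipherSuites() { return delegate.getSupportedCipherSuites(); }

  @Override public Socket createSocket(Socket s, String host, int port, boolean autoClose) throws IOException {
    return restrict(delegate.createSocket(s, host, port, autoClose));
  }
  @Override public Socket createSocket(String host, int port) throws IOException {
    return restrict(delegate.createSocket(host, port));
  }
  @Override public Socket createSocket(String host, int port, InetAddress localHost, int localPort) throws IOException {
    return restrict(delegate.createSocket(host, port, localHost, localPort));
  }
  @Override public Socket createSocket(InetAddress host, int port) throws IOException {
    return restrict(delegate.createSocket(host, port));
  }
  @Override public Socket createSocket(InetAddress address, int port, InetAddress localAddress, int localPort) throws IOException {
    return restrict(delegate.createSocket(address, port, localAddress, localPort));
  }
}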
From TestBloomMapFile.java (project: hadoop):
/**
* test {@link BloomMapFile.Reader} constructor with
* IOException
*/
public void testIOExceptionInWriterConstructor() {
Path dirNameSpy = spy(TEST_FILE);
BloomMapFile.Reader reader = null;
BloomMapFile.Writer writer = null;
try {
writer = new BloomMapFile.Writer(conf, TEST_FILE,
MapFile.Writer.keyClass(IntWritable.class),
MapFile.Writer.valueClass(Text.class));
writer.append(new IntWritable(1), new Text("123124142"));
writer.close();
when(dirNameSpy.getFileSystem(conf)).thenThrow(new IOException());
reader = new BloomMapFile.Reader(dirNameSpy, conf,
MapFile.Reader.comparator(new WritableComparator(IntWritable.class)));
assertNull("testIOExceptionInWriterConstructor error !!!",
reader.getBloomFilter());
} catch (Exception ex) {
fail("unexpect ex in testIOExceptionInWriterConstructor !!!");
} finally {
IOUtils.cleanup(null, writer, reader);
}
}
From TestFileBasedCopyListing.java (project: hadoop):
private void checkResult(Path listFile, int count) throws IOException {
if (count == 0) {
return;
}
int recCount = 0;
SequenceFile.Reader reader = new SequenceFile.Reader(config,
SequenceFile.Reader.file(listFile));
try {
Text relPath = new Text();
CopyListingFileStatus fileStatus = new CopyListingFileStatus();
while (reader.next(relPath, fileStatus)) {
if (fileStatus.isDirectory() && relPath.toString().equals("")) {
// ignore root with empty relPath, which is an entry to be
// used for preserving root attributes etc.
continue;
}
Assert.assertEquals(fileStatus.getPath().toUri().getPath(), map.get(relPath.toString()));
recCount++;
}
} finally {
IOUtils.closeStream(reader);
}
Assert.assertEquals(recCount, count);
}
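For orientation, the listing file consumed above is a SequenceFile of (relative path, CopyListingFileStatus) records. A hedged sketch of writing one such record (the literal path and the fileStatus variable are placeholders, not from TestFileBasedCopyListing):

SequenceFile.Writer writer = SequenceFile.createWriter(config,
    SequenceFile.Writer.file(listFile),
    SequenceFile.Writer.keyClass(Text.class),
    SequenceFile.Writer.valueClass(CopyListingFileStatus.class));
try {
  // One record per source file: key = path relative to the source root.
  writer.append(new Text("/dir/file1"), new CopyListingFileStatus(fileStatus));
} finally {
  IOUtils.closeStream(writer);
}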
From VolumeScanner.java (project: hadoop):
/**
* Disallow the scanner from scanning the given block pool.
*
* @param bpid The block pool id.
*/
public synchronized void disableBlockPoolId(String bpid) {
Iterator<BlockIterator> i = blockIters.iterator();
while (i.hasNext()) {
BlockIterator iter = i.next();
if (iter.getBlockPoolId().equals(bpid)) {
LOG.trace("{}: disabling scanning on block pool {}", this, bpid);
i.remove();
IOUtils.cleanup(null, iter);
if (curBlockIter == iter) {
curBlockIter = null;
}
notify();
return;
}
}
LOG.warn("{}: can't remove block pool {}, because it was never " +
"added.", this, bpid);
}
From HistoryServerLeveldbStateStoreService.java (project: hadoop):
@Override
public void storeToken(MRDelegationTokenIdentifier tokenId, Long renewDate)
throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Storing token " + tokenId.getSequenceNumber());
}
ByteArrayOutputStream memStream = new ByteArrayOutputStream();
DataOutputStream dataStream = new DataOutputStream(memStream);
try {
tokenId.write(dataStream);
dataStream.writeLong(renewDate);
dataStream.close();
dataStream = null;
} finally {
IOUtils.cleanup(LOG, dataStream);
}
String dbKey = getTokenDatabaseKey(tokenId);
try {
db.put(bytes(dbKey), memStream.toByteArray());
} catch (DBException e) {
throw new IOException(e);
}
}
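A hypothetical counterpart that reads the stored value back (the helper name and signature are assumptions, not part of the class above): the value is the serialized token identifier followed by one long renew date.

private long loadTokenAndGetRenewDate(byte[] data,
    MRDelegationTokenIdentifier tokenId) throws IOException {
  DataInputStream in = new DataInputStream(new ByteArrayInputStream(data));
  try {
    tokenId.readFields(in); // Writable counterpart of tokenId.write(dataStream)
    return in.readLong();   // the renew date was appended after the identifier
  } finally {
    IOUtils.cleanup(LOG, in);
  }
}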
From TestCoprocessorClassLoader.java (project: ditb):
@Test
public void testCleanupOldJars() throws Exception {
String className = "TestCleanupOldJars";
String folder = TEST_UTIL.getDataTestDir().toString();
File jarFile = ClassLoaderTestHelper.buildJar(
folder, className, null, ClassLoaderTestHelper.localDirPath(conf));
File tmpJarFile = new File(jarFile.getParent(), "/tmp/" + className + ".test.jar");
if (tmpJarFile.exists()) tmpJarFile.delete();
assertFalse("tmp jar file should not exist", tmpJarFile.exists());
IOUtils.copyBytes(new FileInputStream(jarFile),
new FileOutputStream(tmpJarFile), conf, true);
assertTrue("tmp jar file should be created", tmpJarFile.exists());
Path path = new Path(jarFile.getAbsolutePath());
ClassLoader parent = TestCoprocessorClassLoader.class.getClassLoader();
CoprocessorClassLoader.parentDirLockSet.clear(); // So that clean up can be triggered
ClassLoader classLoader = CoprocessorClassLoader.getClassLoader(path, parent, "111", conf);
assertNotNull("Classloader should be created", classLoader);
assertFalse("tmp jar file should be removed", tmpJarFile.exists());
}
From TagCompressionContext.java (project: ditb):
/**
* Uncompress tags from the InputStream and writes to the destination array.
* @param src Stream where the compressed tags are available
* @param dest Destination array where to write the uncompressed tags
* @param offset Offset in destination where tags to be written
* @param length Length of all tag bytes
* @throws IOException
*/
public void uncompressTags(InputStream src, byte[] dest, int offset, int length)
throws IOException {
int endOffset = offset + length;
while (offset < endOffset) {
byte status = (byte) src.read();
if (status == Dictionary.NOT_IN_DICTIONARY) {
int tagLen = StreamUtils.readRawVarint32(src);
offset = Bytes.putAsShort(dest, offset, tagLen);
IOUtils.readFully(src, dest, offset, tagLen);
tagDict.addEntry(dest, offset, tagLen);
offset += tagLen;
} else {
short dictIdx = StreamUtils.toShort(status, (byte) src.read());
byte[] entry = tagDict.getEntry(dictIdx);
if (entry == null) {
throw new IOException("Missing dictionary entry for index " + dictIdx);
}
offset = Bytes.putAsShort(dest, offset, entry.length);
System.arraycopy(entry, 0, dest, offset, entry.length);
offset += entry.length;
}
}
}
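StreamUtils.readRawVarint32 is assumed here to decode a protobuf-style base-128 varint: 7 payload bits per byte, with the high bit set while more bytes follow. A self-contained sketch of such a decoder:

import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

static int readRawVarint32(InputStream in) throws IOException {
  int result = 0;
  for (int shift = 0; shift < 32; shift += 7) {
    int b = in.read();
    if (b < 0) {
      throw new EOFException("Stream ended in the middle of a varint");
    }
    result |= (b & 0x7f) << shift;   // accumulate 7 payload bits
    if ((b & 0x80) == 0) {           // high bit clear: this was the last byte
      return result;
    }
  }
  throw new IOException("Malformed varint32 (more than 5 bytes)");
}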
From TestMapFile.java (project: hadoop):
/**
* test {@code MapFile.Reader.midKey() } method
*/
@Test
public void testMidKeyOnCurrentApi() throws Exception {
// Write a mapfile of simple data: keys are 0, 1, ..., 9
final String TEST_PREFIX = "testMidKeyOnCurrentApi.mapfile";
MapFile.Writer writer = null;
MapFile.Reader reader = null;
try {
writer = createWriter(TEST_PREFIX, IntWritable.class, IntWritable.class);
// 0,1,....9
int SIZE = 10;
for (int i = 0; i < SIZE; i++)
writer.append(new IntWritable(i), new IntWritable(i));
writer.close();
reader = createReader(TEST_PREFIX, IntWritable.class);
assertEquals(new IntWritable((SIZE - 1) / 2), reader.midKey());
} finally {
IOUtils.cleanup(null, writer, reader);
}
}
From TestStickyBit.java (project: hadoop):
/**
* Ensure that even if a file is in a directory with the sticky bit on,
* another user can write to that file (assuming correct permissions).
*/
private void confirmCanAppend(Configuration conf, Path p) throws Exception {
// Write a file to the new tmp directory as a regular user
Path file = new Path(p, "foo");
writeFile(hdfsAsUser1, file);
hdfsAsUser1.setPermission(file, new FsPermission((short) 0777));
// Log onto cluster as another user and attempt to append to file
Path file2 = new Path(p, "foo");
FSDataOutputStream h = null;
try {
h = hdfsAsUser2.append(file2);
h.write("Some more data".getBytes());
h.close();
h = null;
} finally {
IOUtils.cleanup(null, h);
}
}
From TestFsShellReturnCode.java (project: hadoop-oss):
@Test (timeout = 30000)
public void testRmForceWithNonexistentGlob() throws Exception {
Configuration conf = new Configuration();
FsShell shell = new FsShell();
shell.setConf(conf);
final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
final PrintStream err = new PrintStream(bytes);
final PrintStream oldErr = System.err;
System.setErr(err);
try {
int exit = shell.run(new String[]{"-rm", "-f", "nomatch*"});
assertEquals(0, exit);
assertTrue(bytes.toString().isEmpty());
} finally {
IOUtils.closeStream(err);
System.setErr(oldErr);
}
}
From AbstractContractOpenTest.java (project: hadoop-oss):
@Test
public void testOpenFileTwice() throws Throwable {
describe("verify that two opened file streams are independent");
Path path = path("testopenfiletwice.txt");
byte[] block = dataset(TEST_FILE_LEN, 0, 255);
//this file now has a simple rule: offset => value
createFile(getFileSystem(), path, false, block);
//open first
FSDataInputStream instream1 = getFileSystem().open(path);
int c = instream1.read();
assertEquals(0,c);
FSDataInputStream instream2 = null;
try {
instream2 = getFileSystem().open(path);
assertEquals("first read of instream 2", 0, instream2.read());
assertEquals("second read of instream 1", 1, instream1.read());
instream1.close();
assertEquals("second read of instream 2", 1, instream2.read());
//close instream1 again
instream1.close();
} finally {
IOUtils.closeStream(instream1);
IOUtils.closeStream(instream2);
}
}
From TestSaveNamespace.java (project: hadoop):
/**
* Test that saving the namespace succeeds when a parent directory is
* renamed while an open lease is held and the destination directory
* already exists. Regression test for HDFS-2827.
*/
@Test
public void testSaveNamespaceWithRenamedLease() throws Exception {
MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration())
.numDataNodes(1).build();
cluster.waitActive();
DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
OutputStream out = null;
try {
fs.mkdirs(new Path("/test-target"));
out = fs.create(new Path("/test-source/foo")); // don't close
fs.rename(new Path("/test-source/"), new Path("/test-target/"));
fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
cluster.getNameNodeRpc().saveNamespace();
fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
} finally {
IOUtils.cleanup(LOG, out, fs);
if (cluster != null) {
cluster.shutdown();
}
}
}
From TestSSLHttpServer.java (project: hadoop-oss):
/** Test that a TLS connection can be established when the client's enabled
* cipher suites do not overlap with the server's disabled cipher suites.
*/
@Test
public void testExclusiveEnabledCiphers() throws Exception {
URL url = new URL(baseUrl, "/echo?a=b&c=d");
HttpsURLConnection conn = (HttpsURLConnection) url.openConnection();
SSLSocketFactory sslSocketF = clientSslFactory.createSSLSocketFactory();
PrefferedCipherSSLSocketFactory testPreferredCipherSSLSocketF
= new PrefferedCipherSSLSocketFactory(sslSocketF,
exclusiveEnabledCiphers.split(","));
conn.setSSLSocketFactory(testPreferredCipherSSLSocketF);
assertFalse("excludedCipher list is empty",
exclusiveEnabledCiphers.isEmpty());
try {
InputStream in = conn.getInputStream();
ByteArrayOutputStream out = new ByteArrayOutputStream();
IOUtils.copyBytes(in, out, 1024);
assertEquals(out.toString(), "a:b\nc:d\n");
LOG.info("Atleast one additional enabled cipher than excluded ciphers,"
+ " expected successful test result.");
} catch (SSLHandshakeException ex) {
fail("Atleast one additional cipher available for successful handshake."
+ " Unexpected test failure: " + ex);
}
}
From TestFuseDFS.java (project: hadoop):
/** Check that the given file exists with the given contents */
private static void checkFile(File f, String expectedContents)
throws IOException {
FileInputStream fi = new FileInputStream(f);
int len = expectedContents.length();
byte[] b = new byte[len];
try {
IOUtils.readFully(fi, b, 0, len);
} catch (IOException ie) {
fail("Reading "+f.getName()+" failed with "+ie.getMessage());
} finally {
fi.close(); // NB: leaving f unclosed prevents unmount
}
String s = new String(b, 0, len);
assertEquals("File content differs", expectedContents, s);
}
From MD5FileUtils.java (project: hadoop):
/**
* Read the given md5 file (stored alongside a data file)
* and match its content against the expected line format.
* @param md5File the md5 file to read
* @return a matcher with two matched groups,
* where group(1) is the md5 string and group(2) is the data file path.
*/
private static Matcher readStoredMd5(File md5File) throws IOException {
BufferedReader reader =
new BufferedReader(new InputStreamReader(new FileInputStream(
md5File), Charsets.UTF_8));
String md5Line;
try {
md5Line = reader.readLine();
if (md5Line == null) { md5Line = ""; }
md5Line = md5Line.trim();
} catch (IOException ioe) {
throw new IOException("Error reading md5 file at " + md5File, ioe);
} finally {
IOUtils.cleanup(LOG, reader);
}
Matcher matcher = LINE_REGEX.matcher(md5Line);
if (!matcher.matches()) {
throw new IOException("Invalid MD5 file " + md5File + ": the content \""
+ md5Line + "\" does not match the expected pattern.");
}
return matcher;
}
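The LINE_REGEX used above is not shown on this page. Assuming the conventional md5sum output format (32 hex digits, a separator, then the data file name), it would look roughly like this:

import java.util.regex.Pattern;

// Matches e.g. "d41d8cd98f00b204e9800998ecf8427e *fsimage_0000000000000000042"
private static final Pattern LINE_REGEX =
    Pattern.compile("([0-9a-f]{32}) [ \\*](.+)");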
From LeveldbRMStateStore.java (project: hadoop):
private void loadRMDTSecretManagerTokenSequenceNumber(RMState state)
throws IOException {
byte[] data = null;
try {
data = db.get(bytes(RM_DT_SEQUENCE_NUMBER_KEY));
} catch (DBException e) {
throw new IOException(e);
}
if (data != null) {
DataInputStream in = new DataInputStream(new ByteArrayInputStream(data));
try {
state.rmSecretManagerState.dtSequenceNumber = in.readInt();
} finally {
IOUtils.cleanup(LOG, in);
}
}
}
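The matching store side would simply write the int under the same key the loader reads. A hypothetical sketch (the real LeveldbRMStateStore method may differ):

private void storeRMDTSecretManagerTokenSequenceNumber(int sequenceNumber)
    throws IOException {
  ByteArrayOutputStream memStream = new ByteArrayOutputStream();
  DataOutputStream out = new DataOutputStream(memStream);
  try {
    out.writeInt(sequenceNumber); // same encoding the loader readInt()s back
  } finally {
    IOUtils.cleanup(LOG, out);
  }
  try {
    db.put(bytes(RM_DT_SEQUENCE_NUMBER_KEY), memStream.toByteArray());
  } catch (DBException e) {
    throw new IOException(e);
  }
}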
From TestFileCreationClient.java (project: hadoop):
@Override
public void run() {
FSDataOutputStream out = null;
int i = 0;
try {
out = fs.create(filepath);
for(; running; i++) {
System.out.println(getName() + " writes " + i);
out.write(i);
out.hflush();
sleep(100);
}
}
catch(Exception e) {
System.out.println(getName() + " dies: e=" + e);
}
finally {
System.out.println(getName() + ": i=" + i);
IOUtils.closeStream(out);
}
}
From TotalOrderPartitioner.java (project: hadoop):
/**
* Read the cut points from the given IFile.
* @param fs The file system
* @param p The path to read
* @param keyClass The map output key class
* @param conf The job configuration
* @throws IOException
*/
// matching key types enforced by passing in
@SuppressWarnings("unchecked") // map output key class
private K[] readPartitions(FileSystem fs, Path p, Class<K> keyClass,
Configuration conf) throws IOException {
SequenceFile.Reader reader = new SequenceFile.Reader(fs, p, conf);
ArrayList<K> parts = new ArrayList<K>();
K key = ReflectionUtils.newInstance(keyClass, conf);
NullWritable value = NullWritable.get();
try {
while (reader.next(key, value)) {
parts.add(key);
key = ReflectionUtils.newInstance(keyClass, conf);
}
reader.close();
reader = null;
} finally {
IOUtils.cleanup(LOG, reader);
}
return parts.toArray((K[])Array.newInstance(keyClass, parts.size()));
}
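The partition file that readPartitions() consumes holds only keys, with NullWritable values; it is normally produced by InputSampler.writePartitionFile. A hedged sketch of a writer with that layout (the helper name is illustrative):

private static <K> void writePartitions(FileSystem fs, Path p,
    Configuration conf, Class<K> keyClass, K[] cutPoints) throws IOException {
  SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, p,
      keyClass, NullWritable.class);
  try {
    for (K key : cutPoints) {
      writer.append(key, NullWritable.get()); // one cut point per record
    }
  } finally {
    IOUtils.cleanup(null, writer);
  }
}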
From TestMapFile.java (project: hadoop-oss):
@Test
public void testRenameWithFalse() {
final String ERROR_MESSAGE = "Could not rename";
final String NEW_FILE_NAME = "test-new.mapfile";
final String OLD_FILE_NAME = "test-old.mapfile";
MapFile.Writer writer = null;
try {
FileSystem fs = FileSystem.getLocal(conf);
FileSystem spyFs = spy(fs);
writer = createWriter(OLD_FILE_NAME, IntWritable.class, IntWritable.class);
writer.close();
Path oldDir = new Path(TEST_DIR, OLD_FILE_NAME);
Path newDir = new Path(TEST_DIR, NEW_FILE_NAME);
when(spyFs.rename(oldDir, newDir)).thenReturn(false);
MapFile.rename(spyFs, oldDir.toString(), newDir.toString());
fail("testRenameWithException no exception error !!!");
} catch (IOException ex) {
assertTrue("testRenameWithFalse invalid IOExceptionMessage error !!!", ex
.getMessage().startsWith(ERROR_MESSAGE));
} finally {
IOUtils.cleanup(null, writer);
}
}
From TestMapFile.java (project: hadoop-oss):
/**
* test {@code MapFile.Reader.getClosest() } with wrong class key
*/
@Test
public void testReaderGetClosest() throws Exception {
final String TEST_METHOD_KEY = "testReaderWithWrongKeyClass.mapfile";
MapFile.Writer writer = null;
MapFile.Reader reader = null;
try {
writer = createWriter(TEST_METHOD_KEY, IntWritable.class, Text.class);
for (int i = 0; i < 10; i++)
writer.append(new IntWritable(i), new Text("value" + i));
writer.close();
reader = createReader(TEST_METHOD_KEY, Text.class);
reader.getClosest(new Text("2"), new Text(""));
fail("no excepted exception in testReaderWithWrongKeyClass !!!");
} catch (IOException ex) {
/* Should be thrown to pass the test */
} finally {
IOUtils.cleanup(null, writer, reader);
}
}
From TestMRJobs.java (project: hadoop):
/**
* Used on Windows to determine if the specified file is a symlink that
* targets a directory. On most platforms, these checks can be done using
* commons-io. On Windows, the commons-io implementation is unreliable and
* always returns false. Instead, this method checks the output of the dir
* command. After migrating to Java 7, this method can be removed in favor
* of the new method java.nio.file.Files.isSymbolicLink, which is expected to
* work cross-platform.
*
* @param file File to check
* @return boolean true if the file is a symlink that targets a directory
* @throws IOException thrown for any I/O error
*/
private static boolean isWindowsSymlinkedDirectory(File file)
throws IOException {
String dirOut = Shell.execCommand("cmd", "/c", "dir",
file.getAbsoluteFile().getParent());
StringReader sr = new StringReader(dirOut);
BufferedReader br = new BufferedReader(sr);
try {
String line = br.readLine();
while (line != null) {
if (line.contains(file.getName()) && line.contains("<SYMLINKD>")) {
return true;
}
line = br.readLine();
}
return false;
} finally {
IOUtils.closeStream(br);
IOUtils.closeStream(sr);
}
}
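The Java 7 replacement mentioned in the comment could be as simple as this sketch (the helper name is hypothetical):

import java.io.File;
import java.nio.file.Files;

private static boolean isSymlinkedDirectory(File file) {
  // isSymbolicLink() does not follow the link; isDirectory() does,
  // so together they detect a symlink whose target is a directory.
  return Files.isSymbolicLink(file.toPath()) && file.isDirectory();
}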
From TestFileJournalManager.java (project: hadoop):
/**
* Make sure that in-progress streams aren't counted if we don't ask for
* them.
*/
@Test
public void testExcludeInProgressStreams() throws CorruptionException,
IOException {
File f = new File(TestEditLog.TEST_DIR + "/excludeinprogressstreams");
// Don't close the edit log once the files have been set up.
NNStorage storage = setupEdits(Collections.<URI>singletonList(f.toURI()),
10, false);
StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
FileJournalManager jm = new FileJournalManager(conf, sd, storage);
// If we exclude the in-progress stream, we should only have 100 tx.
assertEquals(100, getNumberOfTransactions(jm, 1, false, false));
EditLogInputStream elis = getJournalInputStream(jm, 90, false);
try {
FSEditLogOp lastReadOp = null;
while ((lastReadOp = elis.readOp()) != null) {
assertTrue(lastReadOp.getTransactionId() <= 100);
}
} finally {
IOUtils.cleanup(LOG, elis);
}
}
From BlockSender.java (project: hadoop):
/**
* Read checksum into given buffer
* @param buf buffer to read the checksum into
* @param checksumOffset offset at which to write the checksum into buf
* @param checksumLen length of checksum to write
* @throws IOException on error
*/
private void readChecksum(byte[] buf, final int checksumOffset,
final int checksumLen) throws IOException {
if (checksumSize <= 0 && checksumIn == null) {
return;
}
try {
checksumIn.readFully(buf, checksumOffset, checksumLen);
} catch (IOException e) {
LOG.warn(" Could not read or failed to veirfy checksum for data"
+ " at offset " + offset + " for block " + block, e);
IOUtils.closeStream(checksumIn);
checksumIn = null;
if (corruptChecksumOk) {
if (checksumOffset < checksumLen) {
// Just fill the array with zeros.
Arrays.fill(buf, checksumOffset, checksumLen, (byte) 0);
}
} else {
throw e;
}
}
}
From HdfsUtil.java (project: rainbow):
public void upFile(InputStream fileInputStream, String hdfsPath)
throws IOException
{
InputStream in = new BufferedInputStream(fileInputStream);
OutputStream out = fileSystem.create(new Path(hdfsPath));
try
{
IOUtils.copyBytes(in, out, conf);
} catch (Exception e)
{
e.printStackTrace();
} finally
{
// close Stream
IOUtils.closeStream(in);
IOUtils.closeStream(out);
}
}
From TestUniformSizeInputFormat.java (project: hadoop):
private void checkSplits(Path listFile, List<InputSplit> splits) throws IOException {
long lastEnd = 0;
//Verify if each split's start is matching with the previous end and
//we are not missing anything
for (InputSplit split : splits) {
FileSplit fileSplit = (FileSplit) split;
long start = fileSplit.getStart();
Assert.assertEquals(lastEnd, start);
lastEnd = start + fileSplit.getLength();
}
//Verify there is nothing more to read from the input file
SequenceFile.Reader reader
= new SequenceFile.Reader(cluster.getFileSystem().getConf(),
SequenceFile.Reader.file(listFile));
try {
reader.seek(lastEnd);
CopyListingFileStatus srcFileStatus = new CopyListingFileStatus();
Text srcRelPath = new Text();
Assert.assertFalse(reader.next(srcRelPath, srcFileStatus));
} finally {
IOUtils.closeStream(reader);
}
}
From HdfsUtil.java (project: rainbow):
public void appendFile(String localFile, String hdfsPath)
throws IOException
{
InputStream in = new FileInputStream(localFile);
OutputStream out = fileSystem.append(new Path(hdfsPath));
try
{
IOUtils.copyBytes(in, out, conf);
} catch (Exception e)
{
e.printStackTrace();
} finally
{
IOUtils.closeStream(in);
IOUtils.closeStream(out);
}
}
From HdfsUtil.java (project: rainbow):
public void downFile(String hdfsPath, Path localPath) throws IOException
{
FSDataInputStream in = fileSystem.open(new Path(hdfsPath));
/* FSDataOutputStream out = fileSystem.create(localPath); */
FileOutputStream out = new FileOutputStream(new File(
localPath.toString()));
try
{
// read
IOUtils.copyBytes(in, out, conf);
} catch (Exception e)
{
e.printStackTrace();
} finally
{
// close Stream
IOUtils.closeStream(in);
IOUtils.closeStream(out);
}
}
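Note that the three-argument copyBytes(in, out, conf) used above copies with the configured buffer size but leaves both streams open, which is why the explicit closeStream calls are needed. A hypothetical compact variant could use the overload that closes the streams itself:

public void downFileCompact(String hdfsPath, String localPath)
    throws IOException {
  FSDataInputStream in = fileSystem.open(new Path(hdfsPath));
  OutputStream out = new FileOutputStream(localPath);
  // 'true' asks copyBytes to close both streams, even if the copy fails.
  IOUtils.copyBytes(in, out, 4096, true);
}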
From FileCopyWithProgress.java (project: HadoopGuides):
public static void main(String[] args) throws IOException {
final String localSrc = "/tmp/log/bigdata.pdf";
final String hdfsUri = "hdfs://master:8020/test/bigdata.pdf";
InputStream in = new BufferedInputStream(new FileInputStream(localSrc));
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(URI.create(hdfsUri), conf);
OutputStream out = fs.create(new Path(hdfsUri), new Progressable() {
// progress() is only called back when the Hadoop filesystem is HDFS;
// local, S3, and FTP filesystems never invoke it.
@Override
public void progress() {
System.out.print(">");
}
});
IOUtils.copyBytes(in, out, 4096, true);
}
From FileContextTestHelper.java (project: hadoop):
public static byte[] readFile(FileContext fc, Path path, int len)
throws IOException {
DataInputStream dis = fc.open(path);
byte[] buffer = new byte[len];
IOUtils.readFully(dis, buffer, 0, len);
dis.close();
return buffer;
}
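For symmetry with readFile, a write-side sketch using FileContext (an assumed helper; the real FileContextTestHelper has its own variants):

import java.util.EnumSet;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;

public static void writeFile(FileContext fc, Path path, byte[] data)
    throws IOException {
  FSDataOutputStream out =
      fc.create(path, EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE));
  try {
    out.write(data);
  } finally {
    IOUtils.closeStream(out); // close even if the write fails
  }
}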