java类org.apache.hadoop.io.MultipleIOException的实例源码

DFSClientCache.java 文件源码 项目:hadoop 阅读 21 收藏 0 点赞 0 评论 0
/**
 * Closes every DFSClient currently held in the cache, collecting individual
 * close failures and rethrowing them as one aggregated IOException.
 * @param onlyAutomatic only close those that are marked for automatic closing
 * @throws IOException aggregation of all failures from the individual closes
 */
synchronized void closeAll(boolean onlyAutomatic) throws IOException {
  // NOTE(review): onlyAutomatic is accepted but never consulted; every cached
  // client is closed unconditionally -- confirm selective closing was not intended.
  List<IOException> failures = new ArrayList<IOException>();

  for (DFSClient client : clientCache.asMap().values()) {
    if (client == null) {
      continue;
    }
    try {
      client.close();
    } catch (IOException ioe) {
      failures.add(ioe);
    }
  }

  if (!failures.isEmpty()) {
    throw MultipleIOException.createIOException(failures);
  }
}
WALSplitter.java 文件源码 项目:ditb 阅读 22 收藏 0 点赞 0 评论 0
/**
 * Finishes writing, closes all writers, and publishes the resulting split
 * paths. The {@code finally} block runs {@link #close()} and
 * {@code closeLogWriters(null)} on every exit path, so writers are released
 * even when {@code finishWriting(false)} fails or throws.
 * @return null if failed to report progress
 * @throws IOException
 */
@Override
public List<Path> finishWritingAndClose() throws IOException {
  boolean isSuccessful = false;
  List<Path> result = null;
  try {
    isSuccessful = finishWriting(false);
  } finally {
    result = close();
    List<IOException> thrown = closeLogWriters(null);
    if (thrown != null && !thrown.isEmpty()) {
      // NOTE(review): throwing from this finally block masks any exception
      // already propagating from finishWriting(false) -- confirm intended.
      throw MultipleIOException.createIOException(thrown);
    }
  }
  // Publish the closed-writer paths only when writing fully succeeded;
  // otherwise 'splits' keeps whatever value it had before (presumably null).
  if (isSuccessful) {
    splits = result;
  }
  return splits;
}
WALSplitter.java 文件源码 项目:ditb 阅读 24 收藏 0 点赞 0 评论 0
@Override
public List<Path> finishWritingAndClose() throws IOException {
  try {
    // A false return aborts early without publishing any splits.
    if (!finishWriting(false)) {
      return null;
    }
    if (hasEditsInDisablingOrDisabledTables) {
      splits = logRecoveredEditsOutputSink.finishWritingAndClose();
    } else {
      // No edits for disabling/disabled tables were seen: nothing to
      // recover, so hand back an empty list rather than null.
      splits = new ArrayList<Path>();
    }
    // returns an empty array in order to keep interface same as old way
    return splits;
  } finally {
    // Region-server writers are closed on every exit path; close failures
    // are aggregated into a single exception.
    // NOTE(review): throwing here masks an exception already propagating
    // from the try block -- confirm this precedence is intended.
    List<IOException> thrown = closeRegionServerWriters();
    if (thrown != null && !thrown.isEmpty()) {
      throw MultipleIOException.createIOException(thrown);
    }
  }
}
HFileArchiver.java 文件源码 项目:ditb 阅读 16 收藏 0 点赞 0 评论 0
/**
 * Just do a simple delete of the given store files
 * <p>
 * A best effort is made to delete each of the files, rather than bailing on the first failure.
 * <p>
 * This method is preferable to {@link #deleteFilesWithoutArchiving(Collection)} since it consumes
 * less resources, but is limited in terms of usefulness
 * @param compactedFiles store files to delete from the file system.
 * @throws IOException if a file cannot be deleted. All files will be attempted to be deleted
 *           before throwing the exception, rather than failing at the first file.
 */
private static void deleteStoreFilesWithoutArchiving(Collection<StoreFile> compactedFiles)
    throws IOException {
  LOG.debug("Deleting store files without archiving.");
  List<IOException> errors = new ArrayList<IOException>(0);
  for (StoreFile hsf : compactedFiles) {
    try {
      hsf.deleteReader();
    } catch (IOException e) {
      // Pass the exception to the logger so the cause/stack trace is visible
      // at the failure site, not only in the aggregated exception below.
      LOG.error("Failed to delete store file:" + hsf.getPath(), e);
      errors.add(e);
    }
  }
  if (!errors.isEmpty()) {
    throw MultipleIOException.createIOException(errors);
  }
}
DFSClientCache.java 文件源码 项目:aliyun-oss-hadoop-fs 阅读 19 收藏 0 点赞 0 评论 0
/**
 * Closes every DFSClient instance held by the cache. Failures from individual
 * closes are collected and rethrown together as one aggregated IOException.
 * @param onlyAutomatic only close those that are marked for automatic closing
 * @throws IOException aggregation of all close failures
 */
synchronized void closeAll(boolean onlyAutomatic) throws IOException {
  // NOTE(review): onlyAutomatic is never read; all cached clients are closed
  // unconditionally -- confirm whether selective closing was intended.
  List<IOException> failures = new ArrayList<IOException>();

  for (DFSClient client : clientCache.asMap().values()) {
    if (client == null) {
      continue;
    }
    try {
      client.close();
    } catch (IOException ioe) {
      failures.add(ioe);
    }
  }

  if (!failures.isEmpty()) {
    throw MultipleIOException.createIOException(failures);
  }
}
DFSClientCache.java 文件源码 项目:big-c 阅读 23 收藏 0 点赞 0 评论 0
/**
 * Closes all cached DFSClient instances. Each close failure is recorded and
 * the full set is rethrown as a single aggregated IOException at the end.
 * @param onlyAutomatic only close those that are marked for automatic closing
 * @throws IOException aggregation of all close failures
 */
synchronized void closeAll(boolean onlyAutomatic) throws IOException {
  // NOTE(review): onlyAutomatic is unused here; every client is closed
  // regardless -- confirm this matches the documented contract.
  List<IOException> failures = new ArrayList<IOException>();

  for (DFSClient client : clientCache.asMap().values()) {
    if (client == null) {
      continue;
    }
    try {
      client.close();
    } catch (IOException ioe) {
      failures.add(ioe);
    }
  }

  if (!failures.isEmpty()) {
    throw MultipleIOException.createIOException(failures);
  }
}
HFileArchiver.java 文件源码 项目:LCIndex-HBase-0.94.16 阅读 17 收藏 0 点赞 0 评论 0
/**
 * Simple delete of regular files from the {@link FileSystem}.
 * <p>
 * This method is a more generic implementation than the other deleteXXX methods in this class,
 * allowing more code reuse at the cost of a couple more, short-lived objects (which should have
 * minimum impact on the jvm).
 * @param files {@link Collection} of files to be deleted
 * @throws IOException if a file cannot be deleted. All files will be attempted to be deleted
 *           before throwing the exception, rather than failing at the first file.
 */
private static void deleteFilesWithoutArchiving(Collection<File> files) throws IOException {
  List<IOException> errors = new ArrayList<IOException>(0);
  for (File file : files) {
    try {
      LOG.debug("Deleting region file:" + file);
      file.delete();
    } catch (IOException e) {
      // Include the exception so the cause/stack trace is visible at the
      // failure site, not only in the aggregated exception thrown below.
      LOG.error("Failed to delete file:" + file, e);
      errors.add(e);
    }
  }
  if (!errors.isEmpty()) {
    throw MultipleIOException.createIOException(errors);
  }
}
HFileArchiver.java 文件源码 项目:LCIndex-HBase-0.94.16 阅读 21 收藏 0 点赞 0 评论 0
/**
 * Just do a simple delete of the given store files
 * <p>
 * A best effort is made to delete each of the files, rather than bailing on the first failure.
 * <p>
 * This method is preferable to {@link #deleteFilesWithoutArchiving(Collection)} since it consumes
 * less resources, but is limited in terms of usefulness
 * @param compactedFiles store files to delete from the file system.
 * @throws IOException if a file cannot be deleted. All files will be attempted to be deleted
 *           before throwing the exception, rather than failing at the first file.
 */
private static void deleteStoreFilesWithoutArchiving(Collection<StoreFile> compactedFiles)
    throws IOException {
  LOG.debug("Deleting store files without archiving.");
  List<IOException> errors = new ArrayList<IOException>(0);
  for (StoreFile hsf : compactedFiles) {
    try {
      hsf.deleteReader();
    } catch (IOException e) {
      // Pass the exception to the logger so the cause/stack trace is visible
      // at the failure site, not only in the aggregated exception below.
      LOG.error("Failed to delete store file:" + hsf.getPath(), e);
      errors.add(e);
    }
  }
  if (!errors.isEmpty()) {
    throw MultipleIOException.createIOException(errors);
  }
}
DFSClientCache.java 文件源码 项目:hadoop-2.6.0-cdh5.4.3 阅读 23 收藏 0 点赞 0 评论 0
/**
 * Closes every DFSClient held by the cache; individual close failures are
 * gathered and rethrown together as a single aggregated IOException.
 * @param onlyAutomatic only close those that are marked for automatic closing
 * @throws IOException aggregation of all close failures
 */
synchronized void closeAll(boolean onlyAutomatic) throws IOException {
  // NOTE(review): onlyAutomatic is not consulted; all clients are closed
  // unconditionally -- confirm selective closing was not intended.
  List<IOException> failures = new ArrayList<IOException>();

  for (DFSClient client : clientCache.asMap().values()) {
    if (client == null) {
      continue;
    }
    try {
      client.close();
    } catch (IOException ioe) {
      failures.add(ioe);
    }
  }

  if (!failures.isEmpty()) {
    throw MultipleIOException.createIOException(failures);
  }
}
WALSplitter.java 文件源码 项目:pbase 阅读 25 收藏 0 点赞 0 评论 0
/**
 * Finishes writing, closes all writers, and publishes the split paths.
 * The {@code finally} block runs {@link #close()} and
 * {@code closeLogWriters(null)} on every exit path, so writers are released
 * even when {@code finishWriting()} fails or throws.
 * @return null if failed to report progress
 * @throws IOException
 */
@Override
List<Path> finishWritingAndClose() throws IOException {
  boolean isSuccessful = false;
  List<Path> result = null;
  try {
    isSuccessful = finishWriting();
  } finally {
    result = close();
    List<IOException> thrown = closeLogWriters(null);
    if (thrown != null && !thrown.isEmpty()) {
      // NOTE(review): throwing from this finally block masks any exception
      // already propagating from finishWriting() -- confirm intended.
      throw MultipleIOException.createIOException(thrown);
    }
  }
  // Publish the closed-writer paths only when writing fully succeeded;
  // otherwise 'splits' keeps its previous value (presumably null).
  if (isSuccessful) {
    splits = result;
  }
  return splits;
}
WALSplitter.java 文件源码 项目:pbase 阅读 24 收藏 0 点赞 0 评论 0
@Override
List<Path> finishWritingAndClose() throws IOException {
  try {
    // A false return aborts early without publishing any splits.
    if (!finishWriting()) {
      return null;
    }
    if (hasEditsInDisablingOrDisabledTables) {
      splits = logRecoveredEditsOutputSink.finishWritingAndClose();
    } else {
      // No edits for disabling/disabled tables were seen: nothing to
      // recover, so hand back an empty list rather than null.
      splits = new ArrayList<Path>();
    }
    // returns an empty array in order to keep interface same as old way
    return splits;
  } finally {
    // Region-server writers are closed on every exit path; close failures
    // are aggregated into a single exception.
    // NOTE(review): throwing here masks an exception already propagating
    // from the try block -- confirm this precedence is intended.
    List<IOException> thrown = closeRegionServerWriters();
    if (thrown != null && !thrown.isEmpty()) {
      throw MultipleIOException.createIOException(thrown);
    }
  }
}
HFileArchiver.java 文件源码 项目:pbase 阅读 16 收藏 0 点赞 0 评论 0
/**
 * Just do a simple delete of the given store files
 * <p>
 * A best effort is made to delete each of the files, rather than bailing on the first failure.
 * <p>
 * This method is preferable to {@link #deleteFilesWithoutArchiving(Collection)} since it consumes
 * less resources, but is limited in terms of usefulness
 * @param compactedFiles store files to delete from the file system.
 * @throws IOException if a file cannot be deleted. All files will be attempted to be deleted
 *           before throwing the exception, rather than failing at the first file.
 */
private static void deleteStoreFilesWithoutArchiving(Collection<StoreFile> compactedFiles)
    throws IOException {
  LOG.debug("Deleting store files without archiving.");
  List<IOException> errors = new ArrayList<IOException>(0);
  for (StoreFile hsf : compactedFiles) {
    try {
      hsf.deleteReader();
    } catch (IOException e) {
      // Pass the exception to the logger so the cause/stack trace is visible
      // at the failure site, not only in the aggregated exception below.
      LOG.error("Failed to delete store file:" + hsf.getPath(), e);
      errors.add(e);
    }
  }
  if (!errors.isEmpty()) {
    throw MultipleIOException.createIOException(errors);
  }
}
HFileArchiver.java 文件源码 项目:HIndex 阅读 17 收藏 0 点赞 0 评论 0
/**
 * Just do a simple delete of the given store files
 * <p>
 * A best effort is made to delete each of the files, rather than bailing on the first failure.
 * <p>
 * This method is preferable to {@link #deleteFilesWithoutArchiving(Collection)} since it consumes
 * less resources, but is limited in terms of usefulness
 * @param compactedFiles store files to delete from the file system.
 * @throws IOException if a file cannot be deleted. All files will be attempted to be deleted
 *           before throwing the exception, rather than failing at the first file.
 */
private static void deleteStoreFilesWithoutArchiving(Collection<StoreFile> compactedFiles)
    throws IOException {
  LOG.debug("Deleting store files without archiving.");
  List<IOException> errors = new ArrayList<IOException>(0);
  for (StoreFile hsf : compactedFiles) {
    try {
      hsf.deleteReader();
    } catch (IOException e) {
      // Pass the exception to the logger so the cause/stack trace is visible
      // at the failure site, not only in the aggregated exception below.
      LOG.error("Failed to delete store file:" + hsf.getPath(), e);
      errors.add(e);
    }
  }
  if (!errors.isEmpty()) {
    throw MultipleIOException.createIOException(errors);
  }
}
HLogSplitter.java 文件源码 项目:HIndex 阅读 22 收藏 0 点赞 0 评论 0
/**
 * Finishes writing, closes all log writers, and publishes the split paths.
 * The {@code finally} block runs {@link #close()} and
 * {@code closeLogWriters(null)} on every exit path, so writers are released
 * even when {@code finishWriting()} fails or throws.
 * @return null if failed to report progress
 * @throws IOException
 */
@Override
List<Path> finishWritingAndClose() throws IOException {
  boolean isSuccessful = false;
  List<Path> result = null;
  try {
    isSuccessful = finishWriting();
  } finally {
    result = close();
    List<IOException> thrown = closeLogWriters(null);
    if (thrown != null && !thrown.isEmpty()) {
      // NOTE(review): throwing from this finally block masks any exception
      // already propagating from finishWriting() -- confirm intended.
      throw MultipleIOException.createIOException(thrown);
    }
  }
  // Publish the closed-writer paths only when writing fully succeeded;
  // otherwise 'splits' keeps its previous value (presumably null).
  if (isSuccessful) {
    splits = result;
  }
  return splits;
}
HLogSplitter.java 文件源码 项目:HIndex 阅读 17 收藏 0 点赞 0 评论 0
@Override
List<Path> finishWritingAndClose() throws IOException {
  try {
    // A false return aborts early without publishing any splits.
    if (!finishWriting()) {
      return null;
    }
    if (hasEditsInDisablingOrDisabledTables) {
      splits = logRecoveredEditsOutputSink.finishWritingAndClose();
    } else {
      // No edits for disabling/disabled tables were seen: nothing to
      // recover, so hand back an empty list rather than null.
      splits = new ArrayList<Path>();
    }
    // returns an empty array in order to keep interface same as old way
    return splits;
  } finally {
    // Region-server writers are closed on every exit path; close failures
    // are aggregated into a single exception.
    // NOTE(review): throwing here masks an exception already propagating
    // from the try block -- confirm this precedence is intended.
    List<IOException> thrown = closeRegionServerWriters();
    if (thrown != null && !thrown.isEmpty()) {
      throw MultipleIOException.createIOException(thrown);
    }
  }
}
DFSClientCache.java 文件源码 项目:hops 阅读 21 收藏 0 点赞 0 评论 0
/**
 * Closes every DFSClient instance in the cache, collecting individual close
 * failures and rethrowing them as one aggregated IOException.
 *
 * @param onlyAutomatic
 *     only close those that are marked for automatic closing
 * @throws IOException aggregation of all close failures
 */
synchronized void closeAll(boolean onlyAutomatic) throws IOException {
  // NOTE(review): onlyAutomatic is accepted but never consulted; every
  // cached client is closed unconditionally -- confirm intended.
  List<IOException> failures = new ArrayList<IOException>();

  for (DFSClient client : clientCache.asMap().values()) {
    if (client == null) {
      continue;
    }
    try {
      client.close();
    } catch (IOException ioe) {
      failures.add(ioe);
    }
  }

  if (!failures.isEmpty()) {
    throw MultipleIOException.createIOException(failures);
  }
}
HFileArchiver.java 文件源码 项目:IRIndex 阅读 17 收藏 0 点赞 0 评论 0
/**
 * Simple delete of regular files from the {@link FileSystem}.
 * <p>
 * This method is a more generic implementation than the other deleteXXX
 * methods in this class, allowing more code reuse at the cost of a couple
 * more, short-lived objects (which should have minimum impact on the jvm).
 * @param files {@link Collection} of files to be deleted
 * @throws IOException if a file cannot be deleted. All files will be
 *           attempted to be deleted before throwing the exception, rather
 *           than failing at the first file.
 */
private static void deleteFilesWithoutArchiving(Collection<File> files) throws IOException {
  List<IOException> errors = new ArrayList<IOException>(0);
  for (File file : files) {
    try {
      LOG.debug("Deleting region file:" + file);
      file.delete();
    } catch (IOException e) {
      // Include the exception so the cause/stack trace is visible at the
      // failure site, not only in the aggregated exception thrown below.
      LOG.error("Failed to delete file:" + file, e);
      errors.add(e);
    }
  }
  if (!errors.isEmpty()) {
    throw MultipleIOException.createIOException(errors);
  }
}
HFileArchiver.java 文件源码 项目:IRIndex 阅读 16 收藏 0 点赞 0 评论 0
/**
 * Just do a simple delete of the given store files
 * <p>
 * A best effort is made to delete each of the files, rather than bailing on the first failure.
 * <p>
 * This method is preferable to {@link #deleteFilesWithoutArchiving(Collection)} since it consumes
 * less resources, but is limited in terms of usefulness
 * @param compactedFiles store files to delete from the file system.
 * @throws IOException if a file cannot be deleted. All files will be attempted to be deleted
 *           before throwing the exception, rather than failing at the first file.
 */
private static void deleteStoreFilesWithoutArchiving(Collection<StoreFile> compactedFiles)
    throws IOException {
  LOG.debug("Deleting store files without archiving.");
  List<IOException> errors = new ArrayList<IOException>(0);
  for (StoreFile hsf : compactedFiles) {
    try {
      hsf.deleteReader();
    } catch (IOException e) {
      // Pass the exception to the logger so the cause/stack trace is visible
      // at the failure site, not only in the aggregated exception below.
      LOG.error("Failed to delete store file:" + hsf.getPath(), e);
      errors.add(e);
    }
  }
  if (!errors.isEmpty()) {
    throw MultipleIOException.createIOException(errors);
  }
}
WALSplitter.java 文件源码 项目:hbase 阅读 22 收藏 0 点赞 0 评论 0
/**
 * Finishes writing, closes all log writers, and publishes the split paths.
 * The {@code finally} block runs {@link #close()} and
 * {@code closeLogWriters(null)} on every exit path, so writers are released
 * even when {@code finishWriting(false)} fails or throws.
 * @return null if failed to report progress
 * @throws IOException
 */
@Override
public List<Path> finishWritingAndClose() throws IOException {
  boolean isSuccessful = false;
  List<Path> result = null;
  try {
    isSuccessful = finishWriting(false);
  } finally {
    result = close();
    List<IOException> thrown = closeLogWriters(null);
    if (CollectionUtils.isNotEmpty(thrown)) {
      // NOTE(review): throwing from this finally block masks any exception
      // already propagating from finishWriting(false) -- confirm intended.
      throw MultipleIOException.createIOException(thrown);
    }
  }
  // Publish the closed-writer paths only when writing fully succeeded;
  // otherwise 'splits' keeps its previous value (presumably null).
  if (isSuccessful) {
    splits = result;
  }
  return splits;
}
WALSplitter.java 文件源码 项目:hbase 阅读 19 收藏 0 点赞 0 评论 0
/**
 * Appends the buffered edits for one region, accumulates the per-region
 * recovered-edit count, then closes the writer.
 * @param buffer edits for a single region
 * @return path of the closed writer, or null when nothing was appended
 * @throws IOException aggregation of any failures reported by closeWriter
 */
private Path writeThenClose(RegionEntryBuffer buffer) throws IOException {
  WriterAndPath wap = appendBuffer(buffer, false);
  if (wap != null) {
    String encodedRegionName = Bytes.toString(buffer.encodedRegionName);
    // Atomically accumulate the edit count. The previous
    // putIfAbsent/get/put sequence was a non-atomic read-modify-write and
    // could lose updates under concurrent callers.
    regionRecoverStatMap.merge(encodedRegionName, wap.editsWritten, Long::sum);
  }

  Path dst = null;
  List<IOException> thrown = new ArrayList<>();
  if (wap != null) {
    dst = closeWriter(Bytes.toString(buffer.encodedRegionName), wap, thrown);
  }
  if (!thrown.isEmpty()) {
    throw MultipleIOException.createIOException(thrown);
  }
  return dst;
}
HFileArchiver.java 文件源码 项目:hbase 阅读 17 收藏 0 点赞 0 评论 0
/**
 * Just do a simple delete of the given store files
 * <p>
 * A best effort is made to delete each of the files, rather than bailing on the first failure.
 * <p>
 * @param compactedFiles store files to delete from the file system.
 * @throws IOException if a file cannot be deleted. All files will be attempted to be deleted
 *           before throwing the exception, rather than failing at the first file.
 */
private static void deleteStoreFilesWithoutArchiving(Collection<HStoreFile> compactedFiles)
    throws IOException {
  LOG.debug("Deleting store files without archiving.");
  List<IOException> errors = new ArrayList<>(0);
  for (HStoreFile hsf : compactedFiles) {
    try {
      hsf.deleteStoreFile();
    } catch (IOException e) {
      // Pass the exception to the logger so the cause/stack trace is visible
      // at the failure site, not only in the aggregated exception below.
      LOG.error("Failed to delete store file:" + hsf.getPath(), e);
      errors.add(e);
    }
  }
  if (!errors.isEmpty()) {
    throw MultipleIOException.createIOException(errors);
  }
}
HFileArchiver.java 文件源码 项目:PyroDB 阅读 22 收藏 0 点赞 0 评论 0
/**
 * Just do a simple delete of the given store files
 * <p>
 * A best effort is made to delete each of the files, rather than bailing on the first failure.
 * <p>
 * This method is preferable to {@link #deleteFilesWithoutArchiving(Collection)} since it consumes
 * less resources, but is limited in terms of usefulness
 * @param compactedFiles store files to delete from the file system.
 * @throws IOException if a file cannot be deleted. All files will be attempted to be deleted
 *           before throwing the exception, rather than failing at the first file.
 */
private static void deleteStoreFilesWithoutArchiving(Collection<StoreFile> compactedFiles)
    throws IOException {
  LOG.debug("Deleting store files without archiving.");
  List<IOException> errors = new ArrayList<IOException>(0);
  for (StoreFile hsf : compactedFiles) {
    try {
      hsf.deleteReader();
    } catch (IOException e) {
      // Pass the exception to the logger so the cause/stack trace is visible
      // at the failure site, not only in the aggregated exception below.
      LOG.error("Failed to delete store file:" + hsf.getPath(), e);
      errors.add(e);
    }
  }
  if (!errors.isEmpty()) {
    throw MultipleIOException.createIOException(errors);
  }
}
HLogSplitter.java 文件源码 项目:PyroDB 阅读 18 收藏 0 点赞 0 评论 0
/**
 * Finishes writing, closes all log writers, and publishes the split paths.
 * The {@code finally} block runs {@link #close()} and
 * {@code closeLogWriters(null)} on every exit path, so writers are released
 * even when {@code finishWriting()} fails or throws.
 * @return null if failed to report progress
 * @throws IOException
 */
@Override
List<Path> finishWritingAndClose() throws IOException {
  boolean isSuccessful = false;
  List<Path> result = null;
  try {
    isSuccessful = finishWriting();
  } finally {
    result = close();
    List<IOException> thrown = closeLogWriters(null);
    if (thrown != null && !thrown.isEmpty()) {
      // NOTE(review): throwing from this finally block masks any exception
      // already propagating from finishWriting() -- confirm intended.
      throw MultipleIOException.createIOException(thrown);
    }
  }
  // Publish the closed-writer paths only when writing fully succeeded;
  // otherwise 'splits' keeps its previous value (presumably null).
  if (isSuccessful) {
    splits = result;
  }
  return splits;
}
HLogSplitter.java 文件源码 项目:PyroDB 阅读 17 收藏 0 点赞 0 评论 0
@Override
List<Path> finishWritingAndClose() throws IOException {
  try {
    // A false return aborts early without publishing any splits.
    if (!finishWriting()) {
      return null;
    }
    if (hasEditsInDisablingOrDisabledTables) {
      splits = logRecoveredEditsOutputSink.finishWritingAndClose();
    } else {
      // No edits for disabling/disabled tables were seen: nothing to
      // recover, so hand back an empty list rather than null.
      splits = new ArrayList<Path>();
    }
    // returns an empty array in order to keep interface same as old way
    return splits;
  } finally {
    // Region-server writers are closed on every exit path; close failures
    // are aggregated into a single exception.
    // NOTE(review): throwing here masks an exception already propagating
    // from the try block -- confirm this precedence is intended.
    List<IOException> thrown = closeRegionServerWriters();
    if (thrown != null && !thrown.isEmpty()) {
      throw MultipleIOException.createIOException(thrown);
    }
  }
}
HFileArchiver.java 文件源码 项目:c5 阅读 21 收藏 0 点赞 0 评论 0
/**
 * Just do a simple delete of the given store files
 * <p>
 * A best effort is made to delete each of the files, rather than bailing on the first failure.
 * <p>
 * This method is preferable to {@link #deleteFilesWithoutArchiving(Collection)} since it consumes
 * less resources, but is limited in terms of usefulness
 * @param compactedFiles store files to delete from the file system.
 * @throws IOException if a file cannot be deleted. All files will be attempted to be deleted
 *           before throwing the exception, rather than failing at the first file.
 */
private static void deleteStoreFilesWithoutArchiving(Collection<StoreFile> compactedFiles)
    throws IOException {
  LOG.debug("Deleting store files without archiving.");
  List<IOException> errors = new ArrayList<IOException>(0);
  for (StoreFile hsf : compactedFiles) {
    try {
      hsf.deleteReader();
    } catch (IOException e) {
      // Pass the exception to the logger so the cause/stack trace is visible
      // at the failure site, not only in the aggregated exception below.
      LOG.error("Failed to delete store file:" + hsf.getPath(), e);
      errors.add(e);
    }
  }
  if (!errors.isEmpty()) {
    throw MultipleIOException.createIOException(errors);
  }
}
HLogSplitter.java 文件源码 项目:c5 阅读 16 收藏 0 点赞 0 评论 0
/**
 * Finishes writing, closes all log writers, and publishes the split paths.
 * The {@code finally} block runs {@link #close()} and
 * {@code closeLogWriters(null)} on every exit path, so writers are released
 * even when {@code finishWriting()} fails or throws.
 * @return null if failed to report progress
 * @throws IOException
 */
@Override
List<Path> finishWritingAndClose() throws IOException {
  boolean isSuccessful = false;
  List<Path> result = null;
  try {
    isSuccessful = finishWriting();
  } finally {
    result = close();
    List<IOException> thrown = closeLogWriters(null);
    if (thrown != null && !thrown.isEmpty()) {
      // NOTE(review): throwing from this finally block masks any exception
      // already propagating from finishWriting() -- confirm intended.
      throw MultipleIOException.createIOException(thrown);
    }
  }
  // Publish the closed-writer paths only when writing fully succeeded;
  // otherwise 'splits' keeps its previous value (presumably null).
  if (isSuccessful) {
    splits = result;
  }
  return splits;
}
HLogSplitter.java 文件源码 项目:c5 阅读 18 收藏 0 点赞 0 评论 0
@Override
List<Path> finishWritingAndClose() throws IOException {
  try {
    // A false return aborts early without publishing any splits.
    if (!finishWriting()) {
      return null;
    }
    if (hasEditsInDisablingOrDisabledTables) {
      splits = logRecoveredEditsOutputSink.finishWritingAndClose();
    } else {
      // No edits for disabling/disabled tables were seen: nothing to
      // recover, so hand back an empty list rather than null.
      splits = new ArrayList<Path>();
    }
    // returns an empty array in order to keep interface same as old way
    return splits;
  } finally {
    // Region-server writers are closed on every exit path; close failures
    // are aggregated into a single exception.
    // NOTE(review): throwing here masks an exception already propagating
    // from the try block -- confirm this precedence is intended.
    List<IOException> thrown = closeRegionServerWriters();
    if (thrown != null && !thrown.isEmpty()) {
      throw MultipleIOException.createIOException(thrown);
    }
  }
}
FileSystem.java 文件源码 项目:hadoop-on-lustre 阅读 17 收藏 0 点赞 0 评论 0
/**
 * Drains the cache: removes and closes every cached FileSystem, collecting
 * close failures and rethrowing them as one aggregated IOException.
 * @throws IOException aggregation of all close failures
 */
synchronized void closeAll() throws IOException {
  List<IOException> failures = new ArrayList<IOException>();
  while (!map.isEmpty()) {
    // remove() shrinks the map each pass, so re-fetch an arbitrary entry
    // every iteration instead of holding a single iterator.
    Map.Entry<Key, FileSystem> entry = map.entrySet().iterator().next();
    final Key key = entry.getKey();
    final FileSystem fs = entry.getValue();

    // Evict before closing, so the entry is gone even if close() throws.
    remove(key, fs);

    if (fs == null) {
      continue;
    }
    try {
      fs.close();
    } catch (IOException ioe) {
      failures.add(ioe);
    }
  }

  if (!failures.isEmpty()) {
    throw MultipleIOException.createIOException(failures);
  }
}
DFSClientCache.java 文件源码 项目:hadoop-on-lustre2 阅读 19 收藏 0 点赞 0 评论 0
/**
 * Closes every DFSClient instance held by the cache; failures from the
 * individual closes are gathered and rethrown as one aggregated IOException.
 * @param onlyAutomatic only close those that are marked for automatic closing
 * @throws IOException aggregation of all close failures
 */
synchronized void closeAll(boolean onlyAutomatic) throws IOException {
  // NOTE(review): onlyAutomatic is accepted but never consulted; every
  // cached client is closed unconditionally -- confirm intended.
  List<IOException> failures = new ArrayList<IOException>();

  for (DFSClient client : clientCache.asMap().values()) {
    if (client == null) {
      continue;
    }
    try {
      client.close();
    } catch (IOException ioe) {
      failures.add(ioe);
    }
  }

  if (!failures.isEmpty()) {
    throw MultipleIOException.createIOException(failures);
  }
}
FileSystem.java 文件源码 项目:hadoop-0.20 阅读 35 收藏 0 点赞 0 评论 0
/**
 * Drains the cache: removes and closes every cached FileSystem, collecting
 * close failures and rethrowing them as one aggregated IOException.
 * @throws IOException aggregation of all close failures
 */
synchronized void closeAll() throws IOException {
  List<IOException> failures = new ArrayList<IOException>();
  while (!map.isEmpty()) {
    // remove() shrinks the map each pass, so re-fetch an arbitrary entry
    // every iteration instead of holding a single iterator.
    Map.Entry<Key, FileSystem> entry = map.entrySet().iterator().next();
    final Key key = entry.getKey();
    final FileSystem fs = entry.getValue();

    // Evict before closing, so the entry is gone even if close() throws.
    remove(key, fs);

    if (fs == null) {
      continue;
    }
    try {
      fs.close();
    } catch (IOException ioe) {
      failures.add(ioe);
    }
  }

  if (!failures.isEmpty()) {
    throw MultipleIOException.createIOException(failures);
  }
}


问题


面经


文章

微信
公众号

扫码关注公众号