/**
 * Inserts a publication annex document into GridFS.
 */
public void insertAnnexDocument(BinaryFile bf, String dateString) throws ParseException {
    try {
        GridFS gfs = new GridFS(db, MongoCollectionsInterface.PUB_ANNEXES);
        BasicDBObject whereQuery = new BasicDBObject();
        whereQuery.put("repositoryDocId", bf.getRepositoryDocId());
        whereQuery.put("filename", bf.getFileName());
        gfs.remove(whereQuery);
        // version?
        GridFSInputFile gfsFile = gfs.createFile(bf.getStream(), true);
        gfsFile.put("uploadDate", Utilities.parseStringDate(dateString));
        gfsFile.setFilename(bf.getFileName());
        gfsFile.put("source", bf.getSource());
        gfsFile.put("version", bf.getRepositoryDocVersion());
        gfsFile.put("repositoryDocId", bf.getRepositoryDocId());
        gfsFile.put("anhalyticsId", bf.getAnhalyticsId());
        gfsFile.save();
    } catch (ParseException e) {
        logger.error(e.getMessage(), e.getCause());
    }
}
Example source code for the Java class com.mongodb.gridfs.GridFSInputFile
The snippet above is from MongoFileManager.java (project: anhalytics-core).
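All of the snippets on this page follow essentially the same write pattern of the legacy com.mongodb.gridfs driver API: open a GridFS bucket, optionally remove any existing file with the same name or query, create a GridFSInputFile from a stream, byte array, or File, attach a filename, content type, and extra metadata fields, and finally call save(). Below is a minimal, self-contained sketch of that pattern; the host, database name, bucket name, filename, and metadata field are illustrative assumptions rather than values taken from any project listed here.

import com.mongodb.BasicDBObject;
import com.mongodb.DB;
import com.mongodb.MongoClient;
import com.mongodb.gridfs.GridFS;
import com.mongodb.gridfs.GridFSInputFile;

import java.io.ByteArrayInputStream;
import java.io.InputStream;

public class GridFsWriteSketch {

    public static void main(String[] args) throws Exception {
        MongoClient mongo = new MongoClient("localhost", 27017);   // hypothetical server
        try {
            DB db = mongo.getDB("exampleDb");                      // hypothetical database
            GridFS gfs = new GridFS(db, "exampleBucket");          // hypothetical bucket name
            // Remove any previous file stored under the same filename.
            gfs.remove(new BasicDBObject("filename", "doc-1.xml"));
            InputStream content = new ByteArrayInputStream("<doc/>".getBytes("UTF-8"));
            // 'true' asks the driver to close the stream once the file is persisted.
            GridFSInputFile file = gfs.createFile(content, true);
            file.setFilename("doc-1.xml");
            file.setContentType("application/xml");
            file.put("source", "example");                         // arbitrary extra metadata field
            file.save();
        } finally {
            mongo.close();
        }
    }
}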
DataManager.java (project: KernelHive)
public DataAddress uploadData(String data, DataAddress dataAddress) throws UnknownHostException {
    ServerAddress server = new ServerAddress(dataAddress.hostname, dataAddress.port);
    GridFS database = connectToDatabase(server);
    logger.info("Database connected");
    GridFSInputFile file = database.createFile(data.getBytes());
    int newID = getNextId(database);
    logger.info("Got new id for uploaded file: " + newID);
    file.setFilename(String.valueOf(newID));
    file.put("_id", newID);
    file.save();
    logger.info("after save");
    return new DataAddress(dataAddress.hostname, dataAddress.port, newID);
}
GridFSBlobHandler.java (project: Rapture)
@Override
public Boolean storeBlob(CallingContext context, String docPath, InputStream newContent, Boolean append) {
    GridFS gridFS = getGridFS();
    GridFSInputFile file;
    if (!append) {
        gridFS.remove(docPath);
        file = createNewFile(docPath, newContent);
    } else {
        GridFSDBFile existing = gridFS.findOne(docPath);
        if (existing != null) {
            try {
                file = updateExisting(context, docPath, newContent, gridFS, existing);
            } catch (IOException e) {
                file = null;
                log.error(String.format("Error while appending to docPath %s: %s", docPath, ExceptionToString.format(e)));
            }
        } else {
            file = createNewFile(docPath, newContent);
        }
    }
    return file != null;
}
MongoDbXmiWriter.java (project: biomedicus)
@Override
public void process(CAS aCAS) throws AnalysisEngineProcessException {
    Type documentIdType = aCAS.getTypeSystem()
            .getType("edu.umn.biomedicus.uima.type1_5.DocumentId");
    Feature docIdFeat = documentIdType.getFeatureByBaseName("documentId");
    String documentId = aCAS.getIndexRepository()
            .getAllIndexedFS(documentIdType)
            .get()
            .getStringValue(docIdFeat);
    if (documentId == null) {
        documentId = UUID.randomUUID().toString();
    }
    GridFSInputFile file = gridFS.createFile(documentId + ".xmi");
    try (OutputStream outputStream = file.getOutputStream()) {
        XmiCasSerializer.serialize(aCAS, outputStream);
    } catch (IOException | SAXException e) {
        throw new AnalysisEngineProcessException(e);
    }
}
MongoDbStore.java (project: Wiab.pro)
@Override
public void storeAttachment(AttachmentId attachmentId, InputStream data)
        throws IOException {
    GridFSInputFile file = getAttachmentGrid().createFile(data, attachmentId.serialise());
    try {
        file.save();
    } catch (MongoException e) {
        // Unfortunately, file.save() wraps any IOException thrown in a
        // 'MongoException'. Since the interface explicitly throws IOExceptions,
        // we unwrap any IOExceptions thrown.
        Throwable innerException = e.getCause();
        if (innerException instanceof IOException) {
            throw (IOException) innerException;
        } else {
            throw e;
        }
    }
}
MongoService.java (project: BLELocalization)
public String saveFile(InputStream is, String contentType) throws IOException {
    GridFSInputFile file = getNewFile();
    String id = file.getId().toString();
    OutputStream os = getFileOutputStream(id, contentType);
    if (os != null) {
        try {
            byte[] data = new byte[4096];
            int len = 0;
            while ((len = is.read(data, 0, data.length)) > 0) {
                os.write(data, 0, len);
            }
            return id;
        } finally {
            os.close();
        }
    }
    return null;
}
MongoFileManager.java (project: anhalytics-core)
/**
 * Inserts a GROBID TEI document using GridFS.
 */
public void insertGrobidTei(String teiString, String repositoryDocId, String anhalyticsId, String version, String source, String type, String date) {
    try {
        GridFS gfs = new GridFS(db, MongoCollectionsInterface.GROBID_TEIS);
        gfs.remove(repositoryDocId + ".tei.xml");
        GridFSInputFile gfsFile = gfs.createFile(new ByteArrayInputStream(teiString.getBytes()), true);
        gfsFile.put("uploadDate", Utilities.parseStringDate(date));
        gfsFile.setFilename(repositoryDocId + ".tei.xml");
        gfsFile.put("repositoryDocId", repositoryDocId);
        gfsFile.put("anhalyticsId", anhalyticsId);
        gfsFile.put("source", source);
        gfsFile.put("version", version);
        gfsFile.put("documentType", type);
        gfsFile.save();
    } catch (ParseException e) {
        logger.error(e.getMessage(), e.getCause());
    }
}
MongoFileManager.java (project: anhalytics-core)
/**
 * Inserts a TEI metadata document into GridFS.
 */
public void insertMetadataTei(String tei, String doi, String pdfUrl, String source, String repositoryDocId, String version, String type, String date) {
    try {
        GridFS gfs = new GridFS(db, MongoCollectionsInterface.METADATAS_TEIS);
        gfs.remove(repositoryDocId + ".tei.xml");
        GridFSInputFile gfsFile = gfs.createFile(new ByteArrayInputStream(tei.getBytes()), true);
        gfsFile.put("uploadDate", Utilities.parseStringDate(date));
        gfsFile.setFilename(repositoryDocId + ".tei.xml");
        gfsFile.put("repositoryDocId", repositoryDocId);
        gfsFile.put("anhalyticsId", generateAnhalyticsId(repositoryDocId, doi, pdfUrl));
        gfsFile.put("source", source);
        gfsFile.put("version", version);
        gfsFile.put("documentType", type);
        gfsFile.save();
    } catch (ParseException e) {
        logger.error(e.getMessage(), e.getCause());
    }
}
MongoFileManager.java (project: anhalytics-core)
/**
 * Inserts a PDF binary document into GridFS.
 */
public void insertBinaryDocument(BinaryFile bf, String date) {
    try {
        GridFS gfs = new GridFS(db, MongoCollectionsInterface.BINARIES);
        gfs.remove(bf.getFileName());
        GridFSInputFile gfsFile = gfs.createFile(bf.getStream(), true);
        gfsFile.put("uploadDate", Utilities.parseStringDate(date));
        gfsFile.setFilename(bf.getFileName());
        gfsFile.put("repositoryDocId", bf.getRepositoryDocId());
        gfsFile.put("anhalyticsId", bf.getAnhalyticsId());
        gfsFile.put("source", bf.getSource());
        gfsFile.put("version", bf.getRepositoryDocVersion());
        gfsFile.put("documentType", bf.getDocumentType());
        gfsFile.setContentType(bf.getFileType());
        gfsFile.save();
    } catch (ParseException e) {
        logger.error(e.getMessage(), e.getCause());
    }
}
MongoFileManager.java (project: anhalytics-core)
/**
 * Replaces an already existing TEI with a new, more enriched one (e.g. with fulltext),
 * copying the metadata of the old file and then removing it.
 */
public void updateTei(String newTei, String repositoryDocId, String collection) {
    try {
        GridFS gfs = new GridFS(db, collection);
        GridFSDBFile gdf = gfs.findOne(repositoryDocId + ".tei.xml");
        GridFSInputFile gfsNew = gfs.createFile(new ByteArrayInputStream(newTei.getBytes()), true);
        gfsNew.put("uploadDate", gdf.getUploadDate());
        gfsNew.setFilename(gdf.get("repositoryDocId") + ".tei.xml");
        gfsNew.put("repositoryDocId", gdf.get("repositoryDocId"));
        gfsNew.put("documentType", gdf.get("documentType"));
        gfsNew.put("anhalyticsId", gdf.get("anhalyticsId"));
        gfsNew.put("source", gdf.get("source"));
        gfsNew.save();
        gfs.remove(gdf);
    } catch (Exception e) {
        logger.error(e.getMessage(), e.getCause());
    }
}
MongoFileManager.java (project: anhalytics-core)
/**
 * Inserts an arXiv/Istex TEI document into GridFS.
 */
public void insertExternalTeiDocument(InputStream file, String identifier, String repository, String namespace, String dateString) {
    try {
        GridFS gfs = new GridFS(db, namespace);
        GridFSInputFile gfsFile = gfs.createFile(file, true);
        gfs.remove(identifier + ".pdf");
        gfsFile.put("uploadDate", Utilities.parseStringDate(dateString));
        gfsFile.setFilename(identifier + ".tei.xml");
        gfsFile.put("identifier", identifier);
        gfsFile.put("repository", repository);
        gfsFile.setContentType("application/tei+xml");
        gfsFile.save();
    } catch (ParseException e) {
        logger.error(e.getMessage(), e.getCause());
    }
}
GridFsImmutableBlobContainer.java (project: elasticsearch-repository-gridfs)
@Override
public void writeBlob(final String blobName, final InputStream is, final long sizeInBytes, final WriterListener listener) {
    blobStore.executor().execute(new Runnable() {
        @Override
        public void run() {
            try {
                blobStore.gridFS().remove(buildKey(blobName)); // remove the old file if it already exists
                GridFSInputFile file = blobStore.gridFS().createFile(is, buildKey(blobName));
                file.save();
                listener.onCompleted();
            } catch (Exception e) {
                listener.onFailure(e);
            }
        }
    });
}
MongoRepositoryItem.java (project: kurento-java)
@Override
public OutputStream createOutputStreamToWrite() {
    checkState(State.NEW);
    storingOutputStream = new FilterOutputStream(((GridFSInputFile) dbFile).getOutputStream()) {
        @Override
        public void close() throws IOException {
            putMetadataInGridFS(false);
            super.close();
            refreshAttributesOnClose();
        }
    };
    return storingOutputStream;
}
MongoQuery.java (project: minicli)
/**
 * Saves a file to MongoDB via GridFS.
 * @param file the file object to store
 * @param id the custom _id value to store it under
 * @param metaData metadata as key/value pairs
 * @return true if the file was stored, false if a file with this _id already exists or the save failed
 */
public boolean concatGridFile(File file, Object id, DBObject metaData) {
    GridFSInputFile gridFSInputFile;
    DBObject query = new BasicDBObject("_id", id);
    GridFSDBFile gridFSDBFile = myFS.findOne(query);
    if (gridFSDBFile != null) {
        return false;
    }
    try {
        gridFSInputFile = myFS.createFile(file);
        gridFSInputFile.put("_id", id);
        gridFSInputFile.setFilename(file.getName());
        gridFSInputFile.setMetaData(metaData);
        gridFSInputFile.setContentType(file.getName().substring(file.getName().lastIndexOf(".")));
        gridFSInputFile.save();
    } catch (Exception e) {
        e.printStackTrace();
        return false;
    }
    return true;
}
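The method above is an insert-if-absent helper: it looks up the target _id first and only stores the file when nothing is there yet. A standalone sketch of the same pattern against the plain driver API is shown below; the server, database, bucket, file path, id value, and metadata keys are made up for illustration.

import com.mongodb.BasicDBObject;
import com.mongodb.DB;
import com.mongodb.DBObject;
import com.mongodb.MongoClient;
import com.mongodb.gridfs.GridFS;
import com.mongodb.gridfs.GridFSDBFile;
import com.mongodb.gridfs.GridFSInputFile;

import java.io.File;
import java.io.IOException;

public class InsertIfAbsentSketch {

    /** Stores the file under the given _id unless that _id is already taken. */
    static boolean insertIfAbsent(GridFS fs, File file, Object id, DBObject metaData) throws IOException {
        GridFSDBFile existing = fs.findOne(new BasicDBObject("_id", id));
        if (existing != null) {
            return false; // keep the stored copy, mirroring the method above
        }
        GridFSInputFile in = fs.createFile(file); // may throw IOException while reading the file
        in.put("_id", id);
        in.setFilename(file.getName());
        in.setMetaData(metaData);
        in.save();
        return true;
    }

    public static void main(String[] args) throws Exception {
        MongoClient mongo = new MongoClient("localhost", 27017);   // hypothetical server
        try {
            GridFS fs = new GridFS(mongo.getDB("exampleDb"), "exampleBucket"); // hypothetical names
            DBObject meta = new BasicDBObject("owner", "demo");
            boolean stored = insertIfAbsent(fs, new File("report.pdf"), 1001L, meta);
            System.out.println(stored ? "stored" : "already present");
        } finally {
            mongo.close();
        }
    }
}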
DumpFileToLocalFS.java (project: hdfs-archiver)
@Override
public void run() {
    try {
        File localPath = new File(localRoot, file.getFilename());
        log.info("Save to local file:" + localPath.getAbsolutePath());
        File dirName = localPath.getParentFile();
        if (!dirName.exists()) {
            dirName.mkdirs();
        }
        file.writeTo(localPath);
        GridFSInputFile newFile = fs.createFile(new byte[]{0, 0,});
        newFile.setMetaData(file.getMetaData());
        newFile.setFilename(file.getFilename());
        newFile.put("localLength", file.getLength());
        newFile.save(10);
        //log.info("remove:%s" + file.getId() + ", fn:" + file.getFilename());
        fs.remove((ObjectId) file.getId());
    } catch (Throwable e) {
        log.error("Failed to dump file to local fs, error:" + e.toString(), e);
    }
}
GridFSBlobStore.java (project: workspace_deluxe)
@Override
public void saveBlob(final MD5 md5, final InputStream data,
        final boolean sorted)
        throws BlobStoreCommunicationException {
    if (data == null || md5 == null) {
        throw new NullPointerException("Arguments cannot be null");
    }
    if (getFile(md5) != null) {
        return; // already exists
    }
    final GridFSInputFile gif = gfs.createFile(data, true);
    gif.setId(md5.getMD5());
    gif.setFilename(md5.getMD5());
    gif.put(Fields.GFS_SORTED, sorted);
    try {
        gif.save();
    } catch (DuplicateKeyException dk) {
        // already here, done
    } catch (MongoException me) {
        throw new BlobStoreCommunicationException(
                "Could not write to the mongo database", me);
    }
}
GridFSBlobStoreTest.java (project: workspace_deluxe)
@Test
public void dataWithoutSortMarker() throws Exception {
    String s = "pootypoot";
    final GridFSInputFile gif = gfs.createFile(s.getBytes("UTF-8"));
    MD5 md5 = new MD5(a32);
    gif.setId(md5.getMD5());
    gif.setFilename(md5.getMD5());
    gif.save();
    ByteArrayFileCache d = gfsb.getBlob(md5,
            new ByteArrayFileCacheManager(16000000, 2000000000L, tfm));
    assertThat("data returned marked as unsorted", d.isSorted(), is(false));
    String returned = IOUtils.toString(d.getJSON());
    assertThat("Didn't get same data back from store", returned, is(s));
    gfsb.removeBlob(md5);
}
GridFSBlobStore.java (project: jclouds-gridfs-blobstore)
@Override
public String putBlob(String container, Blob blob, PutOptions options) {
    if (options != null && !options.isMultipart()) {
        throw new IllegalArgumentException("only multipart is supported by this provider");
    }
    Payload payload = checkNotNull(blob.getPayload());
    BlobMetadata metadata = blob.getMetadata();
    ContentMetadata contentMetadata = metadata.getContentMetadata();
    GridFS gridFS = parseGridFSIdentifier(container).connect(mongo);
    GridFSInputFile inputFile = gridFS.createFile(payload.getInput(), metadata.getName(), true);
    inputFile.setContentType(contentMetadata.getContentType());
    DBObject fileMetadata = new BasicDBObject();
    fileMetadata.putAll(metadata.getUserMetadata());
    inputFile.setMetaData(fileMetadata);
    inputFile.save();
    return inputFile.getMD5();
}
FileStoreService.java (project: glados-wiki)
public Optional<FileEntry> save(final String name, final String mime,
        final String creator, final boolean privateFile, final String description,
        InputStream in) {
    GridFS gf = gridFS;
    GridFSInputFile f = gf.createFile(in);
    f.setFilename(name);
    f.setContentType(mime);
    //
    DBObject metadata = f.getMetaData();
    if (metadata == null) {
        metadata = new BasicDBObject();
        f.setMetaData(metadata);
    }
    metadata.put("creator", creator);
    metadata.put("private", privateFile);
    metadata.put("description", description);
    //
    f.save();
    //
    return this.loadFileEntry((ObjectId) f.getId());
}
MongoDBDataStore.java (project: emf-fragments)
@Override
synchronized public OutputStream openOutputStream(final byte[] key) {
    return new ByteArrayOutputStream(256) {
        @Override
        public void close() throws IOException {
            super.close();
            byte[] keyString = adoptKey(key);
            byte[] byteArray = toByteArray();
            if (byteArray.length < MAX_BSON_SIZE) {
                collection.update(new BasicDBObject(KEY, keyString), new BasicDBObject(KEY, keyString).append(TYPE, TYPE_BSON).append(VALUE, byteArray), true, false);
            } else {
                // do grid fs
                GridFSInputFile gridFsFile = gridFs.createFile(byteArray);
                String fileName = URIUtils.encode(key);
                gridFsFile.setFilename(fileName);
                gridFsFile.save();
                collection.update(new BasicDBObject(KEY, keyString), new BasicDBObject(KEY, keyString).append(TYPE, TYPE_GRID_FS).append(FILE_NAME, fileName), true, false);
            }
        }
    };
}
GridFSPutFileCommand.java (project: MongoWorkBench)
@Override
public void execute() throws Exception {
    MongoClient mdb = MongoFactory.getInst().getMongo(sName);
    if (mdb == null)
        throw new Exception("no server selected");
    if (sDb == null)
        throw new Exception("no database selected");
    MongoFactory.getInst().setActiveDB(sDb);
    DB db = mdb.getDB(sDb);
    GridFS gfs = new GridFS(db, sColl.substring(0, sColl.lastIndexOf(".")));
    GridFSInputFile gridFSInputFile = gfs.createFile(getFile);
    gridFSInputFile.setContentType(MimetypesFileTypeMap.getDefaultFileTypeMap().getContentType(getFile));
    gridFSInputFile.save();
    setMessage("fileLoaded=" + getFile + "; size=" + getFile.length());
}
DataManager.java (project: KernelHive)
public DataAddress prefetchData(DataAddress givenAddress, ServerAddress destAddress) throws IOException {
    logger.info("yo2");
    ServerAddress givenServer = new ServerAddress(givenAddress.hostname, givenAddress.port);
    GridFS givenDatabase = connectToDatabase(givenServer);
    logger.info("yo");
    GridFSDBFile givenPackage = givenDatabase.findOne(new BasicDBObject("_id", givenAddress.ID));
    ByteArrayOutputStream baos = new ByteArrayOutputStream((int) givenPackage.getLength());
    givenPackage.writeTo(baos);
    logger.info("Prefetched");
    GridFS destDatabase = connectToDatabase(destAddress);
    GridFSInputFile destPackage = destDatabase.createFile(baos.toByteArray());
    int newID = getNextId(destDatabase);
    logger.info("Got new id for prefetched package: " + newID);
    destPackage.put("_id", newID);
    destPackage.save();
    logger.info("after save");
    DataAddress ret = new DataAddress();
    ret.hostname = destAddress.getHost();
    ret.port = destAddress.getPort();
    ret.ID = newID;
    return ret;
}
GridFSClient.java (project: whatsmars)
/**
 * Saves a file stream to GridFS under a randomly generated, unique filename.
 *
 * @param inputStream the file stream
 * @param format the file format, e.g. "pdf" or "png", without the leading "."
 * @param uid an optional uid to store with the file
 * @return the stored filename combined with the format (via concat)
 */
public String saveFile(InputStream inputStream, String format, String uid) {
    try {
        GridFS gridFS = getInstance();
        // Generate a random file name, retrying as often as needed.
        String filename = this.randomFileName();
        // If a file with this name already exists, generate a new filename.
        while (true) {
            GridFSDBFile _current = gridFS.findOne(filename);
            // If no file with this name exists, proceed with the save.
            if (_current == null) {
                break;
            }
            filename = this.randomFileName();
        }
        GridFSInputFile file = gridFS.createFile(inputStream, filename);
        if (format != null) {
            file.put("format", format);
        }
        if (uid != null) {
            file.put("uid", uid);
        }
        file.put("content-type", "application/octet-stream");
        file.save();
        return concat(filename, format);
    } catch (Exception e) {
        throw new RuntimeException(e);
    } finally {
        try {
            inputStream.close();
        } catch (Exception ex) {
            //
        }
    }
}
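saveFile above stores the blob under a random, collision-checked filename and records format and uid as extra fields. The snippets on this page only cover the write side; for completeness, reading such a file back by filename with the same legacy API looks roughly like the sketch below (server, database, bucket, and filename are illustrative assumptions).

import com.mongodb.DB;
import com.mongodb.MongoClient;
import com.mongodb.gridfs.GridFS;
import com.mongodb.gridfs.GridFSDBFile;

import java.io.FileOutputStream;
import java.io.OutputStream;

public class GridFsReadSketch {

    public static void main(String[] args) throws Exception {
        MongoClient mongo = new MongoClient("localhost", 27017);   // hypothetical server
        try {
            DB db = mongo.getDB("exampleDb");                      // hypothetical database
            GridFS gridFS = new GridFS(db, "exampleBucket");       // hypothetical bucket
            GridFSDBFile stored = gridFS.findOne("a1b2c3d4");      // hypothetical stored filename
            if (stored != null) {
                OutputStream out = new FileOutputStream("download.bin");
                try {
                    stored.writeTo(out);                           // streams the GridFS chunks out
                } finally {
                    out.close();
                }
                // Extra fields written at save time can be read back the same way they were put.
                System.out.println("format: " + stored.get("format") + ", uid: " + stored.get("uid"));
            }
        } finally {
            mongo.close();
        }
    }
}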
DfsGridImpl.java (project: leopard)
@Override
public boolean create(String filename, byte[] data) {
    this.delete(filename);
    GridFSInputFile file = getGridFS().createFile(data);
    file.setFilename(filename);
    file.save();
    return true;
}
MongoPojoCache.java (project: LiveQA)
@Override
public void put(String keyText, T obj) throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    ObjectOutputStream objectOutput = new ObjectOutputStream(baos);
    objectOutput.writeObject(obj);
    objectOutput.close();
    byte[] binaryObject = baos.toByteArray();
    GridFSInputFile objFile = gridfs.createFile(binaryObject);
    objFile.setFilename(DigestUtils.sha256Hex(keyText));
    // it will not update the content of an existing file
    objFile.save();
}
GridFSBlobHandler.java (project: Rapture)
protected GridFSInputFile createNewFile(String docPath, InputStream content) {
    GridFSInputFile file = getGridFS().createFile(content, docPath);
    if (file != null) {
        file.save();
    }
    return file;
}
DefaultMediaAssetHelper.java (project: geeCommerce-Java-Shop-Software-and-PIM)
public long createGridFsFile(Id id, InputStream inputStream, String filename, String mimeType) {
    DB db = (DB) connections.getConnection("mongodb.dma");
    GridFS fs = new GridFS(db);
    GridFSInputFile gridFile = fs.createFile(inputStream, filename);
    gridFile.setId(id);
    gridFile.setContentType(mimeType);
    gridFile.save();
    return gridFile.getLength();
}
DefaultMediaAssetService.java (project: geeCommerce-Java-Shop-Software-and-PIM)
private long createGridFsFile(Id id, InputStream inputStream, String filename, String mimeType) {
    DB db = (DB) connections.getConnection("mongodb.dma");
    GridFS fs = new GridFS(db);
    GridFSInputFile gridFile = fs.createFile(inputStream, filename);
    gridFile.setId(id);
    gridFile.setContentType(mimeType);
    gridFile.save();
    return gridFile.getLength();
}
UploadScreenshot.java (project: selenium-screenshot-watcher)
@SuppressWarnings("serial")
@POST
@Consumes(MediaType.APPLICATION_JSON)
public Response insertImageInDb(String jsonRequest, @Context Request request) throws IOException {
    EndpointUtil.printClientInfo(request);
    DBObject json = ((DBObject) JSON.parse(jsonRequest));
    String imageData = (String) json.get("imageData");
    byte[] screenshotBytes = Base64Utils.decode(imageData);
    String testName = json.get(BaseScreenshotModel.TEST_NAME).toString();
    String testBrowser = json.get(BaseScreenshotModel.TEST_BROWSER).toString();
    String description = json.get(BaseScreenshotModel.DESCRIPTION).toString();
    Type type = new TypeToken<List<Rectangle>>() {
    }.getType();
    String ignoreZonesString = ((Object) json.get(BaseScreenshotModel.IGNORE_ZONES)).toString();
    List<Rectangle> ignoreZones = new ArrayList<>();
    if (ignoreZonesString != null) {
        ignoreZones = GsonUtil.gson.fromJson(ignoreZonesString, type);
    }
    File tmpFile = new File("tmpFile");
    FileUtils.writeByteArrayToFile(tmpFile, screenshotBytes);
    GridFSInputFile gfsFile = GFS_PHOTO.createFile(tmpFile);
    gfsFile.setFilename(String.format("%s|%s|%s", testName, testBrowser, description));
    gfsFile.save();
    // after the file has been saved, get the id and add it into the table of base_images
    BaseScreenshotModel up = new BaseScreenshotModel(testName, testBrowser, description,
            new ObjectId(gfsFile.getId().toString()), ignoreZones);
    TMP_IMAGES.save(up);
    tmpFile.delete();
    return Response.ok().entity(JSON.serialize(up)).build();
}
ArtifactsDAOMongoDBImpl.java (project: aet)
@Override
public String saveArtifact(DBKey dbKey, InputStream data, String contentType) {
    String resultObjectId = null;
    GridFS gfs = getGridFS(dbKey);
    GridFSInputFile file = gfs.createFile(data);
    if (file != null) {
        file.setContentType(contentType);
        file.save();
        resultObjectId = file.getId().toString();
    }
    return resultObjectId;
}