Meteor-Community-Packages/Meteor-CollectionFS

no data (noob question perhaps)

atonyman opened this issue · 6 comments

Hi
I'm able to retrieve data using peerlibrary:aws-sdk; however, when I try to read files using cfs:s3, it returns nothing (no data).
I've spent days trying to debug this (including turning on FS debug output), but I still get nothing.

Here's my server-side code and server log, using known-good settings and a bucket.

By the way, my AWS user has full S3 access.

Any help greatly appreciated.

Tony

// Full-size image store
var imageLargeStore = new FS.Store.S3("imagesLarge", {
  accessKeyId: Meteor.settings.AWS.accessKeyId,
  secretAccessKey: Meteor.settings.AWS.secretAccessKey,
  bucket: "salk-employee-pics"
});

// Thumbnail store: resize to 80x80 with GraphicsMagick before writing
var imageSmallStore = new FS.Store.S3("imagesSmall", {
  accessKeyId: Meteor.settings.AWS.accessKeyId,
  secretAccessKey: Meteor.settings.AWS.secretAccessKey,
  bucket: "salk-employee-pics",
  transformWrite: function(fileObj, readStream, writeStream) {
    gm(readStream, fileObj.name()).resize('80', '80').stream().pipe(writeStream);
  }
});

// FS.Collection backed by both stores
thumbs = new FS.Collection("images", {
  stores: [imageLargeStore, imageSmallStore]
});

console.log(thumbs.find());

Log

Cursor {
I20170126-21:53:56.273(-8)?   _mongo: 
I20170126-21:53:56.273(-8)?    MongoConnection {
I20170126-21:53:56.273(-8)?      _observeMultiplexers: 
I20170126-21:53:56.273(-8)?       { '{"ordered":false,"collectionName":"cfs.images.filerecord","selector":{"uploadedAt":{"$exists":true},"copies.imagesLarge":null,"failures.copies.imagesLarge.doneTrying":{"$ne":true}},"options":{"fields":{"copies":0}}}': [Object],
I20170126-21:53:56.274(-8)?         '{"ordered":false,"collectionName":"cfs.images.filerecord","selector":{"uploadedAt":{"$exists":true},"copies.imagesSmall":null,"failures.copies.imagesSmall.doneTrying":{"$ne":true}},"options":{"fields":{"copies":0}}}': [Object],
I20170126-21:53:56.274(-8)?         '{"ordered":false,"collectionName":"cfs.images.filerecord","selector":{"$and":[{"$or":[{"$and":[{"copies.imagesLarge":{"$ne":null}},{"copies.imagesLarge":{"$ne":false}}]},{"failures.copies.imagesLarge.doneTrying":true}]},{"$or":[{"$and":[{"copies.imagesSmall":{"$ne":null}},{"copies.imagesSmall":{"$ne":false}}]},{"failures.copies.imagesSmall.doneTrying":true}]}]},"options":{}}': [Object],
I20170126-21:53:56.274(-8)?         '{"ordered":false,"collectionName":"cfs.images.filerecord","selector":{},"options":{}}': [Object] },
I20170126-21:53:56.274(-8)?      _onFailoverHook: { nextCallbackId: 4, callbacks: [Object], bindEnvironment: true },
I20170126-21:53:56.274(-8)?      db: 
I20170126-21:53:56.275(-8)?       EventEmitter {
I20170126-21:53:56.275(-8)?         domain: null,
I20170126-21:53:56.275(-8)?         _events: {},
I20170126-21:53:56.275(-8)?         _eventsCount: 0,
I20170126-21:53:56.275(-8)?         _maxListeners: undefined,
I20170126-21:53:56.275(-8)?         s: [Object],
I20170126-21:53:56.276(-8)?         serverConfig: [Getter],
I20170126-21:53:56.276(-8)?         bufferMaxEntries: [Getter],
I20170126-21:53:56.276(-8)?         databaseName: [Getter] },
I20170126-21:53:56.276(-8)?      _primary: '127.0.0.1:3001',
I20170126-21:53:56.276(-8)?      _oplogHandle: 
I20170126-21:53:56.277(-8)?       OplogHandle {
I20170126-21:53:56.277(-8)?         _oplogUrl: 'mongodb://127.0.0.1:3001/local',
I20170126-21:53:56.277(-8)?         _dbName: 'meteor',
I20170126-21:53:56.277(-8)?         _oplogLastEntryConnection: [Object],
I20170126-21:53:56.277(-8)?         _oplogTailConnection: [Object],
I20170126-21:53:56.277(-8)?         _stopped: false,
I20170126-21:53:56.278(-8)?         _tailHandle: [Object],
I20170126-21:53:56.278(-8)?         _readyFuture: [Object],
I20170126-21:53:56.278(-8)?         _crossbar: [Object],
I20170126-21:53:56.278(-8)?         _baseOplogSelector: [Object],
I20170126-21:53:56.278(-8)?         _catchingUpFutures: [],
I20170126-21:53:56.278(-8)?         _lastProcessedTS: [Object],
I20170126-21:53:56.278(-8)?         _onSkippedEntriesHook: [Object],
I20170126-21:53:56.278(-8)?         _entryQueue: [Object],
I20170126-21:53:56.279(-8)?         _workerActive: false },
I20170126-21:53:56.279(-8)?      _docFetcher: DocFetcher { _mongoConnection: [Circular], _callbacksForCacheKey: {} } },
I20170126-21:53:56.279(-8)?   _cursorDescription: 
I20170126-21:53:56.279(-8)?    CursorDescription {
I20170126-21:53:56.279(-8)?      collectionName: 'cfs.images.filerecord',
I20170126-21:53:56.279(-8)?      selector: {},
I20170126-21:53:56.279(-8)?      options: { transform: [Object] } },
I20170126-21:53:56.280(-8)?   _synchronousCursor: null }

Am I missing anything needed for a well-formed description of the problem?

Try:

console.log(thumbs.find().fetch())

instead of

console.log(thumbs.find())
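
In Meteor, find() returns a cursor object (which is exactly what your log is printing), while fetch() runs the query and returns an array of documents. A minimal sketch of the difference, using the thumbs collection from above:

var cursor = thumbs.find();   // Mongo.Cursor: logging it dumps driver internals
var docs = cursor.fetch();    // array of matching file records ([] if none)
console.log(docs.length, docs);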

Hi, I tried your suggestion and still no data.
Question: is it possible to use cfs:s3 on data that is already in a bucket, or do I have to use the cfs:s3 methods to upload data to the bucket and generate the necessary metadata?
Many thanks

I personally have no S3 account and am not so familiar with this particular S3 package, but I would assume there has to be some metadata in a Mongo collection somewhere (as with the other cfs stores) if you want to perform Mongo queries on your files. A store is only there to write and delete files.

You could either use the common CFS storage-adapter server insert method, Images.insert(path||url), or generate the required metadata yourself somehow. I would suggest testing with an empty/test bucket first ;)

https://github.com/CollectionFS/Meteor-CollectionFS/blob/master/packages/storage-adapter/internal.api.md#selfinsertfsfile-options-callbackserver
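
For reference, a minimal sketch of that server-side insert-by-URL pattern (the Images collection name and the URL are illustrative, not from this thread; attachData with a URL is asynchronous on the server, so it takes a callback):

var file = new FS.File();
file.attachData('https://example.com/photo.jpg', {type: 'image/jpeg'}, function (error) {
  if (error) throw error;
  // Insert creates the filerecord (e.g. in cfs.images.filerecord);
  // the collection's stores then save the data in the background.
  Images.insert(file, function (err, fileObj) {
    if (err) throw err;
    console.log('inserted file', fileObj._id);
  });
});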

I ended up using peerlibrary/meteor-aws-sdk to load the data from S3 into this package's adapter (cfs:s3). It doesn't feel like a great solution, but it may have to be good enough for now.

And I used the insert method you recommended. Here's what I ended up with:
updateEmployeeThumbs: function () {

	var configObj = {
		accessKeyId: Meteor.settings.AWS.accessKeyId,
		secretAccessKey: Meteor.settings.AWS.secretAccessKey,
		bucket: "salk-employee-pics"
	};

	// Configure credentials once, before creating the client,
	// so the client picks them up.
	AWS.config.update({
		accessKeyId: configObj.accessKeyId,
		secretAccessKey: configObj.secretAccessKey
	});

	var s3 = new AWS.S3();

	var updateCollectionFs = function (listDataContents) {

		_.each(listDataContents, (chunk) => {

			_.each(chunk, (f) => {
				var existing = EmployeeThumbs.findOne({'original.name': f.Key});

				// For now only insert what's missing; don't update existing thumbs.
				if (typeof existing === 'undefined') {

					console.log('inserting new thumb for ' + f.Key);

					// getObjectSync (peerlibrary:aws-sdk) returns the whole
					// response object; the file bytes are in its Body property.
					var obj = s3.getObjectSync({
						Key: f.Key,
						Bucket: configObj.bucket
					});

					// Create the FS.File instance and attach the S3 object body.
					var newFile = new FS.File();
					newFile.attachData(obj.Body, {type: 'image/jpeg'});

					// Provide a file name so it can be found by 'original.name'.
					newFile.name(f.Key);

					// Insert the file, which saves the metadata and stores the
					// data into all stores defined on EmployeeThumbs.
					EmployeeThumbs.insert(newFile);
				}
			});

		});
	};

	var allKeys = [];

	var listAllObjectsFromS3Bucket = function (configObj, marker) {

		var list = s3.listObjectsSync({Bucket: configObj.bucket, Marker: marker});

		allKeys.push(list.Contents);

		console.log('pushed list of ' + list.Contents.length);

		// listObjects returns at most 1000 keys per call; if the result is
		// truncated, continue from the last key we received.
		if (list.IsTruncated) {

			var contents = list.Contents;
			var nextMarker = contents[contents.length - 1].Key;

			console.log('list was truncated, next marker is: ' + nextMarker);

			listAllObjectsFromS3Bucket(configObj, nextMarker);

		} else {

			return allKeys;

		}
	};

	listAllObjectsFromS3Bucket(configObj, '');

	console.log('now going to upsert');
	updateCollectionFs(allKeys);

},
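
For completeness: assuming updateEmployeeThumbs lives inside a Meteor.methods({...}) block on the server, it can be triggered with a plain method call:

Meteor.call('updateEmployeeThumbs', function (error) {
  if (error) console.error('thumb sync failed:', error);
  else console.log('thumb sync complete');
});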