Learn more at s3.scality.com
In order to contribute, please follow the Contributing Guidelines.
Building and running the S3 Server requires Node.js 4.5 and npm v2. Up-to-date versions can be found at Nodesource.
git clone https://github.com/scality/S3.git
Go to the ./S3 folder, then install the dependencies and start the server:
npm install
npm start
This starts an S3 server on port 8000. The default access key is accessKey1 with a secret key of verySecretKey1.
By default, metadata files are saved in the localMetadata directory and data files in the localData directory, both within the ./S3 directory on your machine. These directories are pre-created in the repository. If you would like to save the data or metadata in different locations, you must specify them with absolute paths when starting the server:
mkdir -m 700 $(pwd)/myFavoriteDataPath
mkdir -m 700 $(pwd)/myFavoriteMetadataPath
export S3DATAPATH="$(pwd)/myFavoriteDataPath"
export S3METADATAPATH="$(pwd)/myFavoriteMetadataPath"
npm start
Alternatively, you can run the server with an in-memory backend:
npm run mem_backend
This starts an S3 server on port 8000. The default access key is accessKey1 with a secret key of verySecretKey1.
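To quickly check that the server is listening, you can send it an unauthenticated request; since the request is unsigned, the expected reply is an S3-style XML error such as AccessDenied:
curl http://localhost:8000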
You can run the unit tests with the following command:
npm test
You can run the linter with:
npm run lint
You can run local functional tests with:
npm run mem_backend &
npm run ft_test
If you want to specify an endpoint (other than localhost), you need to add it to your config.json:
"regions": {
"localregion": ["localhost"],
"specifiedregion": ["myhostname.com"]
},
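Clients can then target the added endpoint. For example, with the AWS CLI (assuming myhostname.com resolves to the machine running the server, which listens on its default port 8000):
aws s3 ls --endpoint-url=http://myhostname.com:8000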
Note that the S3 Server supports both:
- path-style: http://myhostname.com/mybucket
- hosted-style: http://mybucket.myhostname.com
However, hosted-style requests will not reach the server if you are using an IP address as your host, so make sure to use path-style requests in that case. For instance, if you are using the AWS SDK for JavaScript, you would instantiate your client like this:
const s3 = new aws.S3({
    endpoint: 'http://127.0.0.1:8000',
    s3ForcePathStyle: true,
});
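With path-style access enabled, the client addresses a bucket in the request path (for example, http://127.0.0.1:8000/mybucket, where mybucket is an illustrative name):
s3.createBucket({ Bucket: 'mybucket' }, err => {
    if (err) {
        return console.error(err);
    }
    console.log('Bucket created');
});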
A ready-made s3curl script is included in the repository:
https://github.com/scality/S3/blob/master/tests/functional/s3curl/s3curl.pl
To use the AWS CLI, configure ~/.aws/credentials on Linux, OS X, or Unix, or C:\Users\USERNAME\.aws\credentials on Windows, with:
[default]
aws_access_key_id = accessKey1
aws_secret_access_key = verySecretKey1
See all buckets:
aws s3 ls --endpoint-url=http://localhost:8000
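For example, to create a bucket and upload a file to it (bucket and file names below are placeholders):
aws s3 mb s3://mybucket --endpoint-url=http://localhost:8000
aws s3 cp file.txt s3://mybucket --endpoint-url=http://localhost:8000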
If using s3cmd as a client to S3, be aware that the v4 signature format is buggy in s3cmd versions < 1.6.1.
Configure ~/.s3cfg on Linux, OS X, or Unix, or C:\Users\USERNAME\.s3cfg on Windows, with:
[default]
access_key = accessKey1
secret_key = verySecretKey1
host_base = localhost:8000
host_bucket = %(bucket).localhost:8000
signature_v2 = False
use_https = False
See all buckets:
s3cmd ls
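Create a bucket and upload a file to it (names below are placeholders):
s3cmd mb s3://mybucket
s3cmd put file.txt s3://mybucket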
To use rclone, configure ~/.rclone.conf on Linux, OS X, or Unix, or C:\Users\USERNAME\.rclone.conf on Windows, with:
[remote]
type = s3
env_auth = false
access_key_id = accessKey1
secret_access_key = verySecretKey1
region = other-v2-signature
endpoint = http://localhost:8000
location_constraint =
acl = private
server_side_encryption =
storage_class =
See all buckets:
rclone lsd remote:
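Create a bucket and copy a file into it (names below are placeholders):
rclone mkdir remote:mybucket
rclone copy file.txt remote:mybucket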
Using the AWS SDK for JavaScript:
const AWS = require('aws-sdk');

const s3 = new AWS.S3({
    accessKeyId: 'accessKey1',
    secretAccessKey: 'verySecretKey1',
    endpoint: 'localhost:8000',
    sslEnabled: false,
    s3ForcePathStyle: true,
});
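A minimal sketch of listing buckets with this client, printing each bucket's name:
s3.listBuckets((err, data) => {
    if (err) {
        return console.error(err);
    }
    data.Buckets.forEach(bucket => console.log(bucket.Name));
});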
Using the AWS SDK for Java:
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.S3ClientOptions;
import com.amazonaws.services.s3.model.Bucket;
public class S3 {
    public static void main(String[] args) {
        AWSCredentials credentials = new BasicAWSCredentials("accessKey1",
                "verySecretKey1");

        // Create a client connection based on credentials
        AmazonS3 s3client = new AmazonS3Client(credentials);
        s3client.setEndpoint("http://localhost:8000");
        // Using path-style requests
        // (deprecated) s3client.setS3ClientOptions(new S3ClientOptions().withPathStyleAccess(true));
        s3client.setS3ClientOptions(S3ClientOptions.builder().setPathStyleAccess(true).build());

        // Create a bucket
        String bucketName = "javabucket";
        s3client.createBucket(bucketName);

        // List all buckets
        for (Bucket bucket : s3client.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
    }
}
Using the AWS SDK for Ruby:
require 'aws-sdk'

s3 = Aws::S3::Client.new(
  :access_key_id => 'accessKey1',
  :secret_access_key => 'verySecretKey1',
  :endpoint => 'http://localhost:8000',
  :force_path_style => true
)

resp = s3.list_buckets
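The response object exposes the buckets; for instance, to print each bucket's name:
resp.buckets.each { |bucket| puts bucket.name }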
require "fog"
connection = Fog::Storage.new(
{
:provider => "AWS",
:aws_access_key_id => 'accessKey1',
:aws_secret_access_key => 'verySecretKey1',
:endpoint => 'http://localhost:8000',
:path_style => true,
:scheme => 'http',
})
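fog models buckets as directories; a minimal check that the connection works, printing each bucket's name:
connection.directories.each { |directory| puts directory.key }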
Using boto2:
import boto
from boto.s3.connection import S3Connection, OrdinaryCallingFormat

connection = S3Connection(
    aws_access_key_id='accessKey1',
    aws_secret_access_key='verySecretKey1',
    is_secure=False,
    port=8000,
    calling_format=OrdinaryCallingFormat(),
    host='localhost'
)

connection.create_bucket('mybucket')
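A quick way to verify the bucket was created is to list all buckets over the same connection:
for bucket in connection.get_all_buckets():
    print(bucket.name)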
Using boto3:
import boto3

client = boto3.client(
    's3',
    aws_access_key_id='accessKey1',
    aws_secret_access_key='verySecretKey1',
    endpoint_url='http://localhost:8000'
)

lists = client.list_buckets()
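The response is a plain dictionary; for example, to print each bucket's name:
for bucket in lists['Buckets']:
    print(bucket['Name'])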
With the AWS SDK for PHP, use v3 rather than v2: v2 creates virtual-hosted-style URLs, while v3 generates path-style URLs.
use Aws\S3\S3Client;

$client = S3Client::factory([
    'region' => 'us-east-1',
    'version' => 'latest',
    'endpoint' => 'http://localhost:8000',
    'credentials' => [
        'key' => 'accessKey1',
        'secret' => 'verySecretKey1'
    ]
]);

$client->createBucket([
    'Bucket' => 'bucketphp',
]);
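To verify the bucket was created, you can list all buckets with the same client:
$result = $client->listBuckets();
foreach ($result['Buckets'] as $bucket) {
    echo $bucket['Name'] . "\n";
}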