Added logic to check the S3 bucket to ensure each file exists, and to delete orphaned files.
	
		
			
	
		
	
	
		
	
		
			All checks were successful
		
		
	
	
		
			
				
	
				Locusworks Team/aws-s3-sync/pipeline/head This commit looks good
				
			
		
		
	
	
				
					
				
			
		
			All checks were successful
		
		
	
	Locusworks Team/aws-s3-sync/pipeline/head This commit looks good
				
			This commit is contained in:
		| @@ -3,16 +3,16 @@ package net.locusworks.s3sync.client; | ||||
| import java.io.IOException; | ||||
| import java.nio.file.Files; | ||||
| import java.nio.file.Path; | ||||
| import java.util.HashSet; | ||||
| import java.util.Set; | ||||
| import com.amazonaws.AmazonClientException; | ||||
| import com.amazonaws.AmazonServiceException; | ||||
| import com.amazonaws.SdkClientException; | ||||
| import com.amazonaws.regions.Regions; | ||||
| import com.amazonaws.services.s3.AmazonS3; | ||||
| import com.amazonaws.services.s3.AmazonS3ClientBuilder; | ||||
| import com.amazonaws.services.s3.model.Bucket; | ||||
| import com.amazonaws.services.s3.model.GetObjectRequest; | ||||
| import com.amazonaws.services.s3.model.Permission; | ||||
| import com.amazonaws.services.s3.model.S3Object; | ||||
| import com.amazonaws.services.s3.model.S3ObjectSummary; | ||||
| import com.amazonaws.services.s3.transfer.Download; | ||||
| import com.amazonaws.services.s3.transfer.TransferManager; | ||||
| import com.amazonaws.services.s3.transfer.TransferManagerBuilder; | ||||
| @@ -59,10 +59,11 @@ public class S3Client implements AutoCloseable { | ||||
|     xferMgr = !xferMgrNull ? xferMgr : TransferManagerBuilder.standard().withS3Client(s3Client).build(); | ||||
|     FileDetail fd = null; | ||||
|     try { | ||||
|       fd = FileManager.getInstance().getFileDetail(file); | ||||
|       String key = getPath(file); | ||||
|       fd = FileManager.getInstance().getFileDetail(file, key); | ||||
|       if (fd.isUploaded()) return; | ||||
|       logger.info("Uploading file: %s", file); | ||||
|       Upload xfer = xferMgr.upload(bucket, getPath(file), file.toFile()); | ||||
|       Upload xfer = xferMgr.upload(bucket, key, file.toFile()); | ||||
|       xfer.waitForCompletion(); | ||||
|       fd.setUploaded(true); | ||||
|       FileManager.getInstance().addEntry(fd); | ||||
| @@ -109,7 +110,7 @@ public class S3Client implements AutoCloseable { | ||||
|  | ||||
|   public void syncFolder() throws IOException { | ||||
|     TransferManager xferMgr = TransferManagerBuilder.standard().withS3Client(s3Client).build(); | ||||
|     try (FileManager manager = FileManager.newInstance(this)) { | ||||
|     try (FileManager manager = FileManager.getInstance()) { | ||||
|       Files.walk(syncFolder) | ||||
|       .filter(f -> Files.isRegularFile(f)) | ||||
|       .forEach(f -> uploadFile(xferMgr, syncFolder.resolve(f))); | ||||
| @@ -120,65 +121,31 @@ public class S3Client implements AutoCloseable { | ||||
|     xferMgr.shutdownNow(false); | ||||
|   } | ||||
|    | ||||
|   /** | ||||
|    * <p> | ||||
|    * Gets the object stored in Amazon S3 under the specified bucket and key. | ||||
|    * </p> | ||||
|    * <p> | ||||
|    * Be extremely careful when using this method; the returned Amazon S3 | ||||
|    * object contains a direct stream of data from the HTTP connection. The | ||||
|    * underlying HTTP connection cannot be reused until the user finishes | ||||
|    * reading the data and closes the stream. Also note that if not all data | ||||
|    * is read from the stream then the SDK will abort the underlying connection, | ||||
|    * this may have a negative impact on performance. Therefore: | ||||
|    * </p> | ||||
|    * <ul> | ||||
|    * <li>Use the data from the input stream in Amazon S3 object as soon as possible</li> | ||||
|    * <li>Read all data from the stream (use {@link GetObjectRequest#setRange(long, long)} to request only the bytes you need)</li> | ||||
|    * <li>Close the input stream in Amazon S3 object as soon as possible</li> | ||||
|    * </ul> | ||||
|    * If these rules are not followed, the client can run out of resources by | ||||
|    * allocating too many open, but unused, HTTP connections. </p> | ||||
|    * <p> | ||||
|    * To get an object from Amazon S3, the caller must have | ||||
|    * {@link Permission#Read} access to the object. | ||||
|    * </p> | ||||
|    * <p> | ||||
|    * If the object fetched is publicly readable, it can also read it by | ||||
|    * pasting its URL into a browser. | ||||
|    * </p> | ||||
|    * <p> | ||||
|    * For more advanced options (such as downloading only a range of an | ||||
|    * object's content, or placing constraints on when the object should be | ||||
|    * downloaded) callers can use {@link #getObject(GetObjectRequest)}. | ||||
|    * </p> | ||||
|    * <p> | ||||
|    * If you are accessing <a href="http://aws.amazon.com/kms/">AWS | ||||
|    * KMS</a>-encrypted objects, you need to specify the correct region of the | ||||
|    * bucket on your client and configure AWS Signature Version 4 for added | ||||
|    * security. For more information on how to do this, see | ||||
|    * http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html# | ||||
|    * specify-signature-version | ||||
|    * </p> | ||||
|    * | ||||
|    * @param bucketName | ||||
|    *            The name of the bucket containing the desired object. | ||||
|    * @param key | ||||
|    *            The key under which the desired object is stored. | ||||
|    * | ||||
|    * @return The object stored in Amazon S3 in the specified bucket and key. | ||||
|    * | ||||
|    * @throws SdkClientException | ||||
|    *             If any errors are encountered in the client while making the | ||||
|    *             request or handling the response. | ||||
|    * | ||||
|    */ | ||||
|   public S3Object getObject(String bucketName, String key) { | ||||
|     try { | ||||
|       return s3Client.getObject(bucketName, key); | ||||
|     } catch (AmazonServiceException ex) {} | ||||
|     return null; | ||||
|   } | ||||
|    | ||||
|   public Set<String> getFileList() { | ||||
|     Set<String> fileList = new HashSet<String>(); | ||||
|     for(S3ObjectSummary os: s3Client.listObjectsV2(getBucket()).getObjectSummaries()) { | ||||
|       fileList.add(os.getKey()); | ||||
|     } | ||||
|     return fileList; | ||||
|   } | ||||
|    | ||||
|   public void removeFiles(Set<String> s3Files) { | ||||
|     for(String key : s3Files) { | ||||
|       try { | ||||
|         logger.info("Removing file: " + key); | ||||
|         s3Client.deleteObject(getBucket(), key); | ||||
|       } catch (AmazonServiceException ex) { | ||||
|         logger.warn(String.format("Unable to delete %s: %s", key, ex.getMessage()), ex); | ||||
|       } | ||||
|     } | ||||
|   } | ||||
|   | ||||
|   private String getPath(Path file) { | ||||
|     if (file.getParent() == null) return file.getFileName().toString(); | ||||
|   | ||||
		Reference in New Issue
	
	Block a user