I need to move a number of files from one server into S3, with these requirements:
- Save transfer progress if the connection drops
- On the next run, continue with the files that have not yet been uploaded (rough idea sketched after this list)
- Run as a service -- TODO (rough idea at the end of this post)
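For the first two requirements, my rough plan (not wired into the script below yet) is to record each finished upload in videos_done.txt and, on the next run, process only the lines of videos_list.txt that are not in it, something like:

# Assumed sketch: keep only the whole lines of videos_list.txt that do not
# appear in videos_done.txt; the name of the output file is a placeholder.
grep -vxFf /home/user/videos_done.txt /home/user/videos_list.txt > /home/user/videos_pending.txt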
This is what I have so far. It is not complete, because the script fails to locate a file when the command runs from within the script, even though the same command works when run by hand.
How can I make the script find the $source file from within the script? Also, is there a better way of doing this?
Command:
aws s3 cp '/var/www/files/folder/file1.mp4' 's3://s3-public/folder/file1.mp4'
Error:
script.sh: line 20: aws s3 cp '/var/www/files/folder/file1.mp4' 's3://s3-public/folder/file1.mp4': No such file or directory
Code:
#!/bin/bash
# Destination bucket, local paths, and the two bookkeeping files
aws_bucket='s3://s3-public/'
home='/home/user/'
files='/var/www/files/'
input="${home}videos_list.txt"
output="${home}videos_done.txt"
videos_list=()
videos_done=()
# Read both files into arrays, one path per element
arrays_hydrate() {
    mapfile -t videos_list < "$input"
    mapfile -t videos_done < "$output"
}
# Upload every listed file, stripping the local prefix to build the S3 key
aws_init() {
    for index in "${!videos_list[@]}"
    do
        source="${videos_list[$index]}"
        destination=$( echo "${videos_list[$index]}" | sed -e "s#^$files##" )
        length="${#source}"
        if [ "$length" -ne "0" ]; then
            command="aws s3 cp '${source}' '${aws_bucket}${destination}'"
            echo "$command"
            # This is the line that fails with "No such file or directory"
            $( "$command" ) &
        fi
    done
}
init() {
    arrays_hydrate
    aws_init
}
init
exit 0
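For the run-as-a-service TODO, my rough plan (only an idea so far, nothing here is implemented) is a small wrapper loop started via nohup or a systemd unit; the script path and the 15-minute interval are placeholders:

#!/bin/bash
# Placeholder wrapper: keep re-running the upload script so interrupted
# transfers are picked up again on the next pass.
while true; do
    /home/user/script.sh
    sleep 900   # wait 15 minutes between passes
done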