
Commit 9a8530e

Janez Justin committed:
code polish, Makefile and README update
1 parent ba9525d commit 9a8530e

File tree: 4 files changed, +136 −64 lines

rpm/Makefile

Lines changed: 16 additions & 7 deletions
@@ -1,10 +1,19 @@
-ZIPPED := s3rpm.py gnupg.py pyrpm/* pyrpm/tools/*
+ZIPPED_FILES := s3rpm.py gnupg.py # files to compress in root of zip
+ZIPPED_DIRS := pyrpm # folders to compress to zip
+all: requires test package
 
+help: ## displays this message
+	@grep -E '^[a-zA-Z_/%\-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
 
-set: requires package
+requires: ## installs required packages
+	pip3 install -t . -r requirements.txt
 
-requires:
-	pip3 install -t . -r requirements.txt --upgrade
-
-package:
-	zip code.zip $(ZIPPED)
+package: ## creates zip of code
+	zip code.zip $(ZIPPED_FILES)
+	zip -r code.zip $(ZIPPED_DIRS)
+
+clean: ## cleans up the repository
+	/bin/rm -rf code.zip
+
+test: ## runs the tests
+	python3.6 s3rpm_test.py
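The `package` target's split of `ZIPPED` into `ZIPPED_FILES` and `ZIPPED_DIRS` matters because `zip -r` picks up everything under `pyrpm/`, while the old explicit globs only covered `pyrpm/*` and `pyrpm/tools/*`. A minimal Python sketch of what `make package` now assembles (the file lists mirror the Makefile; the zipfile-based rebuild is illustrative, not part of the commit):

```
import os
import zipfile

ZIPPED_FILES = ['s3rpm.py', 'gnupg.py']  # stored flat, at the zip root
ZIPPED_DIRS = ['pyrpm']                  # stored recursively, like `zip -r`

with zipfile.ZipFile('code.zip', 'w', zipfile.ZIP_DEFLATED) as zf:
    for f in ZIPPED_FILES:
        zf.write(f)
    for d in ZIPPED_DIRS:
        for root, _, names in os.walk(d):
            for name in names:
                zf.write(os.path.join(root, name))  # keeps pyrpm/... paths
```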

rpm/README.md

Lines changed: 92 additions & 24 deletions
@@ -2,36 +2,68 @@
 
 Automatic YUM repository building inside an S3 bucket, with lambda support
 
-## Setting up S3 and Lambda
-
-Clone the repo and get all other required files
+## Readme contents
+
+* [Setting up code, S3 and Lambda](#setting-up-code-s3-and-lambda)
+* [Getting the code](#getting-the-code)
+* [GPG key](#gpg-key)
+* [Environmental variables](#environmental-variables)
+* [Set up role](#set-up-role)
+* [Set up lambda with CLI](#set-up-lambda-with-cli)
+* [Set up lambda manually](#set-up-lambda-manually)
+* [The triggers](#the-triggers)
+* [Set up S3](#set-up-s3)
+* [Setting up yum](#setting-up-yum)
+* [First time set up](#first-time-set-up)
+* [Install/update](#installupdate)
+* [Notes](#notes)
+* [Tests](#tests)
+
+## Setting up code, S3 and Lambda
+
+### Getting the code
+Clone the repo, get all other required files and compress them
 ```
 git clone https://github.com/tactycal/lambdaRepos.git
 cd lambdaRepos/rpm
-pip3 install -t . -r requirements.txt
+make all
 ```
 
-Compress all needed files
+### GPG key
+Create your gpg key (skip to exporting your key if you already have one)
 ```
-zip code.zip s3rpm.py gnupg.py pyrpm/* pyrpm/tools/*
+gpg --gen-key
+# Follow the instructions
+# Create an 'RSA and RSA' key - option 1
+# For maximum encryption it is recommended to make a 4096 bit long key
+# The key should not expire
 ```
 
-Or just use `make set` instead of `zip` and `pip3` command
+Export your key
 
-Presuming you already have GPG key generated export secret key (you can skip this part if you don't want to GPG sign your repository)
 ```
-gpg -a --export-secret-key > secret.key
+gpg --export-secret-key -a "User Name" > secret.key # exports the secret key to secret.key
 ```
 
-Create new lambda function, set handler to **s3rpm.lambda_handler**, runtime to **python 3.6** and the triggers to:
+### Set up role
 
-* Object Created, suffix 'rpm'
-* Object Removed, suffix 'rpm'
-* If you are using certain directory as a repo, set it as prefix
+Create a new role with S3 read/write access
 
-Upload `code.zip` to lambda function
+Here is the minimal policy the role needs:
+```
+{"Version": "2012-10-17",
+ "Statement": [
+  {"Sid": "<THIS IS UNIQUE>",
+   "Action": [
+    "s3:GetObject",
+    "s3:PutObject",
+    "s3:PutObjectAcl"],
+   "Effect": "Allow",
+   "Resource": "arn:aws:s3:::<YOUR BUCKET NAME>/*"}]}
+```
 
-Set the environmental variables
+### Environmental variables
+These are the environmental variables you will have to set:
 
 | Key | Value |
 | --- | --- |
@@ -51,14 +83,44 @@ Set the environmental variables
 
 **REPO_DIR** Path to the repository from the bucket root. If none is set, the root of the repository is assumed to be the root of the bucket
 
+### Set up lambda with CLI
+
+[Install aws cli](http://docs.aws.amazon.com/cli/latest/userguide/installing.html)
+
+Create a new lambda function (replace the '<...>' placeholders; the role arn is from the role with S3 read/write access created above):
+```
+aws lambda create-function \
+    --function-name <name the function> \
+    --zip-file fileb://code.zip \
+    --role <role's arn> \
+    --handler s3rpm.lambda_handler \
+    --runtime python3.6 \
+    --environment Variables='{PUBLIC=<bool>, GPG_KEY=<file>, GPG_PASS=<password>, BUCKET_NAME=<bucket name>, REPO_DIR=<dir>}'
+```
+
+### Set up lambda manually
 
+If CLI is not your thing, you can upload the code manually
+
+Create a new lambda function, set the handler to **s3rpm.lambda_handler** and the runtime to **python 3.6**
+
+Upload `code.zip` to the lambda function
+
+### The triggers
+
+* Object Created (All), suffix 'rpm'
+* Object Removed (All), suffix 'rpm'
+* If you are using a certain directory as a repo, set it as prefix
+
+### Set up S3
 Upload the secret key file to the location you specified as GPG_KEY
 
-Upload GPG SIGNED .rpm file to desired folder, lambda function should now keep your repository up to date
+Upload a .rpm file to the desired folder; the lambda function should now keep your repository up to date
 
 ## Setting up yum
 
-**First time set up**
+### First time set up
 
 Create an `example.repo` file at `/etc/yum.repos.d/example.repo`
 ```
@@ -78,23 +140,29 @@ gpgkey=<link to public key of key you used for signing metadata files>
 * You can do `repo_gpgcheck=0` to skip gpg verification when installing packages
 * You can do `gpgcheck=1` if you are uploading signed rpm packages (lambda does not sign them, it signs only the metadata xml file)
 
+### Install/update
 Install package
 ```
-su
-yum install <package name>
+sudo yum install <package name>
 ```
 
 Upgrading package
 ```
-su
-yum upgrade
+sudo yum upgrade
 ```
 
 ## Notes
 
-.rpm and repodata/* in repository directory are and should be publicly accessible
+* .rpm and repodata/* in the repository directory are and should be publicly accessible
+
+* Don't forget to increase the timeout of the lambda function
 
-Don't forget to increase the timeout of lambda function
+* If somebody injects a malicious rpm file into your repo, it will be automatically added to the repository. It is your job to make the bucket secure enough for this not to happen!
 
-If somebody tries to inject a malicious rpm file in your repo it will be automaticly added to repository. It is your job to make bucket secure enough for this not to happen.!!!
+## Tests
 
+To run the unit tests:
+```
+make requires # installs dependencies
+make test     # runs the tests
+```
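The suffix/prefix trigger settings in the README mirror a guard the handler re-applies in code. A small illustrative sketch of that gate, using the names from s3rpm.py with made-up bucket and key values:

```
import os

# made-up values for illustration
os.environ['BUCKET_NAME'] = 'my-bucket'
os.environ['REPO_DIR'] = '/my/repo/'

bucket, key = 'my-bucket', 'my/repo/pkgname-0.3.8-x86_64.rpm'
prefix = '/'.join(key.split('/')[0:-1]) + '/'    # 'my/repo/'
s3_repo_dir = os.environ['REPO_DIR'].strip('/')  # 'my/repo'

# the same three-way check lambda_handler performs before touching the repo
accepted = (bucket == os.environ['BUCKET_NAME']
            and key.endswith('.rpm')
            and prefix.startswith(s3_repo_dir))
assert accepted
```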

rpm/s3rpm.py

Lines changed: 21 additions & 24 deletions
@@ -15,16 +15,13 @@ def lambda_handler(event, context):
     repo = YumRepository('/tmp/repo/') # set repository
     prefix = '/'.join(key.split('/')[0:-1])+'/'
 
-    if os.environ['REPO_DIR'].endswith('/'):
-        os.environ['REPO_DIR'] = os.environ['REPO_DIR'][:-1]
-    if os.environ['REPO_DIR'].startswith('/'):
-        os.environ['REPO_DIR'] = os.environ['REPO_DIR'][1:]
+    s3_repo_dir = os.environ['REPO_DIR'].strip('/')
 
     #make sure we are working with correct files
-    if bucket == os.environ['BUCKET_NAME'] and key.endswith(".rpm") and prefix.startswith(os.environ['REPO_DIR']):
+    if bucket == os.environ['BUCKET_NAME'] and key.endswith(".rpm") and prefix.startswith(s3_repo_dir):
         #check if repodata already exist, if not create new with key file
         print('Bucket and key\'s file accepted')
-        exists = check_bucket_file_existance(os.environ['REPO_DIR']+'/repodata/repomd.xml')
+        exists = check_bucket_file_existance(s3_repo_dir+'/repodata/repomd.xml')
         files = ['repomd.xml', 'primary.xml.gz','filelists.xml.gz', 'other.xml.gz']
 
         #make /tmp/repodata path
@@ -33,26 +30,26 @@ def lambda_handler(event, context):
         if exists:
             print('repodata already exists, old files will be overwritten')
             for f in files:
-                s3.download_file(os.environ['BUCKET_NAME'], os.environ['REPO_DIR']+'/repodata/'+f, repo.repodir+'repodata/'+f)
+                s3.download_file(os.environ['BUCKET_NAME'], s3_repo_dir+'/repodata/'+f, repo.repodir+'repodata/'+f)
             repo.read()
         print('Creating Metadata files')
-        repo, cache = check_changed_files(repo)
+        repo, cache = check_changed_files(repo, s3_repo_dir)
         #Check if object was removed
 
         repo.save()
 
         #sign metadata
         if not os.environ['GPG_KEY']=='':
-            sign_md_file(repo)
+            sign_md_file(repo, s3_repo_dir)
 
         #save files to bucket
         s3 = boto3.resource('s3')
         for f in files:
             with open(repo.repodir+'repodata/'+f, 'rb') as g:
-                f_index_obj = s3.Object(bucket_name=os.environ['BUCKET_NAME'], key=os.environ['REPO_DIR']+'/repodata/'+f)
+                f_index_obj = s3.Object(bucket_name=os.environ['BUCKET_NAME'], key=s3_repo_dir+'/repodata/'+f)
                 print("Writing file: %s" % (str(f_index_obj)))
                 f_index_obj.put(Body=g.read(-1), ACL=get_public())
-        f_index_obj = s3.Object(bucket_name=os.environ['BUCKET_NAME'], key=os.environ['REPO_DIR']+'/repo_cache')
+        f_index_obj = s3.Object(bucket_name=os.environ['BUCKET_NAME'], key=s3_repo_dir+'/repo_cache')
         print("Writing file: %s" % (str(f_index_obj)))
         f_index_obj.put(Body=str(json.dumps(cache)))
 
@@ -102,40 +99,40 @@ def get_public():
     acl = 'private'
     return acl
 
-def get_cache(repo):
+def get_cache(repo, s3_repo_dir):
     """
     Check for cache file
     """
-    if check_bucket_file_existance(os.environ['REPO_DIR']+'/repo_cache'):
-        print('Repodata cache (%s) found, attempting to write to it' %(os.environ['REPO_DIR']+'/repo_cache'))
+    if check_bucket_file_existance(s3_repo_dir+'/repo_cache'):
+        print('Repodata cache (%s) found, attempting to write to it' %(s3_repo_dir+'/repo_cache'))
         s3 = boto3.client('s3')
-        s3.download_file(os.environ['BUCKET_NAME'], os.environ['REPO_DIR']+'/repo_cache', repo.repodir + 'repo_cache')
+        s3.download_file(os.environ['BUCKET_NAME'], s3_repo_dir+'/repo_cache', repo.repodir + 'repo_cache')
         with open(repo.repodir + 'repo_cache', 'r') as f:
             cache = json.loads(f.read(-1))
     else:
         print('repodata_cache file doesn\'t exist. Creating new one')
         cache = {}
     return cache
 
-def check_changed_files(repo):
+def check_changed_files(repo, s3_repo_dir):
     """
     check if there are any new files in bucket or any deleted files
     """
-    print("Checking for changes : %s" % (os.environ['REPO_DIR']))
-    cache = get_cache(repo)
+    print("Checking for changes : %s" % (s3_repo_dir))
+    cache = get_cache(repo, s3_repo_dir)
     s3 = boto3.resource('s3')
     files = []
     #cycle through all objects ending with .rpm in REPO_DIR and check if they are already in repodata, if not add them
-    for obj in s3.Bucket(os.environ['BUCKET_NAME']).objects.filter(Prefix=os.environ['REPO_DIR']):
+    for obj in s3.Bucket(os.environ['BUCKET_NAME']).objects.filter(Prefix=s3_repo_dir):
         files.append(obj.key)
         if not obj.key.endswith(".rpm"):
             print('skipping %s - not rpm file' %(obj.key))
             continue
-        fname = obj.key[len(os.environ['REPO_DIR']):] # '/filename.rpm' - without path
+        fname = obj.key[len(s3_repo_dir):] # '/filename.rpm' - without path
         if fname not in cache:
             s3c = boto3.client('s3')
             #Create path to the folder where the file is downloaded, if it does not yet exist
-            prefix = '/'.join(obj.key.split('/')[0:-1])[len(os.environ['REPO_DIR']):]
+            prefix = '/'.join(obj.key.split('/')[0:-1])[len(s3_repo_dir):]
             create_new_dir_if_not_exist(repo.repodir+prefix)
             #Download file to repodir
             path = repo.repodir + fname
@@ -151,7 +148,7 @@ def check_changed_files(repo):
 
     removedPkgs = []
     for f in cache:
-        if f.endswith('.rpm') and os.environ['REPO_DIR']+f not in files:
+        if f.endswith('.rpm') and s3_repo_dir+f not in files:
             print('removing ' +f)
             repo = remove_pkg(repo, cache, f)
             removedPkgs.append(f)
@@ -173,7 +170,7 @@ def remove_pkg(repo, cache, key):
         print('Tried to delete %s entry but was not found in cache' % (filename))
     return repo
 
-def sign_md_file(repo):
+def sign_md_file(repo, s3_repo_dir):
     '''
     Using the gpg password assigned in env variable `GPG_PASS` and the key whose file path is
     assigned in env variable `GPG_KEY`
@@ -190,6 +187,6 @@ def sign_md_file(repo):
     signed = gpg.sign_file(stream, passphrase=os.environ['GPG_PASS'], clearsign=True, detach=True, binary=False)
 
     s3 = boto3.resource('s3')
-    sign_obj = s3.Object(bucket_name=os.environ['BUCKET_NAME'], key=os.environ['REPO_DIR'] + "/repodata/repomd.xml.asc")
+    sign_obj = s3.Object(bucket_name=os.environ['BUCKET_NAME'], key=s3_repo_dir + "/repodata/repomd.xml.asc")
     print('uploading repomd.xml.asc to /repodata')
     sign_obj.put(Body=str(signed), ACL=get_public())
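The refactor replaces in-place mutation of `os.environ['REPO_DIR']` with a local `s3_repo_dir`. One behavioral nuance worth noting: `str.strip('/')` removes *all* leading and trailing slashes, while the removed branches trimmed at most one per side. An illustrative comparison (`old_normalize` is a hypothetical helper reproducing the deleted branches, not code from the commit):

```
def old_normalize(repo_dir):
    # reproduces the removed endswith/startswith branches
    if repo_dir.endswith('/'):
        repo_dir = repo_dir[:-1]
    if repo_dir.startswith('/'):
        repo_dir = repo_dir[1:]
    return repo_dir

# identical on singly-delimited paths, the common case
for raw in ['/test/repo/', 'test/repo', '/', '']:
    assert old_normalize(raw) == raw.strip('/')

# they differ only on repeated edge slashes
assert old_normalize('//test//') == '/test/'  # trims one slash per side
assert '//test//'.strip('/') == 'test'        # trims them all
```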

rpm/s3rpm_test.py

Lines changed: 7 additions & 9 deletions
@@ -66,13 +66,13 @@ def test_cache(self, check_mock, s3_mock, yum_mock):
         check_mock.return_value = True
 
         with patch('s3rpm.open', m):
-            cachenew = s3rpm.get_cache(repo)
+            cachenew = s3rpm.get_cache(repo, os.environ['REPO_DIR'])
         s3_mock.client().download_file.assert_called_with('bucket', 'test_s3rpm/repo_cache', 'test_s3rpm/repo_cache')
         self.assertEqual(json.loads(cache), cachenew)
 
         check_mock.return_value = False
 
-        cachenew = s3rpm.get_cache(repo)
+        cachenew = s3rpm.get_cache(repo, os.environ['REPO_DIR'])
         self.assertEqual(cachenew, {})
 
     @patch('s3rpm.YumRepository')
@@ -91,7 +91,7 @@ def test_new_files(self, s3_mock, cache_mock, yump_mock, yum_mock):
         s3_mock.resource().Bucket().objects.filter.return_value = [MagicMock(key='test.file'), MagicMock(key='test_s3rpm/pkgname-0.3.8-x86_64.rpm'), MagicMock(key='test_s3rpm/pkgname-0.3.7-x86_64.rpm')]
         m = mock_open(read_data='')
         with patch('s3rpm.open', m):
-            reponew, cachenew = s3rpm.check_changed_files(repo)
+            reponew, cachenew = s3rpm.check_changed_files(repo, os.environ['REPO_DIR'])
 
         self.assertEqual(cache, cachenew)
         self.assertEqual(yum_mock.add_package.call_count, 1)
@@ -107,7 +107,7 @@ def test_delete_files(self, s3_mock, cache_mock, yum_mock):
         cache = {}
 
         s3_mock.resource().Bucket().objects.filter.return_value = [MagicMock(key='test.file')]
-        _, cachenew = s3rpm.check_changed_files(repo)
+        _, cachenew = s3rpm.check_changed_files(repo, os.environ['REPO_DIR'])
         self.assertEqual(cache, cachenew)
         self.assertEqual(yum_mock.remove_package.call_count, 1)
 
@@ -119,7 +119,7 @@ def test_gpg(self, s3_mock, gpg_mock, yum_mock):
         m = mock_open()
         repo = yum_mock()
         with patch('s3rpm.open', m):
-            s3rpm.sign_md_file(repo)
+            s3rpm.sign_md_file(repo, os.environ['REPO_DIR'])
         gpg_mock.GPG().sign_file.assert_called_with(s3rpm.open(), binary=False, clearsign=True, detach=True, passphrase='123')
         s3_mock.resource().Object().put.assert_called_with(ACL='public-read', Body=str(gpg_mock.GPG().sign_file()))
 
@@ -159,7 +159,7 @@ def test_defined_repodir(self, s3_mock, yum_mock, cache_mock, ):
     @patch('s3rpm.get_cache')
     @patch('s3rpm.YumRepository')
     @patch('s3rpm.boto3')
-    def test_gpg_test(self, s3_mock, yum_mock, cache_mock, sh_mock, gpg_mock):
+    def test_gpg_from_handler(self, s3_mock, yum_mock, cache_mock, sh_mock, gpg_mock):
         cache_mock.return_value = {"pkgname":"ID"}
 
         os.environ['GPG_KEY'] = 'KeyNowExists'
@@ -173,11 +173,9 @@ def test_gpg_test(self, s3_mock, yum_mock, cache_mock, sh_mock, gpg_mock):
         assert os.path.exists('test_s3rpm/testrepo/') == True
 
     @patch('s3rpm.boto3')
-    def test_bad_repo_dir_and_bucket_name(self, s3_mock):
-        os.environ['REPO_DIR'] = '/test/repo/'
+    def test_bad_bucket_name(self, s3_mock):
         os.environ['BUCKET_NAME'] = 'iamfakebucket'
         s3rpm.lambda_handler(S3_EVENT, {})
-        self.assertEqual(os.environ['REPO_DIR'], 'test/repo')
         s3_mock.client.assert_called_with('s3')
         self.assertEqual(len(s3_mock.resource().Object().put.mock_calls), 0)
 S3_EVENT = {"Records":[{"s3": {"object": {"key": "test_s3rpm/repo/pkgname-0.3.8-x86_64.rpm",},"bucket": {"name": "bucket",},},"eventName": "ObjectCreated:Put"}]}
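With REPO_DIR normalization now local to the handler, the old environment-mutation assertions are gone and the renamed test only checks the bucket-mismatch guard. A standalone sketch of what `test_bad_bucket_name` exercises (assumes `s3rpm` and its `pyrpm` dependency are importable; not part of the commit):

```
import os
from unittest.mock import patch

import s3rpm  # assumes the module and its dependencies are on the path

os.environ['REPO_DIR'] = 'test_s3rpm/repo'
os.environ['BUCKET_NAME'] = 'iamfakebucket'  # the event says 'bucket', so no match

S3_EVENT = {"Records": [{"s3": {"object": {"key": "test_s3rpm/repo/pkgname-0.3.8-x86_64.rpm"},
                                "bucket": {"name": "bucket"}},
                         "eventName": "ObjectCreated:Put"}]}

with patch('s3rpm.boto3') as s3_mock:
    s3rpm.lambda_handler(S3_EVENT, {})
    # with the wrong bucket name, nothing is written back to S3
    assert len(s3_mock.resource().Object().put.mock_calls) == 0
```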
