From 2cf43abe8ccf12476e9ee6ab3eebdc847d79c0f4 Mon Sep 17 00:00:00 2001
From: Sudarsan Reddy
Date: Thu, 5 May 2022 16:04:48 +0100
Subject: [PATCH] TUN-6175: Simplify debian packaging by structural upload

The way apt works is:

1. It looks at the Release file based on the `deb` line added to sources.list.
2. It uses this Release file to find the relative location of Packages or
   Packages.gz.
3. It uses the pool paths listed in Packages to find where each .deb file is
   located, then downloads and installs it.

This PR takes advantage of this by simply arranging the files in the layout
apt expects, thereby eliminating the need for an orchestrating endpoint. A
short sketch of the resulting object layout is appended after the diff.
---
 release_pkgs.py | 103 ++++++++++++++-----------------------------------
 1 file changed, 30 insertions(+), 73 deletions(-)

diff --git a/release_pkgs.py b/release_pkgs.py
index d2b41946..3cc0949c 100644
--- a/release_pkgs.py
+++ b/release_pkgs.py
@@ -6,14 +6,10 @@
     1. Create deb and yum repositories from .deb and .rpm files.
        This is also responsible for signing the packages and generally preparing
        them to be in an uploadable state.
-    2. Upload these packages to a storage and add metadata cross reference
-       for these to be accessed.
+    2. Upload these packages to storage in a format that apt and yum expect.
 """
-import requests
 import subprocess
 import os
-import io
-import shutil
 import logging
 from hashlib import sha256
 
@@ -21,52 +17,22 @@
 import boto3
 from botocore.client import Config
 from botocore.exceptions import ClientError
 
-BASE_KV_URL = 'https://api.cloudflare.com/client/v4/accounts/'
 # The front facing R2 URL to access assets from.
 R2_ASSET_URL = 'https://demo-r2-worker.cloudflare-tunnel.workers.dev/'
 
 class PkgUploader:
-    def __init__(self, kv_api_token, namespace, account_id, bucket_name, client_id, client_secret):
-        self.kv_api_token = kv_api_token
-        self.namespace = namespace
+    def __init__(self, account_id, bucket_name, client_id, client_secret):
         self.account_id = account_id
         self.bucket_name = bucket_name
         self.client_id = client_id
         self.client_secret = client_secret
 
-    def send_to_kv(self, key, value):
-        headers = {
-            "Content-Type": "application/json",
-            "Authorization": "Bearer " + self.kv_api_token,
-        }
-
-        kv_url = f"{BASE_KV_URL}{self.account_id}/storage/kv/namespaces/{self.namespace}/values/{key}"
-        response = requests.put(
-            kv_url,
-            headers=headers,
-            data=value
-        )
-
-        if response.status_code != 200:
-            jsonResponse = response.json()
-            errors = jsonResponse["errors"]
-            if len(errors) > 0:
-                raise Exception("failed to send to workers kv: {0}", errors[0])
-            else:
-                raise Exception("recieved error code: {0}", response.status_code)
-
-
-    def send_pkg_info(self, binary, flavor, asset_name, arch, uploaded_package_location):
-        key = f"pkg_{binary}_{flavor}_{arch}_{asset_name}"
-        print(f"writing key:{key} , value: {uploaded_package_location}")
-        self.send_to_kv(key, uploaded_package_location)
-
-
     def upload_pkg_to_r2(self, filename, upload_file_path):
         endpoint_url = f"https://{self.account_id}.r2.cloudflarestorage.com"
         token_secret_hash = sha256(self.client_secret.encode()).hexdigest()
 
         config = Config(
+            region_name = 'auto',
             s3={
                 "addressing_style": "path",
             }
@@ -141,15 +107,19 @@ class PkgCreator:
     def _clean_build_resources(self):
         subprocess.call(("reprepro", "clearvanished"))
 
-def upload_from_directories(pkg_uploader, directory, arch, release):
+"""
+    Walks through a directory and uploads its assets to R2.
+    directory: root directory to walk through (String).
+    release: release string. If this value is None, a specific release path will not be created
+             and the release will be uploaded to the default path.
+    binary: name of the binary to upload
+"""
+def upload_from_directories(pkg_uploader, directory, release, binary):
     for root, _ , files in os.walk(directory):
         for file in files:
-            root_elements = root.split("/")
-            upload_file_name = os.path.join(root, arch, release, file)
-            flavor_prefix = root_elements[1]
-            if root_elements[0] == "pool":
-                upload_file_name = os.path.join(root, file)
-                flavor_prefix = "deb"
+            upload_file_name = os.path.join(binary, root, file)
+            if release:
+                upload_file_name = os.path.join(release, upload_file_name)
             filename = os.path.join(root,file)
             try:
                 pkg_uploader.upload_pkg_to_r2(filename, upload_file_name)
@@ -157,20 +127,10 @@
                 logging.error(e)
                 return
 
-            # save to workers kv in the following formats
-            # Example:
-            # key : pkg_cloudflared_bullseye_InRelease,
-            # value: https://r2.cloudflarestorage.com/dists/bullseye/amd64/2022_3_4/InRelease
-            r2_asset_url = f"{R2_ASSET_URL}{upload_file_name}"
-            pkg_uploader.send_pkg_info("cloudflared", flavor_prefix, upload_file_name, arch, r2_asset_url)
-
-    # TODO https://jira.cfops.it/browse/TUN-6163: Add a key for latest version.
-
 """
     1. looks into a built_artifacts folder for cloudflared debs
     2. creates Packages.gz, InRelease (signed) files
-    3. uploads them to Cloudflare R2 and
-    4. adds a Workers KV reference
+    3. uploads them to Cloudflare R2
 
     pkg_creator, pkg_uploader: are instantiations of the two classes above.
 
@@ -179,32 +139,32 @@
     release_version: is the cloudflared release version.
 """
-def create_deb_packaging(pkg_creator, pkg_uploader, flavors, gpg_key_id, binary_name, arch, package_component, release_version):
+def create_deb_packaging(pkg_creator, pkg_uploader, flavors, gpg_key_id, binary_name, archs, package_component, release_version):
     # set configuration for package creation.
-    print(f"initialising configuration for {binary_name} , {arch}")
+    print(f"initialising configuration for {binary_name} , {archs}")
     pkg_creator.create_distribution_conf(
         "./conf/distributions",
         binary_name,
         binary_name,
         flavors,
-        [arch],
+        archs,
         package_component,
         f"apt repository for {binary_name}",
         gpg_key_id)
 
     # create deb pkgs
     for flavor in flavors:
-        print(f"creating deb pkgs for {flavor} and {arch}...")
-        pkg_creator.create_deb_pkgs(flavor, f"./built_artifacts/cloudflared-linux-{arch}.deb")
+        for arch in archs:
+            print(f"creating deb pkgs for {flavor} and {arch}...")
+            pkg_creator.create_deb_pkgs(flavor, f"./built_artifacts/cloudflared-linux-{arch}.deb")
 
-    print("uploading to r2...")
-    upload_from_directories(pkg_uploader, "dists", arch, release_version)
-    upload_from_directories(pkg_uploader, "pool", arch, release_version)
+    print("uploading latest to r2...")
+    upload_from_directories(pkg_uploader, "dists", None, binary_name)
+    upload_from_directories(pkg_uploader, "pool", None, binary_name)
 
-    print("cleaning up directories...")
-    shutil.rmtree("./dists")
-    shutil.rmtree("./pool")
-    shutil.rmtree("./db")
+    print(f"uploading versioned release {release_version} to r2...")
+    upload_from_directories(pkg_uploader, "dists", release_version, binary_name)
+    upload_from_directories(pkg_uploader, "pool", release_version, binary_name)
 
 #TODO: https://jira.cfops.it/browse/TUN-6146 will extract this into it's own command line script.
 if __name__ == "__main__":
@@ -215,15 +175,12 @@ if __name__ == "__main__":
     bucket_name = os.getenv('R2_BUCKET_NAME')
     client_id = os.getenv('R2_CLIENT_ID')
     client_secret = os.getenv('R2_CLIENT_SECRET')
-    tunnel_account_id = '5ab4e9dfbd435d24068829fda0077963'
-    kv_namespace = os.getenv('KV_NAMESPACE')
-    kv_api_token = os.getenv('KV_API_TOKEN')
+    tunnel_account_id = os.getenv('R2_ACCOUNT_ID')
 
     release_version = os.getenv('RELEASE_VERSION')
     gpg_key_id = os.getenv('GPG_KEY_ID')
 
-    pkg_uploader = PkgUploader(kv_api_token, kv_namespace, tunnel_account_id, bucket_name, client_id, client_secret)
+    pkg_uploader = PkgUploader(tunnel_account_id, bucket_name, client_id, client_secret)
 
     archs = ["amd64", "386", "arm64"]
     flavors = ["bullseye", "buster", "bionic"]
-    for arch in archs:
-        create_deb_packaging(pkg_creator, pkg_uploader, flavors, gpg_key_id, "cloudflared", arch, "main", release_version)
+    create_deb_packaging(pkg_creator, pkg_uploader, flavors, gpg_key_id, "cloudflared", archs, "main", release_version)
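
Not part of the commit: a minimal Python sketch of the object layout the patched
upload_from_directories() produces, to illustrate the apt flow described in the
commit message. The join order mirrors the patched code; the flavor, pool
sub-path, artifact name, release version, and sources.list line below are
illustrative assumptions, not values taken from this patch.

# Sketch only (assumptions noted above): rebuilds R2 object keys the same way
# the patched upload_from_directories() does.
import os

def r2_key(binary, root, file, release=None):
    # <binary>/<root>/<file>, optionally prefixed by the release version.
    key = os.path.join(binary, root, file)
    if release:
        key = os.path.join(release, key)
    return key

if __name__ == "__main__":
    # "Latest" objects that a stock apt client resolves via a line such as
    #   deb https://demo-r2-worker.cloudflare-tunnel.workers.dev/cloudflared bullseye main
    print(r2_key("cloudflared", "dists/bullseye", "InRelease"))
    # cloudflared/dists/bullseye/InRelease
    print(r2_key("cloudflared", "pool/main/c/cloudflared", "cloudflared_2022.5.0_amd64.deb"))
    # cloudflared/pool/main/c/cloudflared/cloudflared_2022.5.0_amd64.deb

    # The same tree is uploaded again under a versioned prefix (version made up).
    print(r2_key("cloudflared", "dists/bullseye", "InRelease", release="2022.5.0"))
    # 2022.5.0/cloudflared/dists/bullseye/InRelease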