diff --git a/backend/requirements.txt b/backend/requirements.txt
index 526bd2fd..16969e97 100644
--- a/backend/requirements.txt
+++ b/backend/requirements.txt
@@ -1,9 +1,9 @@
 anonlink==0.12.5
 bitmath==1.3.1.2
 celery==4.4.0
-clkhash==0.15.0
+clkhash==0.15.1
 colorama==0.4.1 # required for structlog
-connexion==1.4
+connexion[swagger-ui]==2.6
 Flask-Opentracing==0.2.0
 Flask==1.1.1
 flower==0.9.2
diff --git a/benchmarking/VERSION b/benchmarking/VERSION
index 268b0334..937cd784 100644
--- a/benchmarking/VERSION
+++ b/benchmarking/VERSION
@@ -1 +1 @@
-v0.3.0
+v0.3.1
diff --git a/benchmarking/benchmark.py b/benchmarking/benchmark.py
index 39ed8965..b520d9a9 100644
--- a/benchmarking/benchmark.py
+++ b/benchmarking/benchmark.py
@@ -24,7 +24,7 @@
 import time
 import os
 
-from clkhash import rest_client
+from anonlinkclient.rest_client import RestClient
 
 from pprint import pprint
 from traceback import format_exc
@@ -41,6 +41,8 @@
 
 logger = logging
 logger.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
 
+rest_client = RestClient(os.getenv('SERVER'))
+
 
 def load_experiments(filepath):
@@ -89,7 +91,7 @@
         'results_path': results_path
     }
 
-    if 'OBJECT_STORE_ACCESS_KEY' in os.environ:
+    if 'OBJECT_STORE_BUCKET' in os.environ:
         object_store_server = os.getenv('OBJECT_STORE_SERVER')
         object_store_access_key = os.getenv('OBJECT_STORE_ACCESS_KEY')
         object_store_secret_key = os.getenv('OBJECT_STORE_SECRET_KEY')
@@ -232,11 +234,11 @@
     return result
 
 
-def delete_resources(config, credentials, run):
+def delete_resources(credentials, run):
     try:
         if run is not None and 'run_id' in run:
-            rest_client.run_delete(config['server'], credentials['project_id'], run['run_id'], credentials['result_token'])
-            rest_client.project_delete(config['server'], credentials['project_id'], credentials['result_token'])
+            rest_client.run_delete(credentials['project_id'], run['run_id'], credentials['result_token'])
+            rest_client.project_delete(credentials['project_id'], credentials['result_token'])
     except Exception as e:
         logger.warning('Error while deleting resources... {}'.format(e))
 
@@ -299,8 +301,7 @@
 def run_experiments(config):
     """ Run all the experiments specified in the configuration.
     """
-    server = config['server']
-    rest_client.server_get_status(server)
+    rest_client.server_get_status()
 
     results = {'experiments': []}
     for experiment in config['experiments']:
@@ -314,39 +315,39 @@
             logger.info('running experiment: {}'.format(current_experiment))
             if repetition != 1:
                 logger.info('\trepetition {} out of {}'.format(rep + 1, repetition))
-            result = run_single_experiment(server, config, threshold, sizes, current_experiment)
+            result = run_single_experiment(config, threshold, sizes, current_experiment)
             results['experiments'].append(result)
 
     return results
 
 
-def run_single_experiment(server, config, threshold, sizes, experiment):
+def run_single_experiment(config, threshold, sizes, experiment):
     result = {}
     credentials = {}
     run = {}
     logger.info("Starting time: {}".format(time.asctime()))
     nb_parties = len(sizes)
     try:
-        credentials = rest_client.project_create(server, config['schema'], 'groups',
+        credentials = rest_client.project_create(config['schema'], 'groups',
                                                  "benchy_{}".format(experiment), parties=nb_parties)
         # upload clks
         upload_binary_clks(config, sizes, credentials)
 
         # create run
         project_id = credentials['project_id']
         result['project_id'] = project_id
-        run = rest_client.run_create(server, project_id, credentials['result_token'],
+        run = rest_client.run_create(project_id, credentials['result_token'],
                                      threshold, "{}_{}".format(experiment, threshold))
         # wait for result
         run_id = run['run_id']
         result['run_id'] = run_id
         logger.info(f'waiting for run {run_id} from the project {project_id} to finish')
-        status = rest_client.wait_for_run(server, project_id, run_id,
-                                          credentials['result_token'], timeout=config['timeout'])
+        status = rest_client.wait_for_run(project_id, run_id,
+                                          credentials['result_token'], timeout=config['timeout'], update_period=5)
         if status['state'] != 'completed':
            raise RuntimeError('run did not finish!\n{}'.format(status))
         logger.info('experiment successful. Evaluating results now...')
-        groups = rest_client.run_get_result_text(server, project_id, run_id, credentials['result_token'])
+        groups = rest_client.run_get_result_text(project_id, run_id, credentials['result_token'])
         groups = json.loads(groups)['groups']
         truth_groups = load_truth(config, sizes)
         tt = score_accuracy(groups, truth_groups, nb_parties)
@@ -357,7 +358,7 @@
         result.update({'name': experiment, 'status': 'ERROR', 'description': e_trace})
     finally:
         logger.info('cleaning up...')
-        delete_resources(config, credentials, run)
+        delete_resources(credentials, run)
         logger.info("Ending time: {}".format(time.asctime()))
 
     return result
@@ -383,7 +384,7 @@
 
 def main():
     config = read_config()
-    server_status = rest_client.server_get_status(config['server'])
+    server_status = rest_client.server_get_status()
     version = requests.get(config['server'] + "/api/v1/version").json()
     logger.info(server_status)
     download_data(config)
diff --git a/benchmarking/requirements.txt b/benchmarking/requirements.txt
index e32210f6..35ebc401 100644
--- a/benchmarking/requirements.txt
+++ b/benchmarking/requirements.txt
@@ -1,6 +1,6 @@
+anonlink-client==0.0.1
 arrow
 boto3
-clkhash==0.14.0
 jsonschema
 pandas
 requests
diff --git a/deployment/jobs/benchmark/timing-test-job.yaml b/deployment/jobs/benchmark/timing-test-job.yaml
index 571e8628..56d44e63 100644
--- a/deployment/jobs/benchmark/timing-test-job.yaml
+++ b/deployment/jobs/benchmark/timing-test-job.yaml
@@ -21,7 +21,7 @@ spec:
               mountPath: /cache
       containers:
         - name: entitytester
-          image: data61/anonlink-benchmark:v0.3.0
+          image: data61/anonlink-benchmark:v0.3.1
           env:
             - name: SERVER
              value: "https://anonlink.easd.data61.xyz"
@@ -35,16 +35,18 @@
               value: "/cache/schema.json"
             - name: RESULTS_PATH
               value: "/tmp/results.json"
-            - name: OBJECT_STORE_ACCESS_KEY
-              valueFrom:
-                secretKeyRef:
-                  name: anonlink-benchmark-aws-credentials
-                  key: OBJECT_STORE_ACCESS_KEY
-            - name: OBJECT_STORE_SECRET_KEY
-              valueFrom:
-                secretKeyRef:
-                  name: anonlink-benchmark-aws-credentials
-                  key: OBJECT_STORE_SECRET_KEY
+            - name: OBJECT_STORE_BUCKET
+              value: "anonlink-benchmark-results"
+#            - name: OBJECT_STORE_ACCESS_KEY
+#              valueFrom:
+#                secretKeyRef:
+#                  name: anonlink-benchmark-aws-credentials
+#                  key: OBJECT_STORE_ACCESS_KEY
+#            - name: OBJECT_STORE_SECRET_KEY
+#              valueFrom:
+#                secretKeyRef:
+#                  name: anonlink-benchmark-aws-credentials
+#                  key: OBJECT_STORE_SECRET_KEY
           volumeMounts:
             - name: experiments-volume
              mountPath: /config
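
Note on the migration pattern in benchmark.py: the module-level functions from clkhash.rest_client are replaced by anonlink-client's RestClient, which is constructed once from the SERVER environment variable, so the per-call server argument disappears. The snippet below is a minimal usage sketch of that pattern, assuming the anonlink-client==0.0.1 RestClient methods as they appear in this patch; the schema file path, project and run names, threshold, timeout and party count are placeholders, not values from the patch.

    import json
    import os

    from anonlinkclient.rest_client import RestClient

    # One client per server; later calls no longer pass the server URL.
    rest_client = RestClient(os.getenv('SERVER'))
    rest_client.server_get_status()

    # Assumed: the schema argument is the parsed linkage schema (a dict).
    with open('schema.json') as f:
        schema = json.load(f)

    # Create a project and a run, mirroring run_single_experiment() above.
    credentials = rest_client.project_create(schema, 'groups', 'example-project', parties=2)
    run = rest_client.run_create(credentials['project_id'], credentials['result_token'],
                                 0.85, 'example-run')

    # Poll until the run completes, then fetch the groups result.
    status = rest_client.wait_for_run(credentials['project_id'], run['run_id'],
                                      credentials['result_token'], timeout=600, update_period=5)
    groups = json.loads(rest_client.run_get_result_text(
        credentials['project_id'], run['run_id'], credentials['result_token']))['groups']

    # Clean up, mirroring delete_resources() above.
    rest_client.run_delete(credentials['project_id'], run['run_id'], credentials['result_token'])
    rest_client.project_delete(credentials['project_id'], credentials['result_token'])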