From b2451e9a47c1f58657a9d08a006c443e6cadf8e1 Mon Sep 17 00:00:00 2001 From: mark-keaton Date: Thu, 15 Jun 2023 18:13:38 -0500 Subject: [PATCH 01/27] fix: Makes two properties optional on MigrateUserResponse. (#663) --- lambda-events/src/event/cognito/mod.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lambda-events/src/event/cognito/mod.rs b/lambda-events/src/event/cognito/mod.rs index 6874ee24..49f2eebd 100644 --- a/lambda-events/src/event/cognito/mod.rs +++ b/lambda-events/src/event/cognito/mod.rs @@ -254,7 +254,9 @@ pub struct CognitoEventUserPoolsMigrateUserResponse { pub final_user_status: Option, #[serde(default)] pub message_action: Option, - pub desired_delivery_mediums: Vec, + #[serde(default)] + pub desired_delivery_mediums: Option>, + #[serde(default, deserialize_with = "deserialize_nullish_boolean")] pub force_alias_creation: bool, } From aa80e74d30ac7d14264291bfee3e302c3b597e4d Mon Sep 17 00:00:00 2001 From: Peter Borkuti Date: Tue, 20 Jun 2023 04:59:54 +0200 Subject: [PATCH 02/27] Examples basic s3 object lambda thumbnail (#664) * fix example basic-s3-thumbnail test * basic-s3-object-lambda-thumbnail example (#625) Forwards a thumbnail to the user instead of the requested file --- .../Cargo.toml | 34 ++++ .../README.md | 40 ++++ .../src/main.rs | 178 ++++++++++++++++++ .../src/s3.rs | 90 +++++++++ .../testdata/image.png | Bin 0 -> 282 bytes .../testdata/thumbnail.png | Bin 0 -> 82 bytes examples/basic-s3-thumbnail/Cargo.toml | 1 + examples/basic-s3-thumbnail/src/main.rs | 6 +- 8 files changed, 346 insertions(+), 3 deletions(-) create mode 100644 examples/basic-s3-object-lambda-thumbnail/Cargo.toml create mode 100644 examples/basic-s3-object-lambda-thumbnail/README.md create mode 100644 examples/basic-s3-object-lambda-thumbnail/src/main.rs create mode 100644 examples/basic-s3-object-lambda-thumbnail/src/s3.rs create mode 100644 examples/basic-s3-object-lambda-thumbnail/testdata/image.png create mode 100644 examples/basic-s3-object-lambda-thumbnail/testdata/thumbnail.png diff --git a/examples/basic-s3-object-lambda-thumbnail/Cargo.toml b/examples/basic-s3-object-lambda-thumbnail/Cargo.toml new file mode 100644 index 00000000..493846ad --- /dev/null +++ b/examples/basic-s3-object-lambda-thumbnail/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "basic-s3-object-lambda-thumbnail" +version = "0.1.0" +edition = "2021" + +# Starting in Rust 1.62 you can use `cargo add` to add dependencies +# to your project. +# +# If you're using an older Rust version, +# download cargo-edit(https://github.com/killercup/cargo-edit#installation) +# to install the `add` subcommand. +# +# Running `cargo add DEPENDENCY_NAME` will +# add the latest version of a dependency to the list, +# and it will keep the alphabetic ordering for you. 
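+#
+# For example, `cargo add serde_json` run in this directory would append the
+# latest serde_json release to the `[dependencies]` table below (the crate
+# name is chosen only for illustration).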
+ +[dependencies] +aws_lambda_events = "0.8.3" +lambda_runtime = { path = "../../lambda-runtime" } +serde = "1" +tokio = { version = "1", features = ["macros"] } +tracing = { version = "0.1" } +tracing-subscriber = { version = "0.3", default-features = false, features = ["ansi", "fmt"] } +aws-config = "0.55.3" +aws-sdk-s3 = "0.28.0" +thumbnailer = "0.4.0" +mime = "0.3.16" +async-trait = "0.1.66" +ureq = "2.6.2" +aws-smithy-http = "0.55.3" + +[dev-dependencies] +mockall = "0.11.3" +tokio-test = "0.4.2" diff --git a/examples/basic-s3-object-lambda-thumbnail/README.md b/examples/basic-s3-object-lambda-thumbnail/README.md new file mode 100644 index 00000000..e9347fbb --- /dev/null +++ b/examples/basic-s3-object-lambda-thumbnail/README.md @@ -0,0 +1,40 @@ +# AWS S3 Object Lambda Function + +It uses a GetObject event and it returns with a thumbnail instead of the real +object from the S3 bucket. +The thumbnail was tested only witn PNG files. + +## Build & Deploy + +1. Install [cargo-lambda](https://github.com/cargo-lambda/cargo-lambda#installation) +2. Build the function with `cargo lambda build --release --arm64 --output-format zip` +3. Upload the bootstrap.zip file from the directory:`target/lambda/basic-s3-object-lambda-thumbnail/` + +## Setup on AWS S3 + +1. You need a bucket and upload a PNG file to that bucket +2. Set Access Point for that bucket +3. Set Object Lambda Access Point for the access point and use the uploaded lambda function as a transformer + +## Set Up on AWS Lambda + +0. Click on Code tab +1. Runtime settings - runtime: Custom runtime on Amazon Linux 2 +2. Runtime settings - Architecture: arm64 + +## Set Up on AWS IAM + +1. Click on Roles +2. Search the lambda function name +3. Add the permission: AmazonS3ObjectLambdaExecutionRolePolicy + +## How to check this lambda + +1. Go to S3 +2. Click on Object Lambda Access Point +3. Click on your object lambda access point name +4. click on one uploaded PNG file +5. Click on the activated Open button + +### Expected: +A new browser tab opens with a 128x128 thumbnail diff --git a/examples/basic-s3-object-lambda-thumbnail/src/main.rs b/examples/basic-s3-object-lambda-thumbnail/src/main.rs new file mode 100644 index 00000000..7786f56e --- /dev/null +++ b/examples/basic-s3-object-lambda-thumbnail/src/main.rs @@ -0,0 +1,178 @@ +use std::{error, io::Cursor}; + +use aws_lambda_events::s3::object_lambda::{GetObjectContext, S3ObjectLambdaEvent}; +use aws_sdk_s3::Client as S3Client; +use lambda_runtime::{run, service_fn, Error, LambdaEvent}; +use s3::{GetFile, SendFile}; +use thumbnailer::{create_thumbnails, ThumbnailSize}; + +mod s3; + +/** +This s3 object lambda handler + * downloads the asked file + * creates a PNG thumbnail from it + * forwards it to the browser +*/ +pub(crate) async fn function_handler( + event: LambdaEvent, + size: u32, + client: &T, +) -> Result> { + tracing::info!("handler starts"); + + let context: GetObjectContext = event.payload.get_object_context.unwrap(); + + let route = context.output_route; + let token = context.output_token; + let s3_url = context.input_s3_url; + + tracing::info!("Route: {}, s3_url: {}", route, s3_url); + + let image = client.get_file(s3_url)?; + tracing::info!("Image loaded. Length: {}", image.len()); + + let thumbnail = get_thumbnail(image, size); + tracing::info!("thumbnail created. 
Length: {}", thumbnail.len()); + + // It sends the thumbnail back to the user + + client.send_file(route, token, thumbnail).await + + /* + match client.send_file(route, token, thumbnail).await { + Ok(msg) => tracing::info!(msg), + Err(msg) => tracing::info!(msg) + }; + + tracing::info!("handler ends"); + + Ok(()) + */ +} + +fn get_thumbnail(vec: Vec, size: u32) -> Vec { + let reader = Cursor::new(vec); + let mut thumbnails = create_thumbnails(reader, mime::IMAGE_PNG, [ThumbnailSize::Custom((size, size))]).unwrap(); + + let thumbnail = thumbnails.pop().unwrap(); + let mut buf = Cursor::new(Vec::new()); + thumbnail.write_png(&mut buf).unwrap(); + + buf.into_inner() +} + +#[tokio::main] +async fn main() -> Result<(), Error> { + // required to enable CloudWatch error logging by the runtime + tracing_subscriber::fmt() + .with_max_level(tracing::Level::TRACE) + // disable printing the name of the module in every log line. + .with_target(false) + // this needs to be set to false, otherwise ANSI color codes will + // show up in a confusing manner in CloudWatch logs. + .with_ansi(false) + // disabling time is handy because CloudWatch will add the ingestion time. + .without_time() + .init(); + + let shared_config = aws_config::load_from_env().await; + let client = S3Client::new(&shared_config); + let client_ref = &client; + + let func = service_fn(move |event| async move { function_handler(event, 128, client_ref).await }); + + let _ = run(func).await; + + Ok(()) +} + +#[cfg(test)] +mod tests { + use std::fs::File; + use std::io::BufReader; + use std::io::Read; + + use super::*; + use async_trait::async_trait; + use aws_lambda_events::s3::object_lambda::Configuration; + use aws_lambda_events::s3::object_lambda::HeadObjectContext; + use aws_lambda_events::s3::object_lambda::ListObjectsContext; + use aws_lambda_events::s3::object_lambda::ListObjectsV2Context; + use aws_lambda_events::s3::object_lambda::UserIdentity; + use aws_lambda_events::s3::object_lambda::UserRequest; + use aws_lambda_events::serde_json::json; + use lambda_runtime::{Context, LambdaEvent}; + use mockall::mock; + use s3::GetFile; + use s3::SendFile; + + #[tokio::test] + async fn response_is_good() { + mock! 
{ + FakeS3Client {} + + #[async_trait] + impl GetFile for FakeS3Client { + pub fn get_file(&self, url: String) -> Result, Box>; + } + #[async_trait] + impl SendFile for FakeS3Client { + pub async fn send_file(&self, route: String, token: String, vec: Vec) -> Result>; + } + } + + let mut mock = MockFakeS3Client::new(); + + mock.expect_get_file() + .withf(|u: &String| u.eq("S3_URL")) + .returning(|_1| Ok(get_file("testdata/image.png"))); + + mock.expect_send_file() + .withf(|r: &String, t: &String, by| { + let thumbnail = get_file("testdata/thumbnail.png"); + return r.eq("O_ROUTE") && t.eq("O_TOKEN") && by == &thumbnail; + }) + .returning(|_1, _2, _3| Ok("File sent.".to_string())); + + let payload = get_s3_event(); + let context = Context::default(); + let event = LambdaEvent { payload, context }; + + let result = function_handler(event, 10, &mock).await.unwrap(); + + assert_eq!(("File sent."), result); + } + + fn get_file(name: &str) -> Vec { + let f = File::open(name); + let mut reader = BufReader::new(f.unwrap()); + let mut buffer = Vec::new(); + + reader.read_to_end(&mut buffer).unwrap(); + + return buffer; + } + + fn get_s3_event() -> S3ObjectLambdaEvent { + return S3ObjectLambdaEvent { + x_amz_request_id: ("ID".to_string()), + head_object_context: (Some(HeadObjectContext::default())), + list_objects_context: (Some(ListObjectsContext::default())), + get_object_context: (Some(GetObjectContext { + input_s3_url: ("S3_URL".to_string()), + output_route: ("O_ROUTE".to_string()), + output_token: ("O_TOKEN".to_string()), + })), + list_objects_v2_context: (Some(ListObjectsV2Context::default())), + protocol_version: ("VERSION".to_string()), + user_identity: (UserIdentity::default()), + user_request: (UserRequest::default()), + configuration: (Configuration { + access_point_arn: ("APRN".to_string()), + supporting_access_point_arn: ("SAPRN".to_string()), + payload: (json!(null)), + }), + }; + } +} diff --git a/examples/basic-s3-object-lambda-thumbnail/src/s3.rs b/examples/basic-s3-object-lambda-thumbnail/src/s3.rs new file mode 100644 index 00000000..71e03ffc --- /dev/null +++ b/examples/basic-s3-object-lambda-thumbnail/src/s3.rs @@ -0,0 +1,90 @@ +use async_trait::async_trait; +use aws_sdk_s3::{operation::write_get_object_response::WriteGetObjectResponseError, Client as S3Client}; +use aws_smithy_http::{byte_stream::ByteStream, result::SdkError}; +use std::{error, io::Read}; + +pub trait GetFile { + fn get_file(&self, url: String) -> Result, Box>; +} + +#[async_trait] +pub trait SendFile { + async fn send_file(&self, route: String, token: String, vec: Vec) -> Result>; +} + +impl GetFile for S3Client { + fn get_file(&self, url: String) -> Result, Box> { + tracing::info!("get file url {}", url); + + let resp = ureq::get(&url).call()?; + let len: usize = resp.header("Content-Length").unwrap().parse()?; + + let mut bytes: Vec = Vec::with_capacity(len); + + std::io::Read::take(resp.into_reader(), 10_000_000).read_to_end(&mut bytes)?; + + tracing::info!("got {} bytes", bytes.len()); + + Ok(bytes) + } +} + +#[async_trait] +impl SendFile for S3Client { + async fn send_file(&self, route: String, token: String, vec: Vec) -> Result> { + tracing::info!("send file route {}, token {}, length {}", route, token, vec.len()); + + let bytes = ByteStream::from(vec); + + let write = self + .write_get_object_response() + .request_route(route) + .request_token(token) + .status_code(200) + .body(bytes) + .send() + .await; + + if write.is_err() { + let sdk_error = write.err().unwrap(); + check_error(sdk_error); + 
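+            // check_error above only logs diagnostic details about the SDK failure;
+            // a generic error is still returned so the caller sees the failed send.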
Err("WriteGetObjectResponse creation error".into()) + } else { + Ok("File sent.".to_string()) + } + } +} + +fn check_error(error: SdkError) { + match error { + SdkError::ConstructionFailure(_err) => { + tracing::info!("ConstructionFailure"); + } + SdkError::DispatchFailure(err) => { + tracing::info!("DispatchFailure"); + if err.is_io() { + tracing::info!("IO error"); + }; + if err.is_timeout() { + tracing::info!("Timeout error"); + }; + if err.is_user() { + tracing::info!("User error"); + }; + if err.is_other().is_some() { + tracing::info!("Other error"); + }; + } + SdkError::ResponseError(_err) => tracing::info!("ResponseError"), + SdkError::TimeoutError(_err) => tracing::info!("TimeoutError"), + SdkError::ServiceError(err) => { + tracing::info!("ServiceError"); + let wgore = err.into_err(); + let meta = wgore.meta(); + let code = meta.code().unwrap_or_default(); + let msg = meta.message().unwrap_or_default(); + tracing::info!("code: {}, message: {}, meta: {}", code, msg, meta); + } + _ => tracing::info!("other error"), + } +} diff --git a/examples/basic-s3-object-lambda-thumbnail/testdata/image.png b/examples/basic-s3-object-lambda-thumbnail/testdata/image.png new file mode 100644 index 0000000000000000000000000000000000000000..078d155f6bf6735eb087eb0195b3e35f9f424d04 GIT binary patch literal 282 zcmeAS@N?(olHy`uVBq!ia0vp^DIm-UBp4!QuJ{S0SkfJR9T^xl_H+M9WCijSl0AZa z85pY67#JE_7#My5g&JNkFq9fFFuY1&V6d9Oz#v{QXIG#NP=cu>$S;_Ip=|P53lJ~K z+uenM@oty!5+IMg#M9T6{W-Ikn3DL(-uF5{ArVg(#}JFt$q5pyixWh8ngSjC85meA z7#KARcm4s&tCqM%l%ynf4NqV|ChEy=Vy|59;VQAR!XXWJ= d863$M85tR8F)&*H Date: Thu, 22 Jun 2023 07:54:56 -0700 Subject: [PATCH 03/27] Implement custom deserializer for LambdaRequest (#666) This deserializer gives us full control over the error message that we return for invalid payloads. The default message that Serde returns is usually very confusing, and it's been reported many times as something people don't understand. This code is a copy of the code that Serde generates when it expands the Deserialize macro. 
Signed-off-by: David Calavera --- lambda-events/Cargo.toml | 2 +- lambda-events/src/event/alb/mod.rs | 1 + lambda-events/src/event/apigw/mod.rs | 23 ++- .../src/fixtures/example-apigw-request.json | 145 +++++++++++------- lambda-http/Cargo.toml | 2 +- lambda-http/src/deserializer.rs | 117 ++++++++++++++ lambda-http/src/lib.rs | 1 + lambda-http/src/request.rs | 5 +- 8 files changed, 238 insertions(+), 58 deletions(-) create mode 100644 lambda-http/src/deserializer.rs diff --git a/lambda-events/Cargo.toml b/lambda-events/Cargo.toml index b1108c63..28df6b4a 100644 --- a/lambda-events/Cargo.toml +++ b/lambda-events/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "aws_lambda_events" -version = "0.10.0" +version = "0.11.0" description = "AWS Lambda event definitions" authors = [ "Christian Legnitto ", diff --git a/lambda-events/src/event/alb/mod.rs b/lambda-events/src/event/alb/mod.rs index 259dce23..7bb1eb7f 100644 --- a/lambda-events/src/event/alb/mod.rs +++ b/lambda-events/src/event/alb/mod.rs @@ -9,6 +9,7 @@ use serde::{Deserialize, Serialize}; /// `AlbTargetGroupRequest` contains data originating from the ALB Lambda target group integration #[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)] #[serde(rename_all = "camelCase")] +#[serde(deny_unknown_fields)] pub struct AlbTargetGroupRequest { #[serde(with = "http_method")] pub http_method: Method, diff --git a/lambda-events/src/event/apigw/mod.rs b/lambda-events/src/event/apigw/mod.rs index 917f06aa..b595d825 100644 --- a/lambda-events/src/event/apigw/mod.rs +++ b/lambda-events/src/event/apigw/mod.rs @@ -13,6 +13,7 @@ use std::collections::HashMap; /// `ApiGatewayProxyRequest` contains data coming from the API Gateway proxy #[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)] #[serde(rename_all = "camelCase")] +#[serde(deny_unknown_fields)] pub struct ApiGatewayProxyRequest where T1: DeserializeOwned, @@ -118,12 +119,25 @@ where /// `ApiGatewayV2httpRequest` contains data coming from the new HTTP API Gateway #[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)] #[serde(rename_all = "camelCase")] +#[serde(deny_unknown_fields)] pub struct ApiGatewayV2httpRequest { + #[serde(default, rename = "type")] + pub kind: Option, + #[serde(default)] + pub method_arn: Option, + #[serde(with = "http_method", default = "default_http_method")] + pub http_method: Method, + #[serde(default)] + pub identity_source: Option, + #[serde(default)] + pub authorization_token: Option, + #[serde(default)] + pub resource: Option, #[serde(default)] pub version: Option, #[serde(default)] pub route_key: Option, - #[serde(default)] + #[serde(default, alias = "path")] pub raw_path: Option, #[serde(default)] pub raw_query_string: Option, @@ -319,6 +333,7 @@ pub struct ApiGatewayRequestIdentity { /// `ApiGatewayWebsocketProxyRequest` contains data coming from the API Gateway proxy #[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)] #[serde(rename_all = "camelCase")] +#[serde(deny_unknown_fields)] pub struct ApiGatewayWebsocketProxyRequest where T1: DeserializeOwned, @@ -747,6 +762,10 @@ pub struct IamPolicyStatement { pub resource: Vec, } +fn default_http_method() -> Method { + Method::GET +} + #[cfg(test)] mod test { use super::*; @@ -901,6 +920,8 @@ mod test { let output: String = serde_json::to_string(&parsed).unwrap(); let reparsed: ApiGatewayV2httpRequest = serde_json::from_slice(output.as_bytes()).unwrap(); assert_eq!(parsed, reparsed); + assert_eq!("REQUEST", parsed.kind.unwrap()); + assert_eq!(Method::GET, 
parsed.http_method); } #[test] diff --git a/lambda-events/src/fixtures/example-apigw-request.json b/lambda-events/src/fixtures/example-apigw-request.json index 570f785b..d91e9609 100644 --- a/lambda-events/src/fixtures/example-apigw-request.json +++ b/lambda-events/src/fixtures/example-apigw-request.json @@ -1,55 +1,95 @@ { "resource": "/{proxy+}", - "path": "/hello/world", - "httpMethod": "POST", - "headers": { - "Accept": "*/*", - "Accept-Encoding": "gzip, deflate", - "cache-control": "no-cache", - "CloudFront-Forwarded-Proto": "https", - "CloudFront-Is-Desktop-Viewer": "true", - "CloudFront-Is-Mobile-Viewer": "false", - "CloudFront-Is-SmartTV-Viewer": "false", - "CloudFront-Is-Tablet-Viewer": "false", - "CloudFront-Viewer-Country": "US", - "Content-Type": "application/json", - "headerName": "headerValue", - "Host": "gy415nuibc.execute-api.us-east-1.amazonaws.com", - "Postman-Token": "9f583ef0-ed83-4a38-aef3-eb9ce3f7a57f", - "User-Agent": "PostmanRuntime/2.4.5", - "Via": "1.1 d98420743a69852491bbdea73f7680bd.cloudfront.net (CloudFront)", - "X-Amz-Cf-Id": "pn-PWIJc6thYnZm5P0NMgOUglL1DYtl0gdeJky8tqsg8iS_sgsKD1A==", - "X-Forwarded-For": "54.240.196.186, 54.182.214.83", - "X-Forwarded-Port": "443", - "X-Forwarded-Proto": "https" - }, - "multiValueHeaders": { - "Accept": ["*/*"], - "Accept-Encoding": ["gzip, deflate"], - "cache-control": ["no-cache"], - "CloudFront-Forwarded-Proto": ["https"], - "CloudFront-Is-Desktop-Viewer": ["true"], - "CloudFront-Is-Mobile-Viewer": ["false"], - "CloudFront-Is-SmartTV-Viewer": ["false"], - "CloudFront-Is-Tablet-Viewer": ["false"], - "CloudFront-Viewer-Country": ["US"], - "Content-Type": ["application/json"], - "headerName": ["headerValue"], - "Host": ["gy415nuibc.execute-api.us-east-1.amazonaws.com"], - "Postman-Token": ["9f583ef0-ed83-4a38-aef3-eb9ce3f7a57f"], - "User-Agent": ["PostmanRuntime/2.4.5"], - "Via": ["1.1 d98420743a69852491bbdea73f7680bd.cloudfront.net (CloudFront)"], - "X-Amz-Cf-Id": ["pn-PWIJc6thYnZm5P0NMgOUglL1DYtl0gdeJky8tqsg8iS_sgsKD1A=="], - "X-Forwarded-For": ["54.240.196.186, 54.182.214.83"], - "X-Forwarded-Port": ["443"], - "X-Forwarded-Proto": ["https"] - }, + "path": "/hello/world", + "httpMethod": "POST", + "headers": { + "Accept": "*/*", + "Accept-Encoding": "gzip, deflate", + "cache-control": "no-cache", + "CloudFront-Forwarded-Proto": "https", + "CloudFront-Is-Desktop-Viewer": "true", + "CloudFront-Is-Mobile-Viewer": "false", + "CloudFront-Is-SmartTV-Viewer": "false", + "CloudFront-Is-Tablet-Viewer": "false", + "CloudFront-Viewer-Country": "US", + "Content-Type": "application/json", + "headerName": "headerValue", + "Host": "gy415nuibc.execute-api.us-east-1.amazonaws.com", + "Postman-Token": "9f583ef0-ed83-4a38-aef3-eb9ce3f7a57f", + "User-Agent": "PostmanRuntime/2.4.5", + "Via": "1.1 d98420743a69852491bbdea73f7680bd.cloudfront.net (CloudFront)", + "X-Amz-Cf-Id": "pn-PWIJc6thYnZm5P0NMgOUglL1DYtl0gdeJky8tqsg8iS_sgsKD1A==", + "X-Forwarded-For": "54.240.196.186, 54.182.214.83", + "X-Forwarded-Port": "443", + "X-Forwarded-Proto": "https" + }, + "multiValueHeaders": { + "Accept": [ + "*/*" + ], + "Accept-Encoding": [ + "gzip, deflate" + ], + "cache-control": [ + "no-cache" + ], + "CloudFront-Forwarded-Proto": [ + "https" + ], + "CloudFront-Is-Desktop-Viewer": [ + "true" + ], + "CloudFront-Is-Mobile-Viewer": [ + "false" + ], + "CloudFront-Is-SmartTV-Viewer": [ + "false" + ], + "CloudFront-Is-Tablet-Viewer": [ + "false" + ], + "CloudFront-Viewer-Country": [ + "US" + ], + "Content-Type": [ + "application/json" + ], + "headerName": [ + 
"headerValue" + ], + "Host": [ + "gy415nuibc.execute-api.us-east-1.amazonaws.com" + ], + "Postman-Token": [ + "9f583ef0-ed83-4a38-aef3-eb9ce3f7a57f" + ], + "User-Agent": [ + "PostmanRuntime/2.4.5" + ], + "Via": [ + "1.1 d98420743a69852491bbdea73f7680bd.cloudfront.net (CloudFront)" + ], + "X-Amz-Cf-Id": [ + "pn-PWIJc6thYnZm5P0NMgOUglL1DYtl0gdeJky8tqsg8iS_sgsKD1A==" + ], + "X-Forwarded-For": [ + "54.240.196.186, 54.182.214.83" + ], + "X-Forwarded-Port": [ + "443" + ], + "X-Forwarded-Proto": [ + "https" + ] + }, "queryStringParameters": { "name": "me" - }, - "multiValueQueryStringParameters": { - "name": ["me"] - }, + }, + "multiValueQueryStringParameters": { + "name": [ + "me" + ] + }, "pathParameters": { "proxy": "hello/world" }, @@ -70,9 +110,9 @@ "accountId": "theAccountId", "cognitoIdentityId": "theCognitoIdentityId", "caller": "theCaller", - "apiKey": "theApiKey", - "apiKeyId": "theApiKeyId", - "accessKey": "ANEXAMPLEOFACCESSKEY", + "apiKey": "theApiKey", + "apiKeyId": "theApiKeyId", + "accessKey": "ANEXAMPLEOFACCESSKEY", "sourceIp": "192.168.196.186", "cognitoAuthenticationType": "theCognitoAuthenticationType", "cognitoAuthenticationProvider": "theCognitoAuthenticationProvider", @@ -92,5 +132,4 @@ "apiId": "gy415nuibc" }, "body": "{\r\n\t\"a\": 1\r\n}" -} - +} \ No newline at end of file diff --git a/lambda-http/Cargo.toml b/lambda-http/Cargo.toml index edc68650..be111092 100644 --- a/lambda-http/Cargo.toml +++ b/lambda-http/Cargo.toml @@ -40,7 +40,7 @@ percent-encoding = "2.2" [dependencies.aws_lambda_events] path = "../lambda-events" -version = "0.10.0" +version = "0.11.0" default-features = false features = ["alb", "apigw"] diff --git a/lambda-http/src/deserializer.rs b/lambda-http/src/deserializer.rs new file mode 100644 index 00000000..1771ea7b --- /dev/null +++ b/lambda-http/src/deserializer.rs @@ -0,0 +1,117 @@ +use crate::request::LambdaRequest; +use aws_lambda_events::{ + alb::AlbTargetGroupRequest, + apigw::{ApiGatewayProxyRequest, ApiGatewayV2httpRequest, ApiGatewayWebsocketProxyRequest}, +}; +use serde::{de::Error, Deserialize}; + +const ERROR_CONTEXT: &str = "this function expects a JSON payload from Amazon API Gateway, Amazon Elastic Load Balancer, or AWS Lambda Function URLs, but the data doesn't match any of those services' events"; + +impl<'de> Deserialize<'de> for LambdaRequest { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + let content = match serde::__private::de::Content::deserialize(deserializer) { + Ok(content) => content, + Err(err) => return Err(err), + }; + #[cfg(feature = "apigw_rest")] + if let Ok(res) = + ApiGatewayProxyRequest::deserialize(serde::__private::de::ContentRefDeserializer::::new(&content)) + { + return Ok(LambdaRequest::ApiGatewayV1(res)); + } + #[cfg(feature = "apigw_http")] + if let Ok(res) = ApiGatewayV2httpRequest::deserialize( + serde::__private::de::ContentRefDeserializer::::new(&content), + ) { + return Ok(LambdaRequest::ApiGatewayV2(res)); + } + #[cfg(feature = "alb")] + if let Ok(res) = + AlbTargetGroupRequest::deserialize(serde::__private::de::ContentRefDeserializer::::new(&content)) + { + return Ok(LambdaRequest::Alb(res)); + } + #[cfg(feature = "apigw_websockets")] + if let Ok(res) = ApiGatewayWebsocketProxyRequest::deserialize(serde::__private::de::ContentRefDeserializer::< + D::Error, + >::new(&content)) + { + return Ok(LambdaRequest::WebSocket(res)); + } + + Err(Error::custom(ERROR_CONTEXT)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn 
test_deserialize_apigw_rest() { + let data = include_bytes!("../../lambda-events/src/fixtures/example-apigw-request.json"); + + let req: LambdaRequest = serde_json::from_slice(data).expect("failed to deserialze apigw rest data"); + match req { + LambdaRequest::ApiGatewayV1(req) => { + assert_eq!("12345678912", req.request_context.account_id.unwrap()); + } + other => panic!("unexpected request variant: {:?}", other), + } + } + + #[test] + fn test_deserialize_apigw_http() { + let data = include_bytes!("../../lambda-events/src/fixtures/example-apigw-v2-request-iam.json"); + + let req: LambdaRequest = serde_json::from_slice(data).expect("failed to deserialze apigw http data"); + match req { + LambdaRequest::ApiGatewayV2(req) => { + assert_eq!("123456789012", req.request_context.account_id.unwrap()); + } + other => panic!("unexpected request variant: {:?}", other), + } + } + + #[test] + fn test_deserialize_alb() { + let data = include_bytes!( + "../../lambda-events/src/fixtures/example-alb-lambda-target-request-multivalue-headers.json" + ); + + let req: LambdaRequest = serde_json::from_slice(data).expect("failed to deserialze alb rest data"); + match req { + LambdaRequest::Alb(req) => { + assert_eq!( + "arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/lambda-target/abcdefgh", + req.request_context.elb.target_group_arn.unwrap() + ); + } + other => panic!("unexpected request variant: {:?}", other), + } + } + + #[test] + fn test_deserialize_apigw_websocket() { + let data = + include_bytes!("../../lambda-events/src/fixtures/example-apigw-websocket-request-without-method.json"); + + let req: LambdaRequest = serde_json::from_slice(data).expect("failed to deserialze apigw websocket data"); + match req { + LambdaRequest::WebSocket(req) => { + assert_eq!("CONNECT", req.request_context.event_type.unwrap()); + } + other => panic!("unexpected request variant: {:?}", other), + } + } + + #[test] + fn test_deserialize_error() { + let err = serde_json::from_str::("{\"command\": \"hi\"}").unwrap_err(); + + assert_eq!(ERROR_CONTEXT, err.to_string()); + } +} diff --git a/lambda-http/src/lib.rs b/lambda-http/src/lib.rs index 37c167a0..bc9e753d 100644 --- a/lambda-http/src/lib.rs +++ b/lambda-http/src/lib.rs @@ -70,6 +70,7 @@ pub use lambda_runtime::{self, service_fn, tower, Context, Error, Service}; use request::RequestFuture; use response::ResponseFuture; +mod deserializer; pub mod ext; pub mod request; mod response; diff --git a/lambda-http/src/request.rs b/lambda-http/src/request.rs index 5ed3effe..ea418595 100644 --- a/lambda-http/src/request.rs +++ b/lambda-http/src/request.rs @@ -20,8 +20,10 @@ use aws_lambda_events::apigw::{ApiGatewayWebsocketProxyRequest, ApiGatewayWebsoc use aws_lambda_events::{encodings::Body, query_map::QueryMap}; use http::header::HeaderName; use http::{HeaderMap, HeaderValue}; + use serde::{Deserialize, Serialize}; use serde_json::error::Error as JsonError; + use std::future::Future; use std::pin::Pin; use std::{env, io::Read, mem}; @@ -33,8 +35,7 @@ use url::Url; /// This is not intended to be a type consumed by crate users directly. The order /// of the variants are notable. Serde will try to deserialize in this order. 
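+/// (Since this patch, that ordering is encoded in the hand-written `Deserialize`
+/// impl in `lambda-http/src/deserializer.rs` above, which replaces the previous
+/// `#[serde(untagged)]` derive.)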
#[doc(hidden)] -#[derive(Deserialize, Debug)] -#[serde(untagged)] +#[derive(Debug)] pub enum LambdaRequest { #[cfg(feature = "apigw_rest")] ApiGatewayV1(ApiGatewayProxyRequest), From 6ca8a3670d813f90a1d65b73b578d573365863c5 Mon Sep 17 00:00:00 2001 From: David Calavera Date: Sun, 2 Jul 2023 08:40:03 -0700 Subject: [PATCH 04/27] Fix APIGW path with stage (#669) * Fix APIGW path with stage APIGW HTTP has started adding the stage to the path in the event. This change checks if the stage is already a prefix in the path, and skips adding it if so. Signed-off-by: David Calavera * Add env variable to shortcircuit stage behavior. There might be cases when you don't want the runtime to do anything with paths and stages. By setting AWS_LAMBDA_HTTP_IGNORE_STAGE_IN_PATH in the environment, we ignore this behavior completely. Signed-off-by: David Calavera --------- Signed-off-by: David Calavera --- lambda-http/src/request.rs | 44 ++++++++++++-- lambda-http/tests/data/apigw_no_host.json | 2 +- .../tests/data/apigw_proxy_request.json | 2 +- ...w_v2_proxy_request_with_stage_in_path.json | 57 +++++++++++++++++++ 4 files changed, 97 insertions(+), 8 deletions(-) create mode 100644 lambda-http/tests/data/apigw_v2_proxy_request_with_stage_in_path.json diff --git a/lambda-http/src/request.rs b/lambda-http/src/request.rs index ea418595..bdb755ed 100644 --- a/lambda-http/src/request.rs +++ b/lambda-http/src/request.rs @@ -327,10 +327,21 @@ fn into_websocket_request(ag: ApiGatewayWebsocketProxyRequest) -> http::Request< #[cfg(any(feature = "apigw_rest", feature = "apigw_http", feature = "apigw_websockets"))] fn apigw_path_with_stage(stage: &Option, path: &str) -> String { - match stage { - None => path.into(), - Some(stage) if stage == "$default" => path.into(), - Some(stage) => format!("/{stage}{path}"), + if env::var("AWS_LAMBDA_HTTP_IGNORE_STAGE_IN_PATH").is_ok() { + return path.into(); + } + + let stage = match stage { + None => return path.into(), + Some(stage) if stage == "$default" => return path.into(), + Some(stage) => stage, + }; + + let prefix = format!("/{stage}/"); + if path.starts_with(&prefix) { + path.into() + } else { + format!("/{stage}{path}") } } @@ -531,7 +542,7 @@ mod tests { assert_eq!(req.method(), "GET"); assert_eq!( req.uri(), - "https://wt6mne2s9k.execute-api.us-west-2.amazonaws.com/test/test/hello?name=me" + "https://wt6mne2s9k.execute-api.us-west-2.amazonaws.com/test/hello?name=me" ); // Ensure this is an APIGW request @@ -733,7 +744,7 @@ mod tests { ); let req = result.expect("failed to parse request"); assert_eq!(req.method(), "GET"); - assert_eq!(req.uri(), "/test/test/hello?name=me"); + assert_eq!(req.uri(), "/test/hello?name=me"); } #[test] @@ -768,4 +779,25 @@ mod tests { let url = build_request_uri("/path with spaces/and multiple segments", &HeaderMap::new(), None, None); assert_eq!("/path%20with%20spaces/and%20multiple%20segments", url); } + + #[test] + fn deserializes_apigw_http_request_with_stage_in_path() { + let input = include_str!("../tests/data/apigw_v2_proxy_request_with_stage_in_path.json"); + let result = from_str(input); + assert!( + result.is_ok(), + "event was not parsed as expected {result:?} given {input}" + ); + let req = result.expect("failed to parse request"); + assert_eq!("/Prod/my/path", req.uri().path()); + assert_eq!("/Prod/my/path", req.raw_http_path()); + } + + #[test] + fn test_apigw_path_with_stage() { + assert_eq!("/path", apigw_path_with_stage(&None, "/path")); + assert_eq!("/path", apigw_path_with_stage(&Some("$default".into()), "/path")); + 
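+        // The two assertions below exercise the new prefix check: a path that
+        // already starts with the stage is returned untouched, otherwise the
+        // stage is prepended.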
assert_eq!("/Prod/path", apigw_path_with_stage(&Some("Prod".into()), "/Prod/path")); + assert_eq!("/Prod/path", apigw_path_with_stage(&Some("Prod".into()), "/path")); + } } diff --git a/lambda-http/tests/data/apigw_no_host.json b/lambda-http/tests/data/apigw_no_host.json index 3143c81b..78a40dee 100644 --- a/lambda-http/tests/data/apigw_no_host.json +++ b/lambda-http/tests/data/apigw_no_host.json @@ -1,5 +1,5 @@ { - "path": "/test/hello", + "path": "/hello", "headers": { "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8", "Accept-Encoding": "gzip, deflate, lzma, sdch, br", diff --git a/lambda-http/tests/data/apigw_proxy_request.json b/lambda-http/tests/data/apigw_proxy_request.json index 3b7cc9d2..61183846 100644 --- a/lambda-http/tests/data/apigw_proxy_request.json +++ b/lambda-http/tests/data/apigw_proxy_request.json @@ -1,5 +1,5 @@ { - "path": "/test/hello", + "path": "/hello", "headers": { "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8", "Accept-Encoding": "gzip, deflate, lzma, sdch, br", diff --git a/lambda-http/tests/data/apigw_v2_proxy_request_with_stage_in_path.json b/lambda-http/tests/data/apigw_v2_proxy_request_with_stage_in_path.json new file mode 100644 index 00000000..86e173c6 --- /dev/null +++ b/lambda-http/tests/data/apigw_v2_proxy_request_with_stage_in_path.json @@ -0,0 +1,57 @@ +{ + "version": "2.0", + "routeKey": "Prod", + "rawPath": "/Prod/my/path", + "rawQueryString": "parameter1=value1¶meter1=value2¶meter2=value", + "cookies": [ + "cookie1=value1", + "cookie2=value2" + ], + "headers": { + "Header1": "value1", + "Header2": "value2" + }, + "queryStringParameters": { + "parameter1": "value1,value2", + "parameter2": "value" + }, + "requestContext": { + "accountId": "123456789012", + "apiId": "api-id", + "authorizer": { + "jwt": { + "claims": { + "claim1": "value1", + "claim2": "value2" + }, + "scopes": [ + "scope1", + "scope2" + ] + } + }, + "domainName": "id.execute-api.us-east-1.amazonaws.com", + "domainPrefix": "id", + "http": { + "method": "POST", + "path": "/Prod/my/path", + "protocol": "HTTP/1.1", + "sourceIp": "IP", + "userAgent": "agent" + }, + "requestId": "id", + "routeKey": "Prod", + "stage": "Prod", + "time": "12/Mar/2020:19:03:58 +0000", + "timeEpoch": 1583348638390 + }, + "body": "Hello from Lambda", + "pathParameters": { + "parameter1": "value1" + }, + "isBase64Encoded": false, + "stageVariables": { + "stageVariable1": "value1", + "stageVariable2": "value2" + } +} \ No newline at end of file From a00fe5d386d6700fb6e9bc92ae30bb2513f41d7e Mon Sep 17 00:00:00 2001 From: BMorinDrifter <108374412+BMorinDrifter@users.noreply.github.com> Date: Fri, 7 Jul 2023 11:11:08 -0700 Subject: [PATCH 05/27] Fix check examples workflow to only run once in pull requests (#673) --- .github/workflows/check-examples.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/check-examples.yml b/.github/workflows/check-examples.yml index ba7bc709..5ef1536a 100644 --- a/.github/workflows/check-examples.yml +++ b/.github/workflows/check-examples.yml @@ -1,6 +1,9 @@ name: Check examples -on: [push, pull_request] +on: + push: + branches: [main] + pull_request: jobs: check: From 2b7d1619750d569a6876a4733d98992a02722fb6 Mon Sep 17 00:00:00 2001 From: BMorinDrifter <108374412+BMorinDrifter@users.noreply.github.com> Date: Sun, 9 Jul 2023 11:09:19 -0700 Subject: [PATCH 06/27] Exclude the ansi feature of tracing-subscriber instead of calling 
.with_ansi(false) in the builder (#674) * Disable the ansi feature for tracing_subscriber instead of building with_ansi(false) * use tracing::Level::INFO instead of via tracing-subscriber in README --- .../advanced-sqs-partial-batch-failures/Cargo.toml | 2 +- .../advanced-sqs-partial-batch-failures/src/main.rs | 11 ++--------- examples/basic-error-handling/Cargo.toml | 2 +- examples/basic-error-handling/src/main.rs | 3 --- examples/basic-lambda-external-runtime/Cargo.toml | 2 +- examples/basic-lambda-external-runtime/src/main.rs | 3 --- examples/basic-lambda/Cargo.toml | 2 +- examples/basic-lambda/src/main.rs | 3 --- examples/basic-s3-object-lambda-thumbnail/Cargo.toml | 2 +- .../basic-s3-object-lambda-thumbnail/src/main.rs | 3 --- examples/basic-s3-thumbnail/Cargo.toml | 2 +- examples/basic-s3-thumbnail/src/main.rs | 3 --- examples/basic-sdk/Cargo.toml | 2 +- examples/basic-sdk/src/main.rs | 3 --- examples/basic-shared-resource/Cargo.toml | 3 +-- examples/basic-shared-resource/src/main.rs | 3 --- examples/basic-sqs/Cargo.toml | 2 +- examples/basic-sqs/src/main.rs | 3 --- examples/basic-streaming-response/Cargo.toml | 2 +- examples/basic-streaming-response/src/main.rs | 3 --- examples/extension-basic/Cargo.toml | 2 +- examples/extension-basic/src/main.rs | 3 --- examples/extension-combined/Cargo.toml | 2 +- examples/extension-combined/src/main.rs | 3 --- examples/extension-custom-events/Cargo.toml | 2 +- examples/extension-custom-events/src/main.rs | 3 --- examples/extension-custom-service/Cargo.toml | 2 +- examples/extension-custom-service/src/main.rs | 3 --- examples/extension-logs-basic/Cargo.toml | 2 +- examples/extension-logs-basic/src/main.rs | 3 --- examples/extension-logs-custom-service/Cargo.toml | 2 +- examples/extension-logs-custom-service/src/main.rs | 3 --- examples/extension-logs-kinesis-firehose/Cargo.toml | 2 +- examples/extension-logs-kinesis-firehose/src/main.rs | 3 --- examples/extension-telemetry-basic/Cargo.toml | 2 +- examples/extension-telemetry-basic/src/main.rs | 3 --- examples/http-axum-diesel/Cargo.toml | 2 +- examples/http-axum-diesel/src/main.rs | 3 --- examples/http-axum/Cargo.toml | 2 +- examples/http-axum/src/main.rs | 3 --- examples/http-basic-lambda/Cargo.toml | 2 +- examples/http-basic-lambda/src/main.rs | 3 --- examples/http-cors/Cargo.toml | 2 +- examples/http-cors/src/main.rs | 3 --- examples/http-dynamodb/Cargo.toml | 2 +- examples/http-dynamodb/src/main.rs | 3 --- examples/http-query-parameters/Cargo.toml | 2 +- examples/http-query-parameters/src/main.rs | 3 --- examples/http-raw-path/Cargo.toml | 2 +- examples/http-raw-path/src/main.rs | 3 --- examples/http-shared-resource/Cargo.toml | 2 +- examples/http-shared-resource/src/main.rs | 3 --- examples/http-tower-trace/Cargo.toml | 2 +- examples/http-tower-trace/src/main.rs | 3 --- lambda-http/README.md | 12 ++++-------- lambda-integration-tests/Cargo.toml | 2 +- lambda-integration-tests/src/bin/extension-fn.rs | 3 --- lambda-integration-tests/src/bin/extension-trait.rs | 3 --- lambda-integration-tests/src/bin/http-fn.rs | 3 --- lambda-integration-tests/src/bin/http-trait.rs | 3 --- lambda-integration-tests/src/bin/logs-trait.rs | 3 --- lambda-integration-tests/src/bin/runtime-fn.rs | 3 --- lambda-integration-tests/src/bin/runtime-trait.rs | 3 --- 63 files changed, 34 insertions(+), 145 deletions(-) diff --git a/examples/advanced-sqs-partial-batch-failures/Cargo.toml b/examples/advanced-sqs-partial-batch-failures/Cargo.toml index 0dfe49d9..04158320 100644 --- 
a/examples/advanced-sqs-partial-batch-failures/Cargo.toml +++ b/examples/advanced-sqs-partial-batch-failures/Cargo.toml @@ -13,4 +13,4 @@ lambda_runtime = { path = "../../lambda-runtime" } tokio = { version = "1", features = ["macros"] } futures = "0.3" tracing = { version = "0.1", features = ["log"] } -tracing-subscriber = { version = "0.3", default-features = false, features = ["ansi", "fmt"] } +tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt"] } diff --git a/examples/advanced-sqs-partial-batch-failures/src/main.rs b/examples/advanced-sqs-partial-batch-failures/src/main.rs index 2923d2e4..23faa68f 100644 --- a/examples/advanced-sqs-partial-batch-failures/src/main.rs +++ b/examples/advanced-sqs-partial-batch-failures/src/main.rs @@ -33,9 +33,6 @@ async fn main() -> Result<(), Error> { .with_max_level(tracing::Level::INFO) // disable printing the name of the module in every log line. .with_target(false) - // this needs to be set to false, otherwise ANSI color codes will - // show up in a confusing manner in CloudWatch logs. - .with_ansi(false) // disabling time is handy because CloudWatch will add the ingestion time. .without_time() .init(); @@ -78,9 +75,7 @@ where tracing::trace!("Handling batch size {}", event.payload.records.len()); let create_task = |msg| { // We need to keep the message_id to report failures to SQS - let SqsMessageObj { - message_id, body, .. - } = msg; + let SqsMessageObj { message_id, body, .. } = msg; let span = tracing::span!(tracing::Level::INFO, "Handling SQS msg", message_id); let task = async { //TODO catch panics like the `run` function from lambda_runtime @@ -104,9 +99,7 @@ where } }, ) - .map(|id| BatchItemFailure { - item_identifier: id, - }) + .map(|id| BatchItemFailure { item_identifier: id }) .collect(); Ok(SqsBatchResponse { diff --git a/examples/basic-error-handling/Cargo.toml b/examples/basic-error-handling/Cargo.toml index 325b08e1..e8699141 100644 --- a/examples/basic-error-handling/Cargo.toml +++ b/examples/basic-error-handling/Cargo.toml @@ -17,6 +17,6 @@ serde_json = "1.0.81" simple-error = "0.2.3" tokio = { version = "1", features = ["macros"] } tracing = { version = "0.1", features = ["log"] } -tracing-subscriber = { version = "0.3", default-features = false, features = ["ansi", "fmt"] } +tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt"] } diff --git a/examples/basic-error-handling/src/main.rs b/examples/basic-error-handling/src/main.rs index 8d317a24..0939d2d0 100644 --- a/examples/basic-error-handling/src/main.rs +++ b/examples/basic-error-handling/src/main.rs @@ -54,9 +54,6 @@ async fn main() -> Result<(), Error> { .with_max_level(tracing::Level::INFO) // disable printing the name of the module in every log line. .with_target(false) - // this needs to be set to false, otherwise ANSI color codes will - // show up in a confusing manner in CloudWatch logs. - .with_ansi(false) // disabling time is handy because CloudWatch will add the ingestion time. 
.without_time() .init(); diff --git a/examples/basic-lambda-external-runtime/Cargo.toml b/examples/basic-lambda-external-runtime/Cargo.toml index 9c732b2f..0682efaf 100644 --- a/examples/basic-lambda-external-runtime/Cargo.toml +++ b/examples/basic-lambda-external-runtime/Cargo.toml @@ -12,4 +12,4 @@ serde = "1.0.163" tokio = "1.28.2" tokio-test = "0.4.2" tracing = "0.1.37" -tracing-subscriber = "0.3.17" +tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt"] } diff --git a/examples/basic-lambda-external-runtime/src/main.rs b/examples/basic-lambda-external-runtime/src/main.rs index 71bd123b..9419b17b 100644 --- a/examples/basic-lambda-external-runtime/src/main.rs +++ b/examples/basic-lambda-external-runtime/src/main.rs @@ -29,9 +29,6 @@ fn main() -> Result<(), io::Error> { .with_max_level(tracing::Level::INFO) // disable printing the name of the module in every log line. .with_target(false) - // this needs to be set to false, otherwise ANSI color codes will - // show up in a confusing manner in CloudWatch logs. - .with_ansi(false) // disabling time is handy because CloudWatch will add the ingestion time. .without_time() .init(); diff --git a/examples/basic-lambda/Cargo.toml b/examples/basic-lambda/Cargo.toml index fd6bd5b2..cd2efa42 100644 --- a/examples/basic-lambda/Cargo.toml +++ b/examples/basic-lambda/Cargo.toml @@ -15,5 +15,5 @@ lambda_runtime = { path = "../../lambda-runtime" } serde = "1.0.136" tokio = { version = "1", features = ["macros"] } tracing = { version = "0.1", features = ["log"] } -tracing-subscriber = { version = "0.3", default-features = false, features = ["ansi", "fmt"] } +tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt"] } tokio-test = "0.4.2" \ No newline at end of file diff --git a/examples/basic-lambda/src/main.rs b/examples/basic-lambda/src/main.rs index 2bb4aeb3..09145bb3 100644 --- a/examples/basic-lambda/src/main.rs +++ b/examples/basic-lambda/src/main.rs @@ -29,9 +29,6 @@ async fn main() -> Result<(), Error> { .with_max_level(tracing::Level::INFO) // disable printing the name of the module in every log line. .with_target(false) - // this needs to be set to false, otherwise ANSI color codes will - // show up in a confusing manner in CloudWatch logs. - .with_ansi(false) // disabling time is handy because CloudWatch will add the ingestion time. .without_time() .init(); diff --git a/examples/basic-s3-object-lambda-thumbnail/Cargo.toml b/examples/basic-s3-object-lambda-thumbnail/Cargo.toml index 493846ad..79640cc2 100644 --- a/examples/basic-s3-object-lambda-thumbnail/Cargo.toml +++ b/examples/basic-s3-object-lambda-thumbnail/Cargo.toml @@ -20,7 +20,7 @@ lambda_runtime = { path = "../../lambda-runtime" } serde = "1" tokio = { version = "1", features = ["macros"] } tracing = { version = "0.1" } -tracing-subscriber = { version = "0.3", default-features = false, features = ["ansi", "fmt"] } +tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt"] } aws-config = "0.55.3" aws-sdk-s3 = "0.28.0" thumbnailer = "0.4.0" diff --git a/examples/basic-s3-object-lambda-thumbnail/src/main.rs b/examples/basic-s3-object-lambda-thumbnail/src/main.rs index 7786f56e..771a829c 100644 --- a/examples/basic-s3-object-lambda-thumbnail/src/main.rs +++ b/examples/basic-s3-object-lambda-thumbnail/src/main.rs @@ -69,9 +69,6 @@ async fn main() -> Result<(), Error> { .with_max_level(tracing::Level::TRACE) // disable printing the name of the module in every log line. 
.with_target(false) - // this needs to be set to false, otherwise ANSI color codes will - // show up in a confusing manner in CloudWatch logs. - .with_ansi(false) // disabling time is handy because CloudWatch will add the ingestion time. .without_time() .init(); diff --git a/examples/basic-s3-thumbnail/Cargo.toml b/examples/basic-s3-thumbnail/Cargo.toml index dc5e67f8..7c788a00 100644 --- a/examples/basic-s3-thumbnail/Cargo.toml +++ b/examples/basic-s3-thumbnail/Cargo.toml @@ -20,7 +20,7 @@ lambda_runtime = { path = "../../lambda-runtime" } serde = "1" tokio = { version = "1", features = ["macros"] } tracing = { version = "0.1" } -tracing-subscriber = { version = "0.3", default-features = false, features = ["ansi", "fmt"] } +tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt"] } aws-config = "0.54.1" aws-sdk-s3 = "0.24.0" thumbnailer = "0.4.0" diff --git a/examples/basic-s3-thumbnail/src/main.rs b/examples/basic-s3-thumbnail/src/main.rs index f9cb0716..d996de0c 100644 --- a/examples/basic-s3-thumbnail/src/main.rs +++ b/examples/basic-s3-thumbnail/src/main.rs @@ -111,9 +111,6 @@ async fn main() -> Result<(), Error> { .with_max_level(tracing::Level::INFO) // disable printing the name of the module in every log line. .with_target(false) - // this needs to be set to false, otherwise ANSI color codes will - // show up in a confusing manner in CloudWatch logs. - .with_ansi(false) // disabling time is handy because CloudWatch will add the ingestion time. .without_time() .init(); diff --git a/examples/basic-sdk/Cargo.toml b/examples/basic-sdk/Cargo.toml index 0ffea2da..0e930f7c 100644 --- a/examples/basic-sdk/Cargo.toml +++ b/examples/basic-sdk/Cargo.toml @@ -13,7 +13,7 @@ lambda_runtime = { path = "../../lambda-runtime" } serde = "1.0.136" tokio = { version = "1", features = ["macros"] } tracing = { version = "0.1", features = ["log"] } -tracing-subscriber = { version = "0.3", default-features = false, features = ["ansi", "fmt"] } +tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt"] } [dev-dependencies] mockall = "0.11.3" diff --git a/examples/basic-sdk/src/main.rs b/examples/basic-sdk/src/main.rs index 5838d7c8..6e2654a4 100644 --- a/examples/basic-sdk/src/main.rs +++ b/examples/basic-sdk/src/main.rs @@ -38,9 +38,6 @@ async fn main() -> Result<(), Error> { .with_max_level(tracing::Level::INFO) // disable printing the name of the module in every log line. .with_target(false) - // this needs to be set to false, otherwise ANSI color codes will - // show up in a confusing manner in CloudWatch logs. - .with_ansi(false) // disabling time is handy because CloudWatch will add the ingestion time. 
.without_time() .init(); diff --git a/examples/basic-shared-resource/Cargo.toml b/examples/basic-shared-resource/Cargo.toml index 25637976..b3e2faa5 100644 --- a/examples/basic-shared-resource/Cargo.toml +++ b/examples/basic-shared-resource/Cargo.toml @@ -15,6 +15,5 @@ lambda_runtime = { path = "../../lambda-runtime" } serde = "1.0.136" tokio = { version = "1", features = ["macros"] } tracing = { version = "0.1", features = ["log"] } -tracing-subscriber = { version = "0.3", default-features = false, features = ["ansi", "fmt"] } - +tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt"] } diff --git a/examples/basic-shared-resource/src/main.rs b/examples/basic-shared-resource/src/main.rs index 15c38741..15ababa0 100644 --- a/examples/basic-shared-resource/src/main.rs +++ b/examples/basic-shared-resource/src/main.rs @@ -49,9 +49,6 @@ async fn main() -> Result<(), Error> { .with_max_level(tracing::Level::INFO) // disable printing the name of the module in every log line. .with_target(false) - // this needs to be set to false, otherwise ANSI color codes will - // show up in a confusing manner in CloudWatch logs. - .with_ansi(false) // disabling time is handy because CloudWatch will add the ingestion time. .without_time() .init(); diff --git a/examples/basic-sqs/Cargo.toml b/examples/basic-sqs/Cargo.toml index a1b11567..9d259218 100644 --- a/examples/basic-sqs/Cargo.toml +++ b/examples/basic-sqs/Cargo.toml @@ -20,4 +20,4 @@ lambda_runtime = { path = "../../lambda-runtime" } serde = "1.0.136" tokio = { version = "1", features = ["macros"] } tracing = { version = "0.1", features = ["log"] } -tracing-subscriber = { version = "0.3", default-features = false, features = ["ansi", "fmt"] } +tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt"] } diff --git a/examples/basic-sqs/src/main.rs b/examples/basic-sqs/src/main.rs index 319e4519..63967893 100644 --- a/examples/basic-sqs/src/main.rs +++ b/examples/basic-sqs/src/main.rs @@ -25,9 +25,6 @@ async fn main() -> Result<(), Error> { .with_max_level(tracing::Level::INFO) // disable printing the name of the module in every log line. .with_target(false) - // this needs to be set to false, otherwise ANSI color codes will - // show up in a confusing manner in CloudWatch logs. - .with_ansi(false) // disabling time is handy because CloudWatch will add the ingestion time. .without_time() .init(); diff --git a/examples/basic-streaming-response/Cargo.toml b/examples/basic-streaming-response/Cargo.toml index fc284674..4bbe66f4 100644 --- a/examples/basic-streaming-response/Cargo.toml +++ b/examples/basic-streaming-response/Cargo.toml @@ -14,5 +14,5 @@ hyper = { version = "0.14", features = [ lambda_runtime = { path = "../../lambda-runtime" } tokio = { version = "1", features = ["macros"] } tracing = { version = "0.1", features = ["log"] } -tracing-subscriber = { version = "0.3", default-features = false, features = ["ansi", "fmt"] } +tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt"] } serde_json = "1.0" \ No newline at end of file diff --git a/examples/basic-streaming-response/src/main.rs b/examples/basic-streaming-response/src/main.rs index 04c7f8ec..d90ebd33 100644 --- a/examples/basic-streaming-response/src/main.rs +++ b/examples/basic-streaming-response/src/main.rs @@ -30,9 +30,6 @@ async fn main() -> Result<(), Error> { .with_max_level(tracing::Level::INFO) // disable printing the name of the module in every log line. 
.with_target(false) - // this needs to be set to false, otherwise ANSI color codes will - // show up in a confusing manner in CloudWatch logs. - .with_ansi(false) // disabling time is handy because CloudWatch will add the ingestion time. .without_time() .init(); diff --git a/examples/extension-basic/Cargo.toml b/examples/extension-basic/Cargo.toml index 94ee4926..caf0818c 100644 --- a/examples/extension-basic/Cargo.toml +++ b/examples/extension-basic/Cargo.toml @@ -15,6 +15,6 @@ lambda-extension = { path = "../../lambda-extension" } serde = "1.0.136" tokio = { version = "1", features = ["macros"] } tracing = { version = "0.1", features = ["log"] } -tracing-subscriber = { version = "0.3", default-features = false, features = ["ansi", "fmt"] } +tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt"] } diff --git a/examples/extension-basic/src/main.rs b/examples/extension-basic/src/main.rs index 4af6a47f..f9838c6b 100644 --- a/examples/extension-basic/src/main.rs +++ b/examples/extension-basic/src/main.rs @@ -19,9 +19,6 @@ async fn main() -> Result<(), Error> { .with_max_level(tracing::Level::INFO) // disable printing the name of the module in every log line. .with_target(false) - // this needs to be set to false, otherwise ANSI color codes will - // show up in a confusing manner in CloudWatch logs. - .with_ansi(false) // disabling time is handy because CloudWatch will add the ingestion time. .without_time() .init(); diff --git a/examples/extension-combined/Cargo.toml b/examples/extension-combined/Cargo.toml index e585516a..d776f488 100644 --- a/examples/extension-combined/Cargo.toml +++ b/examples/extension-combined/Cargo.toml @@ -15,6 +15,6 @@ lambda-extension = { path = "../../lambda-extension" } serde = "1.0.136" tokio = { version = "1", features = ["macros"] } tracing = { version = "0.1", features = ["log"] } -tracing-subscriber = { version = "0.3", default-features = false, features = ["ansi", "fmt"] } +tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt"] } diff --git a/examples/extension-combined/src/main.rs b/examples/extension-combined/src/main.rs index 60d0f9e1..e05b1b7d 100644 --- a/examples/extension-combined/src/main.rs +++ b/examples/extension-combined/src/main.rs @@ -34,9 +34,6 @@ async fn main() -> Result<(), Error> { .with_max_level(tracing::Level::INFO) // disable printing the name of the module in every log line. .with_target(false) - // this needs to be set to false, otherwise ANSI color codes will - // show up in a confusing manner in CloudWatch logs. - .with_ansi(false) // disabling time is handy because CloudWatch will add the ingestion time. 
.without_time() .init(); diff --git a/examples/extension-custom-events/Cargo.toml b/examples/extension-custom-events/Cargo.toml index 90c5d322..a826a137 100644 --- a/examples/extension-custom-events/Cargo.toml +++ b/examples/extension-custom-events/Cargo.toml @@ -15,6 +15,6 @@ lambda-extension = { path = "../../lambda-extension" } serde = "1.0.136" tokio = { version = "1", features = ["macros"] } tracing = { version = "0.1", features = ["log"] } -tracing-subscriber = { version = "0.3", default-features = false, features = ["ansi", "fmt"] } +tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt"] } diff --git a/examples/extension-custom-events/src/main.rs b/examples/extension-custom-events/src/main.rs index b7574642..1d39e20f 100644 --- a/examples/extension-custom-events/src/main.rs +++ b/examples/extension-custom-events/src/main.rs @@ -21,9 +21,6 @@ async fn main() -> Result<(), Error> { .with_max_level(tracing::Level::INFO) // disable printing the name of the module in every log line. .with_target(false) - // this needs to be set to false, otherwise ANSI color codes will - // show up in a confusing manner in CloudWatch logs. - .with_ansi(false) // disabling time is handy because CloudWatch will add the ingestion time. .without_time() .init(); diff --git a/examples/extension-custom-service/Cargo.toml b/examples/extension-custom-service/Cargo.toml index 5396c137..c9ff789a 100644 --- a/examples/extension-custom-service/Cargo.toml +++ b/examples/extension-custom-service/Cargo.toml @@ -15,6 +15,6 @@ lambda-extension = { path = "../../lambda-extension" } serde = "1.0.136" tokio = { version = "1", features = ["macros"] } tracing = { version = "0.1", features = ["log"] } -tracing-subscriber = { version = "0.3", default-features = false, features = ["ansi", "fmt"] } +tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt"] } diff --git a/examples/extension-custom-service/src/main.rs b/examples/extension-custom-service/src/main.rs index fd85c91d..ec8ca68f 100644 --- a/examples/extension-custom-service/src/main.rs +++ b/examples/extension-custom-service/src/main.rs @@ -38,9 +38,6 @@ async fn main() -> Result<(), Error> { .with_max_level(tracing::Level::INFO) // disable printing the name of the module in every log line. .with_target(false) - // this needs to be set to false, otherwise ANSI color codes will - // show up in a confusing manner in CloudWatch logs. - .with_ansi(false) // disabling time is handy because CloudWatch will add the ingestion time. 
.without_time() .init(); diff --git a/examples/extension-logs-basic/Cargo.toml b/examples/extension-logs-basic/Cargo.toml index 30c09117..d1983db8 100644 --- a/examples/extension-logs-basic/Cargo.toml +++ b/examples/extension-logs-basic/Cargo.toml @@ -15,6 +15,6 @@ lambda-extension = { path = "../../lambda-extension" } serde = "1.0.136" tokio = { version = "1", features = ["macros", "rt"] } tracing = { version = "0.1", features = ["log"] } -tracing-subscriber = { version = "0.3", default-features = false, features = ["ansi", "fmt"] } +tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt"] } diff --git a/examples/extension-logs-basic/src/main.rs b/examples/extension-logs-basic/src/main.rs index 5543dec9..77065cca 100644 --- a/examples/extension-logs-basic/src/main.rs +++ b/examples/extension-logs-basic/src/main.rs @@ -20,9 +20,6 @@ async fn main() -> Result<(), Error> { .with_max_level(tracing::Level::INFO) // disable printing the name of the module in every log line. .with_target(false) - // this needs to be set to false, otherwise ANSI color codes will - // show up in a confusing manner in CloudWatch logs. - .with_ansi(false) // disabling time is handy because CloudWatch will add the ingestion time. .without_time() .init(); diff --git a/examples/extension-logs-custom-service/Cargo.toml b/examples/extension-logs-custom-service/Cargo.toml index 35a9e05d..cbbe20f6 100644 --- a/examples/extension-logs-custom-service/Cargo.toml +++ b/examples/extension-logs-custom-service/Cargo.toml @@ -15,6 +15,6 @@ lambda-extension = { path = "../../lambda-extension" } serde = "1.0.136" tokio = { version = "1", features = ["macros"] } tracing = { version = "0.1", features = ["log"] } -tracing-subscriber = { version = "0.3", default-features = false, features = ["ansi", "fmt"] } +tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt"] } diff --git a/examples/extension-logs-custom-service/src/main.rs b/examples/extension-logs-custom-service/src/main.rs index 9137c017..ebe1330d 100644 --- a/examples/extension-logs-custom-service/src/main.rs +++ b/examples/extension-logs-custom-service/src/main.rs @@ -61,9 +61,6 @@ async fn main() -> Result<(), Error> { .with_max_level(tracing::Level::INFO) // disable printing the name of the module in every log line. .with_target(false) - // this needs to be set to false, otherwise ANSI color codes will - // show up in a confusing manner in CloudWatch logs. - .with_ansi(false) // disabling time is handy because CloudWatch will add the ingestion time. 
.without_time() .init(); diff --git a/examples/extension-logs-kinesis-firehose/Cargo.toml b/examples/extension-logs-kinesis-firehose/Cargo.toml index 547ad48e..0e056b1c 100644 --- a/examples/extension-logs-kinesis-firehose/Cargo.toml +++ b/examples/extension-logs-kinesis-firehose/Cargo.toml @@ -13,7 +13,7 @@ edition = "2021" lambda-extension = { path = "../../lambda-extension" } tokio = { version = "1.17.0", features = ["full"] } tracing = { version = "0.1", features = ["log"] } -tracing-subscriber = { version = "0.3", default-features = false, features = ["ansi", "fmt"] } +tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt"] } aws-config = "0.13.0" aws-sdk-firehose = "0.13.0" diff --git a/examples/extension-logs-kinesis-firehose/src/main.rs b/examples/extension-logs-kinesis-firehose/src/main.rs index 68c9421c..8586e1a9 100644 --- a/examples/extension-logs-kinesis-firehose/src/main.rs +++ b/examples/extension-logs-kinesis-firehose/src/main.rs @@ -58,9 +58,6 @@ async fn main() -> Result<(), Error> { .with_max_level(tracing::Level::INFO) // disable printing the name of the module in every log line. .with_target(false) - // this needs to be set to false, otherwise ANSI color codes will - // show up in a confusing manner in CloudWatch logs. - .with_ansi(false) // disabling time is handy because CloudWatch will add the ingestion time. .without_time() .init(); diff --git a/examples/extension-telemetry-basic/Cargo.toml b/examples/extension-telemetry-basic/Cargo.toml index bc426b68..869b604d 100644 --- a/examples/extension-telemetry-basic/Cargo.toml +++ b/examples/extension-telemetry-basic/Cargo.toml @@ -15,6 +15,6 @@ lambda-extension = { path = "../../lambda-extension" } serde = "1.0.136" tokio = { version = "1", features = ["macros", "rt"] } tracing = { version = "0.1", features = ["log"] } -tracing-subscriber = { version = "0.3", default-features = false, features = ["ansi", "fmt"] } +tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt"] } diff --git a/examples/extension-telemetry-basic/src/main.rs b/examples/extension-telemetry-basic/src/main.rs index f522808c..03974bf6 100644 --- a/examples/extension-telemetry-basic/src/main.rs +++ b/examples/extension-telemetry-basic/src/main.rs @@ -45,9 +45,6 @@ async fn main() -> Result<(), Error> { .with_max_level(tracing::Level::INFO) // disable printing the name of the module in every log line. .with_target(false) - // this needs to be set to false, otherwise ANSI color codes will - // show up in a confusing manner in CloudWatch logs. - .with_ansi(false) // disabling time is handy because CloudWatch will add the ingestion time. 
.without_time() .init(); diff --git a/examples/http-axum-diesel/Cargo.toml b/examples/http-axum-diesel/Cargo.toml index dd37346f..5a97cfab 100644 --- a/examples/http-axum-diesel/Cargo.toml +++ b/examples/http-axum-diesel/Cargo.toml @@ -20,4 +20,4 @@ lambda_runtime = { path = "../../lambda-runtime" } serde = "1.0.159" tokio = { version = "1", features = ["macros"] } tracing = { version = "0.1", features = ["log"] } -tracing-subscriber = { version = "0.3", default-features = false, features = ["ansi", "fmt"] } +tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt"] } diff --git a/examples/http-axum-diesel/src/main.rs b/examples/http-axum-diesel/src/main.rs index 227e23dd..bb47152d 100644 --- a/examples/http-axum-diesel/src/main.rs +++ b/examples/http-axum-diesel/src/main.rs @@ -97,9 +97,6 @@ async fn main() -> Result<(), Error> { .with_max_level(tracing::Level::INFO) // disable printing the name of the module in every log line. .with_target(false) - // this needs to be set to false, otherwise ANSI color codes will - // show up in a confusing manner in CloudWatch logs. - .with_ansi(false) // disabling time is handy because CloudWatch will add the ingestion time. .without_time() .init(); diff --git a/examples/http-axum/Cargo.toml b/examples/http-axum/Cargo.toml index 6a0e8905..50db3ebf 100644 --- a/examples/http-axum/Cargo.toml +++ b/examples/http-axum/Cargo.toml @@ -15,7 +15,7 @@ lambda_http = { path = "../../lambda-http" } lambda_runtime = { path = "../../lambda-runtime" } tokio = { version = "1", features = ["macros"] } tracing = { version = "0.1", features = ["log"] } -tracing-subscriber = { version = "0.3", default-features = false, features = ["ansi", "fmt"] } +tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt"] } axum = "0.6.4" serde_json = "1.0" diff --git a/examples/http-axum/src/main.rs b/examples/http-axum/src/main.rs index 7770d861..c2805be1 100644 --- a/examples/http-axum/src/main.rs +++ b/examples/http-axum/src/main.rs @@ -42,9 +42,6 @@ async fn main() -> Result<(), Error> { .with_max_level(tracing::Level::INFO) // disable printing the name of the module in every log line. .with_target(false) - // this needs to be set to false, otherwise ANSI color codes will - // show up in a confusing manner in CloudWatch logs. - .with_ansi(false) // disabling time is handy because CloudWatch will add the ingestion time. .without_time() .init(); diff --git a/examples/http-basic-lambda/Cargo.toml b/examples/http-basic-lambda/Cargo.toml index ad53161d..1a218330 100644 --- a/examples/http-basic-lambda/Cargo.toml +++ b/examples/http-basic-lambda/Cargo.toml @@ -15,6 +15,6 @@ lambda_http = { path = "../../lambda-http" } lambda_runtime = { path = "../../lambda-runtime" } tokio = { version = "1", features = ["macros"] } tracing = { version = "0.1", features = ["log"] } -tracing-subscriber = { version = "0.3", default-features = false, features = ["ansi", "fmt"] } +tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt"] } diff --git a/examples/http-basic-lambda/src/main.rs b/examples/http-basic-lambda/src/main.rs index 88db4886..5794dc8b 100644 --- a/examples/http-basic-lambda/src/main.rs +++ b/examples/http-basic-lambda/src/main.rs @@ -24,9 +24,6 @@ async fn main() -> Result<(), Error> { .with_max_level(tracing::Level::INFO) // disable printing the name of the module in every log line. 
.with_target(false) - // this needs to be set to false, otherwise ANSI color codes will - // show up in a confusing manner in CloudWatch logs. - .with_ansi(false) // disabling time is handy because CloudWatch will add the ingestion time. .without_time() .init(); diff --git a/examples/http-cors/Cargo.toml b/examples/http-cors/Cargo.toml index 65df64d4..9fd7f25b 100644 --- a/examples/http-cors/Cargo.toml +++ b/examples/http-cors/Cargo.toml @@ -16,6 +16,6 @@ lambda_runtime = { path = "../../lambda-runtime" } tokio = { version = "1", features = ["macros"] } tower-http = { version = "0.3.3", features = ["cors"] } tracing = { version = "0.1", features = ["log"] } -tracing-subscriber = { version = "0.3", default-features = false, features = ["ansi", "fmt"] } +tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt"] } diff --git a/examples/http-cors/src/main.rs b/examples/http-cors/src/main.rs index ea1f0372..e60fb441 100644 --- a/examples/http-cors/src/main.rs +++ b/examples/http-cors/src/main.rs @@ -10,9 +10,6 @@ async fn main() -> Result<(), Error> { .with_max_level(tracing::Level::INFO) // disable printing the name of the module in every log line. .with_target(false) - // this needs to be set to false, otherwise ANSI color codes will - // show up in a confusing manner in CloudWatch logs. - .with_ansi(false) // disabling time is handy because CloudWatch will add the ingestion time. .without_time() .init(); diff --git a/examples/http-dynamodb/Cargo.toml b/examples/http-dynamodb/Cargo.toml index 6b6a0205..c3f6d8be 100644 --- a/examples/http-dynamodb/Cargo.toml +++ b/examples/http-dynamodb/Cargo.toml @@ -20,6 +20,6 @@ aws-sdk-dynamodb = "0.21.0" aws-config = "0.51.0" tokio = { version = "1", features = ["macros"] } tracing = { version = "0.1", features = ["log"] } -tracing-subscriber = { version = "0.3", default-features = false, features = ["ansi", "fmt"] } +tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt"] } diff --git a/examples/http-dynamodb/src/main.rs b/examples/http-dynamodb/src/main.rs index 6a0c8947..5a7030f9 100644 --- a/examples/http-dynamodb/src/main.rs +++ b/examples/http-dynamodb/src/main.rs @@ -59,9 +59,6 @@ async fn main() -> Result<(), Error> { .with_max_level(tracing::Level::INFO) // disable printing the name of the module in every log line. .with_target(false) - // this needs to be set to false, otherwise ANSI color codes will - // show up in a confusing manner in CloudWatch logs. - .with_ansi(false) // disabling time is handy because CloudWatch will add the ingestion time. 
.without_time() .init(); diff --git a/examples/http-query-parameters/Cargo.toml b/examples/http-query-parameters/Cargo.toml index 5089a85e..7aeb1189 100644 --- a/examples/http-query-parameters/Cargo.toml +++ b/examples/http-query-parameters/Cargo.toml @@ -15,6 +15,6 @@ lambda_http = { path = "../../lambda-http" } lambda_runtime = { path = "../../lambda-runtime" } tokio = { version = "1", features = ["macros"] } tracing = { version = "0.1", features = ["log"] } -tracing-subscriber = { version = "0.3", default-features = false, features = ["ansi", "fmt"] } +tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt"] } diff --git a/examples/http-query-parameters/src/main.rs b/examples/http-query-parameters/src/main.rs index 0300df37..e189d12d 100644 --- a/examples/http-query-parameters/src/main.rs +++ b/examples/http-query-parameters/src/main.rs @@ -27,9 +27,6 @@ async fn main() -> Result<(), Error> { .with_max_level(tracing::Level::INFO) // disable printing the name of the module in every log line. .with_target(false) - // this needs to be set to false, otherwise ANSI color codes will - // show up in a confusing manner in CloudWatch logs. - .with_ansi(false) // disabling time is handy because CloudWatch will add the ingestion time. .without_time() .init(); diff --git a/examples/http-raw-path/Cargo.toml b/examples/http-raw-path/Cargo.toml index 4cb3e23f..f4060428 100644 --- a/examples/http-raw-path/Cargo.toml +++ b/examples/http-raw-path/Cargo.toml @@ -15,6 +15,6 @@ lambda_http = { path = "../../lambda-http" } lambda_runtime = { path = "../../lambda-runtime" } tokio = { version = "1", features = ["macros"] } tracing = { version = "0.1", features = ["log"] } -tracing-subscriber = { version = "0.3", default-features = false, features = ["ansi", "fmt"] } +tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt"] } diff --git a/examples/http-raw-path/src/main.rs b/examples/http-raw-path/src/main.rs index 3694e61b..7fa6e6d5 100644 --- a/examples/http-raw-path/src/main.rs +++ b/examples/http-raw-path/src/main.rs @@ -7,9 +7,6 @@ async fn main() -> Result<(), Error> { .with_max_level(tracing::Level::INFO) // disable printing the name of the module in every log line. .with_target(false) - // this needs to be set to false, otherwise ANSI color codes will - // show up in a confusing manner in CloudWatch logs. - .with_ansi(false) // disabling time is handy because CloudWatch will add the ingestion time. .without_time() .init(); diff --git a/examples/http-shared-resource/Cargo.toml b/examples/http-shared-resource/Cargo.toml index 4d655489..207f253b 100644 --- a/examples/http-shared-resource/Cargo.toml +++ b/examples/http-shared-resource/Cargo.toml @@ -15,6 +15,6 @@ lambda_http = { path = "../../lambda-http" } lambda_runtime = { path = "../../lambda-runtime" } tokio = { version = "1", features = ["macros"] } tracing = { version = "0.1", features = ["log"] } -tracing-subscriber = { version = "0.3", default-features = false, features = ["ansi", "fmt"] } +tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt"] } diff --git a/examples/http-shared-resource/src/main.rs b/examples/http-shared-resource/src/main.rs index 16493452..d76ccec4 100644 --- a/examples/http-shared-resource/src/main.rs +++ b/examples/http-shared-resource/src/main.rs @@ -17,9 +17,6 @@ async fn main() -> Result<(), Error> { .with_max_level(tracing::Level::INFO) // disable printing the name of the module in every log line. 
.with_target(false) - // this needs to be set to false, otherwise ANSI color codes will - // show up in a confusing manner in CloudWatch logs. - .with_ansi(false) // disabling time is handy because CloudWatch will add the ingestion time. .without_time() .init(); diff --git a/examples/http-tower-trace/Cargo.toml b/examples/http-tower-trace/Cargo.toml index bf4a5b0c..2b8f7a60 100644 --- a/examples/http-tower-trace/Cargo.toml +++ b/examples/http-tower-trace/Cargo.toml @@ -16,4 +16,4 @@ lambda_runtime = "0.5.1" tokio = { version = "1", features = ["macros"] } tower-http = { version = "0.3.4", features = ["trace"] } tracing = { version = "0.1", features = ["log"] } -tracing-subscriber = { version = "0.3", default-features = false, features = ["ansi", "fmt"] } +tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt"] } diff --git a/examples/http-tower-trace/src/main.rs b/examples/http-tower-trace/src/main.rs index 678d79cd..072f8256 100644 --- a/examples/http-tower-trace/src/main.rs +++ b/examples/http-tower-trace/src/main.rs @@ -14,9 +14,6 @@ async fn main() -> Result<(), Error> { .with_max_level(tracing::Level::INFO) // disable printing the name of the module in every log line. .with_target(false) - // this needs to be set to false, otherwise ANSI color codes will - // show up in a confusing manner in CloudWatch logs. - .with_ansi(false) // disabling time is handy because CloudWatch will add the ingestion time. .without_time() .init(); diff --git a/lambda-http/README.md b/lambda-http/README.md index 161a5576..464be88b 100644 --- a/lambda-http/README.md +++ b/lambda-http/README.md @@ -50,9 +50,8 @@ use serde_json::json; #[tokio::main] async fn main() -> Result<(), Error> { tracing_subscriber::fmt() - .with_ansi(false) .without_time() - .with_max_level(tracing_subscriber::filter::LevelFilter::INFO) + .with_max_level(tracing::Level::INFO) .init(); run(service_fn(function_handler)).await @@ -89,9 +88,8 @@ use serde_json::json; #[tokio::main] async fn main() -> Result<(), Error> { tracing_subscriber::fmt() - .with_ansi(false) .without_time() - .with_max_level(tracing_subscriber::filter::LevelFilter::INFO) + .with_max_level(tracing::Level::INFO) .init(); run(service_fn(function_handler)).await @@ -131,9 +129,8 @@ use serde_json::json; #[tokio::main] async fn main() -> Result<(), Error> { tracing_subscriber::fmt() - .with_ansi(false) .without_time() - .with_max_level(tracing_subscriber::filter::LevelFilter::INFO) + .with_max_level(tracing::Level::INFO) .init(); run(service_fn(function_handler)).await @@ -191,9 +188,8 @@ use serde_json::json; #[tokio::main] async fn main() -> Result<(), Error> { tracing_subscriber::fmt() - .with_ansi(false) .without_time() - .with_max_level(tracing_subscriber::filter::LevelFilter::INFO) + .with_max_level(tracing::Level::INFO) .init(); let config = aws_config::from_env() diff --git a/lambda-integration-tests/Cargo.toml b/lambda-integration-tests/Cargo.toml index 8dd28f0a..1b0fc3ef 100644 --- a/lambda-integration-tests/Cargo.toml +++ b/lambda-integration-tests/Cargo.toml @@ -19,4 +19,4 @@ lambda-extension = { path = "../lambda-extension" } serde = { version = "1", features = ["derive"] } tokio = { version = "1", features = ["full"] } tracing = { version = "0.1", features = ["log"] } -tracing-subscriber = "0.3" +tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt"] } diff --git a/lambda-integration-tests/src/bin/extension-fn.rs b/lambda-integration-tests/src/bin/extension-fn.rs index ea5fc26c..5e9ec553 100644 --- 
a/lambda-integration-tests/src/bin/extension-fn.rs +++ b/lambda-integration-tests/src/bin/extension-fn.rs @@ -20,9 +20,6 @@ async fn main() -> Result<(), Error> { // While `tracing` is used internally, `log` can be used as well if preferred. tracing_subscriber::fmt() .with_max_level(tracing::Level::INFO) - // this needs to be set to false, otherwise ANSI color codes will - // show up in a confusing manner in CloudWatch logs. - .with_ansi(false) // disabling time is handy because CloudWatch will add the ingestion time. .without_time() .init(); diff --git a/lambda-integration-tests/src/bin/extension-trait.rs b/lambda-integration-tests/src/bin/extension-trait.rs index ecf46c81..e2c73fa3 100644 --- a/lambda-integration-tests/src/bin/extension-trait.rs +++ b/lambda-integration-tests/src/bin/extension-trait.rs @@ -80,9 +80,6 @@ async fn main() -> Result<(), Error> { // While `tracing` is used internally, `log` can be used as well if preferred. tracing_subscriber::fmt() .with_max_level(tracing::Level::INFO) - // this needs to be set to false, otherwise ANSI color codes will - // show up in a confusing manner in CloudWatch logs. - .with_ansi(false) // disabling time is handy because CloudWatch will add the ingestion time. .without_time() .init(); diff --git a/lambda-integration-tests/src/bin/http-fn.rs b/lambda-integration-tests/src/bin/http-fn.rs index cd252280..8107f423 100644 --- a/lambda-integration-tests/src/bin/http-fn.rs +++ b/lambda-integration-tests/src/bin/http-fn.rs @@ -17,9 +17,6 @@ async fn main() -> Result<(), Error> { // While `tracing` is used internally, `log` can be used as well if preferred. tracing_subscriber::fmt() .with_max_level(tracing::Level::INFO) - // this needs to be set to false, otherwise ANSI color codes will - // show up in a confusing manner in CloudWatch logs. - .with_ansi(false) // disabling time is handy because CloudWatch will add the ingestion time. .without_time() .init(); diff --git a/lambda-integration-tests/src/bin/http-trait.rs b/lambda-integration-tests/src/bin/http-trait.rs index 765b0d66..d8e6f74f 100644 --- a/lambda-integration-tests/src/bin/http-trait.rs +++ b/lambda-integration-tests/src/bin/http-trait.rs @@ -75,9 +75,6 @@ impl Service for MyHandler { async fn main() -> Result<(), Error> { tracing_subscriber::fmt() .with_max_level(tracing::Level::INFO) - // this needs to be set to false, otherwise ANSI color codes will - // show up in a confusing manner in CloudWatch logs. - .with_ansi(false) // disabling time is handy because CloudWatch will add the ingestion time. .without_time() .init(); diff --git a/lambda-integration-tests/src/bin/logs-trait.rs b/lambda-integration-tests/src/bin/logs-trait.rs index 3f5a4909..b474bc8d 100644 --- a/lambda-integration-tests/src/bin/logs-trait.rs +++ b/lambda-integration-tests/src/bin/logs-trait.rs @@ -64,9 +64,6 @@ impl Service> for MyLogsProcessor { async fn main() -> Result<(), Error> { tracing_subscriber::fmt() .with_max_level(tracing::Level::INFO) - // this needs to be set to false, otherwise ANSI color codes will - // show up in a confusing manner in CloudWatch logs. - .with_ansi(false) // disabling time is handy because CloudWatch will add the ingestion time. 
.without_time() .init(); diff --git a/lambda-integration-tests/src/bin/runtime-fn.rs b/lambda-integration-tests/src/bin/runtime-fn.rs index 1b3f3e0d..d16717aa 100644 --- a/lambda-integration-tests/src/bin/runtime-fn.rs +++ b/lambda-integration-tests/src/bin/runtime-fn.rs @@ -28,9 +28,6 @@ async fn main() -> Result<(), Error> { // While `tracing` is used internally, `log` can be used as well if preferred. tracing_subscriber::fmt() .with_max_level(tracing::Level::INFO) - // this needs to be set to false, otherwise ANSI color codes will - // show up in a confusing manner in CloudWatch logs. - .with_ansi(false) // disabling time is handy because CloudWatch will add the ingestion time. .without_time() .init(); diff --git a/lambda-integration-tests/src/bin/runtime-trait.rs b/lambda-integration-tests/src/bin/runtime-trait.rs index b925e138..0bf31e43 100644 --- a/lambda-integration-tests/src/bin/runtime-trait.rs +++ b/lambda-integration-tests/src/bin/runtime-trait.rs @@ -84,9 +84,6 @@ impl Service> for MyHandler { async fn main() -> Result<(), Error> { tracing_subscriber::fmt() .with_max_level(tracing::Level::INFO) - // this needs to be set to false, otherwise ANSI color codes will - // show up in a confusing manner in CloudWatch logs. - .with_ansi(false) // disabling time is handy because CloudWatch will add the ingestion time. .without_time() .init(); From 33cce7745373685fe4e615aa7621fe3272c0f371 Mon Sep 17 00:00:00 2001 From: Peter Borkuti Date: Mon, 10 Jul 2023 16:53:55 +0200 Subject: [PATCH 07/27] Fix s3 tests for s3 examples (#675) * upgrade basic-s3-thumbnail example + refactor test I noticed that I am testing thumbnailer module when I compared the generated thumbnail with a stored one. This can lead test failures. So I mocked the thumbnail generation. * refactor test for basic-s3-object-lambda example --------- Co-authored-by: borkupe --- .../src/main.rs | 61 +++++++----------- examples/basic-s3-thumbnail/Cargo.toml | 13 ++-- examples/basic-s3-thumbnail/README.md | 16 +++++ examples/basic-s3-thumbnail/src/main.rs | 44 ++++++------- examples/basic-s3-thumbnail/src/s3.rs | 4 +- .../basic-s3-thumbnail/testdata/image.png | Bin 282 -> 0 bytes .../basic-s3-thumbnail/testdata/thumbnail.png | Bin 82 -> 0 bytes 7 files changed, 71 insertions(+), 67 deletions(-) delete mode 100644 examples/basic-s3-thumbnail/testdata/image.png delete mode 100644 examples/basic-s3-thumbnail/testdata/thumbnail.png diff --git a/examples/basic-s3-object-lambda-thumbnail/src/main.rs b/examples/basic-s3-object-lambda-thumbnail/src/main.rs index 771a829c..328e7500 100644 --- a/examples/basic-s3-object-lambda-thumbnail/src/main.rs +++ b/examples/basic-s3-object-lambda-thumbnail/src/main.rs @@ -1,10 +1,9 @@ -use std::{error, io::Cursor}; +use std::error; use aws_lambda_events::s3::object_lambda::{GetObjectContext, S3ObjectLambdaEvent}; use aws_sdk_s3::Client as S3Client; use lambda_runtime::{run, service_fn, Error, LambdaEvent}; use s3::{GetFile, SendFile}; -use thumbnailer::{create_thumbnails, ThumbnailSize}; mod s3; @@ -35,28 +34,21 @@ pub(crate) async fn function_handler( let thumbnail = get_thumbnail(image, size); tracing::info!("thumbnail created. 
Length: {}", thumbnail.len()); - // It sends the thumbnail back to the user - client.send_file(route, token, thumbnail).await - - /* - match client.send_file(route, token, thumbnail).await { - Ok(msg) => tracing::info!(msg), - Err(msg) => tracing::info!(msg) - }; - - tracing::info!("handler ends"); - - Ok(()) - */ } +#[cfg(not(test))] fn get_thumbnail(vec: Vec, size: u32) -> Vec { - let reader = Cursor::new(vec); - let mut thumbnails = create_thumbnails(reader, mime::IMAGE_PNG, [ThumbnailSize::Custom((size, size))]).unwrap(); + let reader = std::io::Cursor::new(vec); + let mut thumbnails = thumbnailer::create_thumbnails( + reader, + mime::IMAGE_PNG, + [thumbnailer::ThumbnailSize::Custom((size, size))], + ) + .unwrap(); let thumbnail = thumbnails.pop().unwrap(); - let mut buf = Cursor::new(Vec::new()); + let mut buf = std::io::Cursor::new(Vec::new()); thumbnail.write_png(&mut buf).unwrap(); buf.into_inner() @@ -85,11 +77,17 @@ async fn main() -> Result<(), Error> { } #[cfg(test)] -mod tests { - use std::fs::File; - use std::io::BufReader; - use std::io::Read; +fn get_thumbnail(vec: Vec, _size: u32) -> Vec { + let s = unsafe { std::str::from_utf8_unchecked(&vec) }; + match s { + "IMAGE" => "THUMBNAIL".into(), + _ => "Input is not IMAGE".into(), + } +} + +#[cfg(test)] +mod tests { use super::*; use async_trait::async_trait; use aws_lambda_events::s3::object_lambda::Configuration; @@ -122,13 +120,12 @@ mod tests { let mut mock = MockFakeS3Client::new(); mock.expect_get_file() - .withf(|u: &String| u.eq("S3_URL")) - .returning(|_1| Ok(get_file("testdata/image.png"))); + .withf(|u| u.eq("S3_URL")) + .returning(|_1| Ok("IMAGE".into())); mock.expect_send_file() - .withf(|r: &String, t: &String, by| { - let thumbnail = get_file("testdata/thumbnail.png"); - return r.eq("O_ROUTE") && t.eq("O_TOKEN") && by == &thumbnail; + .withf(|r, t, by| { + return r.eq("O_ROUTE") && t.eq("O_TOKEN") && by == "THUMBNAIL".as_bytes(); }) .returning(|_1, _2, _3| Ok("File sent.".to_string())); @@ -141,16 +138,6 @@ mod tests { assert_eq!(("File sent."), result); } - fn get_file(name: &str) -> Vec { - let f = File::open(name); - let mut reader = BufReader::new(f.unwrap()); - let mut buffer = Vec::new(); - - reader.read_to_end(&mut buffer).unwrap(); - - return buffer; - } - fn get_s3_event() -> S3ObjectLambdaEvent { return S3ObjectLambdaEvent { x_amz_request_id: ("ID".to_string()), diff --git a/examples/basic-s3-thumbnail/Cargo.toml b/examples/basic-s3-thumbnail/Cargo.toml index 7c788a00..6bbe11b7 100644 --- a/examples/basic-s3-thumbnail/Cargo.toml +++ b/examples/basic-s3-thumbnail/Cargo.toml @@ -20,14 +20,15 @@ lambda_runtime = { path = "../../lambda-runtime" } serde = "1" tokio = { version = "1", features = ["macros"] } tracing = { version = "0.1" } -tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt"] } -aws-config = "0.54.1" -aws-sdk-s3 = "0.24.0" +tracing-subscriber = { version = "0.3", default-features = false, features = ["ansi", "fmt"] } +aws-config = "0.55" +aws-smithy-http = "0.55.3" +aws-sdk-s3 = "0.28" thumbnailer = "0.4.0" mime = "0.3.16" -async-trait = "0.1.66" +async-trait = "0.1.68" [dev-dependencies] -mockall = "0.11.3" -tokio-test = "0.4.2" +mockall = "0.11" +tokio-test = "0.4" chrono = "0.4" diff --git a/examples/basic-s3-thumbnail/README.md b/examples/basic-s3-thumbnail/README.md index de2d56f8..000874cc 100644 --- a/examples/basic-s3-thumbnail/README.md +++ b/examples/basic-s3-thumbnail/README.md @@ -5,6 +5,22 @@ it downloads the created file, generates a thumbnail from 
 it (it assumes that the file is an image) and uploads it to S3 into a bucket named [original-bucket-name]-thumbs.
+## Set up
+1. Create a Lambda function and upload the bootstrap.zip
+2. Go to the AWS S3 console
+3. Create a bucket, for example bucketx
+4. Create another bucket bucketx-thumbs
+5. Go to the bucketx Properties tab, Event notifications
+6. Create a Lambda event notification for "All object create events" and select your Lambda function
+7. Go to the Lambda function's Configuration tab and open the role name
+8. Add the "AmazonS3FullAccess" permission
+
+## Test
+
+1. Go to S3 and upload a PNG picture into bucketx. Make sure the file name contains no spaces or special characters
+2. Go to the bucketx-thumbs bucket and check that a thumbnail image was created there.
+
+
 ## Build & Deploy
 1. Install [cargo-lambda](https://github.com/cargo-lambda/cargo-lambda#installation)
diff --git a/examples/basic-s3-thumbnail/src/main.rs b/examples/basic-s3-thumbnail/src/main.rs
index d996de0c..d92c822b 100644
--- a/examples/basic-s3-thumbnail/src/main.rs
+++ b/examples/basic-s3-thumbnail/src/main.rs
@@ -1,10 +1,7 @@
-use std::io::Cursor;
-
 use aws_lambda_events::{event::s3::S3Event, s3::S3EventRecord};
 use aws_sdk_s3::Client as S3Client;
 use lambda_runtime::{run, service_fn, Error, LambdaEvent};
 use s3::{GetFile, PutFile};
-use thumbnailer::{create_thumbnails, ThumbnailSize};
 
 mod s3;
 
@@ -86,7 +83,12 @@ fn get_file_props(record: S3EventRecord) -> Result<(String, String), String> {
     Ok((bucket, key))
 }
 
+#[cfg(not(test))]
 fn get_thumbnail(vec: Vec<u8>, size: u32) -> Result<Vec<u8>, String> {
+    use std::io::Cursor;
+
+    use thumbnailer::{create_thumbnails, ThumbnailSize};
+
     let reader = Cursor::new(vec);
     let mime = mime::IMAGE_PNG;
     let sizes = [ThumbnailSize::Custom((size, size))];
@@ -126,12 +128,19 @@ async fn main() -> Result<(), Error> {
     Ok(())
 }
 
+#[cfg(test)]
+fn get_thumbnail(vec: Vec<u8>, _size: u32) -> Result<Vec<u8>, String> {
+    let s = unsafe { std::str::from_utf8_unchecked(&vec) };
+
+    match s {
+        "IMAGE" => Ok("THUMBNAIL".into()),
+        _ => Err("Input is not IMAGE".to_string()),
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use std::collections::HashMap;
-    use std::fs::File;
-    use std::io::BufReader;
-    use std::io::Read;
 
     use super::*;
     use async_trait::async_trait;
@@ -141,7 +150,7 @@ mod tests {
     use aws_lambda_events::s3::S3Object;
     use aws_lambda_events::s3::S3RequestParameters;
     use aws_lambda_events::s3::S3UserIdentity;
-    use aws_sdk_s3::error::GetObjectError;
+    use aws_sdk_s3::operation::get_object::GetObjectError;
     use lambda_runtime::{Context, LambdaEvent};
     use mockall::mock;
     use s3::GetFile;
@@ -171,15 +180,14 @@ mod tests {
         let mut mock = MockFakeS3Client::new();
 
         mock.expect_get_file()
-            .withf(|b: &str, k: &str| b.eq(bucket) && k.eq(key))
-            .returning(|_1, _2| Ok(get_file("testdata/image.png")));
+            .withf(|b, k| b.eq(bucket) && k.eq(key))
+            .returning(|_1, _2| Ok("IMAGE".into()));
 
         mock.expect_put_file()
-            .withf(|bu: &str, ke: &str, by| {
-                let thumbnail = get_file("testdata/thumbnail.png");
-                return bu.eq("test-bucket-thumbs") && ke.eq(key) && by == &thumbnail;
+            .withf(|bu, ke, by| {
+                return bu.eq("test-bucket-thumbs") && ke.eq(key) && by.eq("THUMBNAIL".as_bytes());
             })
-            .returning(|_1, _2, _3| Ok("Done".to_string()));
+            .return_const(Ok("Done".to_string()));
 
         let payload = get_s3_event("ObjectCreated", bucket, key);
         let event = LambdaEvent { payload, context };
@@ -189,16 +197,6 @@ mod tests {
         assert_eq!((), result);
     }
 
-    fn get_file(name: &str) -> Vec<u8> {
-        let f = File::open(name);
-        let mut reader = 
BufReader::new(f.unwrap()); - let mut buffer = Vec::new(); - - reader.read_to_end(&mut buffer).unwrap(); - - return buffer; - } - fn get_s3_event(event_name: &str, bucket_name: &str, object_key: &str) -> S3Event { return S3Event { records: (vec![get_s3_event_record(event_name, bucket_name, object_key)]), diff --git a/examples/basic-s3-thumbnail/src/s3.rs b/examples/basic-s3-thumbnail/src/s3.rs index 83ef7bc7..17d7f975 100644 --- a/examples/basic-s3-thumbnail/src/s3.rs +++ b/examples/basic-s3-thumbnail/src/s3.rs @@ -1,5 +1,7 @@ use async_trait::async_trait; -use aws_sdk_s3::{error::GetObjectError, types::ByteStream, Client as S3Client}; +use aws_sdk_s3::operation::get_object::GetObjectError; +use aws_sdk_s3::Client as S3Client; +use aws_smithy_http::byte_stream::ByteStream; #[async_trait] pub trait GetFile { diff --git a/examples/basic-s3-thumbnail/testdata/image.png b/examples/basic-s3-thumbnail/testdata/image.png deleted file mode 100644 index 078d155f6bf6735eb087eb0195b3e35f9f424d04..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 282 zcmeAS@N?(olHy`uVBq!ia0vp^DIm-UBp4!QuJ{S0SkfJR9T^xl_H+M9WCijSl0AZa z85pY67#JE_7#My5g&JNkFq9fFFuY1&V6d9Oz#v{QXIG#NP=cu>$S;_Ip=|P53lJ~K z+uenM@oty!5+IMg#M9T6{W-Ikn3DL(-uF5{ArVg(#}JFt$q5pyixWh8ngSjC85meA z7#KARcm4s&tCqM%l%ynf4NqV|ChEy=Vy|59;VQAR!XXWJ= d863$M85tR8F)&*H Date: Sun, 16 Jul 2023 23:53:01 +0300 Subject: [PATCH 08/27] Serialize APIGW queryStringParameters properly (#676) Reference: https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-develop-integrations-lambda.html 'queryStringParameters' were serialized incorrectly to format like queryStringParameters:{"key":["value"]} when it should be like queryStringParameters:{"key":"value"} --- lambda-events/Cargo.toml | 2 +- lambda-events/src/event/apigw/mod.rs | 30 ++++ ...-apigw-request-multi-value-parameters.json | 136 ++++++++++++++++++ ...igw-v2-request-multi-value-parameters.json | 61 ++++++++ 4 files changed, 228 insertions(+), 1 deletion(-) create mode 100644 lambda-events/src/fixtures/example-apigw-request-multi-value-parameters.json create mode 100644 lambda-events/src/fixtures/example-apigw-v2-request-multi-value-parameters.json diff --git a/lambda-events/Cargo.toml b/lambda-events/Cargo.toml index 28df6b4a..27b577cb 100644 --- a/lambda-events/Cargo.toml +++ b/lambda-events/Cargo.toml @@ -30,7 +30,7 @@ chrono = { version = "0.4.23", default-features = false, features = [ "serde", "std", ], optional = true } -query_map = { version = "^0.6", features = ["serde", "url-query"], optional = true } +query_map = { version = "^0.7", features = ["serde", "url-query"], optional = true } flate2 = { version = "1.0.24", optional = true } [features] diff --git a/lambda-events/src/event/apigw/mod.rs b/lambda-events/src/event/apigw/mod.rs index b595d825..9119bdc7 100644 --- a/lambda-events/src/event/apigw/mod.rs +++ b/lambda-events/src/event/apigw/mod.rs @@ -34,6 +34,7 @@ where #[serde(serialize_with = "serialize_multi_value_headers")] pub multi_value_headers: HeaderMap, #[serde(default, deserialize_with = "query_map::serde::standard::deserialize_empty")] + #[serde(serialize_with = "query_map::serde::aws_api_gateway_v1::serialize_query_string_parameters")] pub query_string_parameters: QueryMap, #[serde(default, deserialize_with = "query_map::serde::standard::deserialize_empty")] pub multi_value_query_string_parameters: QueryMap, @@ -151,6 +152,7 @@ pub struct ApiGatewayV2httpRequest { deserialize_with = 
"query_map::serde::aws_api_gateway_v2::deserialize_empty" )] #[serde(skip_serializing_if = "QueryMap::is_empty")] + #[serde(serialize_with = "query_map::serde::aws_api_gateway_v2::serialize_query_string_parameters")] pub query_string_parameters: QueryMap, #[serde(deserialize_with = "deserialize_lambda_map")] #[serde(default)] @@ -832,6 +834,21 @@ mod test { assert_eq!(parsed, reparsed); } + #[test] + #[cfg(feature = "apigw")] + fn example_apigw_request_multi_value_parameters() { + let data = include_bytes!("../../fixtures/example-apigw-request-multi-value-parameters.json"); + let parsed: ApiGatewayProxyRequest = serde_json::from_slice(data).unwrap(); + let output: String = serde_json::to_string(&parsed).unwrap(); + let reparsed: ApiGatewayProxyRequest = serde_json::from_slice(output.as_bytes()).unwrap(); + assert_eq!(parsed, reparsed); + + assert!(output.contains(r#""multiValueQueryStringParameters":{"name":["me","me2"]}"#)); + assert!(output.contains(r#""queryStringParameters":{"name":"me"}"#)); + assert!(output.contains(r#""headername":["headerValue","headerValue2"]"#)); + assert!(output.contains(r#""headername":"headerValue2""#)); + } + #[test] #[cfg(feature = "apigw")] fn example_apigw_restapi_openapi_request() { @@ -872,6 +889,19 @@ mod test { assert_eq!(parsed, reparsed); } + #[test] + #[cfg(feature = "apigw")] + fn example_apigw_v2_request_multi_value_parameters() { + let data = include_bytes!("../../fixtures/example-apigw-v2-request-multi-value-parameters.json"); + let parsed: ApiGatewayV2httpRequest = serde_json::from_slice(data).unwrap(); + let output: String = serde_json::to_string(&parsed).unwrap(); + let reparsed: ApiGatewayV2httpRequest = serde_json::from_slice(output.as_bytes()).unwrap(); + assert_eq!(parsed, reparsed); + + assert!(output.contains(r#""header2":"value1,value2""#)); + assert!(output.contains(r#""queryStringParameters":{"Parameter1":"value1,value2"}"#)); + } + #[test] #[cfg(feature = "apigw")] fn example_apigw_v2_request_no_authorizer() { diff --git a/lambda-events/src/fixtures/example-apigw-request-multi-value-parameters.json b/lambda-events/src/fixtures/example-apigw-request-multi-value-parameters.json new file mode 100644 index 00000000..340b1df1 --- /dev/null +++ b/lambda-events/src/fixtures/example-apigw-request-multi-value-parameters.json @@ -0,0 +1,136 @@ +{ + "resource": "/{proxy+}", + "path": "/hello/world", + "httpMethod": "POST", + "headers": { + "Accept": "*/*", + "Accept-Encoding": "gzip, deflate", + "cache-control": "no-cache", + "CloudFront-Forwarded-Proto": "https", + "CloudFront-Is-Desktop-Viewer": "true", + "CloudFront-Is-Mobile-Viewer": "false", + "CloudFront-Is-SmartTV-Viewer": "false", + "CloudFront-Is-Tablet-Viewer": "false", + "CloudFront-Viewer-Country": "US", + "Content-Type": "application/json", + "headerName": "headerValue2", + "Host": "gy415nuibc.execute-api.us-east-1.amazonaws.com", + "Postman-Token": "9f583ef0-ed83-4a38-aef3-eb9ce3f7a57f", + "User-Agent": "PostmanRuntime/2.4.5", + "Via": "1.1 d98420743a69852491bbdea73f7680bd.cloudfront.net (CloudFront)", + "X-Amz-Cf-Id": "pn-PWIJc6thYnZm5P0NMgOUglL1DYtl0gdeJky8tqsg8iS_sgsKD1A==", + "X-Forwarded-For": "54.240.196.186, 54.182.214.83", + "X-Forwarded-Port": "443", + "X-Forwarded-Proto": "https" + }, + "multiValueHeaders": { + "Accept": [ + "*/*" + ], + "Accept-Encoding": [ + "gzip, deflate" + ], + "cache-control": [ + "no-cache" + ], + "CloudFront-Forwarded-Proto": [ + "https" + ], + "CloudFront-Is-Desktop-Viewer": [ + "true" + ], + "CloudFront-Is-Mobile-Viewer": [ + "false" + ], + 
"CloudFront-Is-SmartTV-Viewer": [ + "false" + ], + "CloudFront-Is-Tablet-Viewer": [ + "false" + ], + "CloudFront-Viewer-Country": [ + "US" + ], + "Content-Type": [ + "application/json" + ], + "headerName": [ + "headerValue", + "headerValue2" + ], + "Host": [ + "gy415nuibc.execute-api.us-east-1.amazonaws.com" + ], + "Postman-Token": [ + "9f583ef0-ed83-4a38-aef3-eb9ce3f7a57f" + ], + "User-Agent": [ + "PostmanRuntime/2.4.5" + ], + "Via": [ + "1.1 d98420743a69852491bbdea73f7680bd.cloudfront.net (CloudFront)" + ], + "X-Amz-Cf-Id": [ + "pn-PWIJc6thYnZm5P0NMgOUglL1DYtl0gdeJky8tqsg8iS_sgsKD1A==" + ], + "X-Forwarded-For": [ + "54.240.196.186, 54.182.214.83" + ], + "X-Forwarded-Port": [ + "443" + ], + "X-Forwarded-Proto": [ + "https" + ] + }, + "queryStringParameters": { + "name": "me" + }, + "multiValueQueryStringParameters": { + "name": [ + "me", "me2" + ] + }, + "pathParameters": { + "proxy": "hello/world" + }, + "stageVariables": { + "stageVariableName": "stageVariableValue" + }, + "requestContext": { + "accountId": "12345678912", + "resourceId": "roq9wj", + "path": "/hello/world", + "stage": "testStage", + "domainName": "gy415nuibc.execute-api.us-east-2.amazonaws.com", + "domainPrefix": "y0ne18dixk", + "requestId": "deef4878-7910-11e6-8f14-25afc3e9ae33", + "protocol": "HTTP/1.1", + "identity": { + "cognitoIdentityPoolId": "theCognitoIdentityPoolId", + "accountId": "theAccountId", + "cognitoIdentityId": "theCognitoIdentityId", + "caller": "theCaller", + "apiKey": "theApiKey", + "apiKeyId": "theApiKeyId", + "accessKey": "ANEXAMPLEOFACCESSKEY", + "sourceIp": "192.168.196.186", + "cognitoAuthenticationType": "theCognitoAuthenticationType", + "cognitoAuthenticationProvider": "theCognitoAuthenticationProvider", + "userArn": "theUserArn", + "userAgent": "PostmanRuntime/2.4.5", + "user": "theUser" + }, + "authorizer": { + "principalId": "admin", + "clientId": 1, + "clientName": "Exata" + }, + "resourcePath": "/{proxy+}", + "httpMethod": "POST", + "requestTime": "15/May/2020:06:01:09 +0000", + "requestTimeEpoch": 1589522469693, + "apiId": "gy415nuibc" + }, + "body": "{\r\n\t\"a\": 1\r\n}" +} \ No newline at end of file diff --git a/lambda-events/src/fixtures/example-apigw-v2-request-multi-value-parameters.json b/lambda-events/src/fixtures/example-apigw-v2-request-multi-value-parameters.json new file mode 100644 index 00000000..5094d6c2 --- /dev/null +++ b/lambda-events/src/fixtures/example-apigw-v2-request-multi-value-parameters.json @@ -0,0 +1,61 @@ +{ + "version": "2.0", + "routeKey": "$default", + "rawPath": "/my/path", + "rawQueryString": "Parameter1=value1&Parameter1=value2", + "cookies": [ + "cookie1", + "cookie2" + ], + "headers": { + "Header2": "value1,value2" + }, + "queryStringParameters": { + "Parameter1": "value1,value2" + }, + "pathParameters": { + "proxy": "hello/world" + }, + "requestContext": { + "routeKey": "$default", + "accountId": "123456789012", + "stage": "$default", + "requestId": "id", + "authorizer": { + "lambda": { + "key": "value" + } + }, + "apiId": "api-id", + "authentication": { + "clientCert": { + "clientCertPem": "-----BEGIN CERTIFICATE-----\nMIIEZTCCAk0CAQEwDQ...", + "issuerDN": "C=US,ST=Washington,L=Seattle,O=Amazon Web Services,OU=Security,CN=My Private CA", + "serialNumber": "1", + "subjectDN": "C=US,ST=Washington,L=Seattle,O=Amazon Web Services,OU=Security,CN=My Client", + "validity": { + "notAfter": "Aug 5 00:28:21 2120 GMT", + "notBefore": "Aug 29 00:28:21 2020 GMT" + } + } + }, + "domainName": "id.execute-api.us-east-1.amazonaws.com", + "domainPrefix": "id", + 
"time": "12/Mar/2020:19:03:58+0000", + "timeEpoch": 1583348638390, + "http": { + "method": "GET", + "path": "/my/path", + "protocol": "HTTP/1.1", + "sourceIp": "IP", + "userAgent": "agent" + } + }, + "stageVariables": { + "stageVariable1": "value1", + "stageVariable2": "value2" + }, + "body": "{\r\n\t\"a\": 1\r\n}", + "isBase64Encoded": false +} + From b7f198d4437470273a751f4c12ad153a5049149b Mon Sep 17 00:00:00 2001 From: sumagowda Date: Mon, 31 Jul 2023 08:26:55 -0700 Subject: [PATCH 09/27] Example that uses Axum and Diesel to connect to a PostgreSQL database with SSLmode ON (#682) Co-authored-by: Suma Gowda --- examples/http-axum-diesel-ssl/.DS_Store | Bin 0 -> 6148 bytes examples/http-axum-diesel-ssl/Cargo.toml | 28 +++ examples/http-axum-diesel-ssl/README.md | 13 ++ .../2023-04-07-231632_create_posts/down.sql | 2 + .../2023-04-07-231632_create_posts/up.sql | 7 + examples/http-axum-diesel-ssl/src/main.rs | 165 ++++++++++++++++++ 6 files changed, 215 insertions(+) create mode 100644 examples/http-axum-diesel-ssl/.DS_Store create mode 100755 examples/http-axum-diesel-ssl/Cargo.toml create mode 100755 examples/http-axum-diesel-ssl/README.md create mode 100755 examples/http-axum-diesel-ssl/migrations/2023-04-07-231632_create_posts/down.sql create mode 100755 examples/http-axum-diesel-ssl/migrations/2023-04-07-231632_create_posts/up.sql create mode 100755 examples/http-axum-diesel-ssl/src/main.rs diff --git a/examples/http-axum-diesel-ssl/.DS_Store b/examples/http-axum-diesel-ssl/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..06fbd0f6d1e6ab54258b97a348681bc810b1d5ae GIT binary patch literal 6148 zcmeHK%}T>S5Z-O8O({YS3Oz1(Etp6_#Y>3w1&ruHr6#6mFlI}V+CwSitS{t~_&m<+ zZop#BB6bFLzxmzGevtiPjB$S+yNub4F$)?ZN2Ni~-56?_WJHc*6tgIm5m+D5%*6gW z;J3F~!E*KxExvz$niQqueDYSiv$fr|x>n!13!da6D8q7I_+fs7)}@qbRO?}Ml}r~C zd-qIcWte2sTooko6jE-llPs1CU(U0*P_=;$SUsyZu?NfL$T=Pg*L7Ayu{;@#hT`bZ zU9Ec7-u}Vq#pF4C$>f`+lLOmIb_~|=4vJaLYmjHD%pSp0XV+PT!~iis3=jjG&44)< zto~-xK&vMPh=B$MaDNcc5IuvHMzwW7hu3F}cMwrP$F~HcFz6YqG(rS~>ry~n%FPpl z>vHf5ljj+%H0pB3)yy!CnYn(va5X#lg-U1K(?~rrKn$!i(AK7n=l?nUGL4V?^%Am( z0b<~vF~B?HVC=)9%-Q;5d3e?eX!p=iFt0)d1oX8_02sKB3{+6Z1?rIJ8LTwoDCk$^ PfOHX1giuEe`~m}CkhDpD literal 0 HcmV?d00001 diff --git a/examples/http-axum-diesel-ssl/Cargo.toml b/examples/http-axum-diesel-ssl/Cargo.toml new file mode 100755 index 00000000..cdcdd4ef --- /dev/null +++ b/examples/http-axum-diesel-ssl/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "http-axum-diesel" +version = "0.1.0" +edition = "2021" + + +# Use cargo-edit(https://github.com/killercup/cargo-edit#installation) +# to manage dependencies. +# Running `cargo add DEPENDENCY_NAME` will +# add the latest version of a dependency to the list, +# and it will keep the alphabetic ordering for you. 
+ +[dependencies] +axum = "0.6.4" +bb8 = "0.8.0" +diesel = "2.0.3" +diesel-async = { version = "0.2.1", features = ["postgres", "bb8"] } +lambda_http = { path = "../../lambda-http" } +lambda_runtime = { path = "../../lambda-runtime" } +serde = "1.0.159" +tracing = { version = "0.1", features = ["log"] } +tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt"] } +futures-util = "0.3.21" +rustls = "0.20.8" +rustls-native-certs = "0.6.2" +tokio = { version = "1.2.0", default-features = false, features = ["macros", "rt-multi-thread"] } +tokio-postgres = "0.7.7" +tokio-postgres-rustls = "0.9.0" \ No newline at end of file diff --git a/examples/http-axum-diesel-ssl/README.md b/examples/http-axum-diesel-ssl/README.md new file mode 100755 index 00000000..8b2330f5 --- /dev/null +++ b/examples/http-axum-diesel-ssl/README.md @@ -0,0 +1,13 @@ +# AWS Lambda Function example + +This example shows how to develop a REST API with Axum and Diesel that connects to a Postgres database. + +## Build & Deploy + +1. Install [cargo-lambda](https://github.com/cargo-lambda/cargo-lambda#installation) +2. Build the function with `cargo lambda build --release` +3. Deploy the function to AWS Lambda with `cargo lambda deploy --iam-role YOUR_ROLE` + +## Build for ARM 64 + +Build the function with `cargo lambda build --release --arm64` diff --git a/examples/http-axum-diesel-ssl/migrations/2023-04-07-231632_create_posts/down.sql b/examples/http-axum-diesel-ssl/migrations/2023-04-07-231632_create_posts/down.sql new file mode 100755 index 00000000..e00da655 --- /dev/null +++ b/examples/http-axum-diesel-ssl/migrations/2023-04-07-231632_create_posts/down.sql @@ -0,0 +1,2 @@ +-- This file should undo anything in `up.sql` +DROP TABLE posts \ No newline at end of file diff --git a/examples/http-axum-diesel-ssl/migrations/2023-04-07-231632_create_posts/up.sql b/examples/http-axum-diesel-ssl/migrations/2023-04-07-231632_create_posts/up.sql new file mode 100755 index 00000000..aa684de6 --- /dev/null +++ b/examples/http-axum-diesel-ssl/migrations/2023-04-07-231632_create_posts/up.sql @@ -0,0 +1,7 @@ +-- Your SQL goes here +CREATE TABLE posts ( + id SERIAL PRIMARY KEY, + title VARCHAR NOT NULL, + content TEXT NOT NULL, + published BOOLEAN NOT NULL DEFAULT FALSE +) \ No newline at end of file diff --git a/examples/http-axum-diesel-ssl/src/main.rs b/examples/http-axum-diesel-ssl/src/main.rs new file mode 100755 index 00000000..c2f6b933 --- /dev/null +++ b/examples/http-axum-diesel-ssl/src/main.rs @@ -0,0 +1,165 @@ +use diesel::{ConnectionError, ConnectionResult}; +use futures_util::future::BoxFuture; +use futures_util::FutureExt; +use std::time::Duration; + +use axum::{ + extract::{Path, State}, + response::Json, + routing::get, + Router, +}; +use bb8::Pool; +use diesel::prelude::*; +use diesel_async::{pooled_connection::AsyncDieselConnectionManager, AsyncPgConnection, RunQueryDsl}; +use lambda_http::{http::StatusCode, run, Error}; +use serde::{Deserialize, Serialize}; + +table! 
{
+    posts (id) {
+        id -> Integer,
+        title -> Text,
+        content -> Text,
+        published -> Bool,
+    }
+}
+
+#[derive(Default, Queryable, Selectable, Serialize)]
+struct Post {
+    id: i32,
+    title: String,
+    content: String,
+    published: bool,
+}
+
+#[derive(Deserialize, Insertable)]
+#[diesel(table_name = posts)]
+struct NewPost {
+    title: String,
+    content: String,
+    published: bool,
+}
+
+type AsyncPool = Pool<AsyncDieselConnectionManager<AsyncPgConnection>>;
+type ServerError = (StatusCode, String);
+
+async fn create_post(State(pool): State<AsyncPool>, Json(post): Json<NewPost>) -> Result<Json<Post>, ServerError> {
+    let mut conn = pool.get().await.map_err(internal_server_error)?;
+
+    let post = diesel::insert_into(posts::table)
+        .values(post)
+        .returning(Post::as_returning())
+        .get_result(&mut conn)
+        .await
+        .map_err(internal_server_error)?;
+
+    Ok(Json(post))
+}
+
+async fn list_posts(State(pool): State<AsyncPool>) -> Result<Json<Vec<Post>>, ServerError> {
+    let mut conn = pool.get().await.map_err(internal_server_error)?;
+
+    let posts = posts::table
+        .filter(posts::dsl::published.eq(true))
+        .load(&mut conn)
+        .await
+        .map_err(internal_server_error)?;
+
+    Ok(Json(posts))
+}
+
+async fn get_post(State(pool): State<AsyncPool>, Path(post_id): Path<i32>) -> Result<Json<Post>, ServerError> {
+    let mut conn = pool.get().await.map_err(internal_server_error)?;
+
+    let post = posts::table
+        .find(post_id)
+        .first(&mut conn)
+        .await
+        .map_err(internal_server_error)?;
+
+    Ok(Json(post))
+}
+
+async fn delete_post(State(pool): State<AsyncPool>, Path(post_id): Path<i32>) -> Result<(), ServerError> {
+    let mut conn = pool.get().await.map_err(internal_server_error)?;
+
+    diesel::delete(posts::table.find(post_id))
+        .execute(&mut conn)
+        .await
+        .map_err(internal_server_error)?;
+
+    Ok(())
+}
+
+fn internal_server_error<E: std::error::Error>(err: E) -> ServerError {
+    (StatusCode::INTERNAL_SERVER_ERROR, err.to_string())
+}
+
+#[tokio::main]
+async fn main() -> Result<(), Error> {
+    // required to enable CloudWatch error logging by the runtime
+    tracing_subscriber::fmt()
+        .with_max_level(tracing::Level::INFO)
+        // disable printing the name of the module in every log line.
+        .with_target(false)
+        // disabling time is handy because CloudWatch will add the ingestion time.
+        .without_time()
+        .init();
+
+    // Set up the database connection
+    // Format for DATABASE_URL=postgres://your_username:your_password@your_host:5432/your_db?sslmode=require
+    let db_url = std::env::var("DATABASE_URL").expect("Env var `DATABASE_URL` not set");
+
+    let mgr = AsyncDieselConnectionManager::<AsyncPgConnection>::new_with_setup(
+        db_url,
+        establish_connection,
+    );
+
+    let pool = Pool::builder()
+        .max_size(10)
+        .min_idle(Some(5))
+        .max_lifetime(Some(Duration::from_secs(60 * 60 * 24)))
+        .idle_timeout(Some(Duration::from_secs(60 * 2)))
+        .build(mgr)
+        .await?;
+
+    // Set up the API routes
+    let posts_api = Router::new()
+        .route("/", get(list_posts).post(create_post))
+        .route("/:id", get(get_post).delete(delete_post))
+        .route("/get", get(list_posts))
+        .route("/get/:id", get(get_post));
+    let app = Router::new().nest("/posts", posts_api).with_state(pool);
+
+    run(app).await
+}
+
+
+fn establish_connection(config: &str) -> BoxFuture<ConnectionResult<AsyncPgConnection>> {
+    let fut = async {
+        // We first set up the way we want rustls to work.
+        let rustls_config = rustls::ClientConfig::builder()
+            .with_safe_defaults()
+            .with_root_certificates(root_certs())
+            .with_no_client_auth();
+        let tls = tokio_postgres_rustls::MakeRustlsConnect::new(rustls_config);
+        let (client, conn) = tokio_postgres::connect(config, tls)
+            .await
+            .map_err(|e| ConnectionError::BadConnection(e.to_string()))?;
+        tokio::spawn(async move {
+            if let Err(e) = conn.await {
+                eprintln!("Database connection: {e}");
+            }
+        });
+        AsyncPgConnection::try_from(client).await
+    };
+    fut.boxed()
+}
+
+fn root_certs() -> rustls::RootCertStore {
+    let mut roots = rustls::RootCertStore::empty();
+    let certs = rustls_native_certs::load_native_certs().expect("Certs not loadable!");
+    let certs: Vec<_> = certs.into_iter().map(|cert| cert.0).collect();
+    roots.add_parsable_certificates(&certs);
+    roots
+}

From 15dbde0a9c77d741e58578662a8cd36987f915a5 Mon Sep 17 00:00:00 2001
From: David Calavera
Date: Mon, 28 Aug 2023 17:59:24 -0700
Subject: [PATCH 10/27] Add history section to the readme. (#687)

Signed-off-by: David Calavera
---
 lambda-events/README.md | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/lambda-events/README.md b/lambda-events/README.md
index 0813c63a..8446ce55 100644
--- a/lambda-events/README.md
+++ b/lambda-events/README.md
@@ -27,6 +27,12 @@ This crate divides all Lambda Events into features named after the service that
 cargo add aws_lambda_events --no-default-features --features apigw,alb
 ```
 
+## History
+
+The AWS Lambda Events crate was created by [Christian Legnitto](https://github.com/LegNeato). Without all his work and dedication, this project would not have been possible.
+
+In 2023, the AWS Lambda Events crate was moved into this repository to continue its support for all AWS customers that use Rust on AWS Lambda.
+
 [//]: # 'badges'
 [crate-image]: https://img.shields.io/crates/v/aws_lambda_events.svg
 [crate-link]: https://crates.io/crates/aws_lambda_events

From 1781f890a3e63fd73b5a00b8d62bce057a19e65a Mon Sep 17 00:00:00 2001
From: FalkWoldmann <52786457+FalkWoldmann@users.noreply.github.com>
Date: Wed, 30 Aug 2023 18:21:14 +0200
Subject: [PATCH 11/27] Make Kafka header values i8 instead of u8 (#689)

---
 lambda-events/src/event/kafka/mod.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lambda-events/src/event/kafka/mod.rs b/lambda-events/src/event/kafka/mod.rs
index 07299859..8cd92bdf 100644
--- a/lambda-events/src/event/kafka/mod.rs
+++ b/lambda-events/src/event/kafka/mod.rs
@@ -28,7 +28,7 @@ pub struct KafkaRecord {
     pub timestamp_type: Option<String>,
     pub key: Option<String>,
     pub value: Option<String>,
-    pub headers: Vec<HashMap<String, Vec<u8>>>,
+    pub headers: Vec<HashMap<String, Vec<i8>>>,
 }
 
 #[cfg(test)]

From e6ac88fe43edf1fcb59ad0c91487f23f6b267e88 Mon Sep 17 00:00:00 2001
From: David Calavera
Date: Mon, 4 Sep 2023 19:59:44 -0700
Subject: [PATCH 12/27] Fix streaming prelude serialization (#692)

* Fix streaming prelude serialization

HTTP headers can be multi-value, but the current implementation ignores
this fact and only serializes the first value for each header. This change
uses http-serde to serialize the prelude correctly.

Signed-off-by: David Calavera

* Update MSRV

Some dependencies have dropped support for 1.62 already.
Signed-off-by: David Calavera * Remove unwrap Signed-off-by: David Calavera --------- Signed-off-by: David Calavera --- .github/workflows/build-events.yml | 2 +- .github/workflows/build-extension.yml | 2 +- .github/workflows/build-runtime.yml | 2 +- README.md | 2 +- lambda-runtime/Cargo.toml | 1 + lambda-runtime/src/streaming.rs | 68 ++++++++++++++------------- 6 files changed, 40 insertions(+), 37 deletions(-) diff --git a/.github/workflows/build-events.yml b/.github/workflows/build-events.yml index 3a56e597..4e5fb34d 100644 --- a/.github/workflows/build-events.yml +++ b/.github/workflows/build-events.yml @@ -14,7 +14,7 @@ jobs: strategy: matrix: toolchain: - - "1.62.0" # Current MSRV + - "1.64.0" # Current MSRV - stable env: RUST_BACKTRACE: 1 diff --git a/.github/workflows/build-extension.yml b/.github/workflows/build-extension.yml index 0905f289..7365bc64 100644 --- a/.github/workflows/build-extension.yml +++ b/.github/workflows/build-extension.yml @@ -18,7 +18,7 @@ jobs: strategy: matrix: toolchain: - - "1.62.0" # Current MSRV + - "1.64.0" # Current MSRV - stable env: RUST_BACKTRACE: 1 diff --git a/.github/workflows/build-runtime.yml b/.github/workflows/build-runtime.yml index 68913c95..9657a840 100644 --- a/.github/workflows/build-runtime.yml +++ b/.github/workflows/build-runtime.yml @@ -19,7 +19,7 @@ jobs: strategy: matrix: toolchain: - - "1.62.0" # Current MSRV + - "1.64.0" # Current MSRV - stable env: RUST_BACKTRACE: 1 diff --git a/README.md b/README.md index 12dbf523..5f6f899a 100644 --- a/README.md +++ b/README.md @@ -440,7 +440,7 @@ This will make your function compile much faster. ## Supported Rust Versions (MSRV) -The AWS Lambda Rust Runtime requires a minimum of Rust 1.62, and is not guaranteed to build on compiler versions earlier than that. +The AWS Lambda Rust Runtime requires a minimum of Rust 1.64, and is not guaranteed to build on compiler versions earlier than that. 
## Security diff --git a/lambda-runtime/Cargo.toml b/lambda-runtime/Cargo.toml index 1de6f361..197265ab 100644 --- a/lambda-runtime/Cargo.toml +++ b/lambda-runtime/Cargo.toml @@ -42,3 +42,4 @@ tower = { version = "0.4", features = ["util"] } tokio-stream = "0.1.2" lambda_runtime_api_client = { version = "0.8", path = "../lambda-runtime-api-client" } serde_path_to_error = "0.1.11" +http-serde = "1.1.3" diff --git a/lambda-runtime/src/streaming.rs b/lambda-runtime/src/streaming.rs index e541f3d6..5ea369ad 100644 --- a/lambda-runtime/src/streaming.rs +++ b/lambda-runtime/src/streaming.rs @@ -5,13 +5,11 @@ use crate::{ use bytes::Bytes; use futures::FutureExt; use http::header::{CONTENT_TYPE, SET_COOKIE}; -use http::{Method, Request, Response, Uri}; +use http::{HeaderMap, Method, Request, Response, StatusCode, Uri}; use hyper::body::HttpBody; use hyper::{client::connect::Connection, Body}; use lambda_runtime_api_client::{build_request, Client}; -use serde::Deserialize; -use serde_json::json; -use std::collections::HashMap; +use serde::{Deserialize, Serialize}; use std::str::FromStr; use std::{ env, @@ -203,6 +201,16 @@ pub(crate) struct EventCompletionStreamingRequest<'a, B> { pub(crate) body: Response, } +#[derive(Debug, Serialize)] +#[serde(rename_all = "camelCase")] +struct MetadataPrelude { + #[serde(serialize_with = "http_serde::status_code::serialize")] + status_code: StatusCode, + #[serde(serialize_with = "http_serde::header_map::serialize")] + headers: HeaderMap, + cookies: Vec, +} + impl<'a, B> IntoRequest for EventCompletionStreamingRequest<'a, B> where B: HttpBody + Unpin + Send + 'static, @@ -216,45 +224,39 @@ where let (parts, mut body) = self.body.into_parts(); let mut builder = build_request().method(Method::POST).uri(uri); - let headers = builder.headers_mut().unwrap(); + let req_headers = builder.headers_mut().unwrap(); - headers.insert("Transfer-Encoding", "chunked".parse()?); - headers.insert("Lambda-Runtime-Function-Response-Mode", "streaming".parse()?); - headers.insert( + req_headers.insert("Transfer-Encoding", "chunked".parse()?); + req_headers.insert("Lambda-Runtime-Function-Response-Mode", "streaming".parse()?); + req_headers.insert( "Content-Type", "application/vnd.awslambda.http-integration-response".parse()?, ); - let (mut tx, rx) = Body::channel(); + let mut prelude_headers = parts.headers; + // default Content-Type + prelude_headers + .entry(CONTENT_TYPE) + .or_insert("application/octet-stream".parse()?); - tokio::spawn(async move { - let mut header_map = parts.headers; - // default Content-Type - header_map - .entry(CONTENT_TYPE) - .or_insert("application/octet-stream".parse().unwrap()); + let cookies = prelude_headers.get_all(SET_COOKIE); + let cookies = cookies + .iter() + .map(|c| String::from_utf8_lossy(c.as_bytes()).to_string()) + .collect::>(); + prelude_headers.remove(SET_COOKIE); - let cookies = header_map.get_all(SET_COOKIE); - let cookies = cookies - .iter() - .map(|c| String::from_utf8_lossy(c.as_bytes()).to_string()) - .collect::>(); + let metadata_prelude = serde_json::to_string(&MetadataPrelude { + status_code: parts.status, + headers: prelude_headers, + cookies, + })?; - let headers = header_map - .iter() - .filter(|(k, _)| *k != SET_COOKIE) - .map(|(k, v)| (k.as_str(), String::from_utf8_lossy(v.as_bytes()).to_string())) - .collect::>(); + trace!(?metadata_prelude); - let metadata_prelude = json!({ - "statusCode": parts.status.as_u16(), - "headers": headers, - "cookies": cookies, - }) - .to_string(); - - trace!("metadata_prelude: {}", 
metadata_prelude); + let (mut tx, rx) = Body::channel(); + tokio::spawn(async move { tx.send_data(metadata_prelude.into()).await.unwrap(); tx.send_data("\u{0}".repeat(8).into()).await.unwrap(); From d1d58235a48511448c47f1314a75ed397cbf94d2 Mon Sep 17 00:00:00 2001 From: David Calavera Date: Tue, 5 Sep 2023 07:54:30 -0700 Subject: [PATCH 13/27] Release runtime and events new versions (#693) Signed-off-by: David Calavera --- lambda-events/Cargo.toml | 2 +- lambda-runtime/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lambda-events/Cargo.toml b/lambda-events/Cargo.toml index 27b577cb..a21973b2 100644 --- a/lambda-events/Cargo.toml +++ b/lambda-events/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "aws_lambda_events" -version = "0.11.0" +version = "0.11.1" description = "AWS Lambda event definitions" authors = [ "Christian Legnitto ", diff --git a/lambda-runtime/Cargo.toml b/lambda-runtime/Cargo.toml index 197265ab..1137f845 100644 --- a/lambda-runtime/Cargo.toml +++ b/lambda-runtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lambda_runtime" -version = "0.8.1" +version = "0.8.2" authors = [ "David Calavera ", "Harold Sun ", From e2d51ad4e3bb2c04162049eaf4b50cbdff0fe06c Mon Sep 17 00:00:00 2001 From: Chris Leach <7308018+chris-leach@users.noreply.github.com> Date: Mon, 11 Sep 2023 19:26:28 +0100 Subject: [PATCH 14/27] Add event definitions for CloudFormation custom resources (#695) * Add event definitions for CloudFormation custom resources * Remove let else statements * Fix codebuild_time for chrono ^0.4.29 by parsing via NaiveDateTime --------- Co-authored-by: Chris Leach --- lambda-events/Cargo.toml | 2 + .../src/custom_serde/codebuild_time.rs | 17 ++- lambda-events/src/event/cloudformation/mod.rs | 143 ++++++++++++++++++ lambda-events/src/event/mod.rs | 4 + ...mation-custom-resource-create-request.json | 14 ++ ...mation-custom-resource-delete-request.json | 15 ++ ...mation-custom-resource-update-request.json | 20 +++ lambda-events/src/lib.rs | 5 + 8 files changed, 212 insertions(+), 8 deletions(-) create mode 100644 lambda-events/src/event/cloudformation/mod.rs create mode 100644 lambda-events/src/fixtures/example-cloudformation-custom-resource-create-request.json create mode 100644 lambda-events/src/fixtures/example-cloudformation-custom-resource-delete-request.json create mode 100644 lambda-events/src/fixtures/example-cloudformation-custom-resource-update-request.json diff --git a/lambda-events/Cargo.toml b/lambda-events/Cargo.toml index a21973b2..e401bfff 100644 --- a/lambda-events/Cargo.toml +++ b/lambda-events/Cargo.toml @@ -42,6 +42,7 @@ default = [ "autoscaling", "chime_bot", "clientvpn", + "cloudformation", "cloudwatch_events", "cloudwatch_logs", "code_commit", @@ -81,6 +82,7 @@ appsync = [] autoscaling = ["chrono"] chime_bot = ["chrono"] clientvpn = [] +cloudformation = [] cloudwatch_events = ["chrono"] cloudwatch_logs = ["flate2"] code_commit = ["chrono"] diff --git a/lambda-events/src/custom_serde/codebuild_time.rs b/lambda-events/src/custom_serde/codebuild_time.rs index 94d0e2f5..bd132b23 100644 --- a/lambda-events/src/custom_serde/codebuild_time.rs +++ b/lambda-events/src/custom_serde/codebuild_time.rs @@ -1,4 +1,4 @@ -use chrono::{DateTime, TimeZone, Utc}; +use chrono::{DateTime, NaiveDateTime, Utc}; use serde::ser::Serializer; use serde::{ de::{Deserializer, Error as DeError, Visitor}, @@ -18,7 +18,8 @@ impl<'de> Visitor<'de> for TimeVisitor { } fn visit_str(self, val: &str) -> Result { - Utc.datetime_from_str(val, 
CODEBUILD_TIME_FORMAT) + NaiveDateTime::parse_from_str(val, CODEBUILD_TIME_FORMAT) + .map(|naive| naive.and_utc()) .map_err(|e| DeError::custom(format!("Parse error {} for {}", e, val))) } } @@ -81,9 +82,9 @@ mod tests { "date": "Sep 1, 2017 4:12:29 PM" }); - let expected = Utc - .datetime_from_str("Sep 1, 2017 4:12:29 PM", CODEBUILD_TIME_FORMAT) - .unwrap(); + let expected = NaiveDateTime::parse_from_str("Sep 1, 2017 4:12:29 PM", CODEBUILD_TIME_FORMAT) + .unwrap() + .and_utc(); let decoded: Test = serde_json::from_value(data).unwrap(); assert_eq!(expected, decoded.date); } @@ -99,9 +100,9 @@ mod tests { "date": "Sep 1, 2017 4:12:29 PM" }); - let expected = Utc - .datetime_from_str("Sep 1, 2017 4:12:29 PM", CODEBUILD_TIME_FORMAT) - .unwrap(); + let expected = NaiveDateTime::parse_from_str("Sep 1, 2017 4:12:29 PM", CODEBUILD_TIME_FORMAT) + .unwrap() + .and_utc(); let decoded: Test = serde_json::from_value(data).unwrap(); assert_eq!(Some(expected), decoded.date); } diff --git a/lambda-events/src/event/cloudformation/mod.rs b/lambda-events/src/event/cloudformation/mod.rs new file mode 100644 index 00000000..e2d51745 --- /dev/null +++ b/lambda-events/src/event/cloudformation/mod.rs @@ -0,0 +1,143 @@ +use serde::{de::DeserializeOwned, Deserialize, Serialize}; +use serde_json::Value; + +#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] +#[serde(tag = "RequestType")] +pub enum CloudFormationCustomResourceRequest +where + P1: DeserializeOwned + Serialize, + P2: DeserializeOwned + Serialize, +{ + #[serde(bound = "")] + Create(CreateRequest), + #[serde(bound = "")] + Update(UpdateRequest), + #[serde(bound = "")] + Delete(DeleteRequest), +} + +#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] +#[serde(rename_all = "PascalCase")] +#[serde(deny_unknown_fields)] +pub struct CreateRequest +where + P2: DeserializeOwned + Serialize, +{ + #[serde(default)] + pub service_token: Option, + pub request_id: String, + #[serde(rename = "ResponseURL")] + pub response_url: String, + pub stack_id: String, + pub resource_type: String, + pub logical_resource_id: String, + #[serde(bound = "")] + pub resource_properties: P2, +} + +#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] +#[serde(rename_all = "PascalCase")] +#[serde(deny_unknown_fields)] +pub struct UpdateRequest +where + P1: DeserializeOwned + Serialize, + P2: DeserializeOwned + Serialize, +{ + #[serde(default)] + pub service_token: Option, + pub request_id: String, + #[serde(rename = "ResponseURL")] + pub response_url: String, + pub stack_id: String, + pub resource_type: String, + pub logical_resource_id: String, + pub physical_resource_id: String, + #[serde(bound = "")] + pub resource_properties: P2, + #[serde(bound = "")] + pub old_resource_properties: P1, +} + +#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] +#[serde(rename_all = "PascalCase")] +#[serde(deny_unknown_fields)] +pub struct DeleteRequest +where + P2: DeserializeOwned + Serialize, +{ + #[serde(default)] + pub service_token: Option, + pub request_id: String, + #[serde(rename = "ResponseURL")] + pub response_url: String, + pub stack_id: String, + pub resource_type: String, + pub logical_resource_id: String, + pub physical_resource_id: String, + #[serde(bound = "")] + pub resource_properties: P2, +} + +#[cfg(test)] +mod test { + use std::collections::HashMap; + + use super::CloudFormationCustomResourceRequest::*; + use super::*; + + #[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] + #[serde(rename_all = "PascalCase")] + #[serde(deny_unknown_fields)] + 
struct TestProperties { + key_1: String, + key_2: Vec<String>, + key_3: HashMap<String, String>, + } + + type TestRequest = CloudFormationCustomResourceRequest<TestProperties, TestProperties>; + + #[test] + fn example_cloudformation_custom_resource_create_request() { + let data = include_bytes!("../../fixtures/example-cloudformation-custom-resource-create-request.json"); + let parsed: TestRequest = serde_json::from_slice(data).unwrap(); + + match parsed { + Create(_) => (), + _ => panic!("expected Create request"), + } + + let output: String = serde_json::to_string(&parsed).unwrap(); + let reparsed: TestRequest = serde_json::from_slice(output.as_bytes()).unwrap(); + assert_eq!(parsed, reparsed); + } + + #[test] + fn example_cloudformation_custom_resource_update_request() { + let data = include_bytes!("../../fixtures/example-cloudformation-custom-resource-update-request.json"); + let parsed: TestRequest = serde_json::from_slice(data).unwrap(); + + match parsed { + Update(_) => (), + _ => panic!("expected Update request"), + } + + let output: String = serde_json::to_string(&parsed).unwrap(); + let reparsed: TestRequest = serde_json::from_slice(output.as_bytes()).unwrap(); + assert_eq!(parsed, reparsed); + } + + #[test] + fn example_cloudformation_custom_resource_delete_request() { + let data = include_bytes!("../../fixtures/example-cloudformation-custom-resource-delete-request.json"); + let parsed: TestRequest = serde_json::from_slice(data).unwrap(); + + match parsed { + Delete(_) => (), + _ => panic!("expected Delete request"), + } + + let output: String = serde_json::to_string(&parsed).unwrap(); + let reparsed: TestRequest = serde_json::from_slice(output.as_bytes()).unwrap(); + assert_eq!(parsed, reparsed); + } +} diff --git a/lambda-events/src/event/mod.rs b/lambda-events/src/event/mod.rs index 1aa56697..4ce71dfc 100644 --- a/lambda-events/src/event/mod.rs +++ b/lambda-events/src/event/mod.rs @@ -25,6 +25,10 @@ pub mod chime_bot; #[cfg(feature = "clientvpn")] pub mod clientvpn; /// AWS Lambda event definitions for cloudformation. 
+#[cfg(feature = "cloudformation")] +pub mod cloudformation; + /// CloudWatch Events payload #[cfg(feature = "cloudwatch_events")] pub mod cloudwatch_events; diff --git a/lambda-events/src/fixtures/example-cloudformation-custom-resource-create-request.json b/lambda-events/src/fixtures/example-cloudformation-custom-resource-create-request.json new file mode 100644 index 00000000..d35dd6f7 --- /dev/null +++ b/lambda-events/src/fixtures/example-cloudformation-custom-resource-create-request.json @@ -0,0 +1,14 @@ +{ + "ServiceToken": "arn:aws:lambda:eu-west-2:123456789012:function:custom-resource-handler", + "RequestType" : "Create", + "RequestId" : "82304eb2-bdda-469f-a33b-a3f1406d0a52", + "ResponseURL": "https://cr-response-bucket.s3.us-east-1.amazonaws.com/cr-response-key?sig-params=sig-values", + "StackId" : "arn:aws:cloudformation:us-east-1:123456789012:stack/stack-name/16580499-7622-4a9c-b32f-4eba35da93da", + "ResourceType" : "Custom::MyCustomResourceType", + "LogicalResourceId" : "CustomResource", + "ResourceProperties" : { + "Key1" : "string", + "Key2" : [ "list" ], + "Key3" : { "Key4" : "map" } + } +} diff --git a/lambda-events/src/fixtures/example-cloudformation-custom-resource-delete-request.json b/lambda-events/src/fixtures/example-cloudformation-custom-resource-delete-request.json new file mode 100644 index 00000000..bd788c99 --- /dev/null +++ b/lambda-events/src/fixtures/example-cloudformation-custom-resource-delete-request.json @@ -0,0 +1,15 @@ +{ + "ServiceToken": "arn:aws:lambda:eu-west-2:123456789012:function:custom-resource-handler", + "RequestType" : "Delete", + "RequestId" : "ef70561d-d4ba-42a4-801b-33ad88dafc37", + "ResponseURL": "https://cr-response-bucket.s3.us-east-1.amazonaws.com/cr-response-key?sig-params=sig-values", + "StackId" : "arn:aws:cloudformation:us-east-1:123456789012:stack/stack-name/16580499-7622-4a9c-b32f-4eba35da93da", + "ResourceType" : "Custom::MyCustomResourceType", + "LogicalResourceId" : "CustomResource", + "PhysicalResourceId" : "custom-resource-f4bd5382-3de3-4caf-b7ad-1be06b899647", + "ResourceProperties" : { + "Key1" : "string", + "Key2" : [ "list" ], + "Key3" : { "Key4" : "map" } + } +} diff --git a/lambda-events/src/fixtures/example-cloudformation-custom-resource-update-request.json b/lambda-events/src/fixtures/example-cloudformation-custom-resource-update-request.json new file mode 100644 index 00000000..4fc4378a --- /dev/null +++ b/lambda-events/src/fixtures/example-cloudformation-custom-resource-update-request.json @@ -0,0 +1,20 @@ +{ + "ServiceToken": "arn:aws:lambda:eu-west-2:123456789012:function:custom-resource-handler", + "RequestType" : "Update", + "RequestId" : "49347ca5-c603-44e5-a34b-10cf1854a887", + "ResponseURL": "https://cr-response-bucket.s3.us-east-1.amazonaws.com/cr-response-key?sig-params=sig-values", + "StackId" : "arn:aws:cloudformation:us-east-1:123456789012:stack/stack-name/16580499-7622-4a9c-b32f-4eba35da93da", + "ResourceType" : "Custom::MyCustomResourceType", + "LogicalResourceId" : "CustomResource", + "PhysicalResourceId" : "custom-resource-f4bd5382-3de3-4caf-b7ad-1be06b899647", + "ResourceProperties" : { + "Key1" : "new-string", + "Key2" : [ "new-list" ], + "Key3" : { "Key4" : "new-map" } + }, + "OldResourceProperties" : { + "Key1" : "string", + "Key2" : [ "list" ], + "Key3" : { "Key4" : "map" } + } +} diff --git a/lambda-events/src/lib.rs b/lambda-events/src/lib.rs index 564debd7..7402a8f4 100644 --- a/lambda-events/src/lib.rs +++ b/lambda-events/src/lib.rs @@ -20,6 +20,7 @@ pub use event::activemq; /// AWS Lambda 
event definitions for alb. #[cfg(feature = "alb")] pub use event::alb; + /// AWS Lambda event definitions for apigw. #[cfg(feature = "apigw")] pub use event::apigw; @@ -40,6 +41,10 @@ pub use event::chime_bot; #[cfg(feature = "clientvpn")] pub use event::clientvpn; +/// AWS Lambda event definitions for cloudformation +#[cfg(feature = "cloudformation")] +pub use event::cloudformation; + /// CloudWatch Events payload #[cfg(feature = "cloudwatch_events")] pub use event::cloudwatch_events; From cf72bb05c59c3ca094d169e924774f17972f4b2d Mon Sep 17 00:00:00 2001 From: Harold Sun Date: Fri, 15 Sep 2023 10:46:17 +0800 Subject: [PATCH 15/27] Refactor Lambda response streaming. (#696) * Refactor Lambda response streaming. Remove the separate streaming.rs from lambda-runtime crate. Merge into the `run` method. Added FunctionResponse enum to capture both buffered response and streaming response. Added IntoFunctionResponse trait to convert `Serialize` response into FunctionResponse::BufferedResponse, and convert `Stream` response into FunctionResponse::StreamingResponse. Existing handler functions should continue to work. Improved error handling in response streaming. Return trailers to report errors instead of panic. * Add comments for reporting midstream errors using error trailers * Remove "pub" from internal run method --- examples/basic-streaming-response/README.md | 2 +- examples/basic-streaming-response/src/main.rs | 18 +- lambda-http/Cargo.toml | 1 + lambda-http/src/streaming.rs | 67 ++++- lambda-runtime/Cargo.toml | 2 + lambda-runtime/src/lib.rs | 29 +- lambda-runtime/src/requests.rs | 94 +++++- lambda-runtime/src/streaming.rs | 272 ------------------ lambda-runtime/src/types.rs | 89 +++++- 9 files changed, 266 insertions(+), 308 deletions(-) delete mode 100644 lambda-runtime/src/streaming.rs diff --git a/examples/basic-streaming-response/README.md b/examples/basic-streaming-response/README.md index 3b68f518..ac744a33 100644 --- a/examples/basic-streaming-response/README.md +++ b/examples/basic-streaming-response/README.md @@ -6,7 +6,7 @@ 2. Build the function with `cargo lambda build --release` 3. Deploy the function to AWS Lambda with `cargo lambda deploy --enable-function-url --iam-role YOUR_ROLE` 4. Enable Lambda streaming response on Lambda console: change the function url's invoke mode to `RESPONSE_STREAM` -5. Verify the function works: `curl `. The results should be streamed back with 0.5 second pause between each word. +5. Verify the function works: `curl -v -N `. The results should be streamed back with 0.5 second pause between each word. 
## Build for ARM 64 diff --git a/examples/basic-streaming-response/src/main.rs b/examples/basic-streaming-response/src/main.rs index d90ebd33..9d505206 100644 --- a/examples/basic-streaming-response/src/main.rs +++ b/examples/basic-streaming-response/src/main.rs @@ -1,9 +1,9 @@ -use hyper::{body::Body, Response}; -use lambda_runtime::{service_fn, Error, LambdaEvent}; +use hyper::body::Body; +use lambda_runtime::{service_fn, Error, LambdaEvent, StreamResponse}; use serde_json::Value; use std::{thread, time::Duration}; -async fn func(_event: LambdaEvent<Value>) -> Result<Response<Body>, Error> { +async fn func(_event: LambdaEvent<Value>) -> Result<StreamResponse<Body>, Error> { let messages = vec!["Hello", "world", "from", "Lambda!"]; let (mut tx, rx) = Body::channel(); @@ -15,12 +15,10 @@ async fn func(_event: LambdaEvent<Value>) -> Result<Response<Body>, Error> { } }); - let resp = Response::builder() - .header("content-type", "text/html") - .header("CustomHeader", "outerspace") - .body(rx)?; - - Ok(resp) + Ok(StreamResponse { + metadata_prelude: Default::default(), + stream: rx, + }) } #[tokio::main] @@ -34,6 +32,6 @@ async fn main() -> Result<(), Error> { .without_time() .init(); - lambda_runtime::run_with_streaming_response(service_fn(func)).await?; + lambda_runtime::run(service_fn(func)).await?; Ok(()) } diff --git a/lambda-http/Cargo.toml b/lambda-http/Cargo.toml index be111092..ea4a5fba 100644 --- a/lambda-http/Cargo.toml +++ b/lambda-http/Cargo.toml @@ -33,6 +33,7 @@ lambda_runtime = { path = "../lambda-runtime", version = "0.8" } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" serde_urlencoded = "0.7" +tokio-stream = "0.1.2" mime = "0.3" encoding_rs = "0.8" url = "2.2" diff --git a/lambda-http/src/streaming.rs b/lambda-http/src/streaming.rs index 9a27d915..a59cf700 100644 --- a/lambda-http/src/streaming.rs +++ b/lambda-http/src/streaming.rs @@ -1,3 +1,4 @@ +use crate::http::header::SET_COOKIE; use crate::tower::ServiceBuilder; use crate::Request; use crate::{request::LambdaRequest, RequestExt}; @@ -5,9 +6,14 @@ pub use aws_lambda_events::encodings::Body as LambdaEventBody; use bytes::Bytes; pub use http::{self, Response}; use http_body::Body; -use lambda_runtime::LambdaEvent; -pub use lambda_runtime::{self, service_fn, tower, Context, Error, Service}; +pub use lambda_runtime::{ + self, service_fn, tower, tower::ServiceExt, Error, FunctionResponse, LambdaEvent, MetadataPrelude, Service, + StreamResponse, +}; use std::fmt::{Debug, Display}; +use std::pin::Pin; +use std::task::{Context, Poll}; +use tokio_stream::Stream; /// Starts the Lambda Rust runtime and stream response back [Configure Lambda /// Streaming Response](https://docs.aws.amazon.com/lambda/latest/dg/configuration-response-streaming.html). 
@@ -28,7 +34,60 @@ where let event: Request = req.payload.into(); event.with_lambda_context(req.context) }) - .service(handler); + .service(handler) + .map_response(|res| { + let (parts, body) = res.into_parts(); - lambda_runtime::run_with_streaming_response(svc).await + let mut prelude_headers = parts.headers; + + let cookies = prelude_headers.get_all(SET_COOKIE); + let cookies = cookies + .iter() + .map(|c| String::from_utf8_lossy(c.as_bytes()).to_string()) + .collect::>(); + + prelude_headers.remove(SET_COOKIE); + + let metadata_prelude = MetadataPrelude { + headers: prelude_headers, + status_code: parts.status, + cookies, + }; + + StreamResponse { + metadata_prelude, + stream: BodyStream { body }, + } + }); + + lambda_runtime::run(svc).await +} + +pub struct BodyStream { + pub(crate) body: B, +} + +impl BodyStream +where + B: Body + Unpin + Send + 'static, + B::Data: Into + Send, + B::Error: Into + Send + Debug, +{ + fn project(self: Pin<&mut Self>) -> Pin<&mut B> { + unsafe { self.map_unchecked_mut(|s| &mut s.body) } + } +} + +impl Stream for BodyStream +where + B: Body + Unpin + Send + 'static, + B::Data: Into + Send, + B::Error: Into + Send + Debug, +{ + type Item = Result; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let body = self.project(); + body.poll_data(cx) + } } diff --git a/lambda-runtime/Cargo.toml b/lambda-runtime/Cargo.toml index 1137f845..9202b1c1 100644 --- a/lambda-runtime/Cargo.toml +++ b/lambda-runtime/Cargo.toml @@ -43,3 +43,5 @@ tokio-stream = "0.1.2" lambda_runtime_api_client = { version = "0.8", path = "../lambda-runtime-api-client" } serde_path_to_error = "0.1.11" http-serde = "1.1.3" +base64 = "0.20.0" +http-body = "0.4" diff --git a/lambda-runtime/src/lib.rs b/lambda-runtime/src/lib.rs index e3ffd49d..18b1066e 100644 --- a/lambda-runtime/src/lib.rs +++ b/lambda-runtime/src/lib.rs @@ -7,6 +7,7 @@ //! Create a type that conforms to the [`tower::Service`] trait. This type can //! then be passed to the the `lambda_runtime::run` function, which launches //! and runs the Lambda runtime. +use bytes::Bytes; use futures::FutureExt; use hyper::{ client::{connect::Connection, HttpConnector}, @@ -20,6 +21,7 @@ use std::{ env, fmt::{self, Debug, Display}, future::Future, + marker::PhantomData, panic, }; use tokio::io::{AsyncRead, AsyncWrite}; @@ -35,11 +37,8 @@ mod simulated; /// Types available to a Lambda function. 
mod types; -mod streaming; -pub use streaming::run_with_streaming_response; - use requests::{EventCompletionRequest, EventErrorRequest, IntoRequest, NextEventRequest}; -pub use types::{Context, LambdaEvent}; +pub use types::{Context, FunctionResponse, IntoFunctionResponse, LambdaEvent, MetadataPrelude, StreamResponse}; /// Error type that lambdas may result in pub type Error = lambda_runtime_api_client::Error; @@ -97,17 +96,21 @@ where C::Error: Into>, C::Response: AsyncRead + AsyncWrite + Connection + Unpin + Send + 'static, { - async fn run( + async fn run( &self, incoming: impl Stream, Error>> + Send, mut handler: F, ) -> Result<(), Error> where F: Service>, - F::Future: Future>, + F::Future: Future>, F::Error: fmt::Debug + fmt::Display, A: for<'de> Deserialize<'de>, + R: IntoFunctionResponse, B: Serialize, + S: Stream> + Unpin + Send + 'static, + D: Into + Send, + E: Into + Send + Debug, { let client = &self.client; tokio::pin!(incoming); @@ -177,6 +180,8 @@ where EventCompletionRequest { request_id, body: response, + _unused_b: PhantomData, + _unused_s: PhantomData, } .into_req() } @@ -243,13 +248,17 @@ where /// Ok(event.payload) /// } /// ``` -pub async fn run(handler: F) -> Result<(), Error> +pub async fn run(handler: F) -> Result<(), Error> where F: Service>, - F::Future: Future>, + F::Future: Future>, F::Error: fmt::Debug + fmt::Display, A: for<'de> Deserialize<'de>, + R: IntoFunctionResponse, B: Serialize, + S: Stream> + Unpin + Send + 'static, + D: Into + Send, + E: Into + Send + Debug, { trace!("Loading config from env"); let config = Config::from_env()?; @@ -293,7 +302,7 @@ mod endpoint_tests { use lambda_runtime_api_client::Client; use serde_json::json; use simulated::DuplexStreamWrapper; - use std::{convert::TryFrom, env}; + use std::{convert::TryFrom, env, marker::PhantomData}; use tokio::{ io::{self, AsyncRead, AsyncWrite}, select, @@ -430,6 +439,8 @@ mod endpoint_tests { let req = EventCompletionRequest { request_id: "156cb537-e2d4-11e8-9b34-d36013741fb9", body: "done", + _unused_b: PhantomData::<&str>, + _unused_s: PhantomData::, }; let req = req.into_req()?; diff --git a/lambda-runtime/src/requests.rs b/lambda-runtime/src/requests.rs index 26257d20..8e72fc2d 100644 --- a/lambda-runtime/src/requests.rs +++ b/lambda-runtime/src/requests.rs @@ -1,9 +1,15 @@ -use crate::{types::Diagnostic, Error}; +use crate::types::ToStreamErrorTrailer; +use crate::{types::Diagnostic, Error, FunctionResponse, IntoFunctionResponse}; +use bytes::Bytes; +use http::header::CONTENT_TYPE; use http::{Method, Request, Response, Uri}; use hyper::Body; use lambda_runtime_api_client::build_request; use serde::Serialize; +use std::fmt::Debug; +use std::marker::PhantomData; use std::str::FromStr; +use tokio_stream::{Stream, StreamExt}; pub(crate) trait IntoRequest { fn into_req(self) -> Result, Error>; @@ -65,23 +71,87 @@ fn test_next_event_request() { } // /runtime/invocation/{AwsRequestId}/response -pub(crate) struct EventCompletionRequest<'a, T> { +pub(crate) struct EventCompletionRequest<'a, R, B, S, D, E> +where + R: IntoFunctionResponse, + B: Serialize, + S: Stream> + Unpin + Send + 'static, + D: Into + Send, + E: Into + Send + Debug, +{ pub(crate) request_id: &'a str, - pub(crate) body: T, + pub(crate) body: R, + pub(crate) _unused_b: PhantomData, + pub(crate) _unused_s: PhantomData, } -impl<'a, T> IntoRequest for EventCompletionRequest<'a, T> +impl<'a, R, B, S, D, E> IntoRequest for EventCompletionRequest<'a, R, B, S, D, E> where - T: for<'serialize> Serialize, + R: IntoFunctionResponse, + B: 
Serialize, + S: Stream> + Unpin + Send + 'static, + D: Into + Send, + E: Into + Send + Debug, { fn into_req(self) -> Result, Error> { - let uri = format!("/2018-06-01/runtime/invocation/{}/response", self.request_id); - let uri = Uri::from_str(&uri)?; - let body = serde_json::to_vec(&self.body)?; - let body = Body::from(body); + match self.body.into_response() { + FunctionResponse::BufferedResponse(body) => { + let uri = format!("/2018-06-01/runtime/invocation/{}/response", self.request_id); + let uri = Uri::from_str(&uri)?; - let req = build_request().method(Method::POST).uri(uri).body(body)?; - Ok(req) + let body = serde_json::to_vec(&body)?; + let body = Body::from(body); + + let req = build_request().method(Method::POST).uri(uri).body(body)?; + Ok(req) + } + FunctionResponse::StreamingResponse(mut response) => { + let uri = format!("/2018-06-01/runtime/invocation/{}/response", self.request_id); + let uri = Uri::from_str(&uri)?; + + let mut builder = build_request().method(Method::POST).uri(uri); + let req_headers = builder.headers_mut().unwrap(); + + req_headers.insert("Transfer-Encoding", "chunked".parse()?); + req_headers.insert("Lambda-Runtime-Function-Response-Mode", "streaming".parse()?); + // Report midstream errors using error trailers. + // See the details in Lambda Developer Doc: https://docs.aws.amazon.com/lambda/latest/dg/runtimes-custom.html#runtimes-custom-response-streaming + req_headers.append("Trailer", "Lambda-Runtime-Function-Error-Type".parse()?); + req_headers.append("Trailer", "Lambda-Runtime-Function-Error-Body".parse()?); + req_headers.insert( + "Content-Type", + "application/vnd.awslambda.http-integration-response".parse()?, + ); + + // default Content-Type + let preloud_headers = &mut response.metadata_prelude.headers; + preloud_headers + .entry(CONTENT_TYPE) + .or_insert("application/octet-stream".parse()?); + + let metadata_prelude = serde_json::to_string(&response.metadata_prelude)?; + + tracing::trace!(?metadata_prelude); + + let (mut tx, rx) = Body::channel(); + + tokio::spawn(async move { + tx.send_data(metadata_prelude.into()).await.unwrap(); + tx.send_data("\u{0}".repeat(8).into()).await.unwrap(); + + while let Some(chunk) = response.stream.next().await { + let chunk = match chunk { + Ok(chunk) => chunk.into(), + Err(err) => err.into().to_tailer().into(), + }; + tx.send_data(chunk).await.unwrap(); + } + }); + + let req = builder.body(rx)?; + Ok(req) + } + } } } @@ -90,6 +160,8 @@ fn test_event_completion_request() { let req = EventCompletionRequest { request_id: "id", body: "hello, world!", + _unused_b: PhantomData::<&str>, + _unused_s: PhantomData::, }; let req = req.into_req().unwrap(); let expected = Uri::from_static("/2018-06-01/runtime/invocation/id/response"); diff --git a/lambda-runtime/src/streaming.rs b/lambda-runtime/src/streaming.rs deleted file mode 100644 index 5ea369ad..00000000 --- a/lambda-runtime/src/streaming.rs +++ /dev/null @@ -1,272 +0,0 @@ -use crate::{ - build_event_error_request, deserializer, incoming, type_name_of_val, Config, Context, Error, EventErrorRequest, - IntoRequest, LambdaEvent, Runtime, -}; -use bytes::Bytes; -use futures::FutureExt; -use http::header::{CONTENT_TYPE, SET_COOKIE}; -use http::{HeaderMap, Method, Request, Response, StatusCode, Uri}; -use hyper::body::HttpBody; -use hyper::{client::connect::Connection, Body}; -use lambda_runtime_api_client::{build_request, Client}; -use serde::{Deserialize, Serialize}; -use std::str::FromStr; -use std::{ - env, - fmt::{self, Debug, Display}, - future::Future, - panic, 
-}; -use tokio::io::{AsyncRead, AsyncWrite}; -use tokio_stream::{Stream, StreamExt}; -use tower::{Service, ServiceExt}; -use tracing::{error, trace, Instrument}; - -/// Starts the Lambda Rust runtime and stream response back [Configure Lambda -/// Streaming Response](https://docs.aws.amazon.com/lambda/latest/dg/configuration-response-streaming.html). -/// -/// # Example -/// ```no_run -/// use hyper::{body::Body, Response}; -/// use lambda_runtime::{service_fn, Error, LambdaEvent}; -/// use std::{thread, time::Duration}; -/// use serde_json::Value; -/// -/// #[tokio::main] -/// async fn main() -> Result<(), Error> { -/// lambda_runtime::run_with_streaming_response(service_fn(func)).await?; -/// Ok(()) -/// } -/// async fn func(_event: LambdaEvent) -> Result, Error> { -/// let messages = vec!["Hello ", "world ", "from ", "Lambda!"]; -/// -/// let (mut tx, rx) = Body::channel(); -/// -/// tokio::spawn(async move { -/// for message in messages.iter() { -/// tx.send_data((*message).into()).await.unwrap(); -/// thread::sleep(Duration::from_millis(500)); -/// } -/// }); -/// -/// let resp = Response::builder() -/// .header("content-type", "text/plain") -/// .header("CustomHeader", "outerspace") -/// .body(rx)?; -/// -/// Ok(resp) -/// } -/// ``` -pub async fn run_with_streaming_response(handler: F) -> Result<(), Error> -where - F: Service>, - F::Future: Future, F::Error>>, - F::Error: Debug + Display, - A: for<'de> Deserialize<'de>, - B: HttpBody + Unpin + Send + 'static, - B::Data: Into + Send, - B::Error: Into + Send + Debug, -{ - trace!("Loading config from env"); - let config = Config::from_env()?; - let client = Client::builder().build().expect("Unable to create a runtime client"); - let runtime = Runtime { client, config }; - - let client = &runtime.client; - let incoming = incoming(client); - runtime.run_with_streaming_response(incoming, handler).await -} - -impl Runtime -where - C: Service + Clone + Send + Sync + Unpin + 'static, - C::Future: Unpin + Send, - C::Error: Into>, - C::Response: AsyncRead + AsyncWrite + Connection + Unpin + Send + 'static, -{ - async fn run_with_streaming_response( - &self, - incoming: impl Stream, Error>> + Send, - mut handler: F, - ) -> Result<(), Error> - where - F: Service>, - F::Future: Future, F::Error>>, - F::Error: fmt::Debug + fmt::Display, - A: for<'de> Deserialize<'de>, - B: HttpBody + Unpin + Send + 'static, - B::Data: Into + Send, - B::Error: Into + Send + Debug, - { - let client = &self.client; - tokio::pin!(incoming); - while let Some(next_event_response) = incoming.next().await { - trace!("New event arrived (run loop)"); - let event = next_event_response?; - let (parts, body) = event.into_parts(); - - #[cfg(debug_assertions)] - if parts.status == http::StatusCode::NO_CONTENT { - // Ignore the event if the status code is 204. - // This is a way to keep the runtime alive when - // there are no events pending to be processed. 
- continue; - } - - let ctx: Context = Context::try_from(parts.headers)?; - let ctx: Context = ctx.with_config(&self.config); - let request_id = &ctx.request_id.clone(); - - let request_span = match &ctx.xray_trace_id { - Some(trace_id) => { - env::set_var("_X_AMZN_TRACE_ID", trace_id); - tracing::info_span!("Lambda runtime invoke", requestId = request_id, xrayTraceId = trace_id) - } - None => { - env::remove_var("_X_AMZN_TRACE_ID"); - tracing::info_span!("Lambda runtime invoke", requestId = request_id) - } - }; - - // Group the handling in one future and instrument it with the span - async { - let body = hyper::body::to_bytes(body).await?; - trace!("incoming request payload - {}", std::str::from_utf8(&body)?); - - #[cfg(debug_assertions)] - if parts.status.is_server_error() { - error!("Lambda Runtime server returned an unexpected error"); - return Err(parts.status.to_string().into()); - } - - let lambda_event = match deserializer::deserialize(&body, ctx) { - Ok(lambda_event) => lambda_event, - Err(err) => { - let req = build_event_error_request(request_id, err)?; - client.call(req).await.expect("Unable to send response to Runtime APIs"); - return Ok(()); - } - }; - - let req = match handler.ready().await { - Ok(handler) => { - // Catches panics outside of a `Future` - let task = panic::catch_unwind(panic::AssertUnwindSafe(|| handler.call(lambda_event))); - - let task = match task { - // Catches panics inside of the `Future` - Ok(task) => panic::AssertUnwindSafe(task).catch_unwind().await, - Err(err) => Err(err), - }; - - match task { - Ok(response) => match response { - Ok(response) => { - trace!("Ok response from handler (run loop)"); - EventCompletionStreamingRequest { - request_id, - body: response, - } - .into_req() - } - Err(err) => build_event_error_request(request_id, err), - }, - Err(err) => { - error!("{:?}", err); - let error_type = type_name_of_val(&err); - let msg = if let Some(msg) = err.downcast_ref::<&str>() { - format!("Lambda panicked: {msg}") - } else { - "Lambda panicked".to_string() - }; - EventErrorRequest::new(request_id, error_type, &msg).into_req() - } - } - } - Err(err) => build_event_error_request(request_id, err), - }?; - - client.call(req).await.expect("Unable to send response to Runtime APIs"); - Ok::<(), Error>(()) - } - .instrument(request_span) - .await?; - } - Ok(()) - } -} - -pub(crate) struct EventCompletionStreamingRequest<'a, B> { - pub(crate) request_id: &'a str, - pub(crate) body: Response, -} - -#[derive(Debug, Serialize)] -#[serde(rename_all = "camelCase")] -struct MetadataPrelude { - #[serde(serialize_with = "http_serde::status_code::serialize")] - status_code: StatusCode, - #[serde(serialize_with = "http_serde::header_map::serialize")] - headers: HeaderMap, - cookies: Vec, -} - -impl<'a, B> IntoRequest for EventCompletionStreamingRequest<'a, B> -where - B: HttpBody + Unpin + Send + 'static, - B::Data: Into + Send, - B::Error: Into + Send + Debug, -{ - fn into_req(self) -> Result, Error> { - let uri = format!("/2018-06-01/runtime/invocation/{}/response", self.request_id); - let uri = Uri::from_str(&uri)?; - - let (parts, mut body) = self.body.into_parts(); - - let mut builder = build_request().method(Method::POST).uri(uri); - let req_headers = builder.headers_mut().unwrap(); - - req_headers.insert("Transfer-Encoding", "chunked".parse()?); - req_headers.insert("Lambda-Runtime-Function-Response-Mode", "streaming".parse()?); - req_headers.insert( - "Content-Type", - "application/vnd.awslambda.http-integration-response".parse()?, - ); - - let mut 
prelude_headers = parts.headers; - // default Content-Type - prelude_headers - .entry(CONTENT_TYPE) - .or_insert("application/octet-stream".parse()?); - - let cookies = prelude_headers.get_all(SET_COOKIE); - let cookies = cookies - .iter() - .map(|c| String::from_utf8_lossy(c.as_bytes()).to_string()) - .collect::>(); - prelude_headers.remove(SET_COOKIE); - - let metadata_prelude = serde_json::to_string(&MetadataPrelude { - status_code: parts.status, - headers: prelude_headers, - cookies, - })?; - - trace!(?metadata_prelude); - - let (mut tx, rx) = Body::channel(); - - tokio::spawn(async move { - tx.send_data(metadata_prelude.into()).await.unwrap(); - tx.send_data("\u{0}".repeat(8).into()).await.unwrap(); - - while let Some(chunk) = body.data().await { - let chunk = chunk.unwrap(); - tx.send_data(chunk.into()).await.unwrap(); - } - }); - - let req = builder.body(rx)?; - Ok(req) - } -} diff --git a/lambda-runtime/src/types.rs b/lambda-runtime/src/types.rs index 87d6ded5..27a4a9ae 100644 --- a/lambda-runtime/src/types.rs +++ b/lambda-runtime/src/types.rs @@ -1,11 +1,14 @@ use crate::{Config, Error}; -use http::{HeaderMap, HeaderValue}; +use bytes::Bytes; +use http::{HeaderMap, HeaderValue, StatusCode}; use serde::{Deserialize, Serialize}; use std::{ collections::HashMap, convert::TryFrom, + fmt::Debug, time::{Duration, SystemTime}, }; +use tokio_stream::Stream; #[derive(Debug, Eq, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -182,6 +185,90 @@ impl LambdaEvent { } } +/// Metadata prelude for a stream response. +#[derive(Debug, Default, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct MetadataPrelude { + #[serde(with = "http_serde::status_code")] + /// The HTTP status code. + pub status_code: StatusCode, + #[serde(with = "http_serde::header_map")] + /// The HTTP headers. + pub headers: HeaderMap, + /// The HTTP cookies. + pub cookies: Vec, +} + +pub trait ToStreamErrorTrailer { + /// Convert the hyper error into a stream error trailer. + fn to_tailer(&self) -> String; +} + +impl ToStreamErrorTrailer for Error { + fn to_tailer(&self) -> String { + format!( + "Lambda-Runtime-Function-Error-Type: Runtime.StreamError\r\nLambda-Runtime-Function-Error-Body: {}\r\n", + base64::encode(self.to_string()) + ) + } +} + +/// A streaming response that contains the metadata prelude and the stream of bytes that will be +/// sent to the client. +#[derive(Debug)] +pub struct StreamResponse { + /// The metadata prelude. + pub metadata_prelude: MetadataPrelude, + /// The stream of bytes that will be sent to the client. + pub stream: S, +} + +/// An enum representing the response of a function that can return either a buffered +/// response of type `B` or a streaming response of type `S`. +pub enum FunctionResponse { + /// A buffered response containing the entire payload of the response. This is useful + /// for responses that can be processed quickly and have a relatively small payload size(<= 6MB). + BufferedResponse(B), + /// A streaming response that delivers the payload incrementally. This is useful for + /// large payloads(> 6MB) or responses that take a long time to generate. The client can start + /// processing the response as soon as the first chunk is available, without waiting + /// for the entire payload to be generated. + StreamingResponse(StreamResponse), +} + +/// a trait that can be implemented for any type that can be converted into a FunctionResponse. +/// This allows us to use the `into` method to convert a type into a FunctionResponse. 
+pub trait IntoFunctionResponse { + /// Convert the type into a FunctionResponse. + fn into_response(self) -> FunctionResponse; +} + +impl IntoFunctionResponse for FunctionResponse { + fn into_response(self) -> FunctionResponse { + self + } +} + +impl IntoFunctionResponse for B +where + B: Serialize, +{ + fn into_response(self) -> FunctionResponse { + FunctionResponse::BufferedResponse(self) + } +} + +impl IntoFunctionResponse<(), S> for StreamResponse +where + S: Stream> + Unpin + Send + 'static, + D: Into + Send, + E: Into + Send + Debug, +{ + fn into_response(self) -> FunctionResponse<(), S> { + FunctionResponse::StreamingResponse(self) + } +} + #[cfg(test)] mod test { use super::*; From 722e33f2feb0f0f24f8f276b59d4ef875ccbc0e4 Mon Sep 17 00:00:00 2001 From: David Calavera Date: Mon, 18 Sep 2023 10:19:26 -0700 Subject: [PATCH 16/27] Use compile_error if no http features are enabled (#698) - Provide help so people know that they have to enable a feature. - Put imports in place so conditional compilation doesn't try to compile unnecesary code. Signed-off-by: David Calavera --- lambda-http/src/deserializer.rs | 23 ++++++++++------------- lambda-http/src/request.rs | 31 ++++++++++++++++++++++--------- lambda-http/src/response.rs | 7 +++++++ 3 files changed, 39 insertions(+), 22 deletions(-) diff --git a/lambda-http/src/deserializer.rs b/lambda-http/src/deserializer.rs index 1771ea7b..a77f68a5 100644 --- a/lambda-http/src/deserializer.rs +++ b/lambda-http/src/deserializer.rs @@ -1,8 +1,4 @@ use crate::request::LambdaRequest; -use aws_lambda_events::{ - alb::AlbTargetGroupRequest, - apigw::{ApiGatewayProxyRequest, ApiGatewayV2httpRequest, ApiGatewayWebsocketProxyRequest}, -}; use serde::{de::Error, Deserialize}; const ERROR_CONTEXT: &str = "this function expects a JSON payload from Amazon API Gateway, Amazon Elastic Load Balancer, or AWS Lambda Function URLs, but the data doesn't match any of those services' events"; @@ -17,28 +13,29 @@ impl<'de> Deserialize<'de> for LambdaRequest { Err(err) => return Err(err), }; #[cfg(feature = "apigw_rest")] - if let Ok(res) = - ApiGatewayProxyRequest::deserialize(serde::__private::de::ContentRefDeserializer::::new(&content)) - { + if let Ok(res) = aws_lambda_events::apigw::ApiGatewayProxyRequest::deserialize( + serde::__private::de::ContentRefDeserializer::::new(&content), + ) { return Ok(LambdaRequest::ApiGatewayV1(res)); } #[cfg(feature = "apigw_http")] - if let Ok(res) = ApiGatewayV2httpRequest::deserialize( + if let Ok(res) = aws_lambda_events::apigw::ApiGatewayV2httpRequest::deserialize( serde::__private::de::ContentRefDeserializer::::new(&content), ) { return Ok(LambdaRequest::ApiGatewayV2(res)); } #[cfg(feature = "alb")] if let Ok(res) = - AlbTargetGroupRequest::deserialize(serde::__private::de::ContentRefDeserializer::::new(&content)) + aws_lambda_events::alb::AlbTargetGroupRequest::deserialize(serde::__private::de::ContentRefDeserializer::< + D::Error, + >::new(&content)) { return Ok(LambdaRequest::Alb(res)); } #[cfg(feature = "apigw_websockets")] - if let Ok(res) = ApiGatewayWebsocketProxyRequest::deserialize(serde::__private::de::ContentRefDeserializer::< - D::Error, - >::new(&content)) - { + if let Ok(res) = aws_lambda_events::apigw::ApiGatewayWebsocketProxyRequest::deserialize( + serde::__private::de::ContentRefDeserializer::::new(&content), + ) { return Ok(LambdaRequest::WebSocket(res)); } diff --git a/lambda-http/src/request.rs b/lambda-http/src/request.rs index bdb755ed..ad86e5a5 100644 --- a/lambda-http/src/request.rs +++ 
b/lambda-http/src/request.rs @@ -8,6 +8,12 @@ //! [`RequestExt`]: crate::RequestExt #[cfg(any(feature = "apigw_rest", feature = "apigw_http", feature = "apigw_websockets"))] use crate::ext::extensions::{PathParameters, StageVariables}; +#[cfg(any( + feature = "apigw_rest", + feature = "apigw_http", + feature = "alb", + feature = "apigw_websockets" +))] use crate::ext::extensions::{QueryStringParameters, RawHttpPath}; #[cfg(feature = "alb")] use aws_lambda_events::alb::{AlbTargetGroupRequest, AlbTargetGroupRequestContext}; @@ -26,7 +32,7 @@ use serde_json::error::Error as JsonError; use std::future::Future; use std::pin::Pin; -use std::{env, io::Read, mem}; +use std::{env, io::Read}; use url::Url; /// Internal representation of an Lambda http event from @@ -61,6 +67,13 @@ impl LambdaRequest { LambdaRequest::Alb { .. } => RequestOrigin::Alb, #[cfg(feature = "apigw_websockets")] LambdaRequest::WebSocket { .. } => RequestOrigin::WebSocket, + #[cfg(not(any( + feature = "apigw_rest", + feature = "apigw_http", + feature = "alb", + feature = "apigw_websockets" + )))] + _ => compile_error!("Either feature `apigw_rest`, `apigw_http`, `alb`, or `apigw_websockets` must be enabled for the `lambda-http` crate."), } } } @@ -141,8 +154,8 @@ fn into_api_gateway_v2_request(ag: ApiGatewayV2httpRequest) -> http::Request http::Request { .expect("failed to build request"); // no builder method that sets headers in batch - let _ = mem::replace(req.headers_mut(), headers); - let _ = mem::replace(req.method_mut(), http_method); + let _ = std::mem::replace(req.headers_mut(), headers); + let _ = std::mem::replace(req.method_mut(), http_method); req } @@ -255,8 +268,8 @@ fn into_alb_request(alb: AlbTargetGroupRequest) -> http::Request { .expect("failed to build request"); // no builder method that sets headers in batch - let _ = mem::replace(req.headers_mut(), headers); - let _ = mem::replace(req.method_mut(), http_method); + let _ = std::mem::replace(req.headers_mut(), headers); + let _ = std::mem::replace(req.method_mut(), http_method); req } @@ -319,8 +332,8 @@ fn into_websocket_request(ag: ApiGatewayWebsocketProxyRequest) -> http::Request< .expect("failed to build request"); // no builder method that sets headers in batch - let _ = mem::replace(req.headers_mut(), headers); - let _ = mem::replace(req.method_mut(), http_method.unwrap_or(http::Method::GET)); + let _ = std::mem::replace(req.headers_mut(), headers); + let _ = std::mem::replace(req.method_mut(), http_method.unwrap_or(http::Method::GET)); req } diff --git a/lambda-http/src/response.rs b/lambda-http/src/response.rs index 1a2ede5c..a51d1b2d 100644 --- a/lambda-http/src/response.rs +++ b/lambda-http/src/response.rs @@ -114,6 +114,13 @@ impl LambdaResponse { headers: headers.clone(), multi_value_headers: headers, }), + #[cfg(not(any( + feature = "apigw_rest", + feature = "apigw_http", + feature = "alb", + feature = "apigw_websockets" + )))] + _ => compile_error!("Either feature `apigw_rest`, `apigw_http`, `alb`, or `apigw_websockets` must be enabled for the `lambda-http` crate."), } } } From d1687e174ec2037ae786cc6407d1aca5dbe271f5 Mon Sep 17 00:00:00 2001 From: Blake Jakopovic Date: Tue, 19 Sep 2023 19:59:18 +0200 Subject: [PATCH 17/27] Update README.md (#699) Fixed typo --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 5f6f899a..cd7bbcfc 100644 --- a/README.md +++ b/README.md @@ -278,7 +278,7 @@ $ docker run --rm \ rustserverless/lambda-rust ``` -With your application build and packaged, it's ready 
to ship to production. You can also invoke it locally to verify is behavior using the [lambci :provided docker container](https://hub.docker.com/r/lambci/lambda/), which is also a mirror of the AWS Lambda provided runtime with build dependencies omitted: +With your application built and packaged, it's ready to ship to production. You can also invoke it locally to verify is behavior using the [lambci :provided docker container](https://hub.docker.com/r/lambci/lambda/), which is also a mirror of the AWS Lambda provided runtime with build dependencies omitted: ```bash # start a docker container replicating the "provided" lambda runtime From 1cbe34b7cefbddd819013830a24a012028ea91f1 Mon Sep 17 00:00:00 2001 From: David Calavera Date: Wed, 18 Oct 2023 09:29:33 -0700 Subject: [PATCH 18/27] Fix time serialization issues (#707) - Update Chrono to fix compilation issues. - Update leap second tests. Signed-off-by: David Calavera --- lambda-events/Cargo.toml | 7 +++++-- lambda-events/src/encodings/time.rs | 6 +++--- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/lambda-events/Cargo.toml b/lambda-events/Cargo.toml index e401bfff..73ab06d8 100644 --- a/lambda-events/Cargo.toml +++ b/lambda-events/Cargo.toml @@ -25,12 +25,15 @@ serde_with = { version = "^3", features = ["json"], optional = true } serde_json = "^1" serde_dynamo = { version = "^4.1", optional = true } bytes = { version = "1", features = ["serde"], optional = true } -chrono = { version = "0.4.23", default-features = false, features = [ +chrono = { version = "0.4.31", default-features = false, features = [ "clock", "serde", "std", ], optional = true } -query_map = { version = "^0.7", features = ["serde", "url-query"], optional = true } +query_map = { version = "^0.7", features = [ + "serde", + "url-query", +], optional = true } flate2 = { version = "1.0.24", optional = true } [features] diff --git a/lambda-events/src/encodings/time.rs b/lambda-events/src/encodings/time.rs index 390927ca..a550b7b0 100644 --- a/lambda-events/src/encodings/time.rs +++ b/lambda-events/src/encodings/time.rs @@ -279,12 +279,12 @@ mod test { let encoded = serde_json::to_string(&instance).unwrap(); assert_eq!(encoded, String::from(r#"{"v":"427683600.002"}"#)); - // Make sure milliseconds are included. + // Make sure leap seconds are included. 
let instance = Test { - v: Utc.ymd(1983, 7, 22).and_hms_nano(1, 0, 0, 1_234_000_000), + v: Utc.ymd(1983, 7, 22).and_hms_nano(23, 59, 59, 1_999_999_999), }; let encoded = serde_json::to_string(&instance).unwrap(); - assert_eq!(encoded, String::from(r#"{"v":"427683601.234"}"#)); + assert_eq!(encoded, String::from(r#"{"v":"427766400.999"}"#)); } #[test] From bcd3f971016dcbe4fbbe515ec1506c92feda2799 Mon Sep 17 00:00:00 2001 From: Morgan Nicholson <55922364+nichmorgan@users.noreply.github.com> Date: Thu, 19 Oct 2023 10:30:39 -0300 Subject: [PATCH 19/27] Eventbridge Event Processor (#704) * Eventbridge Event Processor * cfg feature fix * feature comment * Removed whitespace * makefile fix --------- Co-authored-by: nich.morgan Co-authored-by: erso --- Makefile | 1 + lambda-events/Cargo.toml | 2 + lambda-events/src/event/eventbridge/mod.rs | 87 +++++++++++++++++++ lambda-events/src/event/mod.rs | 4 + .../example-eventbridge-event-obj.json | 13 +++ .../fixtures/example-eventbridge-event.json | 13 +++ lambda-events/src/lib.rs | 4 + 7 files changed, 124 insertions(+) create mode 100644 lambda-events/src/event/eventbridge/mod.rs create mode 100644 lambda-events/src/fixtures/example-eventbridge-event-obj.json create mode 100644 lambda-events/src/fixtures/example-eventbridge-event.json diff --git a/Makefile b/Makefile index 544d08b7..58eb2a9c 100644 --- a/Makefile +++ b/Makefile @@ -101,6 +101,7 @@ check-event-features: cargo test --package aws_lambda_events --no-default-features --features sns cargo test --package aws_lambda_events --no-default-features --features sqs cargo test --package aws_lambda_events --no-default-features --features streams + cargo test --package aws_lambda_events --no-default-features --features eventbridge fmt: cargo +nightly fmt --all \ No newline at end of file diff --git a/lambda-events/Cargo.toml b/lambda-events/Cargo.toml index 73ab06d8..b7a00b29 100644 --- a/lambda-events/Cargo.toml +++ b/lambda-events/Cargo.toml @@ -76,6 +76,7 @@ default = [ "sns", "sqs", "streams", + "eventbridge", ] activemq = [] @@ -117,3 +118,4 @@ ses = ["chrono"] sns = ["chrono", "serde_with"] sqs = ["serde_with"] streams = [] +eventbridge = ["chrono", "serde_with"] diff --git a/lambda-events/src/event/eventbridge/mod.rs b/lambda-events/src/event/eventbridge/mod.rs new file mode 100644 index 00000000..7809f1e2 --- /dev/null +++ b/lambda-events/src/event/eventbridge/mod.rs @@ -0,0 +1,87 @@ +use chrono::{DateTime, Utc}; +use serde::de::DeserializeOwned; +use serde::{Deserialize, Serialize}; +use serde_with::serde_as; + +#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)] +#[serde(rename_all = "kebab-case")] +pub struct EventBridgeEvent { + #[serde(default)] + pub version: Option, + #[serde(default)] + pub id: Option, + pub detail_type: String, + pub source: String, + #[serde(default)] + pub account: Option, + #[serde(default)] + pub time: Option>, + #[serde(default)] + pub region: Option, + #[serde(default)] + pub resources: Option>, + #[serde(default)] + pub detail: Option, +} + +#[serde_with::serde_as] +#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)] +#[serde(bound(deserialize = "T: DeserializeOwned"))] +#[serde(rename_all = "kebab-case")] +pub struct EventBridgeEventObj { + #[serde(default)] + pub version: Option, + #[serde(default)] + pub id: Option, + pub detail_type: String, + pub source: String, + #[serde(default)] + pub account: Option, + #[serde(default)] + pub time: Option>, + #[serde(default)] + pub region: Option, + 
#[serde(default)] + pub resources: Option>, + #[serde_as(as = "serde_with::json::JsonString")] + #[serde(bound(deserialize = "T: DeserializeOwned"))] + pub detail: T, +} + +#[cfg(test)] +#[cfg(feature = "eventbridge")] +mod test { + use super::*; + + use serde_json; + + #[test] + fn example_eventbridge_obj_event() { + #[derive(Serialize, Deserialize, Debug, Eq, PartialEq)] + struct CustomStruct { + a: String, + b: String, + } + + let data = include_bytes!("../../fixtures/example-eventbridge-event-obj.json"); + let parsed: EventBridgeEventObj = serde_json::from_slice(data).unwrap(); + + assert_eq!(parsed.detail.a, "123"); + assert_eq!(parsed.detail.b, "456"); + + let output: String = serde_json::to_string(&parsed).unwrap(); + let reparsed: EventBridgeEventObj = serde_json::from_slice(output.as_bytes()).unwrap(); + assert_eq!(parsed, reparsed); + } + + #[test] + fn example_eventbridge_event() { + let data = include_bytes!("../../fixtures/example-eventbridge-event.json"); + let parsed: EventBridgeEvent = serde_json::from_slice(data).unwrap(); + assert_eq!(parsed.detail, Some(String::from("String Message"))); + + let output: String = serde_json::to_string(&parsed).unwrap(); + let reparsed: EventBridgeEvent = serde_json::from_slice(output.as_bytes()).unwrap(); + assert_eq!(parsed, reparsed); + } +} diff --git a/lambda-events/src/event/mod.rs b/lambda-events/src/event/mod.rs index 4ce71dfc..46dc760c 100644 --- a/lambda-events/src/event/mod.rs +++ b/lambda-events/src/event/mod.rs @@ -140,3 +140,7 @@ pub mod sqs; /// AWS Lambda event definitions for streams. #[cfg(feature = "streams")] pub mod streams; + +/// AWS Lambda event definitions for EventBridge. +#[cfg(feature = "eventbridge")] +pub mod eventbridge; diff --git a/lambda-events/src/fixtures/example-eventbridge-event-obj.json b/lambda-events/src/fixtures/example-eventbridge-event-obj.json new file mode 100644 index 00000000..97c5e0ae --- /dev/null +++ b/lambda-events/src/fixtures/example-eventbridge-event-obj.json @@ -0,0 +1,13 @@ +{ + "version": "0", + "id": "6a7e8feb-b491-4cf7-a9f1-bf3703467718", + "detail-type": "EC2 Instance State-change Notification", + "source": "aws.ec2", + "account": "111122223333", + "time": "2017-12-22T18:43:48Z", + "region": "us-west-1", + "resources": [ + "arn:aws:ec2:us-west-1:123456789012:instance/i-1234567890abcdef0" + ], + "detail": "{\"a\":\"123\",\"b\":\"456\"}" +} diff --git a/lambda-events/src/fixtures/example-eventbridge-event.json b/lambda-events/src/fixtures/example-eventbridge-event.json new file mode 100644 index 00000000..793ca8dc --- /dev/null +++ b/lambda-events/src/fixtures/example-eventbridge-event.json @@ -0,0 +1,13 @@ +{ + "version": "0", + "id": "6a7e8feb-b491-4cf7-a9f1-bf3703467718", + "detail-type": "EC2 Instance State-change Notification", + "source": "aws.ec2", + "account": "111122223333", + "time": "2017-12-22T18:43:48Z", + "region": "us-west-1", + "resources": [ + "arn:aws:ec2:us-west-1:123456789012:instance/i-1234567890abcdef0" + ], + "detail": "String Message" +} diff --git a/lambda-events/src/lib.rs b/lambda-events/src/lib.rs index 7402a8f4..5fe81cfc 100644 --- a/lambda-events/src/lib.rs +++ b/lambda-events/src/lib.rs @@ -164,3 +164,7 @@ pub use event::sqs; /// AWS Lambda event definitions for streams. #[cfg(feature = "streams")] pub use event::streams; + +/// AWS Lambda event definitions for EventBridge. 
+#[cfg(feature = "eventbridge")] +pub use event::eventbridge; From 2675fa9e305536b14756bf7290db95f21f5cc992 Mon Sep 17 00:00:00 2001 From: Morgan Nicholson <55922364+nichmorgan@users.noreply.github.com> Date: Thu, 19 Oct 2023 11:05:51 -0300 Subject: [PATCH 20/27] DocumentDB support (#706) * insert event draft * abstract change event * Added documentdb delete event * Added support to change event drop * Added support to dropDatabase Event * - MongoDB v6.0 Change Event Fields removed - ChangeEvent enum tagged - AnyDocument common type created * replace event support * added support to invalidate event in documentdb * Adding DocumentDB Rename event. * run cargo fmt * Excluding 'to' parameter * Add DocumentDB Update event * fixed 'to' parameter and run cargo fmt * Refactoring 'Rename' event declaration as a single type not a commum type * InsertNs renamed to DatabaseCollection for code reuse * unused field removed * cfg fix * fix lines * fmt and makefile fixed * makefile reord --------- Co-authored-by: nich.morgan Co-authored-by: erso Co-authored-by: Luca Barcelos Co-authored-by: Vinicius Brisotti Co-authored-by: Pedro Rabello Sato Co-authored-by: darwish --- Makefile | 3 +- lambda-events/Cargo.toml | 2 + .../event/documentdb/events/commom_types.rs | 44 +++++++++ .../event/documentdb/events/delete_event.rs | 20 ++++ .../documentdb/events/drop_database_event.rs | 17 ++++ .../src/event/documentdb/events/drop_event.rs | 17 ++++ .../event/documentdb/events/insert_event.rs | 21 ++++ .../documentdb/events/invalidate_event.rs | 13 +++ .../src/event/documentdb/events/mod.rs | 9 ++ .../event/documentdb/events/rename_event.rs | 21 ++++ .../event/documentdb/events/replace_event.rs | 20 ++++ .../event/documentdb/events/update_event.rs | 19 ++++ lambda-events/src/event/documentdb/mod.rs | 96 +++++++++++++++++++ lambda-events/src/event/mod.rs | 4 + .../example-documentdb-delete-event.json | 30 ++++++ ...xample-documentdb-drop-database-event.json | 24 +++++ .../example-documentdb-drop-event.json | 30 ++++++ .../example-documentdb-insert-event.json | 29 ++++++ .../example-documentdb-invalidate-event.json | 20 ++++ .../example-documentdb-rename-event.json | 33 +++++++ .../example-documentdb-replace-event.json | 29 ++++++ .../example-documentdb-update-event.json | 29 ++++++ lambda-events/src/lib.rs | 4 + 23 files changed, 533 insertions(+), 1 deletion(-) create mode 100644 lambda-events/src/event/documentdb/events/commom_types.rs create mode 100644 lambda-events/src/event/documentdb/events/delete_event.rs create mode 100644 lambda-events/src/event/documentdb/events/drop_database_event.rs create mode 100644 lambda-events/src/event/documentdb/events/drop_event.rs create mode 100644 lambda-events/src/event/documentdb/events/insert_event.rs create mode 100644 lambda-events/src/event/documentdb/events/invalidate_event.rs create mode 100644 lambda-events/src/event/documentdb/events/mod.rs create mode 100644 lambda-events/src/event/documentdb/events/rename_event.rs create mode 100644 lambda-events/src/event/documentdb/events/replace_event.rs create mode 100644 lambda-events/src/event/documentdb/events/update_event.rs create mode 100644 lambda-events/src/event/documentdb/mod.rs create mode 100644 lambda-events/src/fixtures/example-documentdb-delete-event.json create mode 100644 lambda-events/src/fixtures/example-documentdb-drop-database-event.json create mode 100644 lambda-events/src/fixtures/example-documentdb-drop-event.json create mode 100644 
lambda-events/src/fixtures/example-documentdb-insert-event.json create mode 100644 lambda-events/src/fixtures/example-documentdb-invalidate-event.json create mode 100644 lambda-events/src/fixtures/example-documentdb-rename-event.json create mode 100644 lambda-events/src/fixtures/example-documentdb-replace-event.json create mode 100644 lambda-events/src/fixtures/example-documentdb-update-event.json diff --git a/Makefile b/Makefile index 58eb2a9c..76e57e94 100644 --- a/Makefile +++ b/Makefile @@ -81,8 +81,10 @@ check-event-features: cargo test --package aws_lambda_events --no-default-features --features cognito cargo test --package aws_lambda_events --no-default-features --features config cargo test --package aws_lambda_events --no-default-features --features connect + cargo test --package aws_lambda_events --no-default-features --features documentdb cargo test --package aws_lambda_events --no-default-features --features dynamodb cargo test --package aws_lambda_events --no-default-features --features ecr_scan + cargo test --package aws_lambda_events --no-default-features --features eventbridge cargo test --package aws_lambda_events --no-default-features --features firehose cargo test --package aws_lambda_events --no-default-features --features iam cargo test --package aws_lambda_events --no-default-features --features iot @@ -101,7 +103,6 @@ check-event-features: cargo test --package aws_lambda_events --no-default-features --features sns cargo test --package aws_lambda_events --no-default-features --features sqs cargo test --package aws_lambda_events --no-default-features --features streams - cargo test --package aws_lambda_events --no-default-features --features eventbridge fmt: cargo +nightly fmt --all \ No newline at end of file diff --git a/lambda-events/Cargo.toml b/lambda-events/Cargo.toml index b7a00b29..c58ec475 100644 --- a/lambda-events/Cargo.toml +++ b/lambda-events/Cargo.toml @@ -76,6 +76,7 @@ default = [ "sns", "sqs", "streams", + "documentdb", "eventbridge", ] @@ -118,4 +119,5 @@ ses = ["chrono"] sns = ["chrono", "serde_with"] sqs = ["serde_with"] streams = [] +documentdb = [] eventbridge = ["chrono", "serde_with"] diff --git a/lambda-events/src/event/documentdb/events/commom_types.rs b/lambda-events/src/event/documentdb/events/commom_types.rs new file mode 100644 index 00000000..5d1bdc19 --- /dev/null +++ b/lambda-events/src/event/documentdb/events/commom_types.rs @@ -0,0 +1,44 @@ +use std::collections::HashMap; + +use serde::{Deserialize, Serialize}; +use serde_json::Value; + +pub type AnyDocument = HashMap; + +#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct DatabaseCollection { + db: String, + #[serde(default)] + coll: Option, +} + +#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)] +pub struct DocumentId { + #[serde(rename = "_data")] + pub data: String, +} + +#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)] +pub struct DocumentKeyIdOid { + #[serde(rename = "$oid")] + pub oid: String, +} + +#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)] +pub struct DocumentKeyId { + #[serde(rename = "_id")] + pub id: DocumentKeyIdOid, +} + +#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)] +pub struct InnerTimestamp { + t: usize, + i: usize, +} + +#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)] +pub struct Timestamp { + #[serde(rename = "$timestamp")] + pub timestamp: InnerTimestamp, +} diff --git 
a/lambda-events/src/event/documentdb/events/delete_event.rs b/lambda-events/src/event/documentdb/events/delete_event.rs new file mode 100644 index 00000000..7761d62f --- /dev/null +++ b/lambda-events/src/event/documentdb/events/delete_event.rs @@ -0,0 +1,20 @@ +use serde::{Deserialize, Serialize}; + +use super::commom_types::{AnyDocument, DatabaseCollection, DocumentId, DocumentKeyId, Timestamp}; + +#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct ChangeDeleteEvent { + #[serde(rename = "_id")] + id: DocumentId, + #[serde(default)] + cluster_time: Option, + document_key: DocumentKeyId, + #[serde(default)] + #[serde(rename = "lsid")] + ls_id: Option, + ns: DatabaseCollection, + // operation_type: String, + #[serde(default)] + txn_number: Option, +} diff --git a/lambda-events/src/event/documentdb/events/drop_database_event.rs b/lambda-events/src/event/documentdb/events/drop_database_event.rs new file mode 100644 index 00000000..c51e345c --- /dev/null +++ b/lambda-events/src/event/documentdb/events/drop_database_event.rs @@ -0,0 +1,17 @@ +use serde::{Deserialize, Serialize}; + +use super::commom_types::{AnyDocument, DatabaseCollection, DocumentId, Timestamp}; + +#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct ChangeDropDatabaseEvent { + #[serde(rename = "_id")] + id: DocumentId, + cluster_time: Timestamp, + #[serde(rename = "lsid")] + ls_id: Option, + ns: DatabaseCollection, + // operation_type: String, + #[serde(default)] + txn_number: Option, +} diff --git a/lambda-events/src/event/documentdb/events/drop_event.rs b/lambda-events/src/event/documentdb/events/drop_event.rs new file mode 100644 index 00000000..866ce143 --- /dev/null +++ b/lambda-events/src/event/documentdb/events/drop_event.rs @@ -0,0 +1,17 @@ +use super::commom_types::{AnyDocument, DatabaseCollection, DocumentId, Timestamp}; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct ChangeDropEvent { + #[serde(rename = "_id")] + id: DocumentId, + cluster_time: Timestamp, + #[serde(default)] + #[serde(rename = "lsid")] + ls_id: Option, + ns: DatabaseCollection, + // operation_type: String, + #[serde(default)] + txn_number: Option, +} diff --git a/lambda-events/src/event/documentdb/events/insert_event.rs b/lambda-events/src/event/documentdb/events/insert_event.rs new file mode 100644 index 00000000..09ab66b2 --- /dev/null +++ b/lambda-events/src/event/documentdb/events/insert_event.rs @@ -0,0 +1,21 @@ +use serde::{Deserialize, Serialize}; + +use super::commom_types::{AnyDocument, DatabaseCollection, DocumentId, DocumentKeyId, Timestamp}; + +#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)] +#[serde(rename_all = "camelCase")] + +pub struct ChangeInsertEvent { + #[serde(rename = "_id")] + id: DocumentId, + #[serde(default)] + cluster_time: Option, + document_key: DocumentKeyId, + #[serde(default)] + #[serde(rename = "lsid")] + ls_id: Option, + ns: DatabaseCollection, + //operation_type: String, + #[serde(default)] + txn_number: Option, +} diff --git a/lambda-events/src/event/documentdb/events/invalidate_event.rs b/lambda-events/src/event/documentdb/events/invalidate_event.rs new file mode 100644 index 00000000..47469ff9 --- /dev/null +++ b/lambda-events/src/event/documentdb/events/invalidate_event.rs @@ -0,0 +1,13 @@ +use serde::{Deserialize, Serialize}; + +use 
super::commom_types::{DocumentId, Timestamp}; + +#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct ChangeInvalidateEvent { + #[serde(rename = "_id")] + id: DocumentId, + #[serde(default)] + cluster_time: Option, + // operation_type: String, +} diff --git a/lambda-events/src/event/documentdb/events/mod.rs b/lambda-events/src/event/documentdb/events/mod.rs new file mode 100644 index 00000000..c1c41b98 --- /dev/null +++ b/lambda-events/src/event/documentdb/events/mod.rs @@ -0,0 +1,9 @@ +pub mod commom_types; +pub mod delete_event; +pub mod drop_database_event; +pub mod drop_event; +pub mod insert_event; +pub mod invalidate_event; +pub mod rename_event; +pub mod replace_event; +pub mod update_event; diff --git a/lambda-events/src/event/documentdb/events/rename_event.rs b/lambda-events/src/event/documentdb/events/rename_event.rs new file mode 100644 index 00000000..8bc250fb --- /dev/null +++ b/lambda-events/src/event/documentdb/events/rename_event.rs @@ -0,0 +1,21 @@ +use serde::{Deserialize, Serialize}; + +use super::commom_types::{AnyDocument, DatabaseCollection, DocumentId, Timestamp}; + +#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct ChangeRenameEvent { + #[serde(rename = "_id")] + id: DocumentId, + #[serde(default)] + cluster_time: Option, + + #[serde(default)] + #[serde(rename = "lsid")] + ls_id: Option, + ns: DatabaseCollection, + //operation_type: String, + #[serde(default)] + txn_number: Option, + to: DatabaseCollection, +} diff --git a/lambda-events/src/event/documentdb/events/replace_event.rs b/lambda-events/src/event/documentdb/events/replace_event.rs new file mode 100644 index 00000000..4a0e58ad --- /dev/null +++ b/lambda-events/src/event/documentdb/events/replace_event.rs @@ -0,0 +1,20 @@ +use serde::{Deserialize, Serialize}; + +use super::commom_types::{AnyDocument, DatabaseCollection, DocumentId, DocumentKeyId, Timestamp}; + +#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct ChangeReplaceEvent { + #[serde(rename = "_id")] + id: DocumentId, + #[serde(default)] + cluster_time: Option, + document_key: DocumentKeyId, + #[serde(default)] + #[serde(rename = "lsid")] + ls_id: Option, + ns: DatabaseCollection, + // operation_type: String, + #[serde(default)] + txn_number: Option, +} diff --git a/lambda-events/src/event/documentdb/events/update_event.rs b/lambda-events/src/event/documentdb/events/update_event.rs new file mode 100644 index 00000000..8698485a --- /dev/null +++ b/lambda-events/src/event/documentdb/events/update_event.rs @@ -0,0 +1,19 @@ +use serde::{Deserialize, Serialize}; + +use super::commom_types::{AnyDocument, DatabaseCollection, DocumentId, DocumentKeyId, Timestamp}; + +#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct ChangeUpdateEvent { + #[serde(rename = "_id")] + id: DocumentId, + #[serde(default)] + cluster_time: Option, + document_key: DocumentKeyId, + #[serde(rename = "lsid")] + ls_id: Option, + ns: DatabaseCollection, + // operation_type: String, + #[serde(default)] + txn_number: Option, +} diff --git a/lambda-events/src/event/documentdb/mod.rs b/lambda-events/src/event/documentdb/mod.rs new file mode 100644 index 00000000..67f7c9ad --- /dev/null +++ b/lambda-events/src/event/documentdb/mod.rs @@ -0,0 +1,96 @@ +pub mod events; + +use self::events::{ + 
delete_event::ChangeDeleteEvent, drop_database_event::ChangeDropDatabaseEvent, drop_event::ChangeDropEvent, + insert_event::ChangeInsertEvent, invalidate_event::ChangeInvalidateEvent, rename_event::ChangeRenameEvent, + replace_event::ChangeReplaceEvent, update_event::ChangeUpdateEvent, +}; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +#[serde(tag = "operationType", rename_all = "camelCase")] +pub enum ChangeEvent { + Insert(ChangeInsertEvent), + Delete(ChangeDeleteEvent), + Drop(ChangeDropEvent), + DropDatabase(ChangeDropDatabaseEvent), + Invalidate(ChangeInvalidateEvent), + Replace(ChangeReplaceEvent), + Update(ChangeUpdateEvent), + Rename(ChangeRenameEvent), +} + +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub struct DocumentDbInnerEvent { + pub event: ChangeEvent, +} + +#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct DocumentDbEvent { + #[serde(default)] + pub event_source_arn: Option, + pub events: Vec, + #[serde(default)] + pub event_source: Option, +} + +#[cfg(test)] +#[cfg(feature = "documentdb")] +mod test { + use super::*; + + pub type Event = DocumentDbEvent; + + fn test_example(data: &[u8]) { + let parsed: Event = serde_json::from_slice(data).unwrap(); + let output: String = serde_json::to_string(&parsed).unwrap(); + let reparsed: Event = serde_json::from_slice(output.as_bytes()).unwrap(); + + assert_eq!(parsed, reparsed); + } + + #[test] + fn example_documentdb_insert_event() { + test_example(include_bytes!("../../fixtures/example-documentdb-insert-event.json")); + } + + #[test] + fn example_documentdb_delete_event() { + test_example(include_bytes!("../../fixtures/example-documentdb-delete-event.json")); + } + + #[test] + fn example_documentdb_drop_event() { + test_example(include_bytes!("../../fixtures/example-documentdb-drop-event.json")); + } + + #[test] + fn example_documentdb_replace_event() { + test_example(include_bytes!("../../fixtures/example-documentdb-replace-event.json")); + } + + #[test] + fn example_documentdb_update_event() { + test_example(include_bytes!("../../fixtures/example-documentdb-update-event.json")); + } + + #[test] + fn example_documentdb_rename_event() { + test_example(include_bytes!("../../fixtures/example-documentdb-rename-event.json")); + } + + #[test] + fn example_documentdb_invalidate_event() { + test_example(include_bytes!( + "../../fixtures/example-documentdb-invalidate-event.json" + )); + } + + #[test] + fn example_documentdb_drop_database_event() { + test_example(include_bytes!( + "../../fixtures/example-documentdb-drop-database-event.json" + )); + } +} diff --git a/lambda-events/src/event/mod.rs b/lambda-events/src/event/mod.rs index 46dc760c..5ee57911 100644 --- a/lambda-events/src/event/mod.rs +++ b/lambda-events/src/event/mod.rs @@ -141,6 +141,10 @@ pub mod sqs; #[cfg(feature = "streams")] pub mod streams; +// AWS Lambda event definitions for DocumentDB +#[cfg(feature = "documentdb")] +pub mod documentdb; + /// AWS Lambda event definitions for EventBridge. 
#[cfg(feature = "eventbridge")] pub mod eventbridge; diff --git a/lambda-events/src/fixtures/example-documentdb-delete-event.json b/lambda-events/src/fixtures/example-documentdb-delete-event.json new file mode 100644 index 00000000..fd9259da --- /dev/null +++ b/lambda-events/src/fixtures/example-documentdb-delete-event.json @@ -0,0 +1,30 @@ +{ + "eventSourceArn": "arn:aws:rds:us-east-1:123456789012:cluster:canaryclusterb2a659a2-qo5tcmqkcl03", + "events": [ + { + "event": { + "_id": { + "_data": "0163eeb6e7000000090100000009000041e1" + }, + "clusterTime": { + "$timestamp": { + "t": 1676588775, + "i": 9 + } + }, + "documentKey": { + "_id": { + "$oid": "63eeb6e7d418cd98afb1c1d7" + } + }, + "ns": { + "db": "test_database", + "coll": "test_collection" + }, + "operationType": "delete" + } + } + ], + "eventSource": "aws:docdb" + } + \ No newline at end of file diff --git a/lambda-events/src/fixtures/example-documentdb-drop-database-event.json b/lambda-events/src/fixtures/example-documentdb-drop-database-event.json new file mode 100644 index 00000000..77a1cb93 --- /dev/null +++ b/lambda-events/src/fixtures/example-documentdb-drop-database-event.json @@ -0,0 +1,24 @@ +{ + "eventSourceArn": "arn:aws:rds:us-east-1:123456789012:cluster:canaryclusterb2a659a2-qo5tcmqkcl03", + "events": [ + { + "event": { + "_id": { + "_data": "0163eeb6e7000000090100000009000041e1" + }, + "clusterTime": { + "$timestamp": { + "t": 1676588775, + "i": 9 + } + }, + "ns": { + "db": "test_database" + }, + "operationType": "dropDatabase" + } + } + ], + "eventSource": "aws:docdb" + } + \ No newline at end of file diff --git a/lambda-events/src/fixtures/example-documentdb-drop-event.json b/lambda-events/src/fixtures/example-documentdb-drop-event.json new file mode 100644 index 00000000..89d8cc8f --- /dev/null +++ b/lambda-events/src/fixtures/example-documentdb-drop-event.json @@ -0,0 +1,30 @@ +{ + "eventSourceArn": "arn:aws:rds:us-east-1:123456789012:cluster:canaryclusterb2a659a2-qo5tcmqkcl03", + "events": [ + { + "event": { + "_id": { + "_data": "0163eeb6e7000000090100000009000041e1" + }, + "clusterTime": { + "$timestamp": { + "t": 1676588775, + "i": 9 + } + }, + "documentKey": { + "_id": { + "$oid": "63eeb6e7d418cd98afb1c1d7" + } + }, + "ns": { + "db": "test_database", + "coll": "test_collection" + }, + "operationType": "drop" + } + } + ], + "eventSource": "aws:docdb" + } + \ No newline at end of file diff --git a/lambda-events/src/fixtures/example-documentdb-insert-event.json b/lambda-events/src/fixtures/example-documentdb-insert-event.json new file mode 100644 index 00000000..cd03e374 --- /dev/null +++ b/lambda-events/src/fixtures/example-documentdb-insert-event.json @@ -0,0 +1,29 @@ +{ + "eventSourceArn": "arn:aws:rds:us-east-1:123456789012:cluster:canaryclusterb2a659a2-qo5tcmqkcl03", + "events": [ + { + "event": { + "_id": { + "_data": "0163eeb6e7000000090100000009000041e1" + }, + "clusterTime": { + "$timestamp": { + "t": 1676588775, + "i": 9 + } + }, + "documentKey": { + "_id": { + "$oid": "63eeb6e7d418cd98afb1c1d7" + } + }, + "ns": { + "db": "test_database", + "coll": "test_collection" + }, + "operationType": "insert" + } + } + ], + "eventSource": "aws:docdb" +} diff --git a/lambda-events/src/fixtures/example-documentdb-invalidate-event.json b/lambda-events/src/fixtures/example-documentdb-invalidate-event.json new file mode 100644 index 00000000..59f5af65 --- /dev/null +++ b/lambda-events/src/fixtures/example-documentdb-invalidate-event.json @@ -0,0 +1,20 @@ +{ + "eventSourceArn": 
"arn:aws:rds:us-east-1:123456789012:cluster:canaryclusterb2a659a2-qo5tcmqkcl03", + "events": [ + { + "event": { + "_id": { + "_data": "0163eeb6e7000000090100000009000041e1" + }, + "clusterTime": { + "$timestamp": { + "t": 1676588775, + "i": 9 + } + }, + "operationType": "invalidate" + } + } + ], + "eventSource": "aws:docdb" + } \ No newline at end of file diff --git a/lambda-events/src/fixtures/example-documentdb-rename-event.json b/lambda-events/src/fixtures/example-documentdb-rename-event.json new file mode 100644 index 00000000..65416470 --- /dev/null +++ b/lambda-events/src/fixtures/example-documentdb-rename-event.json @@ -0,0 +1,33 @@ +{ + "eventSourceArn": "arn:aws:rds:us-east-1:123456789012:cluster:canaryclusterb2a659a2-qo5tcmqkcl03", + "events": [ + { + "event": { + "_id": { + "_data": "0163eeb6e7000000090100000009000041e1" + }, + "clusterTime": { + "$timestamp": { + "t": 1676588775, + "i": 9 + } + }, + "documentKey": { + "_id": { + "$oid": "63eeb6e7d418cd98afb1c1d7" + } + }, + "ns": { + "db": "test_database", + "coll": "test_collection" + }, + "to": { + "db": "test_database_new", + "coll": "test_collection_new" + }, + "operationType": "rename" + } + } + ], + "eventSource": "aws:docdb" +} \ No newline at end of file diff --git a/lambda-events/src/fixtures/example-documentdb-replace-event.json b/lambda-events/src/fixtures/example-documentdb-replace-event.json new file mode 100644 index 00000000..1c7fe559 --- /dev/null +++ b/lambda-events/src/fixtures/example-documentdb-replace-event.json @@ -0,0 +1,29 @@ +{ + "eventSourceArn": "arn:aws:rds:us-east-1:123456789012:cluster:canaryclusterb2a659a2-qo5tcmqkcl03", + "events": [ + { + "event": { + "_id": { + "_data": "0163eeb6e7000000090100000009000041e1" + }, + "operationType": "replace", + "clusterTime": { + "$timestamp": { + "t": 1676588775, + "i": 9 + } + }, + "ns": { + "db": "engineering", + "coll": "users" + }, + "documentKey": { + "_id": { + "$oid": "63eeb6e7d418cd98afb1c1d7" + } + } + } + } + ], + "eventSource": "aws:docdb" +} diff --git a/lambda-events/src/fixtures/example-documentdb-update-event.json b/lambda-events/src/fixtures/example-documentdb-update-event.json new file mode 100644 index 00000000..dbb19159 --- /dev/null +++ b/lambda-events/src/fixtures/example-documentdb-update-event.json @@ -0,0 +1,29 @@ +{ + "eventSourceArn": "arn:aws:rds:us-east-1:123456789012:cluster:canaryclusterb2a659a2-qo5tcmqkcl03", + "events": [ + { + "event": { + "_id": { + "_data": "0163eeb6e7000000090100000009000041e1" + }, + "clusterTime": { + "$timestamp": { + "t": 1676588775, + "i": 9 + } + }, + "documentKey": { + "_id": { + "$oid": "63eeb6e7d418cd98afb1c1d7" + } + }, + "ns": { + "db": "test_database", + "coll": "test_collection" + }, + "operationType": "update" + } + } + ], + "eventSource": "aws:docdb" + } \ No newline at end of file diff --git a/lambda-events/src/lib.rs b/lambda-events/src/lib.rs index 5fe81cfc..aa0d5495 100644 --- a/lambda-events/src/lib.rs +++ b/lambda-events/src/lib.rs @@ -165,6 +165,10 @@ pub use event::sqs; #[cfg(feature = "streams")] pub use event::streams; +/// AWS Lambda event definitions for documentdb. +#[cfg(feature = "documentdb")] +pub use event::documentdb; + /// AWS Lambda event definitions for EventBridge. 
#[cfg(feature = "eventbridge")] pub use event::eventbridge; From b9d64e893122262e3abf0b2126b9d8dacfb265e6 Mon Sep 17 00:00:00 2001 From: Seb Maz Date: Mon, 23 Oct 2023 17:46:28 +0400 Subject: [PATCH 21/27] Updated all crates (#709) removed the deprecated mod "Attribute_value" added serde_dynamo "to_attribute_value" updated the code to match the new changes #708 --- examples/http-dynamodb/Cargo.toml | 17 +++++++++-------- examples/http-dynamodb/src/main.rs | 19 ++++++++++--------- 2 files changed, 19 insertions(+), 17 deletions(-) diff --git a/examples/http-dynamodb/Cargo.toml b/examples/http-dynamodb/Cargo.toml index c3f6d8be..be95f867 100644 --- a/examples/http-dynamodb/Cargo.toml +++ b/examples/http-dynamodb/Cargo.toml @@ -11,15 +11,16 @@ edition = "2021" # and it will keep the alphabetic ordering for you. [dependencies] -simple-error = "0.2.3" -serde_json = "1.0" -serde = { version = "1.0", features = ["derive"] } +simple-error = "0.3.0" +serde_json = "1.0.107" +serde = { version = "1.0.189", features = ["derive"] } +serde_dynamo = {version = "^4.2.7", features = ["aws-sdk-dynamodb+0_33"]} lambda_http = { path = "../../lambda-http" } lambda_runtime = { path = "../../lambda-runtime" } -aws-sdk-dynamodb = "0.21.0" -aws-config = "0.51.0" -tokio = { version = "1", features = ["macros"] } -tracing = { version = "0.1", features = ["log"] } -tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt"] } +aws-sdk-dynamodb = "0.33.0" +aws-config = "0.56.1" +tokio = { version = "1.33.0", features = ["macros"] } +tracing = { version = "0.1.40", features = ["log"] } +tracing-subscriber = { version = "0.3.17", default-features = false, features = ["fmt"] } diff --git a/examples/http-dynamodb/src/main.rs b/examples/http-dynamodb/src/main.rs index 5a7030f9..b2e8af20 100644 --- a/examples/http-dynamodb/src/main.rs +++ b/examples/http-dynamodb/src/main.rs @@ -1,9 +1,10 @@ -use aws_sdk_dynamodb::model::AttributeValue; -use aws_sdk_dynamodb::{Client, Error as OtherError}; +use aws_sdk_dynamodb::{Client}; use lambda_http::{run, service_fn, Body, Error, Request, Response}; +use serde::{Deserialize, Serialize}; +use serde_dynamo::to_attribute_value; use tracing::info; -#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)] +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct Item { pub p_type: String, pub age: String, @@ -76,12 +77,12 @@ async fn main() -> Result<(), Error> { // Add an item to a table. 
// snippet-start:[dynamodb.rust.add-item] -pub async fn add_item(client: &Client, item: Item, table: &str) -> Result<(), OtherError> { - let user_av = AttributeValue::S(item.username); - let type_av = AttributeValue::S(item.p_type); - let age_av = AttributeValue::S(item.age); - let first_av = AttributeValue::S(item.first); - let last_av = AttributeValue::S(item.last); +pub async fn add_item(client: &Client, item: Item, table: &str) -> Result<(), Error> { + let user_av = to_attribute_value(item.username)?; + let type_av = to_attribute_value(item.p_type)?; + let age_av = to_attribute_value(item.age)?; + let first_av = to_attribute_value(item.first)?; + let last_av = to_attribute_value(item.last)?; let request = client .put_item() From 45525e0dfe1196315dd130101b9cec64ac6b67f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9my=20Greinhofer?= Date: Mon, 23 Oct 2023 14:52:41 -0500 Subject: [PATCH 22/27] Add SQS API event structs (#711) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds structs to allow serializing data coming from the AWS SQS API. Fixes awslabs/aws-lambda-rust-runtime#710 Signed-off-by: Rémy Greinhofer --- lambda-events/src/event/sqs/mod.rs | 90 +++++++++++++++++++ .../fixtures/example-sqs-api-event-obj.json | 10 +++ 2 files changed, 100 insertions(+) create mode 100644 lambda-events/src/fixtures/example-sqs-api-event-obj.json diff --git a/lambda-events/src/event/sqs/mod.rs b/lambda-events/src/event/sqs/mod.rs index af4d3f21..5c10a428 100644 --- a/lambda-events/src/event/sqs/mod.rs +++ b/lambda-events/src/event/sqs/mod.rs @@ -112,6 +112,74 @@ pub struct BatchItemFailure { pub item_identifier: String, } +/// The Event sent to Lambda from the SQS API. Contains 1 or more individual SQS Messages +#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)] +#[serde(rename_all = "PascalCase")] +#[serde(bound(deserialize = "T: DeserializeOwned"))] +pub struct SqsApiEventObj<T: Serialize + DeserializeOwned> { + #[serde(bound(deserialize = "T: DeserializeOwned"))] + pub messages: Vec<SqsApiMessageObj<T>>, +} + +/// The Event sent to Lambda from SQS API. Contains 1 or more individual SQS Messages +#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct SqsApiEvent { + pub messages: Vec<SqsApiMessage>, +} + +/// Alternative to SqsApiEvent to be used alongside SqsApiMessageObj when you need to +/// deserialize a nested object into a struct of type T within the SQS Message rather +/// than just using the raw SQS Message string +#[serde_with::serde_as] +#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)] +#[serde(bound(deserialize = "T: DeserializeOwned"))] +#[serde(rename_all = "PascalCase")] +pub struct SqsApiMessageObj<T: Serialize + DeserializeOwned> { + /// nolint: stylecheck + #[serde(default)] + pub message_id: Option<String>, + #[serde(default)] + pub receipt_handle: Option<String>, + /// Deserialized into a `T` from nested JSON inside the SQS body string. `T` must implement the `Deserialize` or `DeserializeOwned` trait. 
+ #[serde_as(as = "serde_with::json::JsonString")] + #[serde(bound(deserialize = "T: DeserializeOwned"))] + pub body: T, + #[serde(default)] + pub md5_of_body: Option<String>, + #[serde(default)] + pub md5_of_message_attributes: Option<String>, + #[serde(deserialize_with = "deserialize_lambda_map")] + #[serde(default)] + pub attributes: HashMap<String, String>, + #[serde(deserialize_with = "deserialize_lambda_map")] + #[serde(default)] + pub message_attributes: HashMap<String, SqsMessageAttribute>, +} + +/// An individual SQS API Message, its metadata, and Message Attributes +#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)] +#[serde(rename_all = "PascalCase")] +pub struct SqsApiMessage { + /// nolint: stylecheck + #[serde(default)] + pub message_id: Option<String>, + #[serde(default)] + pub receipt_handle: Option<String>, + #[serde(default)] + pub body: Option<String>, + #[serde(default)] + pub md5_of_body: Option<String>, + #[serde(default)] + pub md5_of_message_attributes: Option<String>, + #[serde(deserialize_with = "deserialize_lambda_map")] + #[serde(default)] + pub attributes: HashMap<String, String>, + #[serde(deserialize_with = "deserialize_lambda_map")] + #[serde(default)] + pub message_attributes: HashMap<String, SqsMessageAttribute>, +} + #[cfg(test)] mod test { use super::*; @@ -159,4 +227,26 @@ mod test { let reparsed: SqsBatchResponse = serde_json::from_slice(output.as_bytes()).unwrap(); assert_eq!(parsed, reparsed); } + + #[test] + #[cfg(feature = "sqs")] + fn example_sqs_api_obj_event() { + // Example sqs api receive message response, fetched 2023-10-23, inspired from: + // https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_ReceiveMessage.html#API_ReceiveMessage_ResponseSyntax + #[derive(Serialize, Deserialize, Debug, Eq, PartialEq)] + struct CustStruct { + city: String, + country: String, + } + + let data = include_bytes!("../../fixtures/example-sqs-api-event-obj.json"); + let parsed: SqsApiEventObj<CustStruct> = serde_json::from_slice(data).unwrap(); + + assert_eq!(parsed.messages[0].body.city, "provincetown"); + assert_eq!(parsed.messages[0].body.country, "usa"); + + let output: String = serde_json::to_string(&parsed).unwrap(); + let reparsed: SqsApiEventObj<CustStruct> = serde_json::from_slice(output.as_bytes()).unwrap(); + assert_eq!(parsed, reparsed); + } } diff --git a/lambda-events/src/fixtures/example-sqs-api-event-obj.json b/lambda-events/src/fixtures/example-sqs-api-event-obj.json new file mode 100644 index 00000000..39ab67cf --- /dev/null +++ b/lambda-events/src/fixtures/example-sqs-api-event-obj.json @@ -0,0 +1,10 @@ +{ + "Messages": [ + { + "Body": "{\"country\": \"usa\", \"city\": \"provincetown\"}", + "Md5OfBody": "2b3e4f40b57e80d67ac5b9660c56d787", + "MessageId": "f663a189-97e2-41f5-9c0e-cfb595d8322c", + "ReceiptHandle": "AQEBdObBZIl7FWJiK9c3KmqKNvusy6+eqG51SLIp5Gs6lQ6+e4SI0lJ6Glw+qcOi+2RRrnfOjlsF8uDlo13TgubmtgP+CH7s+YKDdpbg2jA931vLi6qnU0ZFXcf/H8BDZ4kcz29npMu9/N2DT9F+kI9Q9pTfLsISg/7XFMvRTqAtjSfa2wI5TVcOPZBdkGqTLUoKqAYni0L7NTLzFUTjCN/HiOcvG+16zahhsTniM1MwOTSpbOO2uTZmY25V/PCfNdF1PBXtdNA9mWW2Ym6THV28ug3cuK6dXbFQBuxIGVhOq+mRVU6gKN/eZpZediiBt75oHD6ASu8jIUpJGeUWEZm6qSWU+YTivr6QoqGLwAVvI3CXOIZQ/+Wp/RJAxMQxtRIe/MOsOITcmGlFqhWnjlGQdg==" + } + ] +} From c215812f284e8d53f915518d959af493dce3c490 Mon Sep 17 00:00:00 2001 From: Martin Bartlett Date: Fri, 27 Oct 2023 18:37:38 +0200 Subject: [PATCH 23/27] Remove cfg(test) on with_stage_variables (#713) None of the other with_ methods are constrained to test-only, although this method did have a specific comment indicating test-only, so there may have been a reason. Anyway, this commit removes that constraint. 
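For illustration only (not part of the original patch), here is a minimal sketch of how a mock-testing setup could now call `with_stage_variables` outside of `#[cfg(test)]`-gated code; the variable names and values below are made up:

```rust
use std::collections::HashMap;

use lambda_http::{Body, Request, RequestExt};

fn main() {
    // Attach fake stage variables to a request, as a mock test might do.
    // "stage" / "dev" are placeholder values, not anything defined by lambda_http.
    let request = Request::new(Body::Empty).with_stage_variables(HashMap::from([(
        "stage".to_string(),
        "dev".to_string(),
    )]));

    // The variables are visible again through the regular accessor.
    assert_eq!(
        request.stage_variables_ref().and_then(|vars| vars.first("stage")),
        Some("dev")
    );
}
```
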
Co-authored-by: Martin Bartlett --- lambda-http/src/ext/extensions.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/lambda-http/src/ext/extensions.rs b/lambda-http/src/ext/extensions.rs index e002d0ea..313090c6 100644 --- a/lambda-http/src/ext/extensions.rs +++ b/lambda-http/src/ext/extensions.rs @@ -108,10 +108,9 @@ pub trait RequestExt { /// These will always be `None` for ALB triggered requests. fn stage_variables_ref(&self) -> Option<&QueryMap>; - /// Configures instance with stage variables under `#[cfg(test)]` configurations + /// Configures instance with stage variables /// /// This is intended for use in mock testing contexts. - #[cfg(test)] fn with_stage_variables<V>(self, variables: V) -> Self where V: Into<QueryMap>; @@ -216,7 +215,6 @@ impl RequestExt for http::Extensions { .and_then(|StageVariables(vars)| if vars.is_empty() { None } else { Some(vars) }) } - #[cfg(test)] fn with_stage_variables<V>(self, variables: V) -> Self where V: Into<QueryMap>, @@ -318,7 +316,6 @@ impl RequestExt for Parts { self.extensions.stage_variables_ref() } - #[cfg(test)] fn with_stage_variables<V>(self, variables: V) -> Self where V: Into<QueryMap>, @@ -420,7 +417,6 @@ impl RequestExt for http::Request { self.extensions().stage_variables_ref() } - #[cfg(test)] fn with_stage_variables<V>(self, variables: V) -> Self where V: Into<QueryMap>, From 3e195f6b0c734ae694cb1375fed57cf1b6dd01fd Mon Sep 17 00:00:00 2001 From: Harold Sun Date: Mon, 30 Oct 2023 22:58:22 +0800 Subject: [PATCH 24/27] Fixed media type suffix detection (#714) --- lambda-http/src/response.rs | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/lambda-http/src/response.rs b/lambda-http/src/response.rs index a51d1b2d..e77ec181 100644 --- a/lambda-http/src/response.rs +++ b/lambda-http/src/response.rs @@ -282,7 +282,9 @@ where } for suffix in TEXT_ENCODING_SUFFIXES { - if content_type.ends_with(suffix) { + let mut parts = content_type.trim().split(';'); + let mime_type = parts.next().unwrap_or_default(); + if mime_type.ends_with(suffix) { return convert_to_text(self, content_type); } } @@ -484,6 +486,24 @@ mod tests { ) } + #[tokio::test] + async fn charset_content_type_header_suffix() { + // Drive the implementation by using `hyper::Body` instead of + // of `aws_lambda_events::encodings::Body` + let response = Response::builder() + .header(CONTENT_TYPE, "application/graphql-response+json; charset=utf-16") + .body(HyperBody::from("000000".as_bytes())) + .expect("unable to build http::Response"); + let response = response.into_response().await; + let response = LambdaResponse::from_response(&RequestOrigin::ApiGatewayV2, response); + + let json = serde_json::to_string(&response).expect("failed to serialize to json"); + assert_eq!( + json, + r#"{"statusCode":200,"headers":{"content-type":"application/graphql-response+json; charset=utf-16"},"multiValueHeaders":{"content-type":["application/graphql-response+json; charset=utf-16"]},"body":"〰〰〰","isBase64Encoded":false,"cookies":[]}"# + ) + } + #[tokio::test] async fn content_headers_unset() { // Drive the implementation by using `hyper::Body` instead of From c565173e36891da95218482f10301394dc6b2a97 Mon Sep 17 00:00:00 2001 From: Kikuo Emoto Date: Sun, 5 Nov 2023 03:29:16 +0900 Subject: [PATCH 25/27] Fix: Missing userNotFound field in "create/verify auth challenge" Cognito user pool events (#719) * Add user_not_found to Create/Verify auth challenge events - Adds `user_not_found` field to: - `CognitoEventUserPoolsCreateAuthChallengeRequest` - 
`CognitoEventUserPoolsVerifyAuthChallengeRequest` - Adds test cases where `user_not_found` becomes `true` for: - `CognitoEventUserPoolsDefineAuthChallengeRequest` - `CognitoEventUserPoolsCreateAuthChallengeRequest` - `CognitoEventUserPoolsVerifyAuthChallengeRequest` issue awslabs/aws-lambda-rust-runtime#718 * Fix coding style with cargo fmt --- lambda-events/src/event/cognito/mod.rs | 46 +++++++++++++++++++ ...-create-auth-challenge-user-not-found.json | 41 +++++++++++++++++ ...event-userpools-create-auth-challenge.json | 3 +- ...-define-auth-challenge-user-not-found.json | 36 +++++++++++++++ ...uth-challenge-optional-answer-correct.json | 3 +- ...-verify-auth-challenge-user-not-found.json | 31 +++++++++++++ ...event-userpools-verify-auth-challenge.json | 3 +- 7 files changed, 160 insertions(+), 3 deletions(-) create mode 100644 lambda-events/src/fixtures/example-cognito-event-userpools-create-auth-challenge-user-not-found.json create mode 100644 lambda-events/src/fixtures/example-cognito-event-userpools-define-auth-challenge-user-not-found.json create mode 100644 lambda-events/src/fixtures/example-cognito-event-userpools-verify-auth-challenge-user-not-found.json diff --git a/lambda-events/src/event/cognito/mod.rs b/lambda-events/src/event/cognito/mod.rs index 49f2eebd..c07c40a4 100644 --- a/lambda-events/src/event/cognito/mod.rs +++ b/lambda-events/src/event/cognito/mod.rs @@ -343,6 +343,8 @@ pub struct CognitoEventUserPoolsCreateAuthChallengeRequest { #[serde(deserialize_with = "deserialize_lambda_map")] #[serde(default)] pub client_metadata: HashMap, + #[serde(default)] + pub user_not_found: bool, } /// `CognitoEventUserPoolsCreateAuthChallengeResponse` defines create auth challenge response parameters @@ -389,6 +391,8 @@ where #[serde(deserialize_with = "deserialize_lambda_map")] #[serde(default)] pub client_metadata: HashMap, + #[serde(default)] + pub user_not_found: bool, } /// `CognitoEventUserPoolsVerifyAuthChallengeResponse` defines verify auth challenge response parameters @@ -482,6 +486,20 @@ mod test { assert_eq!(parsed, reparsed); } + #[test] + #[cfg(feature = "cognito")] + fn example_cognito_event_userpools_create_auth_challenge_user_not_found() { + let data = + include_bytes!("../../fixtures/example-cognito-event-userpools-create-auth-challenge-user-not-found.json"); + let parsed: CognitoEventUserPoolsCreateAuthChallenge = serde_json::from_slice(data).unwrap(); + + assert!(parsed.request.user_not_found); + + let output: String = serde_json::to_string(&parsed).unwrap(); + let reparsed: CognitoEventUserPoolsCreateAuthChallenge = serde_json::from_slice(output.as_bytes()).unwrap(); + assert_eq!(parsed, reparsed); + } + #[test] #[cfg(feature = "cognito")] fn example_cognito_event_userpools_custommessage() { @@ -518,6 +536,20 @@ mod test { assert_eq!(parsed, reparsed); } + #[test] + #[cfg(feature = "cognito")] + fn example_cognito_event_userpools_define_auth_challenge_user_not_found() { + let data = + include_bytes!("../../fixtures/example-cognito-event-userpools-define-auth-challenge-user-not-found.json"); + let parsed: CognitoEventUserPoolsDefineAuthChallenge = serde_json::from_slice(data).unwrap(); + + assert!(parsed.request.user_not_found); + + let output: String = serde_json::to_string(&parsed).unwrap(); + let reparsed: CognitoEventUserPoolsDefineAuthChallenge = serde_json::from_slice(output.as_bytes()).unwrap(); + assert_eq!(parsed, reparsed); + } + #[test] #[cfg(feature = "cognito")] fn example_cognito_event_userpools_migrateuser() { @@ -612,4 +644,18 @@ mod test { let 
reparsed: CognitoEventUserPoolsVerifyAuthChallenge = serde_json::from_slice(output.as_bytes()).unwrap(); assert_eq!(parsed, reparsed); } + + #[test] + #[cfg(feature = "cognito")] + fn example_cognito_event_userpools_verify_auth_challenge_user_not_found() { + let data = + include_bytes!("../../fixtures/example-cognito-event-userpools-verify-auth-challenge-user-not-found.json"); + let parsed: CognitoEventUserPoolsVerifyAuthChallenge = serde_json::from_slice(data).unwrap(); + + assert!(parsed.request.user_not_found); + + let output: String = serde_json::to_string(&parsed).unwrap(); + let reparsed: CognitoEventUserPoolsVerifyAuthChallenge = serde_json::from_slice(output.as_bytes()).unwrap(); + assert_eq!(parsed, reparsed); + } } diff --git a/lambda-events/src/fixtures/example-cognito-event-userpools-create-auth-challenge-user-not-found.json b/lambda-events/src/fixtures/example-cognito-event-userpools-create-auth-challenge-user-not-found.json new file mode 100644 index 00000000..40ce2a2b --- /dev/null +++ b/lambda-events/src/fixtures/example-cognito-event-userpools-create-auth-challenge-user-not-found.json @@ -0,0 +1,41 @@ +{ + "version": "1", + "region": "us-west-2", + "userPoolId": "", + "userName": "", + "callerContext": { + "awsSdkVersion": "aws-sdk-unknown-unknown", + "clientId": "" + }, + "triggerSource": "CreateAuthChallenge_Authentication", + "request": { + "userAttributes": { + "sub": "", + "cognito:user_status": "CONFIRMED", + "phone_number_verified": "true", + "cognito:phone_number_alias": "+12223334455", + "phone_number": "+12223334455" + }, + "challengeName": "CUSTOM_CHALLENGE", + "session": [ + { + "challengeName": "PASSWORD_VERIFIER", + "challengeResult": true, + "challengeMetadata": "metadata" + } + ], + "clientMetadata": { + "exampleMetadataKey": "example metadata value" + }, + "userNotFound": true + }, + "response": { + "publicChallengeParameters": { + "a": "b" + }, + "privateChallengeParameters": { + "c": "d" + }, + "challengeMetadata": "challengeMetadata" + } +} diff --git a/lambda-events/src/fixtures/example-cognito-event-userpools-create-auth-challenge.json b/lambda-events/src/fixtures/example-cognito-event-userpools-create-auth-challenge.json index 99acf0a2..2d0f2a83 100644 --- a/lambda-events/src/fixtures/example-cognito-event-userpools-create-auth-challenge.json +++ b/lambda-events/src/fixtures/example-cognito-event-userpools-create-auth-challenge.json @@ -26,7 +26,8 @@ ], "clientMetadata": { "exampleMetadataKey": "example metadata value" - } + }, + "userNotFound": false }, "response": { "publicChallengeParameters": { diff --git a/lambda-events/src/fixtures/example-cognito-event-userpools-define-auth-challenge-user-not-found.json b/lambda-events/src/fixtures/example-cognito-event-userpools-define-auth-challenge-user-not-found.json new file mode 100644 index 00000000..1ad40e2a --- /dev/null +++ b/lambda-events/src/fixtures/example-cognito-event-userpools-define-auth-challenge-user-not-found.json @@ -0,0 +1,36 @@ +{ + "version": "1", + "region": "us-west-2", + "userPoolId": "", + "userName": "", + "callerContext": { + "awsSdkVersion": "aws-sdk-unknown-unknown", + "clientId": "" + }, + "triggerSource": "DefineAuthChallenge_Authentication", + "request": { + "userAttributes": { + "sub": "", + "cognito:user_status": "CONFIRMED", + "phone_number_verified": "true", + "cognito:phone_number_alias": "+12223334455", + "phone_number": "+12223334455" + }, + "session": [ + { + "challengeName": "PASSWORD_VERIFIER", + "challengeResult": true, + "challengeMetadata": "metadata" + } + ], + 
"clientMetadata": { + "exampleMetadataKey": "example metadata value" + }, + "userNotFound": true + }, + "response": { + "challengeName": "challengeName", + "issueTokens": true, + "failAuthentication": true + } +} diff --git a/lambda-events/src/fixtures/example-cognito-event-userpools-verify-auth-challenge-optional-answer-correct.json b/lambda-events/src/fixtures/example-cognito-event-userpools-verify-auth-challenge-optional-answer-correct.json index 70a973f4..f6f7ca09 100644 --- a/lambda-events/src/fixtures/example-cognito-event-userpools-verify-auth-challenge-optional-answer-correct.json +++ b/lambda-events/src/fixtures/example-cognito-event-userpools-verify-auth-challenge-optional-answer-correct.json @@ -22,7 +22,8 @@ "challengeAnswer": "123xxxx", "clientMetadata": { "exampleMetadataKey": "example metadata value" - } + }, + "userNotFound": false }, "response": { } diff --git a/lambda-events/src/fixtures/example-cognito-event-userpools-verify-auth-challenge-user-not-found.json b/lambda-events/src/fixtures/example-cognito-event-userpools-verify-auth-challenge-user-not-found.json new file mode 100644 index 00000000..a5068eaa --- /dev/null +++ b/lambda-events/src/fixtures/example-cognito-event-userpools-verify-auth-challenge-user-not-found.json @@ -0,0 +1,31 @@ +{ + "version": "1", + "region": "us-west-2", + "userPoolId": "", + "userName": "", + "callerContext": { + "awsSdkVersion": "aws-sdk-unknown-unknown", + "clientId": "" + }, + "triggerSource": "VerifyAuthChallengeResponse_Authentication", + "request": { + "userAttributes": { + "sub": "", + "cognito:user_status": "CONFIRMED", + "phone_number_verified": "true", + "cognito:phone_number_alias": "+12223334455", + "phone_number": "+12223334455" + }, + "privateChallengeParameters": { + "secret": "11122233" + }, + "challengeAnswer": "123xxxx", + "clientMetadata": { + "exampleMetadataKey": "example metadata value" + }, + "userNotFound": true + }, + "response": { + "answerCorrect": true + } +} diff --git a/lambda-events/src/fixtures/example-cognito-event-userpools-verify-auth-challenge.json b/lambda-events/src/fixtures/example-cognito-event-userpools-verify-auth-challenge.json index b1d88fee..6bff9974 100644 --- a/lambda-events/src/fixtures/example-cognito-event-userpools-verify-auth-challenge.json +++ b/lambda-events/src/fixtures/example-cognito-event-userpools-verify-auth-challenge.json @@ -22,7 +22,8 @@ "challengeAnswer": "123xxxx", "clientMetadata": { "exampleMetadataKey": "example metadata value" - } + }, + "userNotFound": false }, "response": { "answerCorrect": true From 2a82ba7b842b9ffbda14fbda4bfbf3aa4a27a157 Mon Sep 17 00:00:00 2001 From: Maxime David Date: Tue, 7 Nov 2023 16:44:07 -0500 Subject: [PATCH 26/27] Add an advanced SQS multiple functions with shared data example (#720) * add multi functions example * update Readme * pr comment * pr comments * run clippy --- .../Cargo.toml | 13 ++++ .../README.md | 28 +++++++++ .../consumer/Cargo.toml | 22 +++++++ .../consumer/src/main.rs | 24 ++++++++ .../pizza_lib/Cargo.toml | 7 +++ .../pizza_lib/src/lib.rs | 7 +++ .../producer/Cargo.toml | 25 ++++++++ .../producer/src/main.rs | 61 +++++++++++++++++++ 8 files changed, 187 insertions(+) create mode 100644 examples/advanced-sqs-multiple-functions-shared-data/Cargo.toml create mode 100644 examples/advanced-sqs-multiple-functions-shared-data/README.md create mode 100644 examples/advanced-sqs-multiple-functions-shared-data/consumer/Cargo.toml create mode 100644 examples/advanced-sqs-multiple-functions-shared-data/consumer/src/main.rs create mode 
100644 examples/advanced-sqs-multiple-functions-shared-data/pizza_lib/Cargo.toml create mode 100644 examples/advanced-sqs-multiple-functions-shared-data/pizza_lib/src/lib.rs create mode 100644 examples/advanced-sqs-multiple-functions-shared-data/producer/Cargo.toml create mode 100644 examples/advanced-sqs-multiple-functions-shared-data/producer/src/main.rs diff --git a/examples/advanced-sqs-multiple-functions-shared-data/Cargo.toml b/examples/advanced-sqs-multiple-functions-shared-data/Cargo.toml new file mode 100644 index 00000000..116ab8ef --- /dev/null +++ b/examples/advanced-sqs-multiple-functions-shared-data/Cargo.toml @@ -0,0 +1,13 @@ +[workspace] + +members = [ + "producer", + "consumer", + "pizza_lib", +] + +[profile.release] +opt-level = 'z' +lto = true +codegen-units = 1 +panic = 'abort' \ No newline at end of file diff --git a/examples/advanced-sqs-multiple-functions-shared-data/README.md b/examples/advanced-sqs-multiple-functions-shared-data/README.md new file mode 100644 index 00000000..83136b9b --- /dev/null +++ b/examples/advanced-sqs-multiple-functions-shared-data/README.md @@ -0,0 +1,28 @@ +# AWS Lambda Function example + +## Build & Deploy + +1. Install [cargo-lambda](https://github.com/cargo-lambda/cargo-lambda#installation) +2. Build the function with `cargo lambda build --release` +3. Make sure to edit the QUEUE_URL env variable in producer/Cargo.toml +4. Deploy both functions to AWS Lambda with + +`cargo lambda deploy consumer --iam-role YOUR_ROLE` + +`cargo lambda deploy producer --iam-role YOUR_ROLE` + +## Build for ARM 64 + +Build the function with `cargo lambda build --release --arm64` + +## Add the SQS trigger to the consumer function + +You can use aws-cli to create an event source mapping: + +```bash +aws lambda create-event-source-mapping \ +--function-name consumer \ +--region <region> \ +--event-source-arn <queue-arn> \ +--batch-size 1 +``` \ No newline at end of file diff --git a/examples/advanced-sqs-multiple-functions-shared-data/consumer/Cargo.toml b/examples/advanced-sqs-multiple-functions-shared-data/consumer/Cargo.toml new file mode 100644 index 00000000..8555a073 --- /dev/null +++ b/examples/advanced-sqs-multiple-functions-shared-data/consumer/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "consumer" +version = "0.1.0" +edition = "2021" + + +[dependencies] +#tracing +tracing = "0.1.40" +tracing-subscriber = "0.3.17" + +#aws dependencies +aws-sdk-config = "0.35.0" +aws-sdk-sqs = "0.35.0" +aws_lambda_events = { version = "0.11.1", features = ["sqs"], default-features = false } + +#lambda runtime +lambda_runtime = "0.8.1" +tokio = { version = "1", features = ["macros"] } + +#shared lib +pizza_lib = { path = "../pizza_lib" } diff --git a/examples/advanced-sqs-multiple-functions-shared-data/consumer/src/main.rs b/examples/advanced-sqs-multiple-functions-shared-data/consumer/src/main.rs new file mode 100644 index 00000000..42290192 --- /dev/null +++ b/examples/advanced-sqs-multiple-functions-shared-data/consumer/src/main.rs @@ -0,0 +1,24 @@ +use aws_lambda_events::event::sqs::SqsEventObj; +use lambda_runtime::{service_fn, Error, LambdaEvent}; +use pizza_lib::Pizza; + +#[tokio::main] +async fn main() -> Result<(), Error> { + tracing_subscriber::fmt() + .with_max_level(tracing::Level::INFO) + .with_target(false) + .with_ansi(false) + .without_time() + .init(); + let func = service_fn(func); + lambda_runtime::run(func).await?; + Ok(()) +} + +async fn func(event: LambdaEvent<SqsEventObj<Pizza>>) -> Result<(), Error> { + for record in event.payload.records.iter() { + let pizza = 
&record.body; + println!("Pizza name: {} with toppings: {:?}", pizza.name, pizza.toppings); + } + Ok(()) +} diff --git a/examples/advanced-sqs-multiple-functions-shared-data/pizza_lib/Cargo.toml b/examples/advanced-sqs-multiple-functions-shared-data/pizza_lib/Cargo.toml new file mode 100644 index 00000000..76631bbd --- /dev/null +++ b/examples/advanced-sqs-multiple-functions-shared-data/pizza_lib/Cargo.toml @@ -0,0 +1,7 @@ +[package] +name = "pizza_lib" +version = "0.1.0" +edition = "2021" + +[dependencies] +serde = { version = "1.0.191", features = ["derive"] } diff --git a/examples/advanced-sqs-multiple-functions-shared-data/pizza_lib/src/lib.rs b/examples/advanced-sqs-multiple-functions-shared-data/pizza_lib/src/lib.rs new file mode 100644 index 00000000..638fa762 --- /dev/null +++ b/examples/advanced-sqs-multiple-functions-shared-data/pizza_lib/src/lib.rs @@ -0,0 +1,7 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize)] +pub struct Pizza { + pub name: String, + pub toppings: Vec<String>, +} diff --git a/examples/advanced-sqs-multiple-functions-shared-data/producer/Cargo.toml b/examples/advanced-sqs-multiple-functions-shared-data/producer/Cargo.toml new file mode 100644 index 00000000..557ac6e5 --- /dev/null +++ b/examples/advanced-sqs-multiple-functions-shared-data/producer/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "producer" +version = "0.1.0" +edition = "2021" + +[package.metadata.lambda.deploy] +env = { "QUEUE_URL" = "https://changeMe" } + +[dependencies] +#tracing +tracing = "0.1.40" +tracing-subscriber = "0.3.17" + +#aws dependencies
+aws-config = "0.57.1" +aws-sdk-config = "0.35.0" +aws-sdk-sqs = "0.35.0" + +#lambda runtime +lambda_runtime = "0.8.1" +serde_json = "1.0.108" +tokio = { version = "1", features = ["macros"] } + +#shared lib +pizza_lib = { path = "../pizza_lib" } \ No newline at end of file diff --git a/examples/advanced-sqs-multiple-functions-shared-data/producer/src/main.rs b/examples/advanced-sqs-multiple-functions-shared-data/producer/src/main.rs new file mode 100644 index 00000000..2cc2541b --- /dev/null +++ b/examples/advanced-sqs-multiple-functions-shared-data/producer/src/main.rs @@ -0,0 +1,61 @@ +use lambda_runtime::{service_fn, Error, LambdaEvent}; +use pizza_lib::Pizza; +use serde_json::{json, Value}; + +struct SQSManager { + client: aws_sdk_sqs::Client, + queue_url: String, +} + +impl SQSManager { + fn new(client: aws_sdk_sqs::Client, queue_url: String) -> Self { + Self { client, queue_url } + } +} + +#[tokio::main] +async fn main() -> Result<(), Error> { + tracing_subscriber::fmt() + .with_max_level(tracing::Level::INFO) + .with_target(false) + .with_ansi(false) + .without_time() + .init(); + + // read the queue url from the environment + let queue_url = std::env::var("QUEUE_URL").expect("could not read QUEUE_URL"); + // build the config from environment variables (fed by AWS Lambda) + let config = aws_config::from_env().load().await; + // create our SQS Manager + let sqs_manager = SQSManager::new(aws_sdk_sqs::Client::new(&config), queue_url); + let sqs_manager_ref = &sqs_manager; + + // no need to create a SQS Client for each incoming request, let's use a shared state + let handler_func_closure = |event: LambdaEvent<Value>| async move { + process_event(event, sqs_manager_ref).await + }; + lambda_runtime::run(service_fn(handler_func_closure)).await?; + Ok(()) +} + +async fn process_event(_: LambdaEvent<Value>, sqs_manager: &SQSManager) -> Result<(), Error> { + // let's create our pizza + let message = Pizza { + name: "margherita".to_string(), + 
toppings: vec![ + "San Marzano Tomatoes".to_string(), + "Fresh Mozzarella".to_string(), + "Basil".to_string(), + ], + }; + // send our message to SQS + sqs_manager + .client + .send_message() + .queue_url(&sqs_manager.queue_url) + .message_body(json!(message).to_string()) + .send() + .await?; + + Ok(()) +} From 39f770ba357c6139d3be7b1eceaf426be5f5b547 Mon Sep 17 00:00:00 2001 From: David Calavera Date: Tue, 7 Nov 2023 18:19:11 -0800 Subject: [PATCH 27/27] New runtime, http, and events release. (#721) Signed-off-by: David Calavera --- lambda-events/Cargo.toml | 2 +- lambda-http/Cargo.toml | 4 ++-- lambda-runtime/Cargo.toml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/lambda-events/Cargo.toml b/lambda-events/Cargo.toml index c58ec475..bb7f115e 100644 --- a/lambda-events/Cargo.toml +++ b/lambda-events/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "aws_lambda_events" -version = "0.11.1" +version = "0.12.0" description = "AWS Lambda event definitions" authors = [ "Christian Legnitto ", diff --git a/lambda-http/Cargo.toml b/lambda-http/Cargo.toml index ea4a5fba..c8caec8d 100644 --- a/lambda-http/Cargo.toml +++ b/lambda-http/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lambda_http" -version = "0.8.1" +version = "0.8.2" authors = [ "David Calavera ", "Harold Sun ", @@ -41,7 +41,7 @@ percent-encoding = "2.2" [dependencies.aws_lambda_events] path = "../lambda-events" -version = "0.11.0" +version = "0.12.0" default-features = false features = ["alb", "apigw"] diff --git a/lambda-runtime/Cargo.toml b/lambda-runtime/Cargo.toml index 9202b1c1..d16eaedd 100644 --- a/lambda-runtime/Cargo.toml +++ b/lambda-runtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lambda_runtime" -version = "0.8.2" +version = "0.8.3" authors = [ "David Calavera ", "Harold Sun ",