在Lambda函数中提高内存大小和设置适当的超时时间,以便更快地处理S3文件,并使用 DynamoDB 的 batch_writer 批量写入数据,减少逐条写入对 Lambda 执行时间的占用。示例代码如下:
import json
import logging
import time
import urllib.parse

import boto3
import requests
s3 = boto3.client('s3')
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('my_dynamodb_table')
def lambda_handler(event, context):
bucket_name = event['Records'][0]['s3']['bucket']['name']
key = urllib.parse.unquote_plus(event['Records'][0]['s3']['object']['key'])
response = s3.get_object(Bucket=bucket_name, Key=key)
data = json.loads(response['Body'].read().decode())
with table.batch_writer(overwrite_by_pkeys=['id']) as batch:
for item in data:
batch.put_item(
Item={
'id': item['id'],
'name': item['name'],
'email': item['email'],
}
)
return {'statusCode': 200, 'body': json.dumps('Data inserted successfully')}