// =================================================================
//
// * WARNING *
//
// This file is generated!
//
// Changes made to this file will be overwritten. If changes are
// required to the generated code, the service_crategen project
// must be updated to generate the changes.
//
// =================================================================
use std::error::Error;
use std::fmt;
use async_trait::async_trait;
use rusoto_core::credential::ProvideAwsCredentials;
use rusoto_core::region;
use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
use rusoto_core::{Client, RusotoError};
use rusoto_core::param::{Params, ServiceParams};
use rusoto_core::proto::xml::error::*;
use rusoto_core::proto::xml::util::{
self as xml_util, deserialize_elements, find_start_element, skip_tree, write_characters_element,
};
use rusoto_core::proto::xml::util::{Next, Peek, XmlParseError, XmlResponse};
use rusoto_core::request::HttpResponse;
use rusoto_core::signature::SignedRequest;
#[cfg(feature = "deserialize_structs")]
use serde::Deserialize;
#[cfg(feature = "serialize_structs")]
use serde::Serialize;
use std::io::Write;
use std::str::FromStr;
use xml;
use xml::EventReader;
use xml::EventWriter;
impl S3Client {
async fn sign_and_dispatch(
&self,
request: SignedRequest,
from_response: fn(BufferedHttpResponse) -> RusotoError,
) -> Result> {
let mut response = self.client.sign_and_dispatch(request).await?;
if !response.status.is_success() {
let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
return Err(from_response(response));
}
Ok(response)
}
}
use rusoto_core::event_stream::{DeserializeEvent, EventStream};
/// Specifies the days since the initiation of an incomplete multipart upload that Amazon S3 will wait before permanently removing all parts of the upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Policy in the Amazon S3 User Guide.
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "serialize_structs", derive(Serialize))]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct AbortIncompleteMultipartUpload {
/// Specifies the number of days after which Amazon S3 aborts an incomplete multipart upload.
pub days_after_initiation: Option,
}
#[allow(dead_code)]
struct AbortIncompleteMultipartUploadDeserializer;
impl AbortIncompleteMultipartUploadDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(
tag_name: &str,
stack: &mut T,
) -> Result {
deserialize_elements::<_, AbortIncompleteMultipartUpload, _>(
tag_name,
stack,
|name, stack, obj| {
match name {
"DaysAfterInitiation" => {
obj.days_after_initiation =
Some(DaysAfterInitiationDeserializer::deserialize(
"DaysAfterInitiation",
stack,
)?);
}
_ => skip_tree(stack),
}
Ok(())
},
)
}
}
pub struct AbortIncompleteMultipartUploadSerializer;
impl AbortIncompleteMultipartUploadSerializer {
#[allow(unused_variables, warnings)]
pub fn serialize(
mut writer: &mut EventWriter,
name: &str,
obj: &AbortIncompleteMultipartUpload,
) -> Result<(), xml::writer::Error>
where
W: Write,
{
writer.write(xml::writer::XmlEvent::start_element(name))?;
if let Some(ref value) = obj.days_after_initiation {
write_characters_element(writer, "DaysAfterInitiation", &value.to_string())?;
}
writer.write(xml::writer::XmlEvent::end_element())
}
}
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "serialize_structs", derive(Serialize))]
pub struct AbortMultipartUploadOutput {
pub request_charged: Option,
}
#[allow(dead_code)]
struct AbortMultipartUploadOutputDeserializer;
impl AbortMultipartUploadOutputDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(
tag_name: &str,
stack: &mut T,
) -> Result {
xml_util::start_element(tag_name, stack)?;
let obj = AbortMultipartUploadOutput::default();
xml_util::end_element(tag_name, stack)?;
Ok(obj)
}
}
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct AbortMultipartUploadRequest {
    /// The bucket name to which the upload was taking place.
    ///
    /// When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
    ///
    /// When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
    pub bucket: String,
    /// The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.
    pub expected_bucket_owner: Option<String>,
    /// Key of the object for which the multipart upload was initiated.
    pub key: String,
    pub request_payer: Option<String>,
    /// Upload ID that identifies the multipart upload.
    pub upload_id: String,
}
/// Configures the transfer acceleration state for an Amazon S3 bucket. For more information, see Amazon S3 Transfer Acceleration in the Amazon S3 User Guide.
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct AccelerateConfiguration {
/// Specifies the transfer acceleration status of the bucket.
pub status: Option,
}
pub struct AccelerateConfigurationSerializer;
impl AccelerateConfigurationSerializer {
#[allow(unused_variables, warnings)]
pub fn serialize(
mut writer: &mut EventWriter,
name: &str,
obj: &AccelerateConfiguration,
) -> Result<(), xml::writer::Error>
where
W: Write,
{
writer.write(xml::writer::XmlEvent::start_element(name))?;
if let Some(ref value) = obj.status {
write_characters_element(writer, "Status", &value)?;
}
writer.write(xml::writer::XmlEvent::end_element())
}
}
/// Contains the elements that set the ACL permissions for an object per grantee.
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct AccessControlPolicy {
/// A list of grants.
pub grants: Option>,
/// Container for the bucket owner's display name and ID.
pub owner: Option,
}
pub struct AccessControlPolicySerializer;
impl AccessControlPolicySerializer {
#[allow(unused_variables, warnings)]
pub fn serialize(
mut writer: &mut EventWriter,
name: &str,
obj: &AccessControlPolicy,
) -> Result<(), xml::writer::Error>
where
W: Write,
{
writer.write(xml::writer::XmlEvent::start_element(name))?;
if let Some(ref value) = obj.grants {
&GrantsSerializer::serialize(&mut writer, "AccessControlList", value)?;
}
if let Some(ref value) = obj.owner {
&OwnerSerializer::serialize(&mut writer, "Owner", value)?;
}
writer.write(xml::writer::XmlEvent::end_element())
}
}
/// A container for information about access control for replicas.
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "serialize_structs", derive(Serialize))]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct AccessControlTranslation {
/// Specifies the replica ownership. For default and valid values, see PUT bucket replication in the Amazon S3 API Reference.
pub owner: String,
}
#[allow(dead_code)]
struct AccessControlTranslationDeserializer;
impl AccessControlTranslationDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(
tag_name: &str,
stack: &mut T,
) -> Result {
deserialize_elements::<_, AccessControlTranslation, _>(
tag_name,
stack,
|name, stack, obj| {
match name {
"Owner" => {
obj.owner = OwnerOverrideDeserializer::deserialize("Owner", stack)?;
}
_ => skip_tree(stack),
}
Ok(())
},
)
}
}
pub struct AccessControlTranslationSerializer;
impl AccessControlTranslationSerializer {
#[allow(unused_variables, warnings)]
pub fn serialize(
mut writer: &mut EventWriter,
name: &str,
obj: &AccessControlTranslation,
) -> Result<(), xml::writer::Error>
where
W: Write,
{
writer.write(xml::writer::XmlEvent::start_element(name))?;
write_characters_element(writer, "Owner", &obj.owner)?;
writer.write(xml::writer::XmlEvent::end_element())
}
}
#[allow(dead_code)]
struct AccountIdDeserializer;
impl AccountIdDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(tag_name: &str, stack: &mut T) -> Result {
xml_util::deserialize_primitive(tag_name, stack, Ok)
}
}
pub struct AccountIdSerializer;
impl AccountIdSerializer {
#[allow(unused_variables, warnings)]
pub fn serialize(
mut writer: &mut EventWriter,
name: &str,
obj: &String,
) -> Result<(), xml::writer::Error>
where
W: Write,
{
write_characters_element(writer, name, obj)
}
}
pub struct AllowQuotedRecordDelimiterSerializer;
impl AllowQuotedRecordDelimiterSerializer {
    /// Serializes a boolean flag as a character element ("true"/"false").
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &bool,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        write_characters_element(writer, name, &obj.to_string())
    }
}
#[allow(dead_code)]
struct AllowedHeaderDeserializer;
impl AllowedHeaderDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(tag_name: &str, stack: &mut T) -> Result {
xml_util::deserialize_primitive(tag_name, stack, Ok)
}
}
pub struct AllowedHeaderSerializer;
impl AllowedHeaderSerializer {
#[allow(unused_variables, warnings)]
pub fn serialize(
mut writer: &mut EventWriter,
name: &str,
obj: &String,
) -> Result<(), xml::writer::Error>
where
W: Write,
{
write_characters_element(writer, name, obj)
}
}
#[allow(dead_code)]
struct AllowedHeadersDeserializer;
impl AllowedHeadersDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(
tag_name: &str,
stack: &mut T,
) -> Result, XmlParseError> {
let mut obj = vec![];
loop {
let consume_next_tag = match stack.peek() {
Some(&Ok(xml::reader::XmlEvent::StartElement { ref name, .. })) => {
name.local_name == tag_name
}
_ => false,
};
if consume_next_tag {
obj.push(AllowedHeaderDeserializer::deserialize(tag_name, stack)?);
} else {
break;
}
}
Ok(obj)
}
}
pub struct AllowedHeadersSerializer;
impl AllowedHeadersSerializer {
#[allow(unused_variables, warnings)]
pub fn serialize(
mut writer: &mut EventWriter,
name: &str,
obj: &Vec,
) -> Result<(), xml::writer::Error>
where
W: Write,
{
for element in obj {
AllowedHeaderSerializer::serialize(writer, name, element)?;
}
Ok(())
}
}
#[allow(dead_code)]
struct AllowedMethodDeserializer;
impl AllowedMethodDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(tag_name: &str, stack: &mut T) -> Result {
xml_util::deserialize_primitive(tag_name, stack, Ok)
}
}
pub struct AllowedMethodSerializer;
impl AllowedMethodSerializer {
#[allow(unused_variables, warnings)]
pub fn serialize(
mut writer: &mut EventWriter,
name: &str,
obj: &String,
) -> Result<(), xml::writer::Error>
where
W: Write,
{
write_characters_element(writer, name, obj)
}
}
#[allow(dead_code)]
struct AllowedMethodsDeserializer;
impl AllowedMethodsDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(
tag_name: &str,
stack: &mut T,
) -> Result, XmlParseError> {
let mut obj = vec![];
loop {
let consume_next_tag = match stack.peek() {
Some(&Ok(xml::reader::XmlEvent::StartElement { ref name, .. })) => {
name.local_name == tag_name
}
_ => false,
};
if consume_next_tag {
obj.push(AllowedMethodDeserializer::deserialize(tag_name, stack)?);
} else {
break;
}
}
Ok(obj)
}
}
pub struct AllowedMethodsSerializer;
impl AllowedMethodsSerializer {
#[allow(unused_variables, warnings)]
pub fn serialize(
mut writer: &mut EventWriter,
name: &str,
obj: &Vec,
) -> Result<(), xml::writer::Error>
where
W: Write,
{
for element in obj {
AllowedMethodSerializer::serialize(writer, name, element)?;
}
Ok(())
}
}
#[allow(dead_code)]
struct AllowedOriginDeserializer;
impl AllowedOriginDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(tag_name: &str, stack: &mut T) -> Result {
xml_util::deserialize_primitive(tag_name, stack, Ok)
}
}
pub struct AllowedOriginSerializer;
impl AllowedOriginSerializer {
#[allow(unused_variables, warnings)]
pub fn serialize(
mut writer: &mut EventWriter,
name: &str,
obj: &String,
) -> Result<(), xml::writer::Error>
where
W: Write,
{
write_characters_element(writer, name, obj)
}
}
#[allow(dead_code)]
struct AllowedOriginsDeserializer;
impl AllowedOriginsDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(
tag_name: &str,
stack: &mut T,
) -> Result, XmlParseError> {
let mut obj = vec![];
loop {
let consume_next_tag = match stack.peek() {
Some(&Ok(xml::reader::XmlEvent::StartElement { ref name, .. })) => {
name.local_name == tag_name
}
_ => false,
};
if consume_next_tag {
obj.push(AllowedOriginDeserializer::deserialize(tag_name, stack)?);
} else {
break;
}
}
Ok(obj)
}
}
pub struct AllowedOriginsSerializer;
impl AllowedOriginsSerializer {
#[allow(unused_variables, warnings)]
pub fn serialize(
mut writer: &mut EventWriter,
name: &str,
obj: &Vec,
) -> Result<(), xml::writer::Error>
where
W: Write,
{
for element in obj {
AllowedOriginSerializer::serialize(writer, name, element)?;
}
Ok(())
}
}
/// A conjunction (logical AND) of predicates, which is used in evaluating a metrics filter. The operator must have at least two predicates in any combination, and an object must match all of the predicates for the filter to apply.
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "serialize_structs", derive(Serialize))]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct AnalyticsAndOperator {
/// The prefix to use when evaluating an AND predicate: The prefix that an object must have to be included in the metrics results.
pub prefix: Option,
/// The list of tags to use when evaluating an AND predicate.
pub tags: Option>,
}
#[allow(dead_code)]
struct AnalyticsAndOperatorDeserializer;
impl AnalyticsAndOperatorDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(
tag_name: &str,
stack: &mut T,
) -> Result {
deserialize_elements::<_, AnalyticsAndOperator, _>(tag_name, stack, |name, stack, obj| {
match name {
"Prefix" => {
obj.prefix = Some(PrefixDeserializer::deserialize("Prefix", stack)?);
}
"Tag" => {
obj.tags
.get_or_insert(vec![])
.extend(TagSetDeserializer::deserialize("Tag", stack)?);
}
_ => skip_tree(stack),
}
Ok(())
})
}
}
pub struct AnalyticsAndOperatorSerializer;
impl AnalyticsAndOperatorSerializer {
#[allow(unused_variables, warnings)]
pub fn serialize(
mut writer: &mut EventWriter,
name: &str,
obj: &AnalyticsAndOperator,
) -> Result<(), xml::writer::Error>
where
W: Write,
{
writer.write(xml::writer::XmlEvent::start_element(name))?;
if let Some(ref value) = obj.prefix {
write_characters_element(writer, "Prefix", &value)?;
}
if let Some(ref value) = obj.tags {
&TagSetSerializer::serialize(&mut writer, "Tag", value)?;
}
writer.write(xml::writer::XmlEvent::end_element())
}
}
/// Specifies the configuration and any analyses for the analytics filter of an Amazon S3 bucket.
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "serialize_structs", derive(Serialize))]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct AnalyticsConfiguration {
/// The filter used to describe a set of objects for analyses. A filter must have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). If no filter is provided, all objects will be considered in any analysis.
pub filter: Option,
/// The ID that identifies the analytics configuration.
pub id: String,
/// Contains data related to access patterns to be collected and made available to analyze the tradeoffs between different storage classes.
pub storage_class_analysis: StorageClassAnalysis,
}
#[allow(dead_code)]
struct AnalyticsConfigurationDeserializer;
impl AnalyticsConfigurationDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(
tag_name: &str,
stack: &mut T,
) -> Result {
deserialize_elements::<_, AnalyticsConfiguration, _>(tag_name, stack, |name, stack, obj| {
match name {
"Filter" => {
obj.filter = Some(AnalyticsFilterDeserializer::deserialize("Filter", stack)?);
}
"Id" => {
obj.id = AnalyticsIdDeserializer::deserialize("Id", stack)?;
}
"StorageClassAnalysis" => {
obj.storage_class_analysis = StorageClassAnalysisDeserializer::deserialize(
"StorageClassAnalysis",
stack,
)?;
}
_ => skip_tree(stack),
}
Ok(())
})
}
}
pub struct AnalyticsConfigurationSerializer;
impl AnalyticsConfigurationSerializer {
#[allow(unused_variables, warnings)]
pub fn serialize(
mut writer: &mut EventWriter,
name: &str,
obj: &AnalyticsConfiguration,
) -> Result<(), xml::writer::Error>
where
W: Write,
{
writer.write(xml::writer::XmlEvent::start_element(name))?;
if let Some(ref value) = obj.filter {
&AnalyticsFilterSerializer::serialize(&mut writer, "Filter", value)?;
}
write_characters_element(writer, "Id", &obj.id)?;
StorageClassAnalysisSerializer::serialize(
&mut writer,
"StorageClassAnalysis",
&obj.storage_class_analysis,
)?;
writer.write(xml::writer::XmlEvent::end_element())
}
}
#[allow(dead_code)]
struct AnalyticsConfigurationListDeserializer;
impl AnalyticsConfigurationListDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(
tag_name: &str,
stack: &mut T,
) -> Result, XmlParseError> {
let mut obj = vec![];
loop {
let consume_next_tag = match stack.peek() {
Some(&Ok(xml::reader::XmlEvent::StartElement { ref name, .. })) => {
name.local_name == tag_name
}
_ => false,
};
if consume_next_tag {
obj.push(AnalyticsConfigurationDeserializer::deserialize(
tag_name, stack,
)?);
} else {
break;
}
}
Ok(obj)
}
}
/// Where to publish the analytics results.
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "serialize_structs", derive(Serialize))]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct AnalyticsExportDestination {
/// A destination signifying output to an S3 bucket.
pub s3_bucket_destination: AnalyticsS3BucketDestination,
}
#[allow(dead_code)]
struct AnalyticsExportDestinationDeserializer;
impl AnalyticsExportDestinationDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(
tag_name: &str,
stack: &mut T,
) -> Result {
deserialize_elements::<_, AnalyticsExportDestination, _>(
tag_name,
stack,
|name, stack, obj| {
match name {
"S3BucketDestination" => {
obj.s3_bucket_destination =
AnalyticsS3BucketDestinationDeserializer::deserialize(
"S3BucketDestination",
stack,
)?;
}
_ => skip_tree(stack),
}
Ok(())
},
)
}
}
pub struct AnalyticsExportDestinationSerializer;
impl AnalyticsExportDestinationSerializer {
#[allow(unused_variables, warnings)]
pub fn serialize(
mut writer: &mut EventWriter,
name: &str,
obj: &AnalyticsExportDestination,
) -> Result<(), xml::writer::Error>
where
W: Write,
{
writer.write(xml::writer::XmlEvent::start_element(name))?;
AnalyticsS3BucketDestinationSerializer::serialize(
&mut writer,
"S3BucketDestination",
&obj.s3_bucket_destination,
)?;
writer.write(xml::writer::XmlEvent::end_element())
}
}
/// The filter used to describe a set of objects for analyses. A filter must have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). If no filter is provided, all objects will be considered in any analysis.
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "serialize_structs", derive(Serialize))]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct AnalyticsFilter {
/// A conjunction (logical AND) of predicates, which is used in evaluating an analytics filter. The operator must have at least two predicates.
pub and: Option,
/// The prefix to use when evaluating an analytics filter.
pub prefix: Option,
/// The tag to use when evaluating an analytics filter.
pub tag: Option,
}
#[allow(dead_code)]
struct AnalyticsFilterDeserializer;
impl AnalyticsFilterDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(
tag_name: &str,
stack: &mut T,
) -> Result {
deserialize_elements::<_, AnalyticsFilter, _>(tag_name, stack, |name, stack, obj| {
match name {
"And" => {
obj.and = Some(AnalyticsAndOperatorDeserializer::deserialize("And", stack)?);
}
"Prefix" => {
obj.prefix = Some(PrefixDeserializer::deserialize("Prefix", stack)?);
}
"Tag" => {
obj.tag = Some(TagDeserializer::deserialize("Tag", stack)?);
}
_ => skip_tree(stack),
}
Ok(())
})
}
}
pub struct AnalyticsFilterSerializer;
impl AnalyticsFilterSerializer {
#[allow(unused_variables, warnings)]
pub fn serialize(
mut writer: &mut EventWriter,
name: &str,
obj: &AnalyticsFilter,
) -> Result<(), xml::writer::Error>
where
W: Write,
{
writer.write(xml::writer::XmlEvent::start_element(name))?;
if let Some(ref value) = obj.and {
&AnalyticsAndOperatorSerializer::serialize(&mut writer, "And", value)?;
}
if let Some(ref value) = obj.prefix {
write_characters_element(writer, "Prefix", &value)?;
}
if let Some(ref value) = obj.tag {
&TagSerializer::serialize(&mut writer, "Tag", value)?;
}
writer.write(xml::writer::XmlEvent::end_element())
}
}
#[allow(dead_code)]
struct AnalyticsIdDeserializer;
impl AnalyticsIdDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(tag_name: &str, stack: &mut T) -> Result {
xml_util::deserialize_primitive(tag_name, stack, Ok)
}
}
pub struct AnalyticsIdSerializer;
impl AnalyticsIdSerializer {
#[allow(unused_variables, warnings)]
pub fn serialize(
mut writer: &mut EventWriter,
name: &str,
obj: &String,
) -> Result<(), xml::writer::Error>
where
W: Write,
{
write_characters_element(writer, name, obj)
}
}
/// Contains information about where to publish the analytics results.
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "serialize_structs", derive(Serialize))]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct AnalyticsS3BucketDestination {
/// The Amazon Resource Name (ARN) of the bucket to which data is exported.
pub bucket: String,
/// The account ID that owns the destination S3 bucket. If no account ID is provided, the owner is not validated before exporting data.
Although this value is optional, we strongly recommend that you set it to help prevent problems if the destination bucket ownership changes.
pub bucket_account_id: Option,
/// Specifies the file format used when exporting data to Amazon S3.
pub format: String,
/// The prefix to use when exporting data. The prefix is prepended to all results.
pub prefix: Option,
}
#[allow(dead_code)]
struct AnalyticsS3BucketDestinationDeserializer;
impl AnalyticsS3BucketDestinationDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(
tag_name: &str,
stack: &mut T,
) -> Result {
deserialize_elements::<_, AnalyticsS3BucketDestination, _>(
tag_name,
stack,
|name, stack, obj| {
match name {
"Bucket" => {
obj.bucket = BucketNameDeserializer::deserialize("Bucket", stack)?;
}
"BucketAccountId" => {
obj.bucket_account_id = Some(AccountIdDeserializer::deserialize(
"BucketAccountId",
stack,
)?);
}
"Format" => {
obj.format =
AnalyticsS3ExportFileFormatDeserializer::deserialize("Format", stack)?;
}
"Prefix" => {
obj.prefix = Some(PrefixDeserializer::deserialize("Prefix", stack)?);
}
_ => skip_tree(stack),
}
Ok(())
},
)
}
}
pub struct AnalyticsS3BucketDestinationSerializer;
impl AnalyticsS3BucketDestinationSerializer {
#[allow(unused_variables, warnings)]
pub fn serialize(
mut writer: &mut EventWriter,
name: &str,
obj: &AnalyticsS3BucketDestination,
) -> Result<(), xml::writer::Error>
where
W: Write,
{
writer.write(xml::writer::XmlEvent::start_element(name))?;
write_characters_element(writer, "Bucket", &obj.bucket)?;
if let Some(ref value) = obj.bucket_account_id {
write_characters_element(writer, "BucketAccountId", &value)?;
}
write_characters_element(writer, "Format", &obj.format)?;
if let Some(ref value) = obj.prefix {
write_characters_element(writer, "Prefix", &value)?;
}
writer.write(xml::writer::XmlEvent::end_element())
}
}
#[allow(dead_code)]
struct AnalyticsS3ExportFileFormatDeserializer;
impl AnalyticsS3ExportFileFormatDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(tag_name: &str, stack: &mut T) -> Result {
xml_util::deserialize_primitive(tag_name, stack, Ok)
}
}
pub struct AnalyticsS3ExportFileFormatSerializer;
impl AnalyticsS3ExportFileFormatSerializer {
#[allow(unused_variables, warnings)]
pub fn serialize(
mut writer: &mut EventWriter,
name: &str,
obj: &String,
) -> Result<(), xml::writer::Error>
where
W: Write,
{
write_characters_element(writer, name, obj)
}
}
pub type StreamingBody = ::rusoto_core::ByteStream;
#[allow(dead_code)]
struct BodyDeserializer;
impl BodyDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(
tag_name: &str,
stack: &mut T,
) -> Result {
xml_util::deserialize_primitive(tag_name, stack, |s| Ok(s.into()))
}
}
pub struct BodySerializer;
impl BodySerializer {
#[allow(unused_variables, warnings)]
pub fn serialize(
mut writer: &mut EventWriter,
name: &str,
obj: &bytes::Bytes,
) -> Result<(), xml::writer::Error>
where
W: Write,
{
write_characters_element(
writer,
name,
std::str::from_utf8(obj).expect("Not a UTF-8 string"),
)
}
}
/// In terms of implementation, a Bucket is a resource. An Amazon S3 bucket name is globally unique, and the namespace is shared by all AWS accounts.
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "serialize_structs", derive(Serialize))]
pub struct Bucket {
/// Date the bucket was created. This date can change when making changes to your bucket, such as editing its bucket policy.
pub creation_date: Option,
/// The name of the bucket.
pub name: Option,
}
#[allow(dead_code)]
struct BucketDeserializer;
impl BucketDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(tag_name: &str, stack: &mut T) -> Result {
deserialize_elements::<_, Bucket, _>(tag_name, stack, |name, stack, obj| {
match name {
"CreationDate" => {
obj.creation_date = Some(CreationDateDeserializer::deserialize(
"CreationDate",
stack,
)?);
}
"Name" => {
obj.name = Some(BucketNameDeserializer::deserialize("Name", stack)?);
}
_ => skip_tree(stack),
}
Ok(())
})
}
}
#[allow(dead_code)]
struct BucketAccelerateStatusDeserializer;
impl BucketAccelerateStatusDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(tag_name: &str, stack: &mut T) -> Result {
xml_util::deserialize_primitive(tag_name, stack, Ok)
}
}
pub struct BucketAccelerateStatusSerializer;
impl BucketAccelerateStatusSerializer {
#[allow(unused_variables, warnings)]
pub fn serialize(
mut writer: &mut EventWriter,
name: &str,
obj: &String,
) -> Result<(), xml::writer::Error>
where
W: Write,
{
write_characters_element(writer, name, obj)
}
}
#[allow(dead_code)]
struct BucketKeyEnabledDeserializer;
impl BucketKeyEnabledDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(tag_name: &str, stack: &mut T) -> Result {
xml_util::deserialize_primitive(tag_name, stack, |s| Ok(bool::from_str(&s).unwrap()))
}
}
pub struct BucketKeyEnabledSerializer;
impl BucketKeyEnabledSerializer {
#[allow(unused_variables, warnings)]
pub fn serialize(
mut writer: &mut EventWriter,
name: &str,
obj: &bool,
) -> Result<(), xml::writer::Error>
where
W: Write,
{
write_characters_element(writer, name, &obj.to_string())
}
}
/// Specifies the lifecycle configuration for objects in an Amazon S3 bucket. For more information, see Object Lifecycle Management in the Amazon S3 User Guide.
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct BucketLifecycleConfiguration {
/// A lifecycle rule for individual objects in an Amazon S3 bucket.
pub rules: Vec,
}
pub struct BucketLifecycleConfigurationSerializer;
impl BucketLifecycleConfigurationSerializer {
#[allow(unused_variables, warnings)]
pub fn serialize(
mut writer: &mut EventWriter,
name: &str,
obj: &BucketLifecycleConfiguration,
) -> Result<(), xml::writer::Error>
where
W: Write,
{
writer.write(xml::writer::XmlEvent::start_element(name))?;
LifecycleRulesSerializer::serialize(&mut writer, "Rule", &obj.rules)?;
writer.write(xml::writer::XmlEvent::end_element())
}
}
#[allow(dead_code)]
struct BucketLocationConstraintDeserializer;
impl BucketLocationConstraintDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(tag_name: &str, stack: &mut T) -> Result {
xml_util::deserialize_primitive(tag_name, stack, Ok)
}
}
pub struct BucketLocationConstraintSerializer;
impl BucketLocationConstraintSerializer {
#[allow(unused_variables, warnings)]
pub fn serialize(
mut writer: &mut EventWriter,
name: &str,
obj: &String,
) -> Result<(), xml::writer::Error>
where
W: Write,
{
write_characters_element(writer, name, obj)
}
}
/// Container for logging status information.
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct BucketLoggingStatus {
pub logging_enabled: Option,
}
pub struct BucketLoggingStatusSerializer;
impl BucketLoggingStatusSerializer {
#[allow(unused_variables, warnings)]
pub fn serialize(
mut writer: &mut EventWriter,
name: &str,
obj: &BucketLoggingStatus,
) -> Result<(), xml::writer::Error>
where
W: Write,
{
writer.write(xml::writer::XmlEvent::start_element(name))?;
if let Some(ref value) = obj.logging_enabled {
&LoggingEnabledSerializer::serialize(&mut writer, "LoggingEnabled", value)?;
}
writer.write(xml::writer::XmlEvent::end_element())
}
}
#[allow(dead_code)]
struct BucketLogsPermissionDeserializer;
impl BucketLogsPermissionDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(tag_name: &str, stack: &mut T) -> Result {
xml_util::deserialize_primitive(tag_name, stack, Ok)
}
}
pub struct BucketLogsPermissionSerializer;
impl BucketLogsPermissionSerializer {
#[allow(unused_variables, warnings)]
pub fn serialize(
mut writer: &mut EventWriter,
name: &str,
obj: &String,
) -> Result<(), xml::writer::Error>
where
W: Write,
{
write_characters_element(writer, name, obj)
}
}
#[allow(dead_code)]
struct BucketNameDeserializer;
impl BucketNameDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(tag_name: &str, stack: &mut T) -> Result {
xml_util::deserialize_primitive(tag_name, stack, Ok)
}
}
pub struct BucketNameSerializer;
impl BucketNameSerializer {
#[allow(unused_variables, warnings)]
pub fn serialize(
mut writer: &mut EventWriter,
name: &str,
obj: &String,
) -> Result<(), xml::writer::Error>
where
W: Write,
{
write_characters_element(writer, name, obj)
}
}
#[allow(dead_code)]
struct BucketVersioningStatusDeserializer;
impl BucketVersioningStatusDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(tag_name: &str, stack: &mut T) -> Result {
xml_util::deserialize_primitive(tag_name, stack, Ok)
}
}
pub struct BucketVersioningStatusSerializer;
impl BucketVersioningStatusSerializer {
#[allow(unused_variables, warnings)]
pub fn serialize(
mut writer: &mut EventWriter,
name: &str,
obj: &String,
) -> Result<(), xml::writer::Error>
where
W: Write,
{
write_characters_element(writer, name, obj)
}
}
#[allow(dead_code)]
struct BucketsDeserializer;
impl BucketsDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(
tag_name: &str,
stack: &mut T,
) -> Result, XmlParseError> {
deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| {
if name == "Bucket" {
obj.push(BucketDeserializer::deserialize("Bucket", stack)?);
} else {
skip_tree(stack);
}
Ok(())
})
}
}
#[allow(dead_code)]
struct BytesProcessedDeserializer;
impl BytesProcessedDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(tag_name: &str, stack: &mut T) -> Result {
xml_util::deserialize_primitive(tag_name, stack, |s| Ok(i64::from_str(&s).unwrap()))
}
}
#[allow(dead_code)]
struct BytesReturnedDeserializer;
impl BytesReturnedDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(tag_name: &str, stack: &mut T) -> Result {
xml_util::deserialize_primitive(tag_name, stack, |s| Ok(i64::from_str(&s).unwrap()))
}
}
#[allow(dead_code)]
struct BytesScannedDeserializer;
impl BytesScannedDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(tag_name: &str, stack: &mut T) -> Result {
xml_util::deserialize_primitive(tag_name, stack, |s| Ok(i64::from_str(&s).unwrap()))
}
}
/// Describes the cross-origin access configuration for objects in an Amazon S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing in the Amazon S3 User Guide.
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct CORSConfiguration {
/// A set of origins and methods (cross-origin access that you want to allow). You can add up to 100 rules to the configuration.
pub cors_rules: Vec,
}
pub struct CORSConfigurationSerializer;
impl CORSConfigurationSerializer {
    /// Serializes a `CORSConfiguration` as an XML element named `name`,
    /// writing each rule as a nested `CORSRule` element.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &CORSConfiguration,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        CORSRulesSerializer::serialize(&mut writer, "CORSRule", &obj.cors_rules)?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// Specifies a cross-origin access rule for an Amazon S3 bucket.
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "serialize_structs", derive(Serialize))]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct CORSRule {
    /// Headers that are specified in the Access-Control-Request-Headers header.
    /// These headers are allowed in a preflight OPTIONS request. In response to
    /// any preflight OPTIONS request, Amazon S3 returns any requested headers
    /// that are allowed.
    pub allowed_headers: Option<Vec<String>>,
    /// An HTTP method that you allow the origin to execute. Valid values are
    /// GET, PUT, HEAD, POST, and DELETE.
    pub allowed_methods: Vec<String>,
    /// One or more origins you want customers to be able to access the bucket from.
    pub allowed_origins: Vec<String>,
    /// One or more headers in the response that you want customers to be able
    /// to access from their applications (for example, from a JavaScript
    /// XMLHttpRequest object).
    pub expose_headers: Option<Vec<String>>,
    /// Unique identifier for the rule. The value cannot be longer than 255 characters.
    pub id: Option<String>,
    /// The time in seconds that your browser is to cache the preflight response for the specified resource.
    pub max_age_seconds: Option<i64>,
}
#[allow(dead_code)]
struct CORSRuleDeserializer;
impl CORSRuleDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(
tag_name: &str,
stack: &mut T,
) -> Result {
deserialize_elements::<_, CORSRule, _>(tag_name, stack, |name, stack, obj| {
match name {
"AllowedHeader" => {
obj.allowed_headers.get_or_insert(vec![]).extend(
AllowedHeadersDeserializer::deserialize("AllowedHeader", stack)?,
);
}
"AllowedMethod" => {
obj.allowed_methods
.extend(AllowedMethodsDeserializer::deserialize(
"AllowedMethod",
stack,
)?);
}
"AllowedOrigin" => {
obj.allowed_origins
.extend(AllowedOriginsDeserializer::deserialize(
"AllowedOrigin",
stack,
)?);
}
"ExposeHeader" => {
obj.expose_headers.get_or_insert(vec![]).extend(
ExposeHeadersDeserializer::deserialize("ExposeHeader", stack)?,
);
}
"ID" => {
obj.id = Some(IDDeserializer::deserialize("ID", stack)?);
}
"MaxAgeSeconds" => {
obj.max_age_seconds = Some(MaxAgeSecondsDeserializer::deserialize(
"MaxAgeSeconds",
stack,
)?);
}
_ => skip_tree(stack),
}
Ok(())
})
}
}
pub struct CORSRuleSerializer;
impl CORSRuleSerializer {
    /// Serializes a `CORSRule` as an XML element named `name`, emitting only
    /// the optional fields that are present.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &CORSRule,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.allowed_headers {
            AllowedHeadersSerializer::serialize(&mut writer, "AllowedHeader", value)?;
        }
        AllowedMethodsSerializer::serialize(&mut writer, "AllowedMethod", &obj.allowed_methods)?;
        AllowedOriginsSerializer::serialize(&mut writer, "AllowedOrigin", &obj.allowed_origins)?;
        if let Some(ref value) = obj.expose_headers {
            ExposeHeadersSerializer::serialize(&mut writer, "ExposeHeader", value)?;
        }
        if let Some(ref value) = obj.id {
            write_characters_element(writer, "ID", &value)?;
        }
        if let Some(ref value) = obj.max_age_seconds {
            write_characters_element(writer, "MaxAgeSeconds", &value.to_string())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
#[allow(dead_code)]
struct CORSRulesDeserializer;
impl CORSRulesDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(
tag_name: &str,
stack: &mut T,
) -> Result, XmlParseError> {
let mut obj = vec![];
loop {
let consume_next_tag = match stack.peek() {
Some(&Ok(xml::reader::XmlEvent::StartElement { ref name, .. })) => {
name.local_name == tag_name
}
_ => false,
};
if consume_next_tag {
obj.push(CORSRuleDeserializer::deserialize(tag_name, stack)?);
} else {
break;
}
}
Ok(obj)
}
}
pub struct CORSRulesSerializer;
impl CORSRulesSerializer {
#[allow(unused_variables, warnings)]
pub fn serialize(
mut writer: &mut EventWriter,
name: &str,
obj: &Vec,
) -> Result<(), xml::writer::Error>
where
W: Write,
{
for element in obj {
CORSRuleSerializer::serialize(writer, name, element)?;
}
Ok(())
}
}
/// Describes how an uncompressed comma-separated values (CSV)-formatted input object is formatted.
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct CSVInput {
    /// Specifies that CSV field values may contain quoted record delimiters and such records should be allowed. Default value is FALSE. Setting this value to TRUE may lower performance.
    pub allow_quoted_record_delimiter: Option<bool>,
    /// A single character used to indicate that a row should be ignored when the character is present at the start of that row. You can specify any character to indicate a comment line.
    pub comments: Option<String>,
    /// A single character used to separate individual fields in a record. You can specify an arbitrary delimiter.
    pub field_delimiter: Option<String>,
    /// Describes the first line of input. Valid values are:
    ///
    /// - NONE: First line is not a header.
    /// - IGNORE: First line is a header, but you can't use the header values to
    ///   indicate the column in an expression. You can use column position
    ///   (such as _1, _2, ...) to indicate the column (SELECT s._1 FROM OBJECT s).
    /// - Use: First line is a header, and you can use the header value to
    ///   identify a column in an expression (SELECT "name" FROM OBJECT).
    pub file_header_info: Option<String>,
    /// A single character used for escaping when the field delimiter is part of
    /// the value. For example, if the value is a, b, Amazon S3 wraps this field
    /// value in quotation marks, as follows: " a , b ".
    ///
    /// Type: String. Default: ". Ancestors: CSV.
    pub quote_character: Option<String>,
    /// A single character used for escaping the quotation mark character inside an already escaped value. For example, the value """ a , b """ is parsed as " a , b ".
    pub quote_escape_character: Option<String>,
    /// A single character used to separate individual records in the input. Instead of the default value, you can specify an arbitrary delimiter.
    pub record_delimiter: Option<String>,
}
pub struct CSVInputSerializer;
impl CSVInputSerializer {
    /// Serializes a `CSVInput` as an XML element named `name`, emitting only
    /// the optional fields that are present.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &CSVInput,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.allow_quoted_record_delimiter {
            write_characters_element(writer, "AllowQuotedRecordDelimiter", &value.to_string())?;
        }
        if let Some(ref value) = obj.comments {
            write_characters_element(writer, "Comments", &value)?;
        }
        if let Some(ref value) = obj.field_delimiter {
            write_characters_element(writer, "FieldDelimiter", &value)?;
        }
        if let Some(ref value) = obj.file_header_info {
            write_characters_element(writer, "FileHeaderInfo", &value)?;
        }
        if let Some(ref value) = obj.quote_character {
            write_characters_element(writer, "QuoteCharacter", &value)?;
        }
        if let Some(ref value) = obj.quote_escape_character {
            write_characters_element(writer, "QuoteEscapeCharacter", &value)?;
        }
        if let Some(ref value) = obj.record_delimiter {
            write_characters_element(writer, "RecordDelimiter", &value)?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// Describes how uncompressed comma-separated values (CSV)-formatted results are formatted.
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct CSVOutput {
    /// The value used to separate individual fields in a record. You can specify an arbitrary delimiter.
    pub field_delimiter: Option<String>,
    /// A single character used for escaping when the field delimiter is part of
    /// the value. For example, if the value is a, b, Amazon S3 wraps this field
    /// value in quotation marks, as follows: " a , b ".
    pub quote_character: Option<String>,
    /// The single character used for escaping the quote character inside an already escaped value.
    pub quote_escape_character: Option<String>,
    /// Indicates whether to use quotation marks around output fields.
    pub quote_fields: Option<String>,
    /// A single character used to separate individual records in the output. Instead of the default value, you can specify an arbitrary delimiter.
    pub record_delimiter: Option<String>,
}
pub struct CSVOutputSerializer;
impl CSVOutputSerializer {
    /// Serializes a `CSVOutput` as an XML element named `name`, emitting only
    /// the optional fields that are present.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &CSVOutput,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.field_delimiter {
            write_characters_element(writer, "FieldDelimiter", &value)?;
        }
        if let Some(ref value) = obj.quote_character {
            write_characters_element(writer, "QuoteCharacter", &value)?;
        }
        if let Some(ref value) = obj.quote_escape_character {
            write_characters_element(writer, "QuoteEscapeCharacter", &value)?;
        }
        if let Some(ref value) = obj.quote_fields {
            write_characters_element(writer, "QuoteFields", &value)?;
        }
        if let Some(ref value) = obj.record_delimiter {
            write_characters_element(writer, "RecordDelimiter", &value)?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
#[allow(dead_code)]
struct CloudFunctionDeserializer;
impl CloudFunctionDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(tag_name: &str, stack: &mut T) -> Result {
xml_util::deserialize_primitive(tag_name, stack, Ok)
}
}
pub struct CloudFunctionSerializer;
impl CloudFunctionSerializer {
    /// Writes `obj` as a character-data XML element named `name`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        write_characters_element(writer, name, obj)
    }
}
/// Container for specifying the AWS Lambda notification configuration.
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "serialize_structs", derive(Serialize))]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct CloudFunctionConfiguration {
    /// Lambda cloud function ARN that Amazon S3 can invoke when it detects events of the specified type.
    pub cloud_function: Option<String>,
    /// Bucket events for which to send notifications.
    pub events: Option<Vec<String>>,
    pub id: Option<String>,
    /// The role supporting the invocation of the Lambda function
    pub invocation_role: Option<String>,
}
#[allow(dead_code)]
struct CloudFunctionConfigurationDeserializer;
impl CloudFunctionConfigurationDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(
tag_name: &str,
stack: &mut T,
) -> Result {
deserialize_elements::<_, CloudFunctionConfiguration, _>(
tag_name,
stack,
|name, stack, obj| {
match name {
"CloudFunction" => {
obj.cloud_function = Some(CloudFunctionDeserializer::deserialize(
"CloudFunction",
stack,
)?);
}
"Event" => {
obj.events
.get_or_insert(vec![])
.extend(EventListDeserializer::deserialize("Event", stack)?);
}
"Id" => {
obj.id = Some(NotificationIdDeserializer::deserialize("Id", stack)?);
}
"InvocationRole" => {
obj.invocation_role =
Some(CloudFunctionInvocationRoleDeserializer::deserialize(
"InvocationRole",
stack,
)?);
}
_ => skip_tree(stack),
}
Ok(())
},
)
}
}
pub struct CloudFunctionConfigurationSerializer;
impl CloudFunctionConfigurationSerializer {
    /// Serializes a `CloudFunctionConfiguration` as an XML element named
    /// `name`, emitting only the optional fields that are present.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &CloudFunctionConfiguration,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.cloud_function {
            write_characters_element(writer, "CloudFunction", &value)?;
        }
        if let Some(ref value) = obj.events {
            EventListSerializer::serialize(&mut writer, "Event", value)?;
        }
        if let Some(ref value) = obj.id {
            write_characters_element(writer, "Id", &value)?;
        }
        if let Some(ref value) = obj.invocation_role {
            write_characters_element(writer, "InvocationRole", &value)?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
#[allow(dead_code)]
struct CloudFunctionInvocationRoleDeserializer;
impl CloudFunctionInvocationRoleDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(tag_name: &str, stack: &mut T) -> Result {
xml_util::deserialize_primitive(tag_name, stack, Ok)
}
}
pub struct CloudFunctionInvocationRoleSerializer;
impl CloudFunctionInvocationRoleSerializer {
    /// Writes `obj` as a character-data XML element named `name`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        write_characters_element(writer, name, obj)
    }
}
#[allow(dead_code)]
struct CodeDeserializer;
impl CodeDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(tag_name: &str, stack: &mut T) -> Result {
xml_util::deserialize_primitive(tag_name, stack, Ok)
}
}
pub struct CommentsSerializer;
impl CommentsSerializer {
    /// Writes `obj` as a character-data XML element named `name`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        write_characters_element(writer, name, obj)
    }
}
/// Container for all (if there are any) keys between Prefix and the next occurrence of the string specified by a delimiter. CommonPrefixes lists keys that act like subdirectories in the directory specified by Prefix. For example, if the prefix is notes/ and the delimiter is a slash (/) as in notes/summer/july, the common prefix is notes/summer/.
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "serialize_structs", derive(Serialize))]
pub struct CommonPrefix {
    /// Container for the specified common prefix.
    pub prefix: Option<String>,
}
#[allow(dead_code)]
struct CommonPrefixDeserializer;
impl CommonPrefixDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(
tag_name: &str,
stack: &mut T,
) -> Result {
deserialize_elements::<_, CommonPrefix, _>(tag_name, stack, |name, stack, obj| {
match name {
"Prefix" => {
obj.prefix = Some(PrefixDeserializer::deserialize("Prefix", stack)?);
}
_ => skip_tree(stack),
}
Ok(())
})
}
}
#[allow(dead_code)]
struct CommonPrefixListDeserializer;
impl CommonPrefixListDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(
tag_name: &str,
stack: &mut T,
) -> Result, XmlParseError> {
let mut obj = vec![];
loop {
let consume_next_tag = match stack.peek() {
Some(&Ok(xml::reader::XmlEvent::StartElement { ref name, .. })) => {
name.local_name == tag_name
}
_ => false,
};
if consume_next_tag {
obj.push(CommonPrefixDeserializer::deserialize(tag_name, stack)?);
} else {
break;
}
}
Ok(obj)
}
}
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "serialize_structs", derive(Serialize))]
pub struct CompleteMultipartUploadOutput {
    /// The name of the bucket that contains the newly created object.
    ///
    /// When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
    ///
    /// When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
    pub bucket: Option<String>,
    /// Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with AWS KMS (SSE-KMS).
    pub bucket_key_enabled: Option<bool>,
    /// Entity tag that identifies the newly created object's data. Objects with different object data will have different entity tags. The entity tag is an opaque string. The entity tag may or may not be an MD5 digest of the object data. If the entity tag is not an MD5 digest of the object data, it will contain one or more nonhexadecimal characters and/or will consist of less than 32 or more than 32 hexadecimal digits.
    pub e_tag: Option<String>,
    /// If the object expiration is configured, this will contain the expiration date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.
    pub expiration: Option<String>,
    /// The object key of the newly created object.
    pub key: Option<String>,
    /// The URI that identifies the newly created object.
    pub location: Option<String>,
    pub request_charged: Option<String>,
    /// If present, specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer managed customer master key (CMK) that was used for the object.
    pub ssekms_key_id: Option<String>,
    /// If you specified server-side encryption either with an Amazon S3-managed encryption key or an AWS KMS customer master key (CMK) in your initiate multipart upload request, the response includes this header. It confirms the encryption algorithm that Amazon S3 used to encrypt the object.
    pub server_side_encryption: Option<String>,
    /// Version ID of the newly created object, in case the bucket has versioning turned on.
    pub version_id: Option<String>,
}
#[allow(dead_code)]
struct CompleteMultipartUploadOutputDeserializer;
impl CompleteMultipartUploadOutputDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(
tag_name: &str,
stack: &mut T,
) -> Result {
deserialize_elements::<_, CompleteMultipartUploadOutput, _>(
tag_name,
stack,
|name, stack, obj| {
match name {
"Bucket" => {
obj.bucket = Some(BucketNameDeserializer::deserialize("Bucket", stack)?);
}
"ETag" => {
obj.e_tag = Some(ETagDeserializer::deserialize("ETag", stack)?);
}
"Key" => {
obj.key = Some(ObjectKeyDeserializer::deserialize("Key", stack)?);
}
"Location" => {
obj.location = Some(LocationDeserializer::deserialize("Location", stack)?);
}
_ => skip_tree(stack),
}
Ok(())
},
)
}
}
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct CompleteMultipartUploadRequest {
/// Name of the bucket to which the multipart upload was initiated.
pub bucket: String,
/// The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
pub expected_bucket_owner: Option,
/// Object key for which the multipart upload was initiated.
pub key: String,
/// The container for the multipart upload request information.
pub multipart_upload: Option,
pub request_payer: Option,
/// ID for the initiated multipart upload.
pub upload_id: String,
}
/// The container for the completed multipart upload details.
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct CompletedMultipartUpload {
/// Array of CompletedPart data types.
pub parts: Option>,
}
pub struct CompletedMultipartUploadSerializer;
impl CompletedMultipartUploadSerializer {
    /// Serializes a `CompletedMultipartUpload` as an XML element named `name`,
    /// writing each part as a nested `Part` element when present.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &CompletedMultipartUpload,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.parts {
            CompletedPartListSerializer::serialize(&mut writer, "Part", value)?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// Details of the parts that were uploaded.
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct CompletedPart {
    /// Entity tag returned when the part was uploaded.
    pub e_tag: Option<String>,
    /// Part number that identifies the part. This is a positive integer between 1 and 10,000.
    pub part_number: Option<i64>,
}
pub struct CompletedPartSerializer;
impl CompletedPartSerializer {
    /// Serializes a `CompletedPart` as an XML element named `name`, emitting
    /// only the optional fields that are present.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &CompletedPart,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.e_tag {
            write_characters_element(writer, "ETag", &value)?;
        }
        if let Some(ref value) = obj.part_number {
            write_characters_element(writer, "PartNumber", &value.to_string())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
pub struct CompletedPartListSerializer;
impl CompletedPartListSerializer {
#[allow(unused_variables, warnings)]
pub fn serialize(
mut writer: &mut EventWriter,
name: &str,
obj: &Vec,
) -> Result<(), xml::writer::Error>
where
W: Write,
{
for element in obj {
CompletedPartSerializer::serialize(writer, name, element)?;
}
Ok(())
}
}
pub struct CompressionTypeSerializer;
impl CompressionTypeSerializer {
    /// Writes `obj` as a character-data XML element named `name`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        write_characters_element(writer, name, obj)
    }
}
/// A container for describing a condition that must be met for the specified
/// redirect to apply. For example, 1. If request is for pages in the /docs
/// folder, redirect to the /documents folder. 2. If request results in HTTP
/// error 4xx, redirect request to another host where you might process the error.
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "serialize_structs", derive(Serialize))]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct Condition {
    /// The HTTP error code when the redirect is applied. In the event of an
    /// error, if the error code equals this value, then the specified redirect
    /// is applied. Required when parent element Condition is specified and
    /// sibling KeyPrefixEquals is not specified. If both are specified, then
    /// both must be true for the redirect to be applied.
    pub http_error_code_returned_equals: Option<String>,
    /// The object key name prefix when the redirect is applied. For example, to
    /// redirect requests for ExamplePage.html, the key prefix will be
    /// ExamplePage.html. To redirect request for all pages with the prefix
    /// docs/, the key prefix will be /docs, which identifies all objects in the
    /// docs/ folder. Required when the parent element Condition is specified
    /// and sibling HttpErrorCodeReturnedEquals is not specified. If both
    /// conditions are specified, both must be true for the redirect to be applied.
    ///
    /// Replacement must be made for object keys containing special characters
    /// (such as carriage returns) when using XML requests. For more
    /// information, see XML related object key constraints.
    pub key_prefix_equals: Option<String>,
}
#[allow(dead_code)]
struct ConditionDeserializer;
impl ConditionDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(
tag_name: &str,
stack: &mut T,
) -> Result {
deserialize_elements::<_, Condition, _>(tag_name, stack, |name, stack, obj| {
match name {
"HttpErrorCodeReturnedEquals" => {
obj.http_error_code_returned_equals =
Some(HttpErrorCodeReturnedEqualsDeserializer::deserialize(
"HttpErrorCodeReturnedEquals",
stack,
)?);
}
"KeyPrefixEquals" => {
obj.key_prefix_equals = Some(KeyPrefixEqualsDeserializer::deserialize(
"KeyPrefixEquals",
stack,
)?);
}
_ => skip_tree(stack),
}
Ok(())
})
}
}
pub struct ConditionSerializer;
impl ConditionSerializer {
    /// Serializes a `Condition` as an XML element named `name`, emitting only
    /// the optional fields that are present.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &Condition,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.http_error_code_returned_equals {
            write_characters_element(writer, "HttpErrorCodeReturnedEquals", &value)?;
        }
        if let Some(ref value) = obj.key_prefix_equals {
            write_characters_element(writer, "KeyPrefixEquals", &value)?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
///
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "serialize_structs", derive(Serialize))]
pub struct ContinuationEvent {}
#[allow(dead_code)]
struct ContinuationEventDeserializer;
impl ContinuationEventDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(
tag_name: &str,
stack: &mut T,
) -> Result {
xml_util::start_element(tag_name, stack)?;
let obj = ContinuationEvent::default();
xml_util::end_element(tag_name, stack)?;
Ok(obj)
}
}
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "serialize_structs", derive(Serialize))]
pub struct CopyObjectOutput {
/// Indicates whether the copied object uses an S3 Bucket Key for server-side encryption with AWS KMS (SSE-KMS).
pub bucket_key_enabled: Option,
/// Container for all response elements.
pub copy_object_result: Option,
/// Version of the copied object in the destination bucket.
pub copy_source_version_id: Option,
/// If the object expiration is configured, the response includes this header.
pub expiration: Option,
pub request_charged: Option,
/// If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.
pub sse_customer_algorithm: Option,
/// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round-trip message integrity verification of the customer-provided encryption key.
pub sse_customer_key_md5: Option,
/// If present, specifies the AWS KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs.
pub ssekms_encryption_context: Option,
/// If present, specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer managed customer master key (CMK) that was used for the object.
pub ssekms_key_id: Option,
/// The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).
pub server_side_encryption: Option,
/// Version ID of the newly created copy.
pub version_id: Option,
}
#[allow(dead_code)]
struct CopyObjectOutputDeserializer;
impl CopyObjectOutputDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(
tag_name: &str,
stack: &mut T,
) -> Result {
Ok(CopyObjectOutput {
copy_object_result: Some(CopyObjectResultDeserializer::deserialize(
"CopyObjectResult",
stack,
)?),
..CopyObjectOutput::default()
})
}
}
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct CopyObjectRequest {
/// The canned ACL to apply to the object.
This action is not supported by Amazon S3 on Outposts.
pub acl: Option,
/// The name of the destination bucket.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
pub bucket: String,
/// Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true
causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.
Specifying this header with a COPY action doesn’t affect bucket-level settings for S3 Bucket Key.
pub bucket_key_enabled: Option,
/// Specifies caching behavior along the request/reply chain.
pub cache_control: Option,
/// Specifies presentational information for the object.
pub content_disposition: Option,
/// Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.
pub content_encoding: Option,
/// The language the content is in.
pub content_language: Option,
/// A standard MIME type describing the format of the object data.
pub content_type: Option,
/// Specifies the source object for the copy operation. You specify the value in one of two formats, depending on whether you want to access the source object through an access point:
-
For objects not accessed through an access point, specify the name of the source bucket and the key of the source object, separated by a slash (/). For example, to copy the object reports/january.pdf
from the bucket awsexamplebucket
, use awsexamplebucket/reports/january.pdf
. The value must be URL encoded.
-
For objects accessed through access points, specify the Amazon Resource Name (ARN) of the object as accessed through the access point, in the format arn:aws:s3:<Region>:<account-id>:accesspoint/<access-point-name>/object/<key>
. For example, to copy the object reports/january.pdf
through access point my-access-point
owned by account 123456789012
in Region us-west-2
, use the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf
. The value must be URL encoded.
Amazon S3 supports copy operations using access points only when the source and destination buckets are in the same AWS Region.
Alternatively, for objects accessed through Amazon S3 on Outposts, specify the ARN of the object as accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key>
. For example, to copy the object reports/january.pdf
through outpost my-outpost
owned by account 123456789012
in Region us-west-2
, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf
. The value must be URL encoded.
To copy a specific version of an object, append ?versionId=<version-id>
to the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893
). If you don't specify a version ID, Amazon S3 copies the latest version of the source object.
pub copy_source: String,
/// Copies the object if its entity tag (ETag) matches the specified tag.
pub copy_source_if_match: Option,
/// Copies the object if it has been modified since the specified time.
pub copy_source_if_modified_since: Option,
/// Copies the object if its entity tag (ETag) is different than the specified ETag.
pub copy_source_if_none_match: Option,
/// Copies the object if it hasn't been modified since the specified time.
pub copy_source_if_unmodified_since: Option,
/// Specifies the algorithm to use when decrypting the source object (for example, AES256).
pub copy_source_sse_customer_algorithm: Option,
/// Specifies the customer-provided encryption key for Amazon S3 to use to decrypt the source object. The encryption key provided in this header must be one that was used when the source object was created.
pub copy_source_sse_customer_key: Option,
/// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error.
pub copy_source_sse_customer_key_md5: Option,
/// The account ID of the expected destination bucket owner. If the destination bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.
pub expected_bucket_owner: Option,
/// The account ID of the expected source bucket owner. If the source bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.
pub expected_source_bucket_owner: Option,
/// The date and time at which the object is no longer cacheable.
pub expires: Option,
/// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
/// This action is not supported by Amazon S3 on Outposts.
pub grant_full_control: Option,
/// Allows grantee to read the object data and its metadata.
/// This action is not supported by Amazon S3 on Outposts.
pub grant_read: Option,
/// Allows grantee to read the object ACL.
/// This action is not supported by Amazon S3 on Outposts.
pub grant_read_acp: Option,
/// Allows grantee to write the ACL for the applicable object.
/// This action is not supported by Amazon S3 on Outposts.
pub grant_write_acp: Option,
/// The key of the destination object.
pub key: String,
/// A map of metadata to store with the object in S3.
pub metadata: Option<::std::collections::HashMap>,
/// Specifies whether the metadata is copied from the source object or replaced with metadata provided in the request.
pub metadata_directive: Option,
/// Specifies whether you want to apply a Legal Hold to the copied object.
pub object_lock_legal_hold_status: Option,
/// The Object Lock mode that you want to apply to the copied object.
pub object_lock_mode: Option,
/// The date and time when you want the copied object's Object Lock to expire.
pub object_lock_retain_until_date: Option,
pub request_payer: Option,
/// Specifies the algorithm to use to when encrypting the object (for example, AES256).
pub sse_customer_algorithm: Option,
/// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header.
pub sse_customer_key: Option,
/// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error.
pub sse_customer_key_md5: Option,
/// Specifies the AWS KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs.
pub ssekms_encryption_context: Option,
/// Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. For information about configuring using any of the officially supported AWS SDKs and AWS CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide.
pub ssekms_key_id: Option,
/// The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).
pub server_side_encryption: Option,
/// By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage Classes in the Amazon S3 User Guide.
pub storage_class: Option,
/// The tag-set for the destination object; this value must be used in conjunction with the TaggingDirective. The tag-set must be encoded as URL Query parameters.
pub tagging: Option,
/// Specifies whether the object tag-set are copied from the source object or replaced with tag-set provided in the request.
pub tagging_directive: Option,
/// If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata.
pub website_redirect_location: Option,
}
/// Container for all response elements.
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "serialize_structs", derive(Serialize))]
pub struct CopyObjectResult {
    /// Returns the ETag of the new object. The ETag reflects only changes to the contents of an object, not its metadata. The source and destination ETag is identical for a successfully copied non-multipart object.
    pub e_tag: Option<String>,
    /// Creation date of the object.
    pub last_modified: Option<String>,
}
#[allow(dead_code)]
struct CopyObjectResultDeserializer;
impl CopyObjectResultDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(
tag_name: &str,
stack: &mut T,
) -> Result {
deserialize_elements::<_, CopyObjectResult, _>(tag_name, stack, |name, stack, obj| {
match name {
"ETag" => {
obj.e_tag = Some(ETagDeserializer::deserialize("ETag", stack)?);
}
"LastModified" => {
obj.last_modified = Some(LastModifiedDeserializer::deserialize(
"LastModified",
stack,
)?);
}
_ => skip_tree(stack),
}
Ok(())
})
}
}
/// Container for all response elements.
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "serialize_structs", derive(Serialize))]
pub struct CopyPartResult {
    /// Entity tag of the object.
    pub e_tag: Option<String>,
    /// Date and time at which the object was uploaded.
    pub last_modified: Option<String>,
}
#[allow(dead_code)]
struct CopyPartResultDeserializer;
impl CopyPartResultDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(
tag_name: &str,
stack: &mut T,
) -> Result {
deserialize_elements::<_, CopyPartResult, _>(tag_name, stack, |name, stack, obj| {
match name {
"ETag" => {
obj.e_tag = Some(ETagDeserializer::deserialize("ETag", stack)?);
}
"LastModified" => {
obj.last_modified = Some(LastModifiedDeserializer::deserialize(
"LastModified",
stack,
)?);
}
_ => skip_tree(stack),
}
Ok(())
})
}
}
/// The configuration information for the bucket.
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct CreateBucketConfiguration {
    /// Specifies the Region where the bucket will be created. If you don't specify a Region, the bucket is created in the US East (N. Virginia) Region (us-east-1).
    pub location_constraint: Option<String>,
}
pub struct CreateBucketConfigurationSerializer;
impl CreateBucketConfigurationSerializer {
    /// Writes `obj` as an XML element named `name` to `writer`, emitting a
    /// `LocationConstraint` child element only when one is set.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &CreateBucketConfiguration,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.location_constraint {
            write_characters_element(writer, "LocationConstraint", &value)?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "serialize_structs", derive(Serialize))]
pub struct CreateBucketOutput {
    /// Specifies the Region where the bucket will be created. If you are creating a bucket on the US East (N. Virginia) Region (us-east-1), you do not need to specify the location.
    pub location: Option<String>,
}
#[allow(dead_code)]
struct CreateBucketOutputDeserializer;
impl CreateBucketOutputDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(
tag_name: &str,
stack: &mut T,
) -> Result {
xml_util::start_element(tag_name, stack)?;
let obj = CreateBucketOutput::default();
xml_util::end_element(tag_name, stack)?;
Ok(obj)
}
}
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct CreateBucketRequest {
/// The canned ACL to apply to the bucket.
pub acl: Option,
/// The name of the bucket to create.
pub bucket: String,
/// The configuration information for the bucket.
pub create_bucket_configuration: Option,
/// Allows grantee the read, write, read ACP, and write ACP permissions on the bucket.
pub grant_full_control: Option,
/// Allows grantee to list the objects in the bucket.
pub grant_read: Option,
/// Allows grantee to read the bucket ACL.
pub grant_read_acp: Option,
/// Allows grantee to create new objects in the bucket.
For the bucket and object owners of existing objects, also allows deletions and overwrites of those objects.
pub grant_write: Option,
/// Allows grantee to write the ACL for the applicable bucket.
pub grant_write_acp: Option,
/// Specifies whether you want S3 Object Lock to be enabled for the new bucket.
pub object_lock_enabled_for_bucket: Option,
}
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "serialize_structs", derive(Serialize))]
pub struct CreateMultipartUploadOutput {
    /// If the bucket has a lifecycle rule configured with an action to abort incomplete multipart uploads and the prefix in the lifecycle rule matches the object name in the request, the response includes this header. The header indicates when the initiated multipart upload becomes eligible for an abort operation. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Policy.
    /// The response also includes the x-amz-abort-rule-id header that provides the ID of the lifecycle configuration rule that defines this action.
    pub abort_date: Option<String>,
    /// This header is returned along with the x-amz-abort-date header. It identifies the applicable lifecycle configuration rule that defines the action to abort incomplete multipart uploads.
    pub abort_rule_id: Option<String>,
    /// The name of the bucket to which the multipart upload was initiated.
    /// When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
    /// When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
    pub bucket: Option<String>,
    /// Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with AWS KMS (SSE-KMS).
    pub bucket_key_enabled: Option<bool>,
    /// Object key for which the multipart upload was initiated.
    pub key: Option<String>,
    pub request_charged: Option<String>,
    /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.
    pub sse_customer_algorithm: Option<String>,
    /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round-trip message integrity verification of the customer-provided encryption key.
    pub sse_customer_key_md5: Option<String>,
    /// If present, specifies the AWS KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs.
    pub ssekms_encryption_context: Option<String>,
    /// If present, specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer managed customer master key (CMK) that was used for the object.
    pub ssekms_key_id: Option<String>,
    /// The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).
    pub server_side_encryption: Option<String>,
    /// ID for the initiated multipart upload.
    pub upload_id: Option<String>,
}
#[allow(dead_code)]
struct CreateMultipartUploadOutputDeserializer;
impl CreateMultipartUploadOutputDeserializer {
#[allow(dead_code, unused_variables)]
fn deserialize(
tag_name: &str,
stack: &mut T,
) -> Result {
deserialize_elements::<_, CreateMultipartUploadOutput, _>(
tag_name,
stack,
|name, stack, obj| {
match name {
"Bucket" => {
obj.bucket = Some(BucketNameDeserializer::deserialize("Bucket", stack)?);
}
"Key" => {
obj.key = Some(ObjectKeyDeserializer::deserialize("Key", stack)?);
}
"UploadId" => {
obj.upload_id = Some(MultipartUploadIdDeserializer::deserialize(
"UploadId", stack,
)?);
}
_ => skip_tree(stack),
}
Ok(())
},
)
}
}
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct CreateMultipartUploadRequest {
/// The canned ACL to apply to the object.
/// This action is not supported by Amazon S3 on Outposts.
pub acl: Option,
/// The name of the bucket to which to initiate the upload.
/// When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
/// When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
pub bucket: String,
/// Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.
/// Specifying this header with an object action doesn't affect bucket-level settings for S3 Bucket Key.
pub bucket_key_enabled: Option,
/// Specifies caching behavior along the request/reply chain.
pub cache_control: Option,
/// Specifies presentational information for the object.
pub content_disposition: Option,
/// Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.
pub content_encoding: Option,
/// The language the content is in.
pub content_language: Option,
/// A standard MIME type describing the format of the object data.
pub content_type: Option,
/// The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.
pub expected_bucket_owner: Option,
/// The date and time at which the object is no longer cacheable.
pub expires: Option,
/// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
/// This action is not supported by Amazon S3 on Outposts.
pub grant_full_control: Option,
/// Allows grantee to read the object data and its metadata.
/// This action is not supported by Amazon S3 on Outposts.
pub grant_read: Option,
/// Allows grantee to read the object ACL.
/// This action is not supported by Amazon S3 on Outposts.
pub grant_read_acp: Option,
/// Allows grantee to write the ACL for the applicable object.
/// This action is not supported by Amazon S3 on Outposts.
pub grant_write_acp: Option,
/// Object key for which the multipart upload is to be initiated.
pub key: String,
/// A map of metadata to store with the object in S3.
pub metadata: Option<::std::collections::HashMap>,
/// Specifies whether you want to apply a Legal Hold to the uploaded object.
pub object_lock_legal_hold_status: Option,
/// Specifies the Object Lock mode that you want to apply to the uploaded object.
pub object_lock_mode: Option,
/// Specifies the date and time when you want the Object Lock to expire.
pub object_lock_retain_until_date: Option,
pub request_payer: Option