API Actions: CreateS3Replication


Request Syntax for AccessType=AccessKey:

JSON

POST /api/CreateS3Replication HTTP/1.1
Host: use.your.host.name:82
X-Amz-Content-Sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
Content-Type: application/json
X-Amz-Date: 20171115T202130Z
Authorization: AWS4-HMAC-SHA256 Credential=UQOPWUVNBALABCABCABC/20171115/us-east-1/cloudbasic/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-content-sha256;x-amz-date, Signature=995374189c189e8e68ed3de82c1764ca11971711fb5179eeab2b19edd883dd74
 
{
	"parallelTablesLimit": 5,
	"rebuildIndexes": false,
	"source": {
		"connectionString": "Data Source=your-source-db-server;Initial Catalog=TestDBAll_2;Persist Security Info=False;User ID=user-name;Password=********;Connect Timeout=1280",
		"encryptDataInTransit": true,
		"changeTrackingRetentionPeriod": "2 DAYS",
		"resetChangeTracking": false
	},
	"staging": {
		"connectionString": "Data Source=your-source-db-server;Initial Catalog=TestDBAll_2;Persist Security Info=False;User ID=user-name;Password=********;Connect Timeout=12800",
		"encryptDataInTransit": true,
		"replicateCompatibilityLevel": true
	},
	"s3DataLake": {
		"accessType": "AccessKey", -- Valid values (AccessKey, IAMRole). Separate example is provided for accessType=IAMRole below
		"accessKey":  "XXXXXIR2D555AV4RFA",
		"secretKey":  "XXXXXWGywbZtPbtWm1TBmwUxkV5Ovm6tJEsuy+4X",
		"bucketName": "my-bucket-name",
		"bucketFolderName": "/SCD/Type2/",
		"regionEndpoint": "us-east-1",
		"retentionDays": 5,
		"scdType": "Type2",
		"delimiter": "TAB", -- applicable to FileFormat=CSV only, standard delimiters to be selected are ASC30, ASC31, TAB
                "csvFileFormat": "csv" -- 12.207 or above. Applicable to FileFormat=CSV only. Default='csv', allows specifying of custom file extension such as "dat"
		"folderStructure":"OneFolderPerTablePerYearMonthDayHour",
                "folderNamingFormat: "DayHourEqualValue", -- Default, DayHourEqualValue [12.204 and above], KeyEqualValue (applicable to FolderStructure in (OneFolderPerTablePerYearMonthDay, OneFolderPerTablePerYearMonthDayHour) only
		"fileFormat": "Parquet",
                "csvFileExtension": "csv" -- 12.207 or above. Applicable to FileFormat=CSV only. Default='csv', allows specifying of custom file extension such as "dat"
		"jsonExportType": "Line",
		"compression": "Snappy", -- <!-- None, Gzip, Bzip2, Snappy -->
                "addTableSchemaPrefix": false, -- if addTableSchemaPrefix=true, table schema prefix will be added to the output file name, i.e. for table dbo.mytable, output file will be named as dbo_mytable_{timestamps}_{processid}.snappy.parquet
                "convertDataTypesToString": false 
	},
	"addUpdateTimestamp": false,
	"tablesToInclude": "[dbo].[Departments],[dbo].[Managers]",
	"reseedingSchedule": {
		"noPkTablesToInclude": "[dbo].[NoPkTable]",
		"pkTablesToInclude": null,
		"fromTime": "10:00:00 AM",
		"toTime": "10:59:59 PM",
		"repeatIntervalInMinutes": "100"
	}	
}
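
The request above can be sent from any HTTP client able to produce an AWS Signature Version 4 Authorization header like the one shown. Below is a minimal, illustrative Python sketch that signs the JSON body with botocore's SigV4 signer and posts it; the host, the key/secret placeholders and the truncated payload are assumptions to replace with your own values, and the exact signed-header set your CloudBasic deployment expects may differ slightly from what botocore emits.

Python

import json

import requests
from botocore.auth import SigV4Auth
from botocore.awsrequest import AWSRequest
from botocore.credentials import Credentials

HOST = "use.your.host.name:82"                 # your CloudBasic instance
URL = f"http://{HOST}/api/CreateS3Replication"

# Build the body exactly as in the example above (only the first fields are shown here).
payload = {
    "parallelTablesLimit": 5,
    "rebuildIndexes": False,
    # ... source, staging, s3DataLake, tablesToInclude, reseedingSchedule ...
}
body = json.dumps(payload)

# Service name and region must match the credential scope in the Authorization
# header above: <key>/<date>/us-east-1/cloudbasic/aws4_request.
credentials = Credentials("YOUR_API_KEY", "YOUR_API_SECRET")
request = AWSRequest(method="POST", url=URL, data=body,
                     headers={"Content-Type": "application/json"})
SigV4Auth(credentials, "cloudbasic", "us-east-1").add_auth(request)

response = requests.post(URL, data=body, headers=dict(request.headers))
print(response.status_code, response.text)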

XML

POST /api/CreateS3Replication HTTP/1.1
Host: use.your.host.name:82
X-Amz-Content-Sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
Content-Type: application/xml
X-Amz-Date: 20171115T202130Z
Authorization: AWS4-HMAC-SHA256 Credential=UQOPWUVNBALABCABCABC/20171115/us-east-1/cloudbasic/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-content-sha256;x-amz-date, Signature=995374189c189e8e68ed3de82c1764ca11971711fb5179eeab2b19edd883dd74
 
<?xml version="1.0" encoding="utf-8"?>
<CreateS3ReplicationRequest>
    <ParallelTablesLimit>5</ParallelTablesLimit>
	<RebuildIndexes>false</RebuildIndexes> 
	<Source>
		<ConnectionString>Data Source=your-source-db-server;Initial Catalog=TestDBAll_2;Persist Security Info=False;User ID=user-name;Password=******;Connect Timeout=1280</ConnectionString>	  
		<EncryptDataInTransit>true</EncryptDataInTransit>
		<ChangeTrackingRetentionPeriod>2 DAYS</ChangeTrackingRetentionPeriod>
		<ResetChangeTracking>false</ResetChangeTracking>
	</Source>
	<Staging>
		<ConnectionString>Data Source=your-source-db-server;Initial Catalog=TestDBAll_2;Persist Security Info=False;User ID=user-name;Password=******;Connect Timeout=12800</ConnectionString>	  
		<EncryptDataInTransit>true</EncryptDataInTransit>
		<ReplicateCompatibilityLevel>true</ReplicateCompatibilityLevel>
	</Staging>
	<S3DataLake>
		<AccessType>AccessKey</AccessType>
		<AccessKey>AKIAIK5IR2D555AV4RFA</AccessKey>
		<SecretKey>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</SecretKey>
		<BucketName>cbr-test</BucketName>
		<BucketFolderName>/SCD/11232017/TestDBAll_1</BucketFolderName>
		<RetentionDays>5</RetentionDays>
		<RegionEndpoint>us-east-1</RegionEndpoint>
		<ScdType>Type2</ScdType> <!-- Type1, Type2 -->
		<FolderStructure>OneFolderPerTablePerYearMonthDayHour</FolderStructure> <!-- OneFolderPerExportProcessRun,OneFolderPerTable,OneFolderPerTablePerDay,OneFolderPerTablePerDayWithAppending,OneFolderPerTablePerYearMonthDay,OneFolderPerTablePerYearMonthDayHour -->
		<FolderNamingFormat>DayHourEqualValue</FolderNamingFormat> <!-- Default, DayHourEqualValue [12.204 and above], KeyEqualValue (applicable to FolderStructure in (OneFolderPerTablePerYearMonthDay, OneFolderPerTablePerYearMonthDayHour) only -->                
                <FileFormat>Parquet</FileFormat> <!-- JSON, CSV, Parquet -->
                <CSVFileExtension>csv</CSVFileExtension> <!-- 12.207 or above. Applicable to FileFormat=CSV only. Default='csv', allows specifying of custom file extension such as "dat" -->
                <Delimiter>TAB</Delimiter> <!-- applicable to FileFormat=CSV only, standard delimiters to be selected are ASC30, ASC31, TAB -->
		<JsonExportType>Line</JsonExportType> <!-- Document, Line (applicable to FileFormat=JSON only) -->
		<Compression>Snappy</Compression> <!-- None, Gzip, Bzip2, Snappy -->
                <AddTableSchemaPrefix>false</AddTableSchemaPrefix> <!-- if AddTableSchemaPrefix=true, table schema prefix will be added to output file name, e.g. for table dbo.mytable, output file will be named as dbo_mytable_{timestamps}_{processid}.snappy.parquet -->
                <ConvertDataTypesToString>false</ConvertDataTypesToString>
	</S3DataLake>
	<AddUpdateTimestamp>false</AddUpdateTimestamp> 
	<TablesToInclude>[dbo].[Departments],[dbo].[Managers]</TablesToInclude> <!-- All = include all tables --> 
	<ReseedingSchedule>
		<NoPkTablesToInclude>[dbo].[NoPkTable1], [dbo].[NoPkTable2]</NoPkTablesToInclude>
		<PkTablesToInclude>[dbo].[PkTable1], [dbo].[PkTable2]</PkTablesToInclude>
		<FromTime>12:01:00 AM</FromTime>
		<ToTime>4:00:00 AM</ToTime>
		<RepeatIntervalInMinutes>120</RepeatIntervalInMinutes>
	</ReseedingSchedule>
</CreateS3ReplicationRequest>

 

Request Syntax for AccessType=IAMRole:

JSON

POST /api/CreateS3Replication HTTP/1.1
Host: use.your.host.name:82
X-Amz-Content-Sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
Content-Type: application/json
X-Amz-Date: 20171115T202130Z
Authorization: AWS4-HMAC-SHA256 Credential=UQOPWUVNBALABCABCABC/20171115/us-east-1/cloudbasic/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-content-sha256;x-amz-date, Signature=995374189c189e8e68ed3de82c1764ca11971711fb5179eeab2b19edd883dd74
 
{
	"parallelTablesLimit": 5,
	"rebuildIndexes": false,
	"source": {
		"connectionString": "Data Source=your-source-db-server;Initial Catalog=TestDBAll_2;Persist Security Info=False;User ID=user-name;Password=********;Connect Timeout=1280",
		"encryptDataInTransit": true,
		"changeTrackingRetentionPeriod": "2 DAYS",
		"resetChangeTracking": false
	},
	"staging": {
		"connectionString": "Data Source=your-source-db-server;Initial Catalog=TestDBAll_2;Persist Security Info=False;User ID=user-name;Password=********;Connect Timeout=12800",
		"encryptDataInTransit": true,
		"replicateCompatibilityLevel": true
	},
	"s3DataLake": {
		"accessType": "IAMRole", -- Valid values (AccessKey, IAMRole). Separate example is provided for accessType=AccessKey above
		"accountId": "XXXXX5IR2D555AV4RFA",
		"roleName": "XXXXXywbZtPbtWm1TBmwUxkV5Ovm6tJEsuy+4X",
		"bucketName": "my-bucket-name",
		"bucketFolderName": "/SCD/11232017/TestDBAll_1",
		"regionEndpoint": "us-east-1",
		"retentionDays": 5,
	        "scdType": "Type2", 
                "folderStructure": "OneFolderPerTablePerYearMonthDayHour",
                "folderNamingFormat: "KeyEqualValue", 
                "fileFormat": "Parquet",
                "csvFileExtension": "csv", -- 12.207 or above. Applicable to FileFormat=CSV only. Default='csv', allows specifying of custom file extension such as "dat"
                "delimiter": "TAB", 
                "jsonExportType": "Line", 
                "compression": "Snappy", -- None, Gzip, Bzip2, Snappy
                "addTableSchemaPrefix": false, -- if addTableSchemaPrefix=true, table schema prefix will be added to output file name, i.e. for table dbo.mytable, output file will be named as dbo_mytable_{timestamps}_{processid}.snappy.parquet 
                "convertDataTypesToString": false
	},
	"addUpdateTimestamp": false,
	"tablesToInclude": "[dbo].[Departments],[dbo].[Managers]",
	"reseedingSchedule": {
		"noPkTablesToInclude": "[dbo].[NoPkTable]",
		"pkTablesToInclude": null,
		"fromTime": "10:00:00 AM",
		"toTime": "10:59:59 PM",
		"repeatIntervalInMinutes": "100"
	}	
}
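
With AccessType=IAMRole, only the s3DataLake block changes relative to the AccessKey example: the accessKey/secretKey pair is replaced by accountId and roleName. A short illustrative sketch of adapting the payload from the earlier Python example (the account id and role name below are placeholders; the request is then signed and posted exactly as shown there):

Python

# Start from the AccessKey payload built in the earlier sketch, then swap the
# key pair for an account/role reference (placeholder values).
payload["s3DataLake"].pop("accessKey", None)
payload["s3DataLake"].pop("secretKey", None)
payload["s3DataLake"].update({
    "accessType": "IAMRole",
    "accountId": "111122223333",        # AWS account that owns the role (placeholder)
    "roleName": "cloudbasic-s3-writer", # role used to write to the bucket (placeholder)
})
# Sign and POST the updated body exactly as in the earlier sketch.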

XML

POST /api/CreateS3Replication HTTP/1.1
Host: use.your.host.name:82
X-Amz-Content-Sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
Content-Type: application/xml
X-Amz-Date: 20171115T202130Z
Authorization: AWS4-HMAC-SHA256 Credential=UQOPWUVNBALABCABCABC/20171115/us-east-1/cloudbasic/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-content-sha256;x-amz-date, Signature=995374189c189e8e68ed3de82c1764ca11971711fb5179eeab2b19edd883dd74
 
<?xml version="1.0" encoding="utf-8"?>
<CreateS3ReplicationRequest>
    <ParallelTablesLimit>5</ParallelTablesLimit>
	<RebuildIndexes>false</RebuildIndexes> 
	<Source>
		<ConnectionString>Data Source=your-source-db-server;Initial Catalog=TestDBAll_2;Persist Security Info=False;User ID=user-name;Password=********;Connect Timeout=1280</ConnectionString>
		<EncryptDataInTransit>true</EncryptDataInTransit>
		<ChangeTrackingRetentionPeriod>2 DAYS</ChangeTrackingRetentionPeriod> 
		<ResetChangeTracking>false</ResetChangeTracking>
	</Source>
	<Staging>
		<ConnectionString>Data Source=your-source-db-server;Initial Catalog=TestDBAll_2;Persist Security Info=False;User ID=user-name;Password=********;Connect Timeout=12800</ConnectionString>
		<EncryptDataInTransit>true</EncryptDataInTransit>
		<ReplicateCompatibilityLevel>true</ReplicateCompatibilityLevel>
	</Staging>
	<S3DataLake>
		<AccessType>IAMRole</AccessType>
		<AccountId>AKIAIK5IR2D555AV4RFA</AccountId>
		<RoleName>XrMEaWGywbZtPbtWm1TBmwUxkV5Ovm6tJEsuy+4X</RoleName>
		<BucketName>cbr-test</BucketName>
		<BucketFolderName>/SCD/11232017/TestDBAll_1</BucketFolderName>
		<RetentionDays>5</RetentionDays>
		<RegionEndpoint>us-east-1</RegionEndpoint>
		<ScdType>Type2</ScdType> <!-- Type1, Type2 --> 
                <FolderStructure>OneFolderPerTablePerYearMonthDayHour</FolderStructure> <!-- OneFolderPerExportProcessRun,OneFolderPerTable,OneFolderPerTablePerDay,OneFolderPerTablePerDayWithAppending,OneFolderPerTablePerYearMonthDay,OneFolderPerTablePerYearMonthDayHour --> 
                <FolderNamingFormat>KeyEqualValue</FolderNamingFormat> <!-- Default, KeyEqualValue (applicable to FolderStructure in (OneFolderPerTablePerYearMonthDay, OneFolderPerTablePerYearMonthDayHour) only --> 
                <FileFormat>Parquet</FileFormat> <!-- JSON, CSV, Parquet -->
                <CSVFileExtension>csv</CSVFileExtension> <!-- 12.207 or above. Applicable to FileFormat=CSV only. Default='csv', allows specifying of custom file extension such as "dat" --> 
                <Delimiter>TAB</Delimiter> <!-- applicable to FileFormat=CSV only -->
                <JsonExportType>Line</JsonExportType> <!-- Document, Line (applicable to FileFormat=JSON only) --> 
                <Compression>Snappy</Compression> <!-- None, Gzip, Bzip2, Snappy -->
                <AddTableSchemaPrefix>false</AddTableSchemaPrefix> <!-- if AddTableSchemaPrefix=true, table schema prefix will be added to output file name, e.g. for table dbo.mytable, output file will be named as dbo_mytable_{timestamps}_{processid}.snappy.parquet -->
                <ConvertDataTypesToString>false</ConvertDataTypesToString> <!-- applicable to Parquet only, for csv and json output formats, this param is defaulted to true --> 
	</S3DataLake>
	<AddUpdateTimestamp>false</AddUpdateTimestamp> 
	<TablesToInclude>[dbo].[Departments],[dbo].[Managers]</TablesToInclude> 
	<ReseedingSchedule>
		<NoPkTablesToInclude>[dbo].[NoPkTable1], [dbo].[NoPkTable2]</NoPkTablesToInclude>
		<PkTablesToInclude>[dbo].[PkTable1], [dbo].[PkTable2]</PkTablesToInclude>
		<FromTime>12:01:00 AM</FromTime>
		<ToTime>4:00:00 AM</ToTime>
		<RepeatIntervalInMinutes>120</RepeatIntervalInMinutes>
	</ReseedingSchedule>
</CreateS3ReplicationRequest>

 

Request Parameters

Parameter Description Required
ParallelTablesLimit Indicates the maximum number of parallel table replications at any given moment. Instances with insufficient performance can get saturated if a large number of parallel table replications is requested.

Note:
A higher value will decrease the total time to complete the seeding, but might affect the performance of the source RDS/SQL Server, depending on server type and size. For AWS RDS, a value of 10 is recommended for RDS type *.Large or below.

No
Type: Integer
Default: None
RebuildIndexes Indicates whether the indexes should be rebuilt in the destination database instance.

Note:
Rebuilding of indexes upon completion of seeding might be necessary when deadlocks occur in the destination database.

No
Type: String
Default: None
Valid value: true/ false
Source  Yes
ConnectionString A valid connection string to the source database instance. Yes
Type: String
Default: None
EncryptDataInTransit Indicates whether data in transit should be encrypted.

Note:
TLS 1.2 is activated for SQL Server 2016 and prior versions that have been updated to support TLS 1.2. TLS 1.1, 1.0 or SSL are activated if the SQL Server version does not support TLS 1.2

Yes
Type: String
Default: None
Valid value: true/ false
ChangeTrackingRetentionPeriod Specifies the period that Change Tracking data is saved for before being automatically purged. Yes
Type: String
Default: None
Valid values: 12 Hours, 1 Day, 2 Days, 3 Days, 4 Days, 5 Days, 6 Days, 7 Days, 8 Days
ResetChangeTracking If <ResetChangeTracking>false</ResetChangeTracking> and change tracking is already enabled on the database with a different retention period (e.g. 3 DAYS), the following error will be reported: "Change Tracking appears to have been already enabled with retention period 3 DAYS on the source database. In order to apply the new selected retention period, specify <ResetChangeTracking>true</ResetChangeTracking>."

Important: If change tracking is reset and the source database is involved in other replications, those will be invalidated after the reset.

Yes
Type: String
Default: None
Valid value: true/ false
Stageless If Stageless=true, a full staging replica is not maintained; a local SQL Express instance is used as a limited staging data buffer.
If Stageless=false, the full staging replica specified in the Staging\ConnectionString tag is maintained.

Important:
If Stageless=true, the Staging\ConnectionString tag is still required, but only the Initial Catalog needs to be customized (the name of the limited staging database); the rest of the connection string should be left at the default below:

Data Source=localhost\\sqlexpress;Initial Catalog={LimitedStagingDBName};Persist Security Info=False;User ID=rds365;Password=rds365;Connect Timeout=12800;Encrypt=True;TrustServerCertificate=True

No
Type: String
Default: false
Valid value: true/ false
Staging Yes
ConnectionString A valid connection string to the staging database instance. Yes
Type: String
Default: None
EncryptDataInTransit Indicates whether data in transit should be encrypted.

Note:
TLS 1.2 is activated for SQL Server 2016 and prior versions that have been updated to support TLS 1.2. TLS 1.1, 1.0 or SSL are activated if the SQL Server version does not support TLS 1.2

Yes
Type: String
Default: None
Valid value: true/ false
ReplicateCompatibilityLevel Replicate the compatibility level of the source database.

Note:
Valid values for SQL Server 2014 are 100, 110 and 120. Valid values for SQL Server 2016 are 100, 110, 120 & 130.

Yes
Type: String
Default: None
Valid value: true/ false
S3DataLake  Yes
AccessType The type of access can be either AccessKey or IAMRole Yes
Type: String
Default: None
Valid value: AccessKey/ IAMRole
AccessKey When the AccessType is set to AccessKey this field must contain the AWS AccessKey that the CloudBasic process will use to write to the specified S3 bucket.  Required if AccessType=AccessKey only
Type: String
Default: None
SecretKey When the AccessType is set to AccessKey this field must contain the AWS SecretKey that the CloudBasic process will use to write to the specified S3 bucket.  Required if AccessType=AccessKey only
Type: String
Default: None
AccountId When the AccessType is set to IAMRole this field must contain the AWS AccountID that the CloudBasic process will use to write to the specified S3 bucket. Required if AccessType=IAMRole only
Type: String
Default: None
RoleName When the AccessType is set to IAMRole this field must contain the AWS RoleName that the CloudBasic process will use to write to the specified S3 bucket.  Required if AccessType=IAMRole only
Type: String
Default: None
BucketName The name of the AWS S3 bucket where the CloudBasic process will write. Yes
Type: String
Default: None
BucketFolderName The folder in the AWS S3 bucket where the CloudBasic process will write. Yes
Type: String
Default: None
RetentionDays Number of days change tracking files are kept before the CloudBasic process deletes them. Yes
Type: Integer
Default: None
RegionEndpoint The name of the AWS Region where the S3 bucket is located. Yes
Type: String
Default: None
ScdType Slowly changing dimension type. Yes
Type: String
Default: Type1
Valid value: Type1/ Type2
FolderStructure Possible values include:

  • OneFolderFilePerTable (Scd Type 1 only)
  • OneFolderPerTable (multi-files; Scd Type 2 only)
  • OneFolderPerTablePerDay (Scd Type 2 only)
  • OneFolderPerTablePerDayWithAppending (file overwriting; Scd Type 2 only)
  • OneFolderPerExportProcessRun (Scd Type 1 only)
  • OneFolderPerTablePerYearMonthDay (Scd Type 2 only)
  • OneFolderPerTablePerYearMonthDayHour (Scd Type 2 only; version 12.132 and above)
Yes
Type: String
Default: None
FileFormat Controls the export File Format Yes
Type: String
Default: None
Valid values are:
- JSON
- CSV
- Parquet
Delimiter The ASCII code of the delimiter character to be used if FileFormat=CSV Required if FileFormat=CSV
Type: String
Default: ASC30
Valid value: ASC30, ASC31, TAB; custom character(s), e.g. $|
CSVFileExtension The file extension to be used for the output files when FileFormat=CSV Optional, applicable to FileFormat=CSV only
Type: String
Default: csv
Valid value: any valid file extension, e.g. dat, txt
FolderNamingFormat
(Version 12.133 and above)
Controls the folder naming format when ScdType=Type2 and FolderStructure=OneFolderPerTablePerYearMonthDay or FolderStructure=OneFolderPerTablePerYearMonthDayHour (see the path sketch after this table). Required only if ScdType=Type2
and
FolderStructure in (
OneFolderPerTablePerYearMonthDay,
OneFolderPerTablePerYearMonthDayHour
)
Type: String
Default: Default
Valid values are:
- Default (YYYY/DD/MM/HH)
- DayHourEqualValue (day=yyyymmdd/hour=hh) -- 12.204 and above
- KeyEqualValue (year=YYYY/month=MM/day=DD/hour=HH)
JsonExportType Only applicable when the FileFormat is set to JSON Required if FileFormat=JSON
Type: String
Default: Document
Valid values are:
- Document
- Line
Compression Parquet files can use:
- None
- Gzip
- Snappy (version 12.133 and above)
CSV and JSON files can use:
- None
- Gzip
- Bzip2
Yes
Type: String
Default: None
Valid values are:
- None
- Bzip2
- Gzip
- Snappy (version 12.133 and above)
AddTableSchemaPrefix
(Version 12.132 and above)
If set to true, the table schema prefix will be added to the output file name (with the default schema dbo, table dbo.{tablename} produces dbo_{tablename}.{parquet|json|csv}). If tables with the same name exist under different schemas, adding the file prefix is required to proceed. Since new tables can be added at a later time, adding the prefix to the output file name is recommended. No
Type: String
Default: true (if upgrading from an older version to 12.132 and above, backward compatibility is ensured)
Valid values: true/ false
ConvertDataTypesToString
(Version 12.132 and above)
If set to true, all decimal types will be converted to string in the output file. No
Type: String
Default: false (if upgrading from an older version to 12.132 and above, the value is defaulted to true to ensure backward compatibility)
Valid values: true/ false
AddUpdateTimestamp Indicates whether to add a field named [cb_sys_update_timestamp] to each destination/replica table. No
Type: String
Default: None
Valid value: true/ false
TablesToInclude List of tables to be included in the replication job:
XML formatted request example:
<TablesToInclude>[dbo].[Departments],[dbo].[Managers]</TablesToInclude>
JSON formatted request example:
"tablesToInclude": "[dbo].[Departments],[dbo].[Managers]"Pass 'All' to include all tables:
XML formatted request sample:
<TablesToInclude>All</TablesToInclude>
JSON formatted request sample:
"tablesToInclude": "All"
Yes
Type: String
Default: None
ReseedingSchedule Tables with No Primary Keys Reseeding Schedule List.

Note:
Tables with No Primary Keys (PKs) will be seeded during replication but will not be tracked for changes. If adding PKs is not an option, you can include those tables in the reseeding schedule job for reseeding. See http://GetStarted.cloudbasic.net for more information.

 No
NoPkTablesToInclude The list of tables with no Primary Keys that will be included in the reseeding.

Note:
Any table without a Primary Key (No PK), regardless of whether it is excluded from the <TablesToInclude> list, can be included here.

No
Type: String
Default: None
PkTablesToInclude The list of tables with Primary Keys that will be included in the reseeding.

Note:
Only tables with Primary Keys (PK) NOT included in the <TablesToInclude> list can be included here.

No
Type: String
Default: None
FromTime Starting time of the period when the reseeding process is allowed to run. Yes
Type: String
Default: None
ToTime Ending time of the period when the reseeding process is allowed to run. Yes
Type: String
Default: None
RepeatIntervalInMinutes Specifies the frequency of execution of the reseeding process in minutes. Yes
Type: Integer
Default: None
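
The FolderStructure and FolderNamingFormat values above determine how output files are laid out under BucketFolderName. The sketch below only expands the documented patterns into example S3 key prefixes for FolderStructure=OneFolderPerTablePerYearMonthDayHour; the exact layout is produced by CloudBasic, so treat this as illustrative.

Python

from datetime import datetime, timezone

def folder_prefix(base: str, table: str, fmt: str, ts: datetime) -> str:
    """Expand the documented FolderNamingFormat patterns into an example key prefix."""
    if fmt == "Default":                    # YYYY/DD/MM/HH
        parts = f"{ts:%Y}/{ts:%d}/{ts:%m}/{ts:%H}"
    elif fmt == "DayHourEqualValue":        # day=yyyymmdd/hour=hh (12.204 and above)
        parts = f"day={ts:%Y%m%d}/hour={ts:%H}"
    elif fmt == "KeyEqualValue":            # year=YYYY/month=MM/day=DD/hour=HH
        parts = f"year={ts:%Y}/month={ts:%m}/day={ts:%d}/hour={ts:%H}"
    else:
        raise ValueError(f"unknown FolderNamingFormat: {fmt}")
    return f"{base.strip('/')}/{table}/{parts}/"

ts = datetime(2017, 11, 23, 14, tzinfo=timezone.utc)
print(folder_prefix("/SCD/Type2/", "Departments", "KeyEqualValue", ts))
# SCD/Type2/Departments/year=2017/month=11/day=23/hour=14/

# Delimiter note: ASC30 and ASC31 are the ASCII record/unit separator characters,
# so CSV output exported with Delimiter=ASC30 can be split on chr(30).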

 

Request Headers

This implementation uses only request headers that are common to all operations. For more information, please see the section Common Request Headers.

 

Response Syntax:

JSON

HTTP/1.1 200
status: 200
 
{
	"replicationId": "a90f2cb9-21e6-4ca9-828f-5ed5a92d7d9d",
	"status": "Running",
	"note": ""
}

XML

HTTP/1.1 200
status: 200
 
<?xml version="1.0" encoding="utf-8"?>
<CreateS3ReplicationResponse>
	<ReplicationId>a90f2cb9-21e6-4ca9-828f-5ed5a92d7d9d</ReplicationId>
	<Status>Running</Status>
	<Note />
</CreateS3ReplicationResponse>

 

Response Parameters

Parameter Description Optional
ReplicationId A GUID value that uniquely identifies the newly created Replication. No
Status The current status of the Replication process. Possible values include:

  • Running
  • RunningWithErrors
  • RunningWithWarnings
  • Failed
  • Success
  • CompletedWithErrors
  • CompletedWithWarnings
No
Note For security reasons, random passwords are assigned to any replicated logins and users. Passwords need to be manually reset on the replica SQL Server after the initial database replication seeding completes. Password changes are also not automatically replicated as part of the schema replication process. Yes

 

Error Response Syntax:

JSON

HTTP/1.1 400
status: 400

{
   "errors": [
       "{S3DataLake\BucketName}: Invalid value for BucketName"
	],
	"requestId": 20
}

XML

HTTP/1.1 400
status: 400
 
<?xml version="1.0" encoding="utf-8"?>
<ErrorResponse> 
  <RequestId>request-id</RequestId> 
  <Errors> 
    <Error> <Message>Error message 1</Message> </Error> 
    <Error> <Message>Error message 2</Message> </Error> 
  </Errors> 
</ErrorResponse>
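
A minimal sketch of consuming both response shapes shown above, assuming the JSON variant and the response object returned by the request sketch earlier on this page; the field names follow the Response Parameters and error example sections.

Python

if response.status_code == 200:
    body = response.json()
    replication_id = body["replicationId"]    # GUID identifying the new replication
    status = body["status"]                   # e.g. Running, Failed, Success, ...
    if body.get("note"):
        # e.g. replicated logins receive random passwords that must be reset manually
        print("Note:", body["note"])
    print(f"Replication {replication_id} created, status: {status}")
else:
    # Error responses (HTTP 400) carry a requestId and a list of validation errors.
    err = response.json()
    print("Request", err.get("requestId"), "failed:")
    for message in err.get("errors", []):
        print(" -", message)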