<?xml version="1.0" encoding="utf-8"?>
<!--Please note: Paths and extensions defined herein are treated as case-sensitive by OptimizeRasters.py-->
<OptimizeRasters>
<Description>Use this template to optimize and convert NetCDF imagery to MRF format with LERC compression. The output is assigned the NZTM projection (EPSG:2193). LERC is especially valuable for higher-bit-depth data such as newer satellite imagery and elevation models.</Description>
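<!--Illustrative usage sketch (not part of the template): OptimizeRasters.py is typically run with an input folder, an output folder, and this configuration file. The paths and file name below are placeholders, and exact arguments may vary with your OptimizeRasters version.
    python OptimizeRasters.py -config=C:\path\to\NetCDF_to_MRF_LERC.xml -input=C:\data\netcdf_in -output=C:\data\mrf_out
-->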
<Defaults>
<!--Acceptable modes are [mrf, mrf_jpeg, tif, tif_lzw, tif_jpeg, rasterproxy]. The following modes, while still supported, are no longer recommended: [cachingmrf, clonemrf, splitmrf]-->
<Mode>mrf</Mode>
<!--File extensions treated as (Rasters). These files will be converted rather than copied as-is from the input path-->
<RasterFormatFilter>nc</RasterFormatFilter>
<!--File extensions to ignore completely while copying files/data from the input path-->
<ExcludeFilter>png,tif,tiff,TIF,TIFF,img,jp2,JP2,IMG,JPG,jpg,sid,SID,tmp,rrd,idx,lrc,mrf_cache,pjp,ppng,pft,pzp,ovr,aux.xml,aux,tfw,TFW,pjg</ExcludeFilter>
<!--If 'true', sub-directories will be scanned for (Rasters). Acceptable values are [true, yes, t, 1, y, false, no, f, 0, n]-->
<IncludeSubdirectories>false</IncludeSubdirectories>
<!--Compression to use on output (Rasters)-->
<Compression>LERC</Compression>
<!--How the data is stored: pixel interleave or band interleave. For clonemrf the value should match the input (Def: Pixel)-->
<Interleave></Interleave>
<!--Compression quality to apply for JPEG compression (Def: 85)-->
<Quality></Quality>
<!--LERC precision to apply for LERC compression (Def: 0.5 for int data and 0.001 for float data)-->
<LERCPrecision>0.001</LERCPrecision>
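<!--Illustrative note: LERC precision is the maximum per-pixel error allowed by the compression. With a precision of 0.001 on floating-point elevation data in metres, each stored value is within 0.001 m of the original; a larger value gives smaller files at the cost of accuracy-->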
<!--Build pyramids? Acceptable values are [true, yes, t, 1, y, false, no, f, 0, n, only, external]-->
<BuildPyramids>true</BuildPyramids>
<!--Pyramid levels to create (Def: 2)-->
<PyramidFactor>2</PyramidFactor>
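<!--Illustrative note: with a pyramid factor of 2, each overview level is half the resolution of the one below it, e.g. a 4096-pixel-wide raster yields overviews roughly 2048, 1024, 512, ... pixels wide; the exact set of levels is computed by OptimizeRasters/gdaladdo-->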
<!--Pyramid sampling [nearest,avg,average,gauss,cubic,cubicspline,lanczos,average_mp,average_magphase,mode] (Def: average)-->
<!--The avg sampling method is supported only by the MRF format; it is similar to average but performs a pure averaging of every 2x2 block of pixels from the top left, which is slightly faster-->
<PyramidSampling>avg</PyramidSampling>
<!--Pyramid compression [jpeg, lzw, deflate] (Def: jpeg)-->
<PyramidCompression>LERC</PyramidCompression>
<!--No data value. If undefined/empty value -a_nodata will not be applied (Def: undefined)-->
<NoDataValue>-9999</NoDataValue>
<!--Output tile size-->
<BlockSize>512</BlockSize>
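<!--Illustrative note: a block size of 512 stores the output as 512x512-pixel tiles; this is the unit in which the data is compressed and read back-->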
<!--This needs to be specified when generating caching or clone MRF; the value should be based on the input raster's pyramid factor (Def: 2)-->
<Scale></Scale>
<!--If 'true', raster output extensions will not be renamed to 'mrf'. Acceptable values are [true, yes, t, 1, y, false, no, f, 0, n]-->
<KeepExtension>false</KeepExtension>
<!--Number of simultaneous threads/instances of gdal_translate, gdaladdo, etc. to use for parallel processing (Def: 10)-->
<Threads>4</Threads>
<!--Path to save log/completed job files-->
<LogPath>C:\General\MRF_LERC\Logs</LogPath>
<!--User-defined gdal_translate parameters. These values will be passed through without modification-->
<GDAL_Translate_UserParameters>-a_srs EPSG:2193 -stats</GDAL_Translate_UserParameters>
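<!--Illustrative sketch (not emitted by OptimizeRasters verbatim): with these defaults the per-file conversion is roughly equivalent to a gdal_translate call along the following lines; exact options may differ by version and the file names are placeholders.
    gdal_translate -of MRF -co COMPRESS=LERC -co BLOCKSIZE=512 -co OPTIONS="LERC_PREC=0.001" -a_nodata -9999 -a_srs EPSG:2193 -stats input.nc output.mrf
-->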
<!--Input cloud storage type to process/download data. Acceptable values are [Amazon, Azure]-->
<In_Cloud_Type></In_Cloud_Type>
<!--Output cloud storage type to upload data. Acceptable values are [Amazon, Azure]-->
<Out_Cloud_Type></Out_Cloud_Type>
<!--Whether to upload processed data to cloud storage. Acceptable values are [true, yes, t, 1, y, false, no, f, 0, n]-->
<CloudUpload>false</CloudUpload>
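<!--Illustrative note: when CloudUpload is 'true', set Out_Cloud_Type to Amazon or Azure and fill in the matching output credential entries below; when it is 'false' the output entries can be left empty-->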
<!--The Profile name to use for the output S3 credentials-->
<Out_S3_AWS_ProfileName></Out_S3_AWS_ProfileName>
<!--AWS Access Key ID to use for the output S3 account-->
<Out_S3_ID></Out_S3_ID>
<!--AWS Secret Access Key for the output S3 account-->
<Out_S3_Secret></Out_S3_Secret>
<!--Output S3 bucket name-->
<Out_S3_Bucket></Out_S3_Bucket>
<!--Output root folder path on S3 to upload converted data-->
<Out_S3_ParentFolder></Out_S3_ParentFolder>
<!--Set canned ACL to apply to uploaded files. Acceptable values are [private, public-read, public-read-write, authenticated-read, bucket-owner-read, bucket-owner-full-control]-->
<Out_S3_ACL></Out_S3_ACL>
<!--If 'true', the generated output will be deleted once it has been successfully uploaded to S3. Acceptable values are [true, yes, t, 1, y, false, no, f, 0, n]-->
<Out_S3_DeleteAfterUpload>true</Out_S3_DeleteAfterUpload>
<!--The input Profile name to use for S3 credentials-->
<In_S3_AWS_ProfileName></In_S3_AWS_ProfileName>
<!--AWS Access Key ID to use for the input S3 account-->
<In_S3_ID></In_S3_ID>
<!--AWS Secret Access Key for the input S3 account-->
<In_S3_Secret></In_S3_Secret>
<!--Input S3 bucket name-->
<In_S3_Bucket></In_S3_Bucket>
<!--Input S3 root folder path to access/download data-->
<In_S3_ParentFolder></In_S3_ParentFolder>
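<!--Illustrative note: Out_S3_AWS_ProfileName and In_S3_AWS_ProfileName refer to a named profile in the standard AWS credentials file (for example ~/.aws/credentials or %USERPROFILE%\.aws\credentials); when a profile is supplied, the explicit ID/Secret entries can usually be left empty. The profile name and keys below are placeholders.
    [or_upload_profile]
    aws_access_key_id = AKIAXXXXXXXXXXXXXXXX
    aws_secret_access_key = xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-->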
<!--The input Profile name to use for Azure credentials-->
<In_Azure_ProfileName></In_Azure_ProfileName>
<!--Input Azure account name-->
<In_Azure_AccountName></In_Azure_AccountName>
<!--Input Azure primary key-->
<In_Azure_AccountKey></In_Azure_AccountKey>
<!--Input Azure container name-->
<In_Azure_Container></In_Azure_Container>
<!--Input Azure root folder path to access/download data-->
<In_Azure_ParentFolder></In_Azure_ParentFolder>
<!--Set the user access permission level on uploaded files. Acceptable values are ['private', 'blob', 'container']-->
<Out_Azure_Access></Out_Azure_Access>
<!--The Profile name to use for the output Azure credentials-->
<Out_Azure_ProfileName></Out_Azure_ProfileName>
<!--Output Azure account name-->
<Out_Azure_AccountName></Out_Azure_AccountName>
<!--Output Azure primary key-->
<Out_Azure_AccountKey></Out_Azure_AccountKey>
<!--Output Azure container name-->
<Out_Azure_Container></Out_Azure_Container>
<!--Output root folder path on Azure to upload converted data-->
<Out_Azure_ParentFolder></Out_Azure_ParentFolder>
</Defaults>
</OptimizeRasters>