# UrlsController/src/main/resources/application.properties
# HTTP CONFIGURATION
server.port = 1880
# Server API path
server.servlet.context-path=/api
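# With the port and context-path above, a controller endpoint mapped to e.g. "/urls" (hypothetical mapping)
# would be served at: http://<host>:1880/api/urls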
# Service config
services.pdfaggregation.controller.isTestEnvironment = false
# When "isTestEnvironment" is "true", the "testDatabase" below and all of its tables are created, if they do not already exist.
# The tables "datasource", "publication", "publication_pids" and "publication_urls" are filled with data from the same tables in the "initialDatabase", unless they already existed.
# When "isTestEnvironment" is "false", the "initialDatabase" is used. The Controller assumes the above 4 tables are present and only creates, if they do not exist, the following tables:
# "assignment", "attempt" and "payload", which are populated during execution.
services.pdfaggregation.controller.db.initialDatabaseName = pdfaggregation_i
services.pdfaggregation.controller.db.testDatabaseName = pdfaggregationdatabase_new_s3_names
services.pdfaggregation.controller.baseTargetLocation = /tmp/
services.pdfaggregation.controller.maxAttemptsPerRecord = 3
services.pdfaggregation.controller.assignmentLimit = 10000
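# Presumably, each worker request is assigned up to "assignmentLimit" (10,000) url-records at a time,
# and a failing record is retried at most "maxAttemptsPerRecord" (3) times before being given up on.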
services.pdfaggregation.controller.s3.endpoint = xa
services.pdfaggregation.controller.s3.accessKey = xa
services.pdfaggregation.controller.s3.secretKey = xa
services.pdfaggregation.controller.s3.region = xa
services.pdfaggregation.controller.s3.bucketName = xa
services.pdfaggregation.controller.s3.shouldEmptyBucket = false
services.pdfaggregation.controller.s3.shouldShowAllS3Buckets = true
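# The "xa" values above are placeholders. A real deployment would supply its own values, e.g. (hypothetical):
#   services.pdfaggregation.controller.s3.endpoint = https://s3.example.org
#   services.pdfaggregation.controller.s3.region = us-east-1
#   services.pdfaggregation.controller.s3.bucketName = pdf-aggregation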
# Database
spring.datasource.url=jdbc:impala://iis-cdh5-test-gw.ocean.icm.edu.pl:21050/
spring.datasource.username=
spring.datasource.password=
spring.datasource.driver-class-name=com.cloudera.impala.jdbc41.Driver
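# Note: the JDBC URL above ends with "/" (no database segment); presumably the Controller selects the
# "initialDatabaseName" / "testDatabaseName" configured further up at runtime.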
spring.datasource.hikari.pool-name=ControllerPool
spring.datasource.hikari.maximumPoolSize=20
spring.datasource.hikari.maxLifetime=1800000
spring.datasource.hikari.minimumIdle=4
spring.datasource.hikari.connectionTimeout=30000
spring.datasource.hikari.idleTimeout=600000
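# All HikariCP time-based settings are in milliseconds:
#   maxLifetime=1800000 (30 min), connectionTimeout=30000 (30 s), idleTimeout=600000 (10 min)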
# LOGGING LEVELS
logging.level.root=INFO
logging.level.org.springframework.web=INFO
logging.level.org.springframework.security=WARN
logging.level.org.apache.hadoop.io.compress=WARN
logging.level.eu.openaire.urls_controller=DEBUG
spring.output.ansi.enabled=always
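# "always" forces ANSI-colored console output even when terminal support cannot be detected
# (the other accepted values are "detect" and "never").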
# Parquet settings
hdfs.baseUrl=https://iis-cdh5-test-gw.ocean.icm.edu.pl/webhdfs/v1
# HTTP-Authorization --> Authorization: Basic Base64Encode(username:password)
# Provide the credentials either by giving the Http-Auth-string AND the username (the username is also used as a parameter in the WebHDFS requests),
# or by giving the username AND the password, so that the program can create the auth-string programmatically.
# The first approach offers more privacy, the second more convenience. Either way, all three properties should remain uncommented, no matter which ones are used.
hdfs.httpAuth=
hdfs.userName=
hdfs.password=
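# Example with hypothetical credentials: for userName "user" and password "pass",
# Base64Encode("user:pass") = "dXNlcjpwYXNz", so the equivalent auth-string would be:
#   hdfs.httpAuth=Basic dXNlcjpwYXNz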
schema.payload.filePath=src/main/resources/schemas/payload.avsc
schema.attempt.filePath=src/main/resources/schemas/attempt.avsc
output.parquetLocalDirectoryPath=parquetFiles/
hdfs.parquetRemoteBaseDirectoryPath=/tmp/parquet_uploads/
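# Illustration (assumed request shape, following the standard WebHDFS REST API): uploading a local
# parquet file to the remote base directory would issue a request along the lines of:
#   PUT {hdfs.baseUrl}/tmp/parquet_uploads/<file>.parquet?op=CREATE&user.name={hdfs.userName}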
## MULTIPART (MultipartProperties)
# Enable multipart uploads
spring.servlet.multipart.enabled=true
# Threshold after which files are written to disk.
spring.servlet.multipart.file-size-threshold=2KB
# Max file size.
spring.servlet.multipart.max-file-size=200MB
# Max request size.
spring.servlet.multipart.max-request-size=215MB
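# Note: "max-request-size" (215MB) is deliberately larger than "max-file-size" (200MB), presumably to
# leave headroom for the multipart envelope and any extra form fields sent in the same request.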