#!groovy

/**
 * Deploy components to the D4Science Infrastructure (dev-only)
 *
 * Roberto Cirillo (ISTI-CNR)
 */

// related jenkins job: https://jenkins.d4science.org/job/gCubeDeployer/

// Agent-local folder/file where pending deploy requests are accumulated
// between timer-triggered runs.
def agent_root_folder = '/var/lib/jenkins'
def agent_deploy_filename = 'deploy.csv'

// Path of the deploy list consumed by the timer-driven stage. A custom file
// can be injected via the 'deployFile' build parameter; otherwise the
// agent-local CSV is used.
def deployList
if (params.deployFile) {
    println "Using custom deploy file"
    deployList = params.deployFile
} else {
    println "Using local deploy file"
    // load the report from local
    deployList = agent_root_folder + '/' + agent_deploy_filename
    println "Load from local file ${deployList}"
}

pipeline {
    agent { label 'CD' }
    triggers {
        // every fifteen minutes (perhaps at :07, :22, :37, :52)
        cron('H/15 * * * *')
        // once a day on the 1st and 15th of every month
        // cron('H H 1,15 * *')
        // once in every two hours slot between 9 AM and 5 PM every weekday
        // (perhaps at 10:38 AM, 12:38 PM, 2:38 PM, 4:38 PM)
        // cron('H H(9-16)/2 * * 1-5')
    }
    environment {
        AGENT_ROOT_FOLDER = "${agent_root_folder}"
        DEPLOY_FILE = "${agent_root_folder}/${agent_deploy_filename}"
        TRIGGER_JOB = "${params.TRIGGER_JOB}"
        TRIGGER_VERSION = "${params.TRIGGER_VERSION}"
    }
    stages {
        stage('initialize environment') {
            steps {
                // ensure the deploy file exists before any stage reads or appends to it
                sh '''
                date=`date`
                touch $DEPLOY_FILE
                '''
            }
        }
        stage('Deploy from system') {
            when {
                allOf {
                    triggeredBy 'TimerTrigger'
                    // maybe we can add a new condition in order to consider
                    // the manual execution of this pipeline
                    environment name: 'IS_CRON', value: 'True'
                }
            }
            steps {
                echo 'Cron build enabled. Deploy from system ongoing'
                script {
                    // parse the report and extract the data
                    def components = parseCSVList(deployList)
                    assert 0 < components.size() : "No component found"
                    for (component in components) {
                        println "$component"
                    }
                }
            }
        }
        stage('Nothing to do by System ') {
            when {
                allOf {
                    triggeredBy 'TimerTrigger'
                    environment name: 'IS_CRON', value: 'False'
                }
            }
            steps {
                echo 'Do Nothing: cron build disabled'
            }
        }
        stage('Add new pending deploy ') {
            when {
                environment name: 'IS_CRON', value: 'True'
                anyOf {
                    triggeredBy 'BuildUpstreamCause'
                    triggeredBy 'UpstreamCause'
                }
            }
            steps {
                // append the upstream job/version to the deploy file; the
                // timer-triggered run will pick it up later
                sh '''
                echo "Cron build enabled. New deploy of ${TRIGGER_JOB} - ${TRIGGER_VERSION} appended to the deploy file"
                echo "${TRIGGER_JOB} , ${TRIGGER_VERSION}" >> ${DEPLOY_FILE}
                '''
            }
        }
        stage('Deploy from job ') {
            when {
                environment name: 'IS_CRON', value: 'False'
                anyOf {
                    triggeredBy 'BuildUpstreamCause'
                    triggeredBy 'UpstreamCause'
                }
            }
            steps {
                echo "Cron build disabled. New deploy of ${params.TRIGGER_JOB} - ${params.TRIGGER_VERSION} ongoing"
            }
        }
    }
    post {
        always {
            script {
                sh '''
                echo ' jobs currently appended:'
                '''
                // cat ./${ACTION_DEPLOY_FILE}.csv
            }
        }
        success {
            echo 'The deploy pipeline worked!'
            emailext attachLog: true, //attachmentsPattern: "**/${ACTION_DEPLOY_FILE}.csv",
                to: 'roberto.cirillo@isti.cnr.it',
                subject: "Deploy report",
                body: "${currentBuild.fullDisplayName}. Build time: ${currentBuild.durationString}. See ${env.BUILD_URL}."
        }
        failure {
            echo 'The deploy pipeline has failed'
            emailext attachLog: true,
                to: 'roberto.cirillo@isti.cnr.it',
                subject: "[Jenkins deploy D4S] deploy ${currentBuild.fullDisplayName} failed",
                body: "Something is wrong with ${env.BUILD_URL}"
        }
    }
}

// a non CPS method is necessary for the usage of splitEachLine()
// NOTE(review): this helper appears unused (the pipeline calls parseCSVList).
// NOTE(review): readCSV is a pipeline step and cannot be invoked from a
// @NonCPS method; also readCSV returns a List of records, on which
// splitEachLine() is not defined — verify before wiring this in.
@NonCPS
def parseDeployComponent(def deployList) {
    println "Going to parsing file ${deployList}"
    def content = readCSV file: "${deployList}"
    println "JOB REPORT CONTENT: ${content}"
    def components = []
    content.splitEachLine(/,/) { columns ->
        // skip comment lines and the header row
        if (columns[0].startsWith('#') || columns[0].startsWith('Component'))
            return
        components.add([
            name    : columns[0],
            version : columns[1]
        ])
    }
    return components
}

/**
 * Parse the deploy CSV file into a list of [name: ..., version: ...] maps.
 *
 * Blank lines, '#' comment lines and the 'Component' header row are skipped.
 * Returns an empty list when the file does not exist.
 *
 * Fixes two defects of the previous version:
 *  - '.each { line, count -> }' passed a two-parameter closure to each(),
 *    which only supplies one argument; eachWithIndex is required to get the
 *    line counter.
 *  - no component list was ever built or returned, so the caller received
 *    the raw line array only by accident (each() returns its receiver).
 */
def parseCSVList(def deployList) {
    def components = []
    if (fileExists("${deployList}")) {
        echo ' file found'
        readFile("${deployList}").split('\n').eachWithIndex { line, count ->
            println ' you are parsing line : ' + count
            def trimmed = line.trim()
            // skip empty lines, comments and the header row
            if (trimmed && !trimmed.startsWith('#') && !trimmed.startsWith('Component')) {
                def fields = trimmed.split(',')
                for (String item : fields) {
                    println item
                }
                components.add([
                    name    : fields[0].trim(),
                    // version may be absent on malformed lines; keep it null then
                    version : fields.size() > 1 ? fields[1].trim() : null
                ])
            }
        }
    } else {
        echo ' File Not found. Failing.'
    }
    return components
}