@@ -238,6 +238,19 @@ else
238238 TRANSFER_TAR_OPTIONS=(" " )
239239fi
240240
# Extract a gzip-compressed file into a plain tar file.
#   $1: path of the gzip-compressed backup (read only).
#   $2: destination path for the decompressed tar file.
# Returns non-zero (and logs an error) if decompression fails, so callers
# never proceed with a truncated/partial tar file.
gzip_to_plain_tar () {
  brlog "DEBUG" "Recompressing file"
  local original_backup="$1"
  local recompressed_backup="$2"
  if ! gunzip -c "${original_backup}" > "${recompressed_backup}" ; then
    brlog "ERROR" "Failed to decompress ${original_backup}"
    return 1
  fi
  # Debug: log both file sizes so an unexpectedly small output is visible in logs.
  # Declared local so the debug helpers do not leak into the global namespace.
  local original_backup_info recompressed_backup_info
  original_backup_info=$(ls -lh "${original_backup}")
  recompressed_backup_info=$(ls -lh "${recompressed_backup}")
  brlog "DEBUG" "Original backup: ${original_backup_info}"
  brlog "DEBUG" "Recompressed backup: ${recompressed_backup_info}"
}
253+
241254kube_cp_from_local (){
242255 IS_RECURSIVE=false
243256 if [ " $1 " = " -r" ] ; then
@@ -413,6 +426,18 @@ get_mc(){
413426 fi
414427}
415428
# Prepare the MinIO client (mc) and a private config directory under TMP_WORK_DIR.
# If MC_COMMAND is set (even to an empty string — hence the ${VAR+UNDEF} test),
# it is used as the mc binary; otherwise mc is downloaded into TMP_WORK_DIR.
# Sets: MC (mc command path), MINIO_CONFIG_DIR (exported), MC_OPTS (array of
# common mc options). Paths are quoted so directories with spaces don't break
# mkdir/get_mc.
setup_mc () {
  mkdir -p "${TMP_WORK_DIR}/.mc"
  if [ -n "${MC_COMMAND+UNDEF}" ] ; then
    # Caller supplied an mc binary; use it as-is.
    MC=${MC_COMMAND}
  else
    get_mc "${TMP_WORK_DIR}"
    MC=${TMP_WORK_DIR}/mc
  fi
  export MINIO_CONFIG_DIR="${PWD}/${TMP_WORK_DIR}/.mc"
  MC_OPTS=(--config-dir "${MINIO_CONFIG_DIR}" --quiet --insecure)
}
440+
416441# Compare two timestamps (i.e. 2024-06-12T14:34:03Z).
417442# Return 1 if timestamp1 > timestamp2 else 0.
418443compare_timestamps () {
@@ -1254,7 +1279,8 @@ check_datastore_available(){
12541279# Assume there are only version 6 or 7 indices in the cluster.
12551280get_elastic_version () {
12561281 ELASTIC_POD=$( get_elastic_pod)
1257- run_script_in_pod ${ELASTIC_POD} " ${SCRIPT_DIR} /reindex_es6_indices.sh" " --dry-run" -c elasticsearch ${OC_ARGS}
1282+ ELASTIC_POD_CONTAINER=$( get_elastic_pod_container)
1283+ run_script_in_pod ${ELASTIC_POD} " ${SCRIPT_DIR} /reindex_es6_indices.sh" " --dry-run" -c " ${ELASTIC_POD_CONTAINER} " ${OC_ARGS}
12581284 result=$( get_last_cmd_result_in_pod)
12591285
12601286 if echo " $result " | grep -q " Skip ElasticSearch 6 index" ; then
@@ -1287,7 +1313,8 @@ validate_elastic_version() {
12871313# Reindex elastic search indices from version 6 to 7.
12881314reindex_elastic_es6 () {
12891315 brlog " INFO" " Reindexing ..."
1290- run_script_in_pod ${ELASTIC_POD} " ${SCRIPT_DIR} /reindex_es6_indices.sh" " " -c elasticsearch ${OC_ARGS}
1316+ ELASTIC_POD_CONTAINER=$( get_elastic_pod_container)
1317+ run_script_in_pod ${ELASTIC_POD} " ${SCRIPT_DIR} /reindex_es6_indices.sh" " " -c " ${ELASTIC_POD_CONTAINER} " ${OC_ARGS}
12911318 brlog " INFO" " Reindex completed. Waiting for ElasticSearch status to be green ..."
12921319 # Wait a few moments for elastic search to be ready after the reindex.
12931320 # TODO: implement waiting logic inside of reindex_es6_indices.sh, and remove the sleep below from this function.
@@ -1341,12 +1368,49 @@ check_postgres_available(){
13411368}
13421369
# Print the name of the first search data pod for this tenant.
# Before WD 5.2.0 the backend is ElasticSearch (app=elastic data pods);
# from 5.2.0 onwards it is OpenSearch (app=opensearch).
get_elastic_pod (){
  local wd_version=${WD_VERSION:-$(get_version)}
  local pod_selector="tenant=${TENANT_NAME},app=opensearch"
  if [ $(compare_version ${wd_version} "5.2.0") -lt 0 ] ; then
    pod_selector="tenant=${TENANT_NAME},app=elastic,ibm-es-data=True"
  fi
  echo "$(oc get pods ${OC_ARGS} -o jsonpath="{.items[0].metadata.name}" -l "${pod_selector}")"
}
1378+
# Print the container name running the search engine inside the data pod:
# "elasticsearch" before WD 5.2.0, "opensearch" from 5.2.0 onwards.
get_elastic_pod_container () {
  local wd_version=${WD_VERSION:-$(get_version)}
  if [ $(compare_version ${wd_version} "5.2.0") -ge 0 ] ; then
    echo "opensearch"
  else
    echo "elasticsearch"
  fi
}
1387+
# Print the snapshot repository name used by the search backend:
# "my_backup" before WD 5.2.0 (ElasticSearch), "cloudpak" from 5.2.0 (OpenSearch).
get_elastic_repo () {
  local wd_version=${WD_VERSION:-$(get_version)}
  if [ $(compare_version ${wd_version} "5.2.0") -ge 0 ] ; then
    echo "cloudpak"
  else
    echo "my_backup"
  fi
}
1396+
# Print the in-pod filesystem path of the snapshot repository:
# /workdir/shared_storage before WD 5.2.0, /workdir/snapshot_storage from 5.2.0.
get_elastic_repo_location () {
  local wd_version=${WD_VERSION:-$(get_version)}
  if [ $(compare_version ${wd_version} "5.2.0") -ge 0 ] ; then
    echo "/workdir/snapshot_storage"
  else
    echo "/workdir/shared_storage"
  fi
}
1405+
# Print the name of the first OpenSearch cluster custom resource for this tenant.
get_opensearch_cluster () {
  local cluster_selector="tenant=${TENANT_NAME},app=opensearch"
  echo "$(oc get cluster.opensearch ${OC_ARGS} -o jsonpath="{.items[0].metadata.name}" -l "${cluster_selector}")"
}
13461409
# Return 0 when the search cluster health is "yellow" or "green", 1 otherwise.
# The curl runs inside the search container so the check uses the pod-local
# endpoint (https://localhost:9200) and in-pod ELASTIC_USER/ELASTIC_PASSWORD.
check_elastic_available (){
  ELASTIC_POD=$(get_elastic_pod)
  ELASTIC_POD_CONTAINER=$(get_elastic_pod_container)
  # The single-quoted bash -c payload is deliberate: ${ELASTIC_USER} and
  # ${ELASTIC_PASSWORD} must be expanded by bash inside the container,
  # not by this script.
  oc exec ${OC_ARGS} "${ELASTIC_POD}" -c "${ELASTIC_POD_CONTAINER}" -- bash -c 'export ELASTIC_ENDPOINT=https://localhost:9200 && \
  curl -s -k -u ${ELASTIC_USER}:${ELASTIC_PASSWORD} "${ELASTIC_ENDPOINT}/_cluster/health" | grep "\"status\":\"yellow\"\\|\"status\":\"green\"" > /dev/null' || return 1
  return 0
}
@@ -1355,8 +1419,9 @@ check_s3_available(){
13551419 setup_s3_env
13561420 local wd_version=" ${WD_VERSION:- $(get_version)} "
13571421 if [ $( compare_version ${wd_version} " 4.7.0" ) -lt 0 ] ; then
1358- ELASTIC_POD=$( oc get pods ${OC_ARGS} -o jsonpath=" {.items[0].metadata.name}" -l tenant=${TENANT_NAME} ,app=elastic,ibm-es-data=True)
1359- oc exec ${OC_ARGS} " ${ELASTIC_POD} " -c elasticsearch -- bash -c " curl -ks 'https://${S3_SVC} :${S3_PORT} /minio/health/ready' -w '%{http_code}' -o /dev/null | grep 200 > /dev/null" || return 1
1422+ ELASTIC_POD=$( get_elastic_pod)
1423+ ELASTIC_POD_CONTAINER=$( get_elastic_pod_container)
1424+ oc exec ${OC_ARGS} " ${ELASTIC_POD} " -c " ${ELASTIC_POD_CONTAINER} " -- bash -c " curl -ks 'https://${S3_SVC} :${S3_PORT} /minio/health/ready' -w '%{http_code}' -o /dev/null | grep 200 > /dev/null" || return 1
13601425 return 0
13611426 else
13621427 launch_s3_pod
@@ -1487,10 +1552,12 @@ check_instance_mappings(){
14871552
14881553get_instance_tuples (){
14891554 ELASTIC_POD=$( get_elastic_pod)
1555+ ELASTIC_POD_CONTAINER=$( get_elastic_pod_container)
1556+
14901557 local file_name=" $( basename " ${MAPPING_FILE} " ) "
1491- _oc_cp " ${MAPPING_FILE} " " ${ELASTIC_POD} :/tmp/mapping.json" -c elasticsearch
1492- local mappings=( $( fetch_cmd_result ${ELASTIC_POD} " jq -r '.instance_mappings[] | \" \(.source_instance_id),\(.dest_instance_id)\" ' /tmp/mapping.json" -c elasticsearch ) )
1493- oc exec -c elasticsearch ${ELASTIC_POD} -- bash -c " rm -f /tmp/mapping.json"
1558+ _oc_cp " ${MAPPING_FILE} " " ${ELASTIC_POD} :/tmp/mapping.json" -c " ${ELASTIC_POD_CONTAINER} "
1559+ local mappings=( $( fetch_cmd_result ${ELASTIC_POD} " jq -r '.instance_mappings[] | \" \(.source_instance_id),\(.dest_instance_id)\" ' /tmp/mapping.json" -c " ${ELASTIC_POD_CONTAINER} " ) )
1560+ oc exec -c " ${ELASTIC_POD_CONTAINER} " ${ELASTIC_POD} -- bash -c " rm -f /tmp/mapping.json"
14941561 for map in " ${mappings[@]} "
14951562 do
14961563 ORG_IFS=${IFS}
@@ -1642,6 +1709,16 @@ get_bucket_suffix(){
16421709 echo " ${suffix} "
16431710}
16441711
# Print the configured size of the search snapshot repository.
# Before WD 5.2.0 it is read from the ElasticSearchCluster CR
# (.spec.snapshotRepo.size); from 5.2.0 it comes from the OpenSearch
# cluster CR (.spec.backup.sizeLimit).
get_snapshot_repo_size () {
  local wd_version=${WD_VERSION:-$(get_version)}
  if [ $(compare_version "${wd_version}" "5.2.0") -ge 0 ] ; then
    # Kept non-local on purpose: the original leaves this variable global.
    opensearch_cluster=$(get_opensearch_cluster)
    echo "$(oc ${OC_ARGS} get cluster.opensearch ${opensearch_cluster} -o jsonpath='{.spec.backup.sizeLimit}')"
  else
    echo "$(oc ${OC_ARGS} get elasticsearchcluster ${TENANT_NAME} -o jsonpath='{.spec.snapshotRepo.size}')"
  fi
}
1721+
16451722create_elastic_shared_pvc (){
16461723 local wd_version=${WD_VERSION:- $(get_version)}
16471724 if [ $( compare_version " ${wd_version} " " 4.7.0" ) -ge 0 ] ; then
@@ -1667,7 +1744,7 @@ create_elastic_shared_pvc(){
16671744 brlog " ERROR" " numfmt command is not available. Please install numfmt."
16681745 exit 1
16691746 fi
1670- local snapshot_repo_size=" $( oc ${OC_ARGS} get elasticsearchcluster ${TENANT_NAME} -o jsonpath= ' {.spec.snapshotRepo.size} ' ) "
1747+ local snapshot_repo_size=$( get_snapshot_repo_size )
16711748 local size_array=( $( echo " ${snapshot_repo_size} " | awk ' match($0, /([[:digit:]]+)([[:alpha:]]+)/, array) {print array[1], array[2]}' ) )
16721749 ELASTIC_SHARED_PVC_SIZE=" $(( size_array[0 ]* 2 )) ${size_array[1]} "
16731750 ELASTIC_SHARED_PVC_DEFAULT_NAME=" ${TENANT_NAME} -discovery-backup-restore-pvc"
@@ -1685,6 +1762,15 @@ spec:
16851762 storageClassName: ${FILE_STORAGE_CLASS}
16861763EOF
16871764 ELASTIC_SHARED_PVC=" ${ELASTIC_SHARED_PVC_DEFAULT_NAME} "
1765+
1766+ if [ $( compare_version " ${wd_version} " " 5.2.0" ) -ge 0 ] ; then
1767+ # Need those label when using custom shared PVC for opensearch.
1768+ # https://pages.github.ibm.com/CloudPakOpenContent/ibm-opensearch-operator/api/crds/#clusterspecbackup
1769+ oc label pvc $ELASTIC_SHARED_PVC icpdsupport/empty-on-nd-backup=false
1770+ oc label pvc $ELASTIC_SHARED_PVC icpdsupport/ignore-on-nd-backup=false
1771+ fi
1772+
1773+ brlog " DEBUG" " Created '$ELASTIC_SHARED_PVC ' PVC for elasticsearch backup/restore."
16881774 fi
16891775 fi
16901776}
0 commit comments