#! /bin/bash

#==========================================================
# Copyright @ 2014 Puppet Labs, LLC
# Redistribution prohibited.
# Address: 308 SW 2nd Ave., 5th Floor Portland, OR 97204
# Phone: (877) 575-9775
# Email: info@puppetlabs.com
#
# Please refer to the LICENSE.pdf file included
# with the Puppet Enterprise distribution
# for licensing information.
#==========================================================

#===[ Summary ]=========================================================

# This program installs Puppet Enterprise. Run this file to start the
# interactive installation or run with a "-h" option to display help.

#===[ Conventions ]=====================================================

# VARIABLES
#
# Variable names starting with "q_" are sanitized user answers to
# questions asked by the `ask` function.
#
# Variable names starting with "t_" are transient variables for use
# within a function. For example, "t_ask__name" is a transient variable
# for storing a "name" within the "ask" function. This convention is
# necessary because all POSIX sh variables are globals and there's no
# way to localize the scope of variables to prevent functions from
# stomping over each other's state.
#
# Variable names in all capital letters are globals that are
# intentionally shared between different functions.
#
# This file can be sourced into a shell for use as a library.

#===[ Global Variables ]===============================================
CONSOLE_PORT_OPTIONS="443,3000,3001,3002,3003,3004,3005"
SLES_10_REGEX="sles-10-(i386|x86_64)"
FINAL_EXIT_CODE=0

#===[ Functions ]=======================================================

# Enqueue vendor packages based on user's answers...
enqueue_vendor_packages() {
    # NONPORTABLE
    if [ "y" = "${q_database_install?}" ]; then
      case "${PLATFORM_NAME?}" in
        amazon | centos | rhel | sles)
          enqueue_package 'libxslt'
          ;;
        debian | ubuntu)
          enqueue_package 'libxslt1.1'
          ;;
      esac
    fi

    case "${PLATFORM_NAME?}" in
      amazon | centos | rhel)
        # pciutils are required for facter
        enqueue_package 'pciutils'

        # zlib is required for ruby
        enqueue_package 'zlib'

        enqueue_package 'which'
        enqueue_package 'libxml2'
        # dmidecode is required for facter, but not available on el4
        case "${PLATFORM_RELEASE}" in
          6)
            enqueue_package 'dmidecode'
            enqueue_package 'cronie'
            ;;
          5)
            enqueue_package 'dmidecode'
            enqueue_package 'vixie-cron'
            ;;
        esac

        enqueue_package 'net-tools'
        if [ y = "${q_puppetmaster_install?}" -o y = "${q_puppetdb_install?}" ]; then
          enqueue_package 'libjpeg'
        fi

        if [ y = "${q_puppetmaster_install?}" -o y = "${q_puppet_enterpriseconsole_install?}" ]; then
          enqueue_package 'curl'
          enqueue_package 'system-logos'
          # JJM mailcap is required for /etc/mime.types
          # In both RHEL x5 and x6
          enqueue_package 'mailcap'
        fi
        ;;
      sles)
        # pciutils and pmtools are required for facter
        if [ "${PLATFORM_RELEASE}" = "11" ] ; then
            enqueue_package 'pmtools'
        fi
        enqueue_package 'pciutils'
        enqueue_package 'cron'
        enqueue_package 'net-tools'
        enqueue_package 'libxml2'
        if [ y = "${q_puppetmaster_install?}" -o y = "${q_puppetdb_install?}" ]; then
            case "${PLATFORM_RELEASE}" in
                11)
                  enqueue_package 'libjpeg'
                  ;;
                12)
                  enqueue_package 'libjpeg62'
                  ;;
              esac
        fi

        if [ y = "${q_puppetmaster_install?}" -o y = "${q_puppet_enterpriseconsole_install?}" ]; then
          enqueue_package 'libapr1'
          enqueue_package 'curl'
          enqueue_package 'libapr-util1'
        fi
        ;;
      ubuntu | debian | cumulus)
        # pciutils and dmidecode are required for facter, except on powerpc which has no dmidecode
        enqueue_package 'pciutils'
        if [ "${PLATFORM_NAME?}" = "ubuntu" ] || [ "${PLATFORM_NAME?}" = "debian" ] ; then
            enqueue_package 'dmidecode'
        fi

        enqueue_package 'hostname'
        enqueue_package 'cron'
        enqueue_package 'libldap-2.4-2'
        enqueue_package 'libreadline5'
        enqueue_package 'libxml2'

        if [ y = "${q_puppetmaster_install?}" -o y = "${q_puppetdb_install?}" ]; then
          enqueue_package 'libjpeg62'
        fi

        if [ y = "${q_puppetmaster_install?}" -o y = "${q_puppet_enterpriseconsole_install?}" ]; then
          enqueue_package 'file'
          enqueue_package 'libmagic1'
          enqueue_package 'libpcre3'
          enqueue_package 'curl'
          enqueue_package 'perl'
          enqueue_package 'mime-support'
          enqueue_package 'libapr1'
          enqueue_package 'libcap2'
          enqueue_package 'libaprutil1'
          if [ "${PLATFORM_NAME?}" = "ubuntu" ] || [ "${PLATFORM_NAME?}" = "debian" ] ; then
            enqueue_package 'libaprutil1-dbd-sqlite3'
            enqueue_package 'libaprutil1-ldap'
          fi
        fi
        ;;
      solaris)
        if [ "${PLATFORM_RELEASE?}" = "11" ] ; then
          enqueue_package 'system/library/gcc-45-runtime'
          enqueue_package 'library/readline'
          enqueue_package 'library/security/openssl'
        fi
        # There are no vendor packages to install on Solaris 10
        ;;
      aix | eos)
        # No vendor packages
        :
        ;;
      *)
        display_failure "Do not know how to install vendor packages on this platform."
        ;;
    esac
}

# Install or upgrade all modules to /opt/puppetlabs/server/share/puppet/modules
#
# Arguments: Whether it is an install or upgrade. Defaults to install
install_puppet_modules() {
    # Save our current working directory.
    pushd "${INSTALLER_DIR}" &>/dev/null
    local t_install_dir="$(pwd)"
    t_module_backup_file="${MODULE_DIR?}/module.upgrade.backup.tar"
    if ! is_noop; then
        pushd ${MODULE_DIR?} &> /dev/null
        if run_suppress_output "ls ${MODULE_DIR?}/* &> /dev/null"; then
            # Backup current modules for recovery
            run_suppress_stdout "tar cf ${t_module_backup_file} ${MODULE_DIR?}/*"

            # Check module whitelist
            if [ -f "${t_install_dir?}/modules/whitelist_modules.txt" ] ; then
                t_whitelist_modules=$( cat "${t_install_dir?}/modules/whitelist_modules.txt" )
            fi
            t_remove_modules=`ls "${MODULE_DIR?}"`
            for t_whitelist_module in ${t_whitelist_modules} ;
            do
                t_remove_modules=`echo ${t_remove_modules?} | sed "s/\b${t_whitelist_module?}\b//"`
            done

            # Remove non-whitelist modules
            for t_remove_module in ${t_remove_modules?} ;
            do
                run_suppress_stdout "rm -rf ${t_remove_module?}"
            done
        fi
        for module_pkg in $( cat "${t_install_dir?}/modules/install_modules.txt" );
        do
            if [ -e "${t_install_dir?}/modules/${module_pkg}"* ]; then
                cur_mod_pkg=`ls "${t_install_dir?}/modules/${module_pkg?}"*`
                # Install module
                # --force is present to ensure we overwrite modules we control
                run_suppress_stdout "${PUPPET_BIN_DIR?}/puppet module install \"${cur_mod_pkg}\" --force --ignore-dependencies --modulepath ${MODULE_DIR?}"
            fi
        done

        # Pop back to $INSTALLER_DIR
        popd &>/dev/null
    fi
    # Pop back to original $PWD
    popd &>/dev/null
    run_suppress_stdout "chown ${PLATFORM_ROOT_USER}:${PLATFORM_ROOT_GROUP} -R ${MODULE_DIR?}"
}

# Copy puppet modules to /opt/puppetlabs/server/share/installer/modules and make sure they
# are readable to the world so that they can be served by a puppet fileserver
# mount to compile masters. This will remove any existing modules in the
# directory on upgrades.
create_module_mount() {
    run_suppress_stdout "rm -rf ${SERVER_SHARE_DIR?}/installer/modules"

    run_suppress_stdout "cp -R ${INSTALLER_DIR?}/modules ${SERVER_SHARE_DIR?}/installer/modules"

    run_suppress_stdout "chmod 0755 ${SERVER_SHARE_DIR?}/installer/modules"
}

configure_postgresql_server() {
    # For fresh installs in which case we're installing postgres, make shmmax
    # big enough. For upgrades from 3.x, presumably everything is already
    # working so we'll just leave it alone.
    if ! is_noop && ! is_upgrade; then
        t_postgres_memory_mb="$(${PUPPET_BIN_DIR?}/facter memorysize_mb)"
        t_postgres_shmmax_req="$((${t_postgres_memory_mb%.*} * 1024 * 1024 / 2))"
        t_postgres_shmmax_avail="$(/sbin/sysctl kernel.shmmax | awk '{ printf $3 }')"

        # Set kernel.shmmax if it's less than half the available memory size
        if [ ${t_postgres_shmmax_req?} -gt ${t_postgres_shmmax_avail?} ]; then
            export t_manage_kernel_shmmax='y'
        fi
    fi

    apply_template_manifest "postgresql_server.pp.erb"
}

configure_puppetdb() {
    display "Configuring puppetdb..."

    create_package_repo

    # Uses
    #   t_puppetdb_java_args
    apply_template_manifest "puppetdb.pp.erb"

    remove_package_repo

    display "PuppetDB configured."
}

# Setup a package repo on the master for the platform we're currently installing on
setup_pe_repo() {
    display_comment "Setting up package repository for ${PLATFORM_TAG}"

    t_pe_repo_package_dir="${SERVER_DATA_DIR}/packages/public/${PE_VERSION?}"
    run "mkdir -p '${t_pe_repo_package_dir?}'"
    # platform_package_dir contains the platform tag as well
    run "cp -R '$(platform_package_dir)' '${t_pe_repo_package_dir?}'"
    t_aio_agent_version=$(puppet_fact 'aio_agent_version')
    run "mv '${t_pe_repo_package_dir?}/${PLATFORM_TAG?}' '${t_pe_repo_package_dir?}/${PLATFORM_TAG?}-${t_aio_agent_version?}'"

    # Apply the pe_repo class to generate the install bash scripts.
    # Template uses:
    # - t_pe_repo_puppet_class
    # - q_tarball_server
    export t_pe_repo_puppet_class="$(platform_puppet_class)"

    set -e
    apply_template_manifest "pe_repo.pp.erb" || true
    set +e

    # Compile master upgrades from 3.8 -> 2015.2+ need to be able to download
    # and run the pe-code-migration script to adjust their puppet.conf and
    # hiera.yaml config
    run "cp '${INSTALLER_DIR?}/pe-code-migration.rb' '${t_pe_repo_package_dir?}'"

    # Ensure pe-code-migration is world-readable so the webserver can see it.
    run "chmod 644 '${t_pe_repo_package_dir}/pe-code-migration.rb'"
}

# Enqueue installer environment
enqueue_installer_packages() {
    # Puppet agent includes Ruby and all its dependencies
    enqueue_package 'puppet-agent'
    enqueue_package 'pe-bundler'
    enqueue_package 'pe-installer'
}

query_about_master_connectivity() {
    t_qamc__msg="Puppet Master at '${q_puppetagent_server}:8140' could not be reached."

    # fail if the answer file tells us to
    if [ 'y' == "${q_fail_on_unsuccessful_master_lookup}" ]; then
        display_failure "${t_qamc__msg} Aborting installation as directed by answer file. Set 'q_fail_on_unsuccessful_master_lookup' to 'n' if installation should continue despite communication failures."
    elif [ 'n' == "${q_fail_on_unsuccessful_master_lookup}" -a 'y' == "${IS_ANSWER_REQUIRED}" ]; then
        break
    fi

    ask q_continue_or_reenter_master_hostname "The installer couldn’t reach the puppet master server at ${q_puppetagent_server}. If this server name is correct, please check your DNS configuration to ensure the puppet master node can be reached by name, and make sure your firewall settings allow traffic on port 8140. Enter ‘r’ if you need to re-enter the puppet master’s name; otherwise, enter ‘c’ to continue." cr
    if [ 'c' == "${q_continue_or_reenter_master_hostname}" ]; then
        break
    else
        unset q_puppetagent_server
        ask q_puppetagent_server "Puppet master hostname to connect to?" String 'puppet'
    fi
    unset q_continue_or_reenter_master_hostname
}

cron_enable() {
    case "${PLATFORM_NAME?}" in
        amazon | centos | rhel | eos )
            enable_service 'crond'
            bounce_service 'crond'
            ;;
        debian | ubuntu | sles | cumulus)
            enable_service 'cron'
            bounce_service 'cron'
            ;;
        solaris)
            run_suppress_stdout "/usr/sbin/svcadm enable svc:/system/cron:default"
            ;;
        aix)
            if ! /usr/sbin/lsitab "cron" > /dev/null; then
                run_suppress_stdout '/usr/sbin/mkitab "cron:23456789:respawn:/usr/sbin/cron"'
            fi
            ;;
    esac
}

# Prepares the mcollective configuration with facts for the current node.
prime_mcollective_facts() {
    t_yaml_extract_values='
    require "json";
    require "yaml";
    facts = JSON.parse(ARGF.read);
    puts YAML.dump(facts["values"]);
    '
    run "${PUPPET_BIN_DIR?}/puppet facts | ${PUPPET_BIN_DIR?}/ruby -e '${t_yaml_extract_values?}' > /etc/puppetlabs/mcollective/facts.yaml"
}

# Utility function to re-query the user regarding an unavailable
# value for a database variable, e.g. a db name that is in use already
# Ask twice, then fail.
# Arguments:
# 1. The value of the resource to check for
# 2. A string, one of either "user" or "db" to flag *which* kind of resource to check
# 3. The variable that the value should be assigned to
# 4. An optional message string to display to the user in the ask message
# 5. An optional failure message to display
requery_db_resource_value() {
    t_success="n"
    t_count=0
    t_value="${1?}"
    t_type="${2?}"
    t_variable="${3?}"
    t_message="${4}"
    t_fail_message="${5}"
    while [ "n" = "${t_success}" ] && [ ${t_count} -lt 2 ] ; do
        unset "${t_variable}"
        if [ -z "${t_message}" ] ; then
            ask ${t_variable} "The ${t_type} name (${t_value}) for this host already exists on the PostgreSQL server. Please enter an unused ${t_type} name?" String
        else
            ask ${t_variable} "${t_message}" String
        fi
        eval t_value="${!t_variable}"
        if is_db_name_available "${t_value}" "${t_type}" ; then
            t_success="y"
        fi
        t_count=$(($t_count + 1))
    done
    if [ "n" = "${t_success}" ] ; then
        # We tried twice, and failed. Fail hard.
        if [ -z "${t_fail_message}" ] ; then
            display_failure "Unable to create the ${t_type} with the name ${t_value}, ${t_type} already exists on the PostgreSQL server."
        else
            display_failure "${t_fail_message}"
        fi
    fi
}

# Wait for the database server to be up and running. This is used so that we
# don't try to verify the database until the server is ready to accept
# connections. This is only done on database installs. $1 is the number of
# connection attempts to make. Returns 0 if the database is running, and 1 if
# the maximum number of attempts is reached.
wait_for_db() {
    t_wait_for_db_max_retries="${1?}"
    t_wait_for_db_tries=0
    t_path_to_psql="$(postgres_bin_dir)/psql"

    while [ "${t_wait_for_db_tries?}" -lt "${t_wait_for_db_max_retries?}" ]; do
        if eval "su - ${q_database_root_user} -c \"${t_path_to_psql?} --command='\l'\" -s /bin/bash" &> /dev/null; then
            return 0
        else
            sleep 0.5
            t_wait_for_db_tries=$(expr "${t_wait_for_db_tries?}" + 1)
        fi
    done

    return 1
}

# Wait for a service to be up and running.
# This is only done on all-in-one installs.
# $1 is the url to attempt the connection.
# $2 is the number of connection attempts to make.
# Returns 0 if the database is running, and 1 if
# the maximum number of attempts is reached.
wait_for_service() {
    t_wait_for_service_max_retries="${2?}"
    t_wait_for_service_tries=0

    t_wait_for_service_url="${1?}"

    while [ "${t_wait_for_service_tries?}" -lt "${t_wait_for_service_max_retries?}" ]; do
        run_suppress_stdout "HTTP_PROXY= http_proxy= HTTPS_PROXY= https_proxy= $(puppet_bin_dir)/curl --tlsv1 -f -s ${t_wait_for_service_url}"
        t_contact_service_exit_status=$?
        # We expect known SSL failures which mean the service is up and listening.
        # Since we're not using any known certs, we expect to be able to connect and then have an SSL failure.
        #  7     Failed to connect to host.
        # 22     HTTP error (>=400) if fail on error is enabled via -f.
        # 35     SSL connect error. The SSL handshaking failed.
        # 60     Peer certificate cannot be authenticated with known CA certificates.
        case $t_contact_service_exit_status in
            35 | 60)
                return 0
                ;;
            7 | 22)
                sleep 1
                t_wait_for_service_tries=$(expr "${t_wait_for_service_tries?}" + 1)
                ;;
            *)
                display_failure  "Unexpected error when connecting to the Puppet Master."
                HTTP_PROXY= http_proxy= HTTPS_PROXY= https_proxy= $(puppet_bin_dir)/curl --tlsv1 -s ${t_wait_for_service_url}
                break
                ;;
        esac
    done

    return 1
}

# Wait for Node Classifier to be up and running.
# This is only done on all-in-one installs.
# $1 is the url to attempt the connection.
# $2 is the number of connection attempts to make.
# Returns 0 if the database is running, and 1 if
# the maximum number of attempts is reached.
wait_for_nc() {
    t_wait_for_service_max_retries="${2?}"
    t_wait_for_service_tries=0

    t_wait_for_service_url="${1?}"

    while [ "${t_wait_for_service_tries?}" -lt "${t_wait_for_service_max_retries?}" ]; do
        # We grep for that specific string because when that endpoint returns an actual
        # date for last updated, we can be sure that the Node Classifier will have classes available
        # If last update is null, that means there are no classes in the NC.
        if run_suppress_stdout "HTTP_PROXY= http_proxy= HTTPS_PROXY= https_proxy= $(puppet_bin_dir)/curl --tlsv1 -s --cacert /etc/puppetlabs/puppet/ssl/certs/ca.pem --key ${SERVER_DATA_DIR:?}/console-services/certs/${q_puppetagent_certname}.private_key.pem --cert ${SERVER_DATA_DIR:?}/console-services/certs/${q_puppetagent_certname}.cert.pem ${t_wait_for_service_url}/v1/last-class-update | grep -q last_update.*[[:digit:]]"; then
            return 0
        else
            sleep 3
            t_wait_for_service_tries=$(expr "${t_wait_for_service_tries?}" + 1)
        fi
    done

    return 1
}

# Verify a single database on the database server.
# The arguments to this function are the database, username, and password to
# verify.
verify_single_database() {
    t_verify_database="${1?}"
    t_verify_user="${2?}"
    t_verify_password="${3?}"

    create_db_encoding="ENCODING 'utf8' LC_CTYPE 'en_US.utf8' LC_COLLATE 'en_US.utf8' template template0"
    t_psql_remote_string="--host='${q_database_host?}' --port=${q_database_port?}"

    t_db_setup=0
    t_path_to_psql="$(postgres_bin_dir)/psql"

    if ! output=$(run "PGPASSWORD='${t_verify_password?}' ${t_path_to_psql?} --username='${t_verify_user?}' ${t_psql_remote_string} --dbname='${t_verify_database?}' --command='\dT' 2>&1"); then
        if echo $output | $PLATFORM_EGREP -q "role \"${t_verify_user?}\" does not exist" ; then
            echo "Could not connect to the postgresql server using the user: ${t_verify_user?}. Please log in as a privileged user and set it up manually. Example SQL commands:" | display_wrapped_text
            display_newline
            printf -- '%s' "
            CREATE USER \"${t_verify_user}\" PASSWORD '${t_verify_password?}';
            "
        elif echo $output | $PLATFORM_EGREP -q "database \"${t_verify_database?}\" does not exist" ; then
            echo "Could not connect to postgresql server using database: ${t_verify_database?} with user: ${t_verify_user?}. Please log in as a privileged user and set it up manually. Example SQL commands:" | display_wrapped_text
            display_newline
            printf -- '%s' "
            CREATE DATABASE \"${t_verify_database}\" OWNER \"${t_verify_user}\" ${create_db_encoding?};
            "
        else
            echo "Could not connect to postgres server using the user: ${t_verify_user?} and database: ${t_verify_database?}. Please log in as a privileged user and set up the user or database manually. Example SQL commands:" | display_wrapped_text
            display_newline
            printf -- '%s' "
            CREATE USER \"${t_verify_user}\" PASSWORD '${t_verify_password?}';
            CREATE DATABASE \"${t_verify_database}\" OWNER \"${t_verify_user}\" ${create_db_encoding?};
            "
        fi
        display_newline

        t_db_setup=1
    else
        display_comment "Database ${t_verify_database} verified successfully."
    fi
}

# verify_postgresql: there are two basic cases
# 1) we are verifying a root user
# 2) we are verifying existing credentials and databases
#
# The first and only argument represents which databases to verify:
# puppetdb, classifier, activity, rbac, or some comma delimited subset thereof.
#
# The function uses several installer variables including:
# - q_database_install
# - q_database_host
# - q_database_port
# - q_database_root_user
# - q_puppetdb_database_name
# - q_puppetdb_database_user
# - q_puppetdb_database_password
# - q_activity_database_name
# - q_activity_database_user
# - q_activity_database_password
# - q_classifier_database_name
# - q_classifier_database_user
# - q_classifier_database_password
# - q_rbac_database_name
# - q_rbac_database_user
# - q_rbac_database_password
#
# It returns 0 for success if the databases/users exist or the root account is valid
# and non-zero for failure if those databases/users don't exist or the root account is invalid

verify_postgresql() {
    t_db_setup=0
    t_path_to_psql="$(postgres_bin_dir)/psql"

    # If we are doing the setup, we need to verify that the root account credentials are good.
    if [ y = "${q_database_install?}" ]; then
        if ! eval "su - ${q_database_root_user} -c \"${t_path_to_psql?} --command='\l'\" -s /bin/bash" &> /dev/null; then
            display_failure "Could not connect to the postgresql server using the ${q_database_root_user} user."
        fi
    else
        t_psql_remote_string="--host='${q_database_host?}' --port=${q_database_port?}"

        # If we aren't doing the setup, we need to verify that the PuppetDB account credentials are good and the databases exist.
        # We check each in turn and give example SQL commands for each block upon failure. We don't fail hard on error until the function returns,
        # so all blocks can display errors to the user.

        if [ "${1?}" != "${1/orchestrator,/match}" ]; then
            if ! verify_single_database "${q_orchestrator_database_name?}" "${q_orchestrator_database_user?}" "${q_orchestrator_database_password?}"; then
                t_db_setup=1
            fi
        fi

        if [ "${1?}" != "${1/puppetdb,/match}" ]; then
            if ! verify_single_database "${q_puppetdb_database_name?}" "${q_puppetdb_database_user?}" "${q_puppetdb_database_password?}"; then
                t_db_setup=1
            fi

            # Verify pg_trgm extension is created (PE-6859)
            if output=$(run "PGPASSWORD='${q_puppetdb_database_password?}' ${t_path_to_psql?} --username='${q_puppetdb_database_user?}' ${t_psql_remote_string} --dbname='${q_puppetdb_database_name?}' --command='\dx pg_trgm' 2>&1"); then
                if echo "${output?}" | grep -q "pg_trgm.*|" ; then
                    display_comment "Database ${q_puppetdb_database_name} pg_trgm extension verified."
                else
                    display_error "Database ${q_puppetdb_database_name} pg_trgm extension could not be found. Please install pg_trgm."
                    t_db_setup=1
                fi
            fi
        fi

        if [ "${1?}" != "${1/activity,/match}" ]; then
            if ! verify_single_database "${q_activity_database_name?}" "${q_activity_database_user?}" "${q_activity_database_password?}"; then
                t_db_setup=1
            fi
        fi

        if [ "${1?}" != "${1/classifier,/match}" ]; then
            if ! verify_single_database "${q_classifier_database_name?}" "${q_classifier_database_user?}" "${q_classifier_database_password?}"; then
                t_db_setup=1
            fi
        fi

        if [ "${1?}" != "${1/rbac,/match}" ]; then
            if ! verify_single_database "${q_rbac_database_name?}" "${q_rbac_database_user?}" "${q_rbac_database_password?}"; then
                t_db_setup=1
            fi

            # Verify citext extension is created (PE-6859)
            if output=$(run "PGPASSWORD='${q_rbac_database_password?}' ${t_path_to_psql?} --username='${q_rbac_database_user?}' ${t_psql_remote_string} --dbname='${q_rbac_database_name?}' --command='\dx citext' 2>&1"); then
                if echo "${output?}" | grep -q "citext.*|" ; then
                    display_comment "Database ${q_rbac_database_name} citext extension verified."
                else
                    display_error "Database ${q_rbac_database_name} citext extension could not be found. Please install citext."
                    t_db_setup=1
                fi
            fi
        fi
    fi

    return ${t_db_setup?}
}

# Verify the db credentials
verify_db() {
    t_verify_dbs=''
    if is_upgrade; then
        # TODO should run this on upgrades for sake of checking state of
        # external Postgresql (PE-10858)
        display_comment "Skipping postgresql verify on upgrade..."
        return 0
    else
        display_comment "Verifying postgresql credentials..."
    fi

    if [ y = "${q_puppetmaster_install?}" ]; then
        t_verify_dbs='orchestrator,'
    fi

    if [ y = "${q_puppetdb_install?}" ]; then
        t_verify_dbs="${t_verify_dbs} puppetdb,"
    fi

    if [ y = "${q_puppet_enterpriseconsole_install?}" ]; then
        t_verify_dbs="${t_verify_dbs} rbac, classifier, activity,"
    fi

    if ! verify_postgresql "${t_verify_dbs?}"; then
        display_failure "The provided postgresql instance could not be verified. Please follow the above instructions and try again."
    fi

    if is_external_postgres && is_valid_postgres_bin_dir; then
        t_postgres_version=$(check_postgres_version)
        if [ "${t_postgres_version?}" = "9.4" ] ; then
            display_comment "Postgres ${t_postgres_version?} is installed on ${q_database_host}."
        else
            display_failure "=== Your PostgreSQL instance on ${q_database_host} is not being managed by Puppet Enterprise and appears to be ${t_postgres_version?}.  Please upgrade to PostgreSQL 9.4 before continuing the Puppet Enterprise upgrade. ==="
        fi
    fi
}

# Warn about needed open ports
warn_open_ports() {
    t_inbound_port_string=""
    t_outbound_port_string=""

    # Add 443 for HTTPS connections if the console is being installed...
    # Also 4433 for HTTPS connections if the classifier is being installed...
    # ... inbound if it's a console install...
    if [ y = "${q_puppet_enterpriseconsole_install?}" ]; then
      t_inbound_port_string="${t_inbound_port_string?}${q_puppet_enterpriseconsole_httpd_port?}, 4433, "
    elif [ y = "${q_puppetmaster_install?}" ]; then
      t_outbound_port_string="${t_outbound_port_string?}4433, "
    fi

    # Add 8140 for the puppetmaster and 61613 for mcollective/stomp/activemq...
    # ... inbound if it's a master install...
    if [ y = "${q_puppetmaster_install?}" ]; then
      t_inbound_port_string="${t_inbound_port_string?}8140, 61613, "
    else
      # ... otherwise outbound
      t_outbound_port_string="${t_outbound_port_string?}8140, 61613, "
    fi

    # Add 5432 for Postgres...
    # ... inbound if it's a non-console database install...
    if [ y = "${q_database_install?}" -a ! y = "${q_puppet_enterpriseconsole_install?}" ]; then
      t_inbound_port_string="${t_inbound_port_string?}${q_database_port?}, "
    fi
    # ... outbound on a non-database PuppetDB or console install
    if [ ! y = "${q_database_install?}" ] && [ y = "${q_puppetmaster_install?}" -o y = "${q_puppetdb_install}" -o y = "${q_puppet_enterpriseconsole_install}" ]; then
      t_outbound_port_string="${t_outbound_port_string?}${q_database_port?}, "
    fi

    # Add 8081 for PuppetDB if not a standalone install...
    if [ ! y = "${q_all_in_one_install?}" ]; then
      # ... inbound if it's a PuppetDB install
      if [ y = "${q_puppetdb_install?}" ]; then
        t_inbound_port_string="${t_inbound_port_string?}${q_puppetdb_port?}, "
      fi
      # ... outbound if it's a puppetmaster or console install
      if [ y = "${q_puppetmaster_install}" ]; then
        t_outbound_port_string="${t_outbound_port_string?}${q_puppetdb_port?}, "
      fi
    fi

    if [ -n "${t_inbound_port_string?}" ]; then
        echo "If you have a firewall running, please ensure the following TCP ports are open: ${t_inbound_port_string%, }" | display_wrapped_text
        display_newline
    fi

    if [ -n "${t_outbound_port_string?}" ]; then
        echo "If you have a firewall running, please ensure outbound connections are allowed to the following TCP ports: ${t_outbound_port_string%, }" | display_wrapped_text
        display_newline
    fi

}

# Create a temporary directory and install the current installer's
# puppetlabs-puppet_enterprise module into it.
# $1 the temporary directory to install into
setup_tmp_puppet_enterprise_module() {
    t_tmp_puppet_modulepath=${1?}
    t_modules_dir="$(installer_dir)/modules"
    t_module_name="$(ls ${t_modules_dir:?} | grep puppetlabs-puppet_enterprise)"

    run_suppress_stdout "mkdir -p ${t_tmp_puppet_modulepath?}"
    run_suppress_stdout "$(puppet_bin_dir)/puppet module install ${t_modules_dir?}/${t_module_name?} --target-dir=${t_tmp_puppet_modulepath?} --ignore-dependencies --force"
}

# Query the `custom_auth_conf` fact on the system to see if the user has a
# modified custom auth.conf file. Return 0 if the auth.conf is modified, or
# return 1 if the auth.conf is not modified.
found_custom_auth_conf() {
    if [ "$(puppet_fact 'custom_auth_conf')" = 'false' ]; then
        return 1
    else
        return 0
    fi
}

# If the `auth.conf` is modified (as specified by the custom fact
# provided in the module) then generate what a new unmodified
# `auth.conf` file will look like in PE 3.8.0 and save the diff
# between the unmodified `auth.conf` file and the `auth.conf` file
# that is currently on the system so that the user can better
# determine what changes they will have to make,
display_auth_conf_diff() {
    prepare_workdir
    t_tmp_puppet_modulepath="${WORKDIR?}/tmp-modulepath-for-auth-conf-diff"

    setup_tmp_puppet_enterprise_module $t_tmp_puppet_modulepath

    export t_tmp_puppet="${WORKDIR?}/tmp-auth-conf-diff"
    run_suppress_stdout "mkdir -p ${t_tmp_puppet?}"

    t_puppet_manifest="${t_tmp_puppet?}/custom_auth.pp"

    t_puppet_apply_flags=""
    if [ "$(echo_vercmp 2015.2.0 $CURRENT_PE_VERSION)" = "1" ]; then
        t_puppet_apply_flags="--parser=future"
    fi

    # Uses
    #  t_tmp_puppet
    run_suppress_stdout "$(puppet_bin_dir)/erb -T - '${INSTALLER_DIR?}/erb/check-auth.conf.pp.erb' > ${t_puppet_manifest?}"
    run_suppress_stdout "$(puppet_bin_dir)/puppet apply --modulepath ${t_tmp_puppet_modulepath?} ${t_puppet_apply_flags} ${t_puppet_manifest?}"
    if [ -f "${t_tmp_puppet?}/auth.conf.diff" ]; then
        run "cat ${t_tmp_puppet?}/auth.conf.diff"
    fi
}

# Setup just the agent symlinks.  The assumption here is that we're installing
# just the agent, and we don't have modules unpacked.  So we're manually
# setting up symlinks so that the expected binaries are immediately available
# without waiting for a puppet cert sign and an agent run.
#
# Service nodes (master, console, db), should get symlinks setup automatically
# by the classes in the application of their respective erb templates.  So they
# don't need this method.
ensure_agent_links() {
    # The -w operand must be quoted: an unquoted path containing whitespace
    # word-splits and makes the test expression fail with a syntax error.
    if run "mkdir -p '${PLATFORM_SYMLINK_TARGET?}'" && [ -w "${PLATFORM_SYMLINK_TARGET?}" ]; then
        for executable in puppet facter hiera pe-man; do
            puppet_resource "file ${PLATFORM_SYMLINK_TARGET?}/${executable?} ensure=link target=${PUPPET_BIN_DIR?}/${executable?}"
        done
    else
        display "!!! WARNING: ${PLATFORM_SYMLINK_TARGET?} is inaccessible; unable to create convenience symlinks for puppet, hiera, facter and pe-man.  These executables may be found in ${PUPPET_BIN_DIR?}."
    fi
}

# Backup the old puppet.conf to a timestamped .bak file; records the
# destination path in the (global) t_puppet_conf_backup variable.
backup_puppet_conf() {
    # %S (two-digit seconds) instead of %s (seconds since the epoch), so
    # the suffix is a plain YYYYMMDDHHMMSS timestamp as intended.
    t_timestamp=$(date +'%Y%m%d%H%M%S')
    t_puppet_conf_backup="/etc/puppetlabs/puppet/puppet.conf-${t_timestamp?}.bak"
    display_comment "Backing up puppet.conf to ${t_puppet_conf_backup?}"
    run "cp /etc/puppetlabs/puppet/puppet.conf ${t_puppet_conf_backup?}"
}

# Backup the old hiera.yaml to a timestamped .bak file; exports the
# destination path as HIERA_YAML_BACKUP for later steps.
backup_hiera_yaml() {
    # %S (two-digit seconds) instead of %s (seconds since the epoch), so
    # the suffix is a plain YYYYMMDDHHMMSS timestamp as intended.
    t_timestamp=$(date +'%Y%m%d%H%M%S')
    t_hiera_yaml_backup="/etc/puppetlabs/puppet/hiera.yaml-${t_timestamp?}.bak"
    display_comment "Backing up hiera.yaml to ${t_hiera_yaml_backup?}"
    run "cp /etc/puppetlabs/puppet/hiera.yaml ${t_hiera_yaml_backup?}"
    HIERA_YAML_BACKUP=${t_hiera_yaml_backup?}
    export HIERA_YAML_BACKUP
}

# Verify that every Puppet environment will run under the future parser.
# Aborts the installer (display_failure + quit) when a problem is found.
check_future_parser_settings() {
    # PE-8947 Enable detection of future parser on PE environments
    # PE 2015.2 depends on the future parser being enabled for - all - environments.
    # There are several ways to accomplish this. Puppet.conf may have parser=future
    # while parser is unset in environment.conf. Environment.confs may be
    # set to parser=future. If the value of parser is ever current (implicitly or
    # explicitly), we should fail with a helpful error.
    # environmentpath may be a ':'-separated list; convert it to a
    # space-separated word list for iteration.
    env_paths=$("$(puppet_bin_dir)/puppet" config print environmentpath) ;
    env_path_array=$(echo ${env_paths?} | sed s/:/\ /g)
    # Set to 1 by check_envs_for_parser_future on any misconfiguration
    # (shell variables are global, per this file's conventions).
    t_future_parser_warn_and_quit=0
    puppet_conf_missing_parser_future=0

    if  ! [ "$(get_ini_field /etc/puppetlabs/puppet/puppet.conf parser)" == 'future' ] ; then
        puppet_conf_missing_parser_future=1
    fi

    # Check every environment directory under every environmentpath entry.
    for env_path in ${env_path_array[@]}
    do
        local envs=(${env_path?}/*)
        # The array is passed by name; the callee expands it via "${!2}".
        check_envs_for_parser_future ${puppet_conf_missing_parser_future?} "envs[@]"
    done

    if [ ${t_future_parser_warn_and_quit?} -eq 1 ]; then
        display_failure 'Some of your environment configurations have
                disabled the future parser. Puppet Enterprise now depends on
                new parser functionality and cannot be upgraded until the future
                parser is enabled in all environments. You can enable the parser
                by doing one of the following: edit the config files above so that
                the "parser" directive is set to "future", or remove the "parser"
                setting completely and ensure that the parser setting in
                /etc/puppetlabs/puppet/puppet.conf is set to "future". Please
                fix the configuration files, test your changes, and rerun the upgrade.'
        quit 1
    fi
}

# Check the parser directive of each environment directory passed in.
# Sets the global t_future_parser_warn_and_quit=1 when a problem is found;
# the caller (check_future_parser_settings) decides whether to abort.
#
# $1 - 1 when puppet.conf does NOT set parser=future, else 0
# $2 - the NAME of an array of environment directories (e.g. "envs[@]")
check_envs_for_parser_future() {
    # For each environment specified in puppet.conf, loop through all environment
    # directories and check the state of the parser directive in environment.conf.
    t_puppet_conf_missing_parser_future=${1?}
    # Indirect expansion: "${!2}" expands the caller's array by name.
    t_future_parser_envs=("${!2}")

    # if puppet.conf is missing parser=future then check that all environments have parser=future
    if [ ${t_puppet_conf_missing_parser_future?} -eq 1 ] ; then
        for envconfdir in ${t_future_parser_envs[@]}
        do
            # Each environment must have its own environment.conf with an
            # explicit parser=future, since puppet.conf provides no default.
            if ! ([ -e ${envconfdir?}/environment.conf ] && [ "$(get_ini_field ${envconfdir?}/environment.conf parser)" == 'future' ]) ; then
                t_future_parser_warn_and_quit=1
                display_error "Puppet.conf does not have parser=future and ${envconfdir?}/environment.conf is not configured to use parser=future."
            fi
        done
    else
        # puppet.conf already sets parser=future, so environments may omit
        # the setting — but no environment can have parser=current.
        for envconfdir in ${t_future_parser_envs[@]}
        do
            if [ -e ${envconfdir?}/environment.conf ] ; then
                if [ "$(get_ini_field ${envconfdir?}/environment.conf parser)" == 'current' ] ; then
                    t_future_parser_warn_and_quit=1
                    display_error "${envconfdir?}/environment.conf has parser=current."
                fi
            fi
        done
    fi
}

# This routine does several things for an upgraded puppet master:
#
# Backup puppet.conf
# Update puppet.conf for Puppet 4
#  * clear or update basemodulepath
#  * update reports setting
#  * remove default or deleted settings
# Migrate environments, modules, hiera to Puppet 4 codedir
#
# NOTE: Also renames the old /opt/puppet/share/puppet/modules sitemoduledir as
# a safety mechanism.  Because if for some reason puppet.conf's basemodulepath
# does not get adjusted, the old PE modules will rewrite configuration for PE
#
# Sets t_code_migration_failed to "y"/"n" for the caller to inspect.
perform_code_migration_and_update_puppet_conf() {
    display_newline
    display_minor_separator
    display "Puppet Enterprise has a new directory for code where it expects to find environments, user modules, hiera configuration and data: /etc/puppetlabs/code.  It is recommended that you migrate your environments, modules and hiera configuration and data to this directory.  The update script will only be updating puppet.conf settings for the current version of Puppet Enterprise."
    display_newline
    backup_puppet_conf
    display_comment "Moving old puppet enterprise modules aside to /opt/puppet/share/puppet/modules.bak"
    run "mv /opt/puppet/share/puppet/modules /opt/puppet/share/puppet/modules.bak"
    display_comment "Updating puppet.conf"
    # Always reset the flags: shell variables are global, so a stale '-D'
    # from an earlier debug-mode invocation would otherwise leak into a
    # later non-debug run.
    t_migration_args=''
    if is_debug; then
        t_migration_args='-D'
    fi
    if run "${PUPPET_BIN_DIR?}/ruby ${INSTALLER_DIR?}/pe-code-migration.rb ${t_migration_args}"; then
        t_code_migration_failed="n"
        display "Finished."
    else
        t_code_migration_failed="y"
        display_error "Failed."
    fi
    display_minor_separator
    display_newline
}

# File sync needs the codedir's contents in its staging directory so they
# can be synced back into the codedir to complete puppet agent runs.
copy_codedir_to_staging_dir () {
    t_puppet_old_codedir="/etc/puppetlabs/code"
    t_filesync_staging_dir="/etc/puppetlabs/code-staging"
    display_comment "Populating the file-sync staging dir with the contents of the codedir"
    # Create the staging dir (owned by the puppet user, mode 750) unless it
    # already exists.
    [ -d "${t_filesync_staging_dir?}" ] || \
        run_suppress_stdout "mkdir -p ${t_filesync_staging_dir?} && chown ${PLATFORM_PUPPET_USER}:${PLATFORM_PUPPET_GROUP} ${t_filesync_staging_dir?} && chmod 750 ${t_filesync_staging_dir?}"
    # Move everything over and make sure ownership is consistent throughout.
    run_suppress_stdout "mv ${t_puppet_old_codedir?}/* ${t_filesync_staging_dir?}/ && chown -R ${PLATFORM_PUPPET_USER}:${PLATFORM_PUPPET_GROUP} ${t_filesync_staging_dir?}"
}

# POST a commit request to the master's file-sync API so the contents of
# the staging dir are published into the working code dir.
perform_file_sync_commit() {
    # Perform a commit to sync the contents of the staging dir into the
    # working dir
    t_commit_endpoint="https://${q_puppetmaster_certname?}:8140/file-sync/v1/commit"
    display_comment "Publishing the contents of the file-sync staging dir"
    # Proxy env vars are cleared so the request goes directly to the local
    # master; the master's own cert/key authenticate the client, and the CA
    # cert verifies the server.
    run_suppress_stdout "HTTP_PROXY= http_proxy= HTTPS_PROXY= https_proxy= $(puppet_bin_dir)/curl --tlsv1 -s --cacert /etc/puppetlabs/puppet/ssl/certs/ca.pem --key /etc/puppetlabs/puppet/ssl/private_keys/${q_puppetmaster_certname?}.pem --cert /etc/puppetlabs/puppet/ssl/certs/${q_puppetmaster_certname?}.pem -X POST $t_commit_endpoint -H 'Content-Type: application/json' -d '{\"commit-all\":true}'"
    display_newline
}

#===[ Main ]============================================================

# Load the shared helper functions (display_*, run*, ask, etc.) that live
# alongside this script.
. "$(dirname "${0?}")/utilities"

# Version variables to use when creating links and printing messages.
PE_VERSION=$(cat "$(dirname "${0?}")/VERSION" 2> /dev/null)
# Major.minor portion only (e.g. "2015.2"); used for doc links below.
PE_LINK_VER=$(echo ${PE_VERSION?} | cut -d '.' -f1,2)

if [ "puppet-enterprise-installer" = "$(basename "${0?}")" ]; then

    #---[ Environment ]-----------------------------------------------------

    # Installing via sudo may not add required path components
    PATH=$PATH:/usr/sbin:/usr/bin:/sbin:/bin

    #---[ Paranoia ]--------------------------------------------------------

    # Exit immediately if a simple command exits with a non-zero status:
    set -e

    #---[ Prepare ]---------------------------------------------------------

    # Catch CTRL-C and "set -e" errors:
    register_exception_handler

    # Setup "PLATFORM_*" variables:
    prepare_platform

    #---[ Process command-line options ]------------------------------------

    ANSWER_FILE_TO_LOAD=
    IS_ANSWER_REQUIRED=n
    LOGFILE=
    IS_NOOP=n
    IS_DEBUG=n
    IS_VERBOSE_DEBUG=n
    IS_SUPPRESS_OUTPUT=n
    export IS_UPGRADE=n

    # Parse command-line flags. -a and -A both name an answer file; they
    # differ only in whether answers are marked as required.
    while getopts a:A:Dhl:nqV name; do
        case "$name" in
            a)
                # Answer file, with answers required.
                ANSWER_FILE_TO_LOAD="${OPTARG?}"
                IS_ANSWER_REQUIRED=y
                ;;
            A)
                # Answer file, answers not required.
                ANSWER_FILE_TO_LOAD="${OPTARG?}"
                IS_ANSWER_REQUIRED=n
                ;;
            D)
                # Debug output.
                IS_DEBUG=y
                ;;
            h)
                # Help: show header and usage.
                display_header
                display_usage
                ;;
            l)
                # Log file path.
                LOGFILE="${OPTARG?}"
                ;;
            n)
                # No-op (dry-run) mode.
                IS_NOOP=y
                ;;
            q)
                # Quiet: suppress output.
                IS_SUPPRESS_OUTPUT=y
                ;;
            V)
                # Very verbose debugging (enables `set -x` later).
                IS_VERBOSE_DEBUG=y
                ;;
            ?)
                display_header
                display_usage "Illegal option specified"
                ;;
        esac
    done

    if ! is_use_answers_file; then
        if has_logfile; then
            display_header
            display_usage "logfile option '-l' must be used with '-a' or '-A' options"
        fi
        if is_noop; then
            display_header
            display_usage "noop option '-n' must be used with '-a' or '-A' options"
        fi
        if is_verbose_debug; then
            display_header
            display_usage "verbose option '-V' must be used with '-a' or '-A' options"
        fi
        if is_debug; then
            display_header
            display_usage "debug option '-D' must be used with '-a' or '-A' options"
        fi
    fi

    #---[ Announce installation ]-------------------------------------------

    # Enforce quiet mode if specified
    if is_quiet ; then
        run_quiet_mode
    fi

    # Enforce very verbose debugging if specified
    if is_verbose_debug; then
        set -x
    fi

    # Announce installer:
    display_header

    # Set umask to 0022. This is in a subshell and inside a conditional, so this umask will only
    # persist for the run of the installer.
    umask 0022

    # Prepare the installer variable:
    installer_dir > /dev/null

    [ -s $(installer_dir)/VERSION ] || display_failure "The VERSION file seems to be missing from your installer. The installer cannot proceed without it."

    # Check if we are on a supported platform
    platform_support_check

    # Check user:
    prepare_user

    # Load answers if specified:
    if [ ! -z "${ANSWER_FILE_TO_LOAD?}" ]; then
        load_answers "${ANSWER_FILE_TO_LOAD?}"
    fi

    # Prepare log file:
    if ( ! is_noop ); then
        prepare_log_file "install"
    fi

    # PE-537 without libgcc_s.so.1, our compiled software will not run.
    solaris_re='solaris-10-(i386|sparc)'
    if [[ "${PLATFORM_TAG}" =~ $solaris_re ]] && ( ! is_package_installed "SUNWgccruntime" ); then
        display_failure "SUNWgccruntime provides /usr/sfw/lib/libgcc_s.so.1, which is required for Puppet Enterprise, please install the package from your Solaris installation media and run ${0} again."
    fi

    # Check if we are working on a system that already has an install
    if is_pe_installed; then
        export IS_UPGRADE=y

        CURRENT_PE_BUILD=$(current_pe_build)
        CURRENT_PE_VERSION=${CURRENT_PE_BUILD%%-*}
        export CURRENT_PE_MAJOR=$(echo $CURRENT_PE_VERSION | cut -d'.' -f1)
        export CURRENT_PE_MINOR=$(echo $CURRENT_PE_VERSION | cut -d'.' -f2)
        CURRENT_PE_INCR=$(echo $CURRENT_PE_VERSION | cut -d'.' -f3)

        if [ "${CURRENT_PE_BUILD?}" = "${PE_VERSION?}" ]; then
            display_newline
            display " === Puppet-Enterprise version ${CURRENT_PE_BUILD?} already installed === "
            display_newline
            quit
        else
            display_newline
            display " === Upgrade from version ${CURRENT_PE_BUILD?} detected === "
            display_newline
        fi

        # Fail if the current installed version is less than PE 3.8.1
        if [ "$(echo_vercmp 3.8.1 $CURRENT_PE_VERSION)" = "1" ]; then
            display_failure "In order to upgrade to ${PE_VERSION}, you must be running PE 3.8.1 or higher. Once you have upgraded to 3.8.1, you can complete the upgrade to ${PE_VERSION}. For more information, see http://docs.puppetlabs.com/pe/2015.2/install_upgrading.html."
        fi

        # Check if this is an Enterprise Readiness Guide deployment (as opposed
        # to a stock install). If so, bail out because we can't automatically
        # upgrade.
        if [ -e "/etc/puppetlabs/installer/details.txt" ]; then
            display_failure "Your current version of Puppet Enterprise cannot be automatically upgraded because it uses a non-standard deployment and/or configuration. Please contact Puppet Labs support for assistance with your upgrade."
        fi

        q_puppetagent_install=y

        q_puppetagent_certname="$(puppet_config_print certname agent)"
        q_puppetagent_server="$(puppet_config_print server agent)"

        if is_puppetmaster; then
            q_puppetmaster_install='y'
            q_puppetmaster_certname="$(puppet_config_print certname master)"
            t_puppetmaster_node_terminus="$(get_ini_field '/etc/puppetlabs/puppet/puppet.conf' 'node_terminus')"
            t_puppetmaster_external_node="$(get_ini_field '/etc/puppetlabs/puppet/puppet.conf' 'external_nodes')"

            export t_puppetserver_java_args="$(get_java_args "pe-puppetserver")"

            if [ "${t_puppetmaster_node_terminus}" == 'classifier' ]; then
                q_puppetmaster_external_node_terminus=${q_puppetmaster_external_node_terminus:-"n"}
            fi
            #q_puppetca_install='y'

            extract_console_location_from_enc_script

            if [ "$(echo_vercmp 4.0.0 $CURRENT_PE_VERSION)" = "1" ]; then
                check_future_parser_settings
            fi

            # When upgrading from Ankeny, we should enable file-sync service
            # based on its current status
            if [ "$(echo_vercmp 2015.3.0 "$CURRENT_PE_VERSION")" != "1" ]; then
                # $CURRENT_PE_VERSION >= 2015.3.0
                t_file_sync_service=$(grep 'file-sync.file-sync-versioned-code-*' '/etc/puppetlabs/puppetserver/bootstrap.cfg')
                if [[ "${t_file_sync_service?}" =~ file-sync-versioned-code-service ]]; then
                    q_puppetmaster_file_sync_service_enabled='y'
                elif [[ "${t_file_sync_service?}" =~ file-sync-versioned-code-disabled-service ]]; then
                    q_puppetmaster_file_sync_service_enabled='n'
                else
                    display "!!! WARNING: we did not find a file-sync service setting in /etc/puppetlabs/puppetserver/bootstrap.cfg (found '${t_file_sync_service?}')" 
                    # file-sync service has not been configured at all?
                    # perhaps service names have changed?
                fi
            fi
        else
            q_puppetmaster_install='n'
            #q_puppetca_install='n'
        fi

        if is_console; then
            q_puppet_enterpriseconsole_install='y'

            # If upgrading from 3.8 we need to get the current ssl console port
            # from the old httpd configuration. Otherwise we can look in the
            # new nginx configuration.
            if [ "$(echo_vercmp 4.0.0 $CURRENT_PE_VERSION)" = "1" ]; then
                t_console_httpd_port_file='/etc/puppetlabs/httpd/conf.d/puppetproxy.conf'
                q_puppet_enterpriseconsole_httpd_port="${q_puppet_enterpriseconsole_httpd_port:-"$(${PLATFORM_EGREP} Listen ${t_console_httpd_port_file?} | sed -e 's/^\s*Listen [^:]*:\([[:digit:]]*\)\s*$/\1/')"}"
            else
                t_console_httpd_port_file='/etc/puppetlabs/nginx/conf.d/proxy.conf'
                q_puppet_enterpriseconsole_httpd_port="${q_puppet_enterpriseconsole_httpd_port:-"$(${PLATFORM_EGREP} listen ${t_console_httpd_port_file?} | sed -e 's/^\s*listen\s\+\([[:digit:]]*\)\s\+ssl;$/\1/')"}"
            fi

            if ! [[ "${q_puppet_enterpriseconsole_httpd_port?}" =~ ^[0-9]+$ ]]; then
                # PE-11501 - on the split console, a bug in console hostname
                # setting for the configuration templates results in an extra
                # vhost with the master servername.  This needs to be removed
                # during upgrade
                if ! is_puppetmaster && $PLATFORM_EGREP "^\s*server_name\s*${q_puppetagent_server?}\s*;\s*$" ${t_console_httpd_port_file?}; then
                    # q_puppet_enterpriseconsole_httpd_port will have the port
                    # listed twice now; make an array out of it and take the
                    # last port, though they should be the same
                    t_ports=(${q_puppet_enterpriseconsole_httpd_port?})
                    q_puppet_enterpriseconsole_httpd_port=${t_ports[1]}
                    t_remove_invalid_console_proxy_vhost='true'
                else
                    display_failure "Invalid console port '${q_puppet_enterpriseconsole_httpd_port?}': please make sure ${t_console_httpd_port_file?} does not contain extra vhost entries."
                fi
            fi

            export t_console_services_java_args="$(get_java_args "pe-console-services")"

            if [ -z "${q_puppet_enterpriseconsole_httpd_port}" ]; then
                display_failure "Could not determine the existing Puppet Enterprise console port"
            fi
        else
            q_puppet_enterpriseconsole_install='n'
        fi

        if is_puppetdb; then
            q_puppetdb_install='y'

            # If it's not installed, we have to ask if they want to move to
            # pe-postgres, so we can't just set this to no.
            if is_postgres; then
                q_database_install='y'
            fi

            q_puppetdb_plaintext_port="${q_puppetdb_plaintext_port:-"$(get_ini_field '/etc/puppetlabs/puppetdb/conf.d/jetty.ini' 'port')"}"
            q_puppetdb_hostname="${q_puppetdb_hostname:-$q_puppetagent_certname}"
            q_puppetdb_port="${q_puppetdb_port:-"$(get_ini_field '/etc/puppetlabs/puppetdb/conf.d/jetty.ini' 'ssl-port')"}"
            export t_puppetdb_java_args="$(get_java_args "pe-puppetdb")"

            t_main_database_subname="$(get_ini_field '/etc/puppetlabs/puppetdb/conf.d/database.ini' 'subname')"
            extract_database_host_and_port_from_subname "${t_main_database_subname?}"

            q_puppetdb_database_name="${q_puppetdb_database_name:-$(echo "${t_main_database_subname?}" | sed -e 's/\/\/\([^:][^:]*\):\([0-9][0-9]*\)\/\([^?]\+\)?\?.*/\3/')}"

            q_puppetdb_database_user="${q_puppetdb_database_user:-$(get_ini_field '/etc/puppetlabs/puppetdb/conf.d/database.ini' 'username')}"
            q_puppetdb_database_password="${q_puppetdb_database_password:-$(get_ini_field '/etc/puppetlabs/puppetdb/conf.d/database.ini' 'password')}"

        else
            # This could be changed later during the all-in-one check.
            q_puppetdb_install='n'

            if is_puppetmaster; then
                if [ -e '/etc/puppetlabs/puppet/puppetdb.conf' ]; then
                    # Try to figure out where PuppetDB is from the puppetdb.conf file
                    if [ "$(echo_vercmp 4.0.0 $CURRENT_PE_VERSION)" = "1" ]; then
                        q_puppetdb_hostname=${q_puppetdb_hostname:-"$(get_ini_field '/etc/puppetlabs/puppet/puppetdb.conf' server)"}
                        q_puppetdb_port=${q_puppetdb_port:-"$(get_ini_field '/etc/puppetlabs/puppet/puppetdb.conf' port)"}
                    else
                        t_puppet_puppetdb_server_urls="$(get_ini_field '/etc/puppetlabs/puppet/puppetdb.conf' server_urls)"
                        extract_puppetdb_host_and_port "${t_puppet_puppetdb_server_urls?}"
                    fi
                fi
            elif is_console; then
                # Try to figure out where the database is from the classifer-database.conf file
                if [ -e '/etc/puppetlabs/console-services/conf.d/classifier-database.conf' ]; then
                    t_main_database_subname="$(get_hocon_field '/etc/puppetlabs/console-services/conf.d/classifier-database.conf' 'subname')"
                    extract_database_host_and_port_from_subname "${t_main_database_subname?}"
                fi

                # Try to figure out where PuppetDB is from the console.conf file
                if [ -s '/etc/puppetlabs/console-services/conf.d/console.conf' ]; then
                    t_puppetdb_url="$(get_hocon_field '/etc/puppetlabs/console-services/conf.d/console.conf' 'puppetdb-server')"
                    # Remove protocol prefix
                    t_puppetdb_host_and_port=${t_puppetdb_url##*://}

                    # Split puppetdb.foo.com:8081 around ':'
                    q_puppetdb_hostname=${t_puppetdb_host_and_port%%:*}
                    q_puppetdb_port=${t_puppetdb_host_and_port##*:}
                fi
            fi
        fi

        if is_postgres; then
            q_database_shared_buffers="$(get_postgres_setting "shared_buffers")"
            q_database_maintenance_work_mem="$(get_postgres_setting 'maintenance_work_mem')"
            q_database_effective_cache_size="$(get_postgres_setting 'effective_cache_size')"
            q_database_wal_buffers="$(get_postgres_setting 'wal_buffers')"
            q_database_work_mem="$(get_postgres_setting 'work_mem')"
            q_database_checkpoint_segments="$(get_postgres_setting 'checkpoint_segments')"
            q_database_log_min_duration_statement="$(get_postgres_setting 'log_min_duration_statement')"
        fi

        # If we're a master and a console, we must be either a 2.x install (in
        # which case we get converted to all-in-one), or a 3.x all-in-one
        # install (which means we must already have puppetdb and the database).
        if is_puppetmaster && is_console; then
            q_all_in_one_install='y'

            q_puppetdb_install='y'
        else
            q_all_in_one_install='n'
        fi

        # If we're a master or console, we need to make sure to respect that
        # the user may have opted out of update checking. If they set it in an
        # answer file *now*, use that. Otherwise check if they set it in the
        # old answer file (if it's still around). Otherwise it's on by default.
        if (is_puppetmaster || is_console) && [ -s '/etc/puppetlabs/installer/answers.install' ]; then
            q_pe_check_for_updates=${q_pe_check_for_updates:-"$(get_ini_field '/etc/puppetlabs/installer/answers.install' q_pe_check_for_updates)"}
        fi

        if (is_puppetdb || is_console) && [ -s '/etc/puppetlabs/installer/answers.install' ]; then
            q_puppetmaster_certname="${q_puppetmaster_certname:-"$(get_ini_field '/etc/puppetlabs/installer/answers.install' 'q_puppetmaster_certname')"}"
        fi
    fi

    #---[ Support for installing in a symlinked opt dir ]--------------------

    if [ -L "/opt" ]; then
        case "${PLATFORM_NAME?}" in
            solaris)
                declare -x PKG_NONABI_SYMLINKS='true'
                ;;
        esac
    fi

    if ! is_upgrade; then
        #---[ Interview user ]--------------------------------------------------
        if [ -z "${ANSWER_FILE_TO_LOAD?}" ]; then
            t_automated_install_doc_link="http://docs.puppetlabs.com/pe/${PE_LINK_VER?}/install_automated.html"
            display_step 'GUIDED INSTALLATION' n
            display_newline
            display "Before you begin, choose an installation method. We've provided a few paths to choose from."
            display_newline
            echo "- Perform a guided installation using the web-based interface. Think of this as an installation interview in which we ask you exactly how you want to install PE. In order to use the web-based installer, you must be able to access this machine on port 3000 and provide the SSH credentials of a user with root access. This method will login to servers on your behalf, install Puppet Enterprise and get you up and running fairly quickly." | display_wrapped_text 0 0
            display_newline
            display_newline
            echo "- Use the web-based interface to create an answer file so that you can log in to the servers yourself and perform the installation locally. If you choose not to use the web-based interface, you can write your own answer file, or use the answer file(s) provided in the PE installation tarball. Refer to Answer File Installation (${t_automated_install_doc_link?}), which provides an overview on installing PE with an answer file." | display_wrapped_text 0 0
            display_newline
            display_newline

            ask q_packages_install "Install packages and perform a guided install?" Yn

            if [ 'y' = "${q_packages_install?}" ]; then
                display_newline
                display "Installing setup packages."
                display_newline

                enqueue_installer_packages
                run_suppress_stdout install_queued_packages

                # Copy the pe installer
                t_installer_dest="${PUPPET_SHARE_DIR?}/installer/installer"
                run "mkdir -p '${t_installer_dest?}'"
                run "cp -pR '$(installer_dir)'/* '${t_installer_dest?}'"

                # Start installer service
                INSTALLER_SUPPORTED_PORTS="3000,4567"
                INSTALLER_PORT=$(find_unused_tcp_port "${PLATFORM_HOSTNAME}" "${INSTALLER_SUPPORTED_PORTS}")
                if [ "${INSTALLER_PORT}" != '' ]; then
                    echo "Please go to https://${PLATFORM_HOSTNAME}:${INSTALLER_PORT} in your browser to continue installation. Be sure to use https:// and that port ${INSTALLER_PORT?} is reachable through the firewall." | display_wrapped_text 0 0
                    display_newline

                    # This will block
                    pushd ${PUPPET_SHARE_DIR?}/installer &>/dev/null

                    run_suppress_output "RACK_ENV=production ${PUPPET_BIN_DIR}/bundle exec thin start --debug -p ${INSTALLER_PORT} -a 0.0.0.0 --ssl --ssl-disable-verify"

                    popd &>/dev/null

                    display_newline
                    display "Your infrastructure has finished installing."
                    display "Thank you for installing Puppet Enterprise!"
                    quit 0
                else
                    display_failure "Could not open a TCP port for web server. Tried ${INSTALLER_SUPPORTED_PORTS}."
                fi
            else
                display_newline
                display_major_separator
                display_newline
                display "!! Installation cancelled"
                display_newline
                display_major_separator
                quit 1
            fi
        fi
    fi

    display_step 'SELECT AND CONFIGURE ROLES' n
    display_newline

    echo "This installer lets you select and install the various roles required in a Puppet Enterprise deployment: puppet master, console, database, and puppet agent." | display_wrapped_text
    display_newline
    display_newline

    display "NOTE: when specifying hostnames during installation, use the fully-qualified domain name (foo.example.com) rather than a shortened name (foo)."
    display_newline

    display_product 'puppet master' "The puppet master serves configurations to a group of puppet agent nodes. This role also provides MCollective's message queue and client interface. It should be installed on a robust, dedicated server."
    ask q_puppetmaster_install 'Install puppet master?' yN

    if [ y = "${q_puppetmaster_install?}" ]; then
        display_product "standalone install" "You may choose to either install PuppetDB and the console on this node, or to install each service on its own node. If you choose not to install PuppetDB and the console on this node, you will be asked where to find them."
        ask q_all_in_one_install "Install PuppetDB and console on this node?" Yn

        # If all-in-one, then autoselect everything. If not all-in-one,
        # then deselect everything.
        if [ y = "${q_all_in_one_install?}" ]; then
            q_puppetdb_install='y'
            q_puppet_enterpriseconsole_install='y'
        else
            q_puppetdb_install='n'
            q_puppet_enterpriseconsole_install='n'
        fi
    else
        q_all_in_one_install=n

        ask q_puppetagent_server "Puppet master hostname to connect to?" String puppet
        if [ 'n' = "${q_skip_master_verification:-"n"}" ] ; then
            while ! tcp_port_in_use "${q_puppetagent_server}" 8140 ; do
                query_about_master_connectivity
            done
        fi
        :           ${q_fail_on_unsuccessful_master_lookup:='y'}
        :           ${q_puppetca_hostname:=${q_puppetagent_server?}}
    fi

    # If you're *not* installing a master, you can choose PuppetDB
    if [ ! y = "${q_puppetmaster_install?}" ]; then
        display_product 'database support' "This role provides database support for PuppetDB and PE's console. PuppetDB is a centralized data service that caches data generated by Puppet and provides access to it via a robust API. The console uses data provided by a PostgreSQL server and database both of which will be installed along with PuppetDB on the node you specify."

        echo "IMPORTANT: If you choose not to install PuppetDB at this time, you will be prompted for the host name of the node you intend to use to provide database services. Note that you must install database support on that node for the console to function. When using a separate node, you should install database support on it BEFORE installing the console role." | display_wrapped_text
        display_newline
        display_newline

        ask q_puppetdb_install 'Install PuppetDB?' yN

        # If you've chosen PuppetDB, you cannot choose console
        if [ y = "${q_puppetdb_install?}" ]; then
            q_puppet_enterpriseconsole_install='n'
        fi
    fi

    if [ 'y' = "${q_puppetdb_install?}" ]; then
        :       ${q_puppetdb_plaintext_port:='8080'}
        :       ${q_puppetdb_port:='8081'}

        if ( ! is_upgrade ); then
            # Verify that ports 8080 and 8081 (or the port from the answer file) are available if this isn't answer-save mode.
            for port in $q_puppetdb_plaintext_port $q_puppetdb_port; do
                if tcp_port_in_use "127.0.0.1" $port ; then
                    display_newline
                    display_failure "Port $port appears to be in use. This port is required for PuppetDB. Please either move the services for this port to another or install on a system with this port available."
                fi
            done
        fi
    fi

    # If you're not installing master or PuppetDB, you can install console
    if [ ! y = "${q_puppetmaster_install?}" -a ! y = "${q_puppetdb_install?}" ]; then
        display_product 'console' "The console is a web interface where you can view reports, classify nodes, control Puppet runs, and invoke MCollective agents. It can be installed on the puppet master's node, but for performance considerations, especially in larger deployments, it can also be installed on a separate node."
        ask q_puppet_enterpriseconsole_install 'Install the console?' yN
        # A standalone console node never acts as the CA.
        q_puppetca_install='n'
    fi

    # Verify that port 8140 is available if this isn't answer-save mode and we're not upgrading.
    if ( ! is_upgrade ) && [ 'y' = "${q_puppetmaster_install?}" -o 'y' = "${q_puppet_enterpriseconsole_install?}" ] && tcp_port_in_use "127.0.0.1" 8140 ; then
        display_newline
        display_failure "Port 8140 appears to be in use. This port is required for the puppet master. Please either move the services for this port to another or install on a system with this port available."
    fi

    # Verify they haven't somehow chosen two out of three roles (such as
    # answer file). This shouldn't actually be possible, since we
    # automatically deselect roles during the interview, but better to be
    # safe than sorry. Only all-in-one (all three) or a single role is a
    # supported combination.
    if [ y = "${q_puppetmaster_install?}" -a y = "${q_puppetdb_install?}" -a ! y = "${q_puppet_enterpriseconsole_install?}" ]; then
        display_failure "You may not select the Puppet master and PuppetDB roles together without the console role. Please select either an all-in-one install or a single role."
    fi

    if [ y = "${q_puppetmaster_install?}" -a ! y = "${q_puppetdb_install?}" -a y = "${q_puppet_enterpriseconsole_install?}" ]; then
        display_failure "You may not select the Puppet master and console roles together without the PuppetDB role. Please select either an all-in-one install or a single role."
    fi

    if [ ! y = "${q_puppetmaster_install?}" -a y = "${q_puppetdb_install?}" -a y = "${q_puppet_enterpriseconsole_install?}" ]; then
        display_failure "You may not select the PuppetDB and console roles together without the Puppet master role. Please select either an all-in-one install or a single role."
    fi

    # If we're not installing PuppetDB, we probably need to know where it is.
    if [ ! y = "${q_puppetdb_install?}" ] && [ y = "${q_puppetmaster_install?}" -o y = "${q_puppet_enterpriseconsole_install?}" ]; then
        if [ y = "${q_puppetmaster_install?}" ] && ! is_upgrade; then
            display_newline
            echo "Puppet Enterprise requires the installation of PuppetDB.  PuppetDB needs to be cleanly installed on a new node after the installation of the puppet master has successfully completed. Please provide the hostname and port for the node on which you will be installing PuppetDB." | display_wrapped_text
            display_newline
            display_newline
        fi
        ask q_puppetdb_hostname "Hostname for contacting PuppetDB?" String
        ask q_puppetdb_port "Port for contacting PuppetDB?" Port "8081"
        # Plaintext port is not asked for here; default it quietly.
        :       ${q_puppetdb_plaintext_port:='8080'}
    fi

    # Resolve where the console will live: locally (default to this host's
    # name) or, for a master-only install, on a node the user names here.
    if [ y = "${q_puppet_enterpriseconsole_install?}" ]; then
        :                ${q_puppetmaster_enterpriseconsole_hostname:=$(hostname)}
    elif [ y = "${q_puppetmaster_install?}" ]; then
        if ! is_upgrade ; then
            display_newline
            echo "Puppet Enterprise requires the installation of the Console.  The Console needs to be cleanly installed on a new node after the installation of the puppet master has successfully completed. Please provide the hostname and port for the node on which you will be installing the Console." | display_wrapped_text
            display_newline
            display_newline
        fi
        ask q_puppetmaster_enterpriseconsole_hostname "Hostname for contacting the console?" String
        # The console certname defaults to its hostname unless pre-answered.
        :                ${q_puppetmaster_enterpriseconsole_certname:="${q_puppetmaster_enterpriseconsole_hostname?}"}
    fi

    # The agent role is mandatory whenever any server-side role is selected;
    # otherwise it is the only thing left to offer.
    if [ y = "${q_puppetmaster_install?}" -o y = "${q_puppet_enterpriseconsole_install?}" -o y = "${q_puppetdb_install?}" ]; then
        q_puppetagent_install='y'
        display_product 'puppet agent' 'The puppet agent role is automatically installed with the console, puppet master, and puppetdb roles.'
    else
        display_product 'puppet agent' 'The puppet agent applies configurations from the puppet master and submits reports and inventory information. It should be installed on every node you plan to manage with Puppet.'
        ask q_puppetagent_install 'Install puppet agent?' Yn
    fi

    if [ y = "${q_puppetmaster_install?}" ]; then
        ask q_puppetmaster_certname "The puppet master's certificate will contain a unique name (\"certname\"); this should be the main DNS name at which it can be reliably reached. Puppet master's certname?" StringForceLowerCase "${PLATFORM_HOSTNAME?}" # The master's certname gets used as the filebucket server in site.pp. If it isn't a reachable DNS name, users have to edit site.pp post-install.
        # We only use alt names when generating the master's cert, so we
        # don't need them for upgrade
        if ! is_upgrade; then
            ask q_puppetmaster_dnsaltnames "The puppet master's certificate can contain DNS aliases; agent nodes will only trust the master if they reach it at its certname or one of these official aliases. Puppet master's DNS aliases (comma-separated list)?" StringDNSName "$(display_dnsaltnames "${q_puppetmaster_certname?}" "puppet")"
        fi
        display_newline
        # The master doubles as the CA unless an answer already says otherwise.
        :           ${q_puppetca_hostname:=${q_puppetmaster_certname?}}
    fi

    # Explain the PostgreSQL requirement. Wording depends on which of the two
    # database-consuming roles (PuppetDB, console) are selected on this node.
    if [ y = "${q_puppetdb_install?}" -o y = "${q_puppet_enterpriseconsole_install?}" ]; then
        if [ y = "${q_puppetdb_install?}" ]; then
            if [ y = "${q_puppet_enterpriseconsole_install?}" ]; then
                t_main_database_products="The Puppet Enterprise console and PuppetDB"
                t_main_database_verb="require"
            else
                t_main_database_products="PuppetDB"
                t_main_database_verb="requires"
            fi

            echo "${t_main_database_products?} ${t_main_database_verb?} a PostgreSQL database and a user account able to edit it. Puppet Enterprise includes a Postgresql server which you can install locally, or you can specify an existing remote database (which must be configured and available prior to installing the console or PuppetDB)." | display_wrapped_text
        elif [ y = "${q_puppet_enterpriseconsole_install?}" ]; then
            echo "The Puppet Enterprise console requires a PostgreSQL database and a user account able to edit it. This database and the Puppet Enterprise PostgreSQL server are automatically installed and configured, along with PuppetDB, on a node you select for the database support role. You should install this role BEFORE installing the console role. After installing the database support role, you can find auto-generated passwords for the database user and the authentication database user in '/etc/puppetlabs/installer/database_info.install' on that node." | display_wrapped_text
        fi
        display_newline
        display_newline
    fi

    # Offer the bundled PostgreSQL server only when PuppetDB itself is being
    # installed on this node; any other answer skips the local database.
    case "${q_puppetdb_install?}" in
        y)
            ask q_database_install "Install the included Puppet Enterprise PostgreSQL server locally?" Yn
            ;;
        *)
            q_database_install='n'
            ;;
    esac

    # orchestrator db information should be generated on the master node in a split deployment
    if [ y = "${q_puppetmaster_install?}" ]; then
        # Generate on fresh installs, or on upgrades from before 2015.3.0
        # (assumes echo_vercmp prints "1" when its first argument is the
        # newer version — TODO confirm against echo_vercmp's definition).
        if ! is_upgrade || (is_upgrade && [ "$(echo_vercmp 2015.3.0 $CURRENT_PE_VERSION)" = "1" ]); then
            set_orchestrator_defaults
            save_database_info_etc
        fi
        # notify the user that orchestrator db info will be needed on the db node
        if [ ! y = "${q_database_install?}" ] ; then
            # only alert on upgrade if the orchestrator db will be created
            if is_upgrade && [ "$(echo_vercmp 2015.3.0 $CURRENT_PE_VERSION)" = "1" ]; then
                t_alert_to_move_orchestrator_answers='y'
            fi
            # alert on new installs
            # notify that orchestrator db info will have to be provided on the db node
            if ! is_upgrade; then
                t_alert_to_move_orchestrator_answers='y'
            fi
        fi
    fi

    # orchestrator db information will be required on the database node in a split deployment
    # In an external postgres install, the database node does not need to care.
    if [ y = "${q_database_install?}" ] && [ ! y = "${q_puppetmaster_install?}" ] && ! is_external_postgres ; then
        if ! is_upgrade || (is_upgrade && [ "$(echo_vercmp 2015.3.0 $CURRENT_PE_VERSION)" = "1" ]); then
            ask q_orchestrator_database_name "What is the name of the Orchestrator database?" String pe-orchestrator
            ask q_orchestrator_database_user "What is the name of the Orchestrator user?" String pe-orchestrator
            ask q_orchestrator_database_password "What is the password for ${q_orchestrator_database_user?} (the database user)?" Password4
            # Consumed later (presumably by the postgres setup step) to flag
            # that this node must create/manage the orchestrator database.
            export t_manage_orchestrator_database='y'
        fi
    fi

    # Local pe-postgresql path vs. remote/external PostgreSQL path.
    if [ y = "${q_database_install?}" ]; then

        # NOTE(review): '-x' tests search permission on the old data
        # directory; '-d' may have been the intent here — confirm.
        if ! is_upgrade && [ -x /opt/puppetlabs/server/data/postgresql ] && [ "y" != "${q_skip_backup}" ]; then
            display_newline
            display " === Existing Puppet Enterprise data detected === "
            display_newline
            display_comment "It looks like Puppet Enterprise had been installed on this machine and it was uninstalled without the purge data option (-d). The existing databases cannot be reused by this installer. You can either quit this installation or the installer can move your existing database directory to /opt/puppetlabs/server/data/postgresql.<yyyymmddHHMMSS>.bak and create a new one."
            ask q_backup_and_purge_old_database_directory "Would you like to backup your existing database directory and continue installing Puppet Enterprise?" yN
            if [ "y" != "${q_backup_and_purge_old_database_directory?}" ]; then
                quit 1
            fi
        else
            q_backup_and_purge_old_database_directory='n'
        fi

        if is_upgrade; then
            # Before attempting to connect to PSQL, we must ensure that it is running
            puppet_resource "service pe-postgresql ensure=running"
        fi

        # Fixed local server parameters; host defaults to this node.
        q_database_root_user="pe-postgres"
        : ${q_database_host:="${PLATFORM_HOSTNAME?}"}
        q_database_port='5432'
        # Verify that port 5432 is available for the pe-postgresql server
        if ( ! is_upgrade ) && tcp_port_in_use "127.0.0.1" ${q_database_port} ; then
            display_newline
            display_failure "Port ${q_database_port} appears to be in use. This port is required for the Puppet Enterprise Postgresql Server. Please either move the services for this port to another or install on a system with this port available."
        fi

        # Auto-generate the superuser password unless pre-answered.
        q_database_root_password="${q_database_root_password:-"$(gen_password)"}"

        if [ y = "${q_puppetmaster_install?}" ] ; then
            if ! is_upgrade || (is_upgrade && [ "$(echo_vercmp 2015.3.0 $CURRENT_PE_VERSION)" = "1" ]); then
                set_orchestrator_defaults
            fi
        fi

        set_database_defaults
    else
        # On split installs, default to PuppetDB location as the likely
        # location of postgres. It's either that or they're using their
        # own. On combined or PuppetDB-only installs, we have nothing to
        # reasonably default to.
        if [ ! y = "${q_puppetdb_install?}" ]; then
            ask q_database_host "What is the hostname of the PostgreSQL server?" String "${q_puppetdb_hostname?}"
        else
            ask q_database_host "What is the hostname of the PostgreSQL server?" String
        fi
        ask q_database_port "What is the port of the PostgreSQL server?" Port "5432"

        # PuppetDB against an external/remote postgres needs its own
        # database name, user and password.
        if [ y = "${q_puppetdb_install?}" ] && (! is_upgrade || is_external_postgres); then
            ask q_puppetdb_database_name "What is the name of the PuppetDB database?" String pe-puppetdb
            ask q_puppetdb_database_user "What is the name of the PuppetDB database user?" String pe-puppetdb
            ask q_puppetdb_database_password "What is the password for ${q_puppetdb_database_user?} (the database user)?" Password4
        fi
    fi

    # Console-specific answers: HTTP port, master hostname, admin password,
    # and database credentials when postgres is not installed locally.
    if [ y = "${q_puppet_enterpriseconsole_install?}" ]; then
        if ! is_upgrade; then
            # We already know the port to use in an upgrade, so we don't want to find a new one or ask.
            default_console_port=$(find_unused_tcp_port "127.0.0.1" "${CONSOLE_PORT_OPTIONS}")
            if [ -z "${default_console_port}" ] ; then
                ask q_puppet_enterpriseconsole_httpd_port "None of the default ports (${CONSOLE_PORT_OPTIONS}) are open for use by the PE console. What is a port for use by the PE console?" Port
            else
                q_puppet_enterpriseconsole_httpd_port="${q_puppet_enterpriseconsole_httpd_port:-"${default_console_port}"}"
            fi
        fi

        if [ y = "${q_puppetmaster_install}" ]; then
            :                ${q_puppet_enterpriseconsole_master_hostname:="${q_puppetmaster_certname?}"}
        fi

        # Pre-3.7.0 console users are not migrated, so warn before asking
        # for a fresh admin password below.
        if is_upgrade && [ "$(echo_vercmp 3.7.0 $CURRENT_PE_VERSION)" = "1" ] ; then
            display_newline
            echo "!!! WARNING: Existing Puppet Enterprise Console users will not be migrated. Please set a new password for your Puppet Enterprise Console 'admin' superuser." | display_wrapped_text 0
            display_newline
            display_newline
        fi

        if ! is_upgrade; then
            ask q_puppet_enterpriseconsole_auth_password "Password for Puppet Enterprise Console superuser 'admin' (minimum 8 characters)?" Password8
        fi

        # With a remote/external postgres, the console's three service
        # databases (RBAC, activity, classifier) must be supplied by hand.
        if ! is_upgrade || is_external_postgres; then
            if [ ! y = "${q_database_install?}" ]; then
                ask q_rbac_database_name "What is the name of the RBAC database?" String pe-rbac
                ask q_rbac_database_user "What is the name of the RBAC database user?" String pe-rbac
                ask q_rbac_database_password "What is the password for ${q_rbac_database_user?} (the database user)?" Password4
                ask q_activity_database_name "What is the name of the Activity database?" String pe-activity
                ask q_activity_database_user "What is the name of the Activity database user?" String pe-activity
                ask q_activity_database_password "What is the password for ${q_activity_database_user?} (the database user)?" Password4
                ask q_classifier_database_name "What is the name of the Classifier database?" String pe-classifier
                ask q_classifier_database_user "What is the name of the Classifier database user?" String pe-classifier
                ask q_classifier_database_password "What is the password for ${q_classifier_database_user?} (the database user)?" Password4
            fi
        fi

        if [ ! y = "${q_puppetdb_install?}" ]; then
            display_newline
            echo "In order to properly classify ${q_puppetdb_hostname?} as a PuppetDB server and provide continued management, you must provide the PuppetDB database information." | display_wrapped_text
            display_newline
            ask q_puppetdb_database_name "What is the name of the PuppetDB database?" String pe-puppetdb
            ask q_puppetdb_database_user "What is the name of the PuppetDB database user?" String pe-puppetdb
            ask q_puppetdb_database_password "What is the password for ${q_puppetdb_database_user?} (the database user)?" Password4
        fi
    fi

    # If it's a remote postgres and we're not just saving answers, we
    # need to validate that we can connect, and that the
    # databases/users exist. If it's our postgres, we're already going
    # to be ensuring those things, so there should be no need to validate.
    if is_external_postgres && is_pe_service_install; then
        if is_valid_postgres_bin_dir; then
            verify_db
            need_to_verify_db=n
        else
            # Client binaries not found yet; presumably a later step re-checks
            # when need_to_verify_db=y — confirm against the consumer of this flag.
            need_to_verify_db=y
        fi
    fi


    # Determine the agent certname and propagate it into the console/server
    # answers that default to it.
    if [ y = "${q_puppetagent_install?}" ]; then
        t_default_agent_certname="${PLATFORM_HOSTNAME?}"
        # If we're on a master, and the user isn't driving the installer from an answer file
        # set the certname to the master certname provided earlier
        if [ y = "${q_puppetmaster_install?}" ]; then
            # Update the default value to remove visually jarring difference in default/answer.
            t_default_agent_certname="${q_puppetmaster_certname}"
            if [ -z "${q_puppetagent_certname}" ]; then
                q_puppetagent_certname="${q_puppetmaster_certname}"
            fi
            :            ${q_puppetagent_server:="${q_puppetmaster_certname?}"}
        else
            ask q_puppetagent_certname "Puppet agent needs a unique name (\"certname\") for its certificate; this can be an arbitrary string. Certname for this node?" StringForceLowerCase "${t_default_agent_certname}"

            # NOTE(review): this compares against the literal 'n', not "not y"
            # like the surrounding tests; an unset/empty q_puppetmaster_install
            # would fail this test — confirm whether that is intentional.
            if [ y = "${q_puppet_enterpriseconsole_install?}" -a n = "${q_puppetmaster_install?}" ]; then
                :                ${q_puppet_enterpriseconsole_master_hostname:="${q_puppetagent_server?}"}
            fi
        fi
        :                ${q_puppetmaster_enterpriseconsole_certname:="${q_puppetagent_certname?}"}
    fi

    if [ y = "${q_puppetdb_install?}" ]; then
        # We need to know this for classification, and to bind host and port
        :       ${q_puppetdb_hostname:="${q_puppetagent_certname?}"}
    fi

    if [ ! y = "${q_puppetmaster_install?}" ] && [ y = "${q_puppet_enterpriseconsole_install?}" -o y = "${q_puppetdb_install?}" ]; then
        # In case the user deleted their old answer file we do this on upgrades as well
        ask q_puppetmaster_certname "What is the certname of the puppet master?" StringForceLowerCase "${q_puppetagent_server?}"
    fi

    # AIX only: offer to run 'updtvpkg' so rpm can resolve dependencies on
    # native AIX libraries; every other platform silently skips it.
    if [ 'xaix' = "x${PLATFORM_NAME}" ] && [ -x /usr/sbin/updtvpkg ] ; then
        display_product 'updtvpkg' "Puppet Enterprise contains rpm packages that depend on native AIX libraries. In order to resolve these dependencies correctly, the 'updtvpkg' command is used to populate the rpm database with the native AIX libraries already present on the system."
        ask q_run_updtvpkg "Run 'updtvpkg' to populate the rpm database with available native libaries? (this may take some time)" Yn
    else
        q_run_updtvpkg=n
    fi

    # Ensure that both the master and the console node have the correct console
    # hostname reference
    if [ y = "${q_puppetmaster_install?}" ]; then
        t_console_hostname="${q_puppetmaster_enterpriseconsole_hostname?}"
    elif [ y = "${q_puppet_enterpriseconsole_install?}" ]; then
        t_console_hostname="${q_puppetagent_certname?}"
    fi
    # For puppet_enterprise.pp.erb and pe-classification.rb
    export t_console_hostname

    #...[ Vendor packages ].................................................

    enqueue_vendor_packages

    # Determine which vendor packages are missing
    t_main_missing_vendor_packages="$(missing_queued_packages)"

    # Continue interview
    if [ ! -z "${t_main_missing_vendor_packages?}" ]; then
      # Check for RHEL4 here and bail if we're missing vendor packages.
      # RHEL4 doesn't have yum, so installing vendor packages is an exercise left to the user
      if [ "${VENDOR_PACKAGE_OFFLINE?}" = "true" ]; then
        display_product 'Vendor Packages' "The installer has detected that Puppet Enterprise requires additional packages from your operating system vendor's repositories, and cannot automatically install them. The installer will now exit so you can install them manually."
        display_missing_vendor_packages "${t_main_missing_vendor_packages?}"
        display_failure "You must manually install the above packages before installing Puppet Enterprise."
      else
        # When running, only prompt user to install vendor packages if needed
        display_product 'Vendor Packages' "The installer has detected that Puppet Enterprise requires additional packages from your operating system vendor's repositories, and can automatically install them. If you choose not to install these packages automatically, the installer will exit so you can install them manually."
        display_missing_vendor_packages "${t_main_missing_vendor_packages?}"
        ask q_vendor_packages_install 'Install these packages automatically?' Yn
        if [ ! y = "${q_vendor_packages_install?}" ]; then
          display_failure "You must manually install the above packages before installing Puppet Enterprise."
        fi
      fi
    else
      # Set default value
      q_vendor_packages_install="${q_vendor_packages_install:-"n"}"
    fi

    #...[ Check for existing configuration ]...............................

    # A leftover /etc/puppetlabs from an un-purged uninstall cannot be
    # reused; offer to back it up (the actual move happens after confirm).
    if is_pe_service_install && ! is_upgrade && [ "y" != "${q_skip_backup}" ]; then
        if is_pe_installed ; then
            display_newline
            display " === Existing Puppet Enterprise configuration detected === "
            display_newline

            display_comment "It looks like Puppet Enterprise had been installed on this machine and it was uninstalled without the purge option (-p). The existing configuration cannot be reused by this installer. You can either quit this installation or the installer can move your existing configuration to /etc/puppetlabs.<yyyymmddHHMMSS>.bak and create a new one."
            ask q_backup_and_purge_old_configuration "Would you like to backup your existing configuration and continue installing Puppet Enterprise?" yN
            if [ "y" != "${q_backup_and_purge_old_configuration?}" ]; then
                quit 1
            fi
        else
            q_backup_and_purge_old_configuration='n'
        fi
    fi

    #...[ Check directory environment migration ]..............................

    if is_upgrade && [ "$(echo_vercmp 4.0.0 "$CURRENT_PE_VERSION")" = "1" ]; then
        # The environments.rake script MUST run from Puppet < 4, because Puppet 4 will
        # raise an error if it encounters legacy environment settings.
        if ! run "/opt/puppet/bin/rake -s -f '${INSTALLER_DIR}/environments.rake' environments:check 2>/dev/null"; then
            display_newline
            # We can't run with legacy environments, so fail hard.
            display_failure "Looks like you don't have directory environments enabled. They are required for ${PE_VERSION}. Please resolve this issue by making the appropriate change(s) to puppet.conf and then run the upgrade again. If needed, contact Puppet Labs support. At this point, your Puppet Enterprise components and configuration files have not been changed."
        fi
    fi

    #---[ Quit early ]------------------------------------------------------

    # Nothing at all was selected: abort before touching the system.
    if ! is_pe_service_install && [ ! y = "${q_puppetagent_install?}" ]; then
        display_newline
        display_major_separator
        display_newline
        display_failure "Nothing selected for installation"
    fi

    # (PE-5583) Upgrade pg for split installs on pdb node (console and master should ignore), mono installs if pg is 9.2, and quit if external pg instances have the wrong version.
    t_backup_restore_pg="false"
    if is_upgrade && is_puppetdb; then
        t_postgres_version=$(check_postgres_version)
        if [ "${t_postgres_version?}" = '' ]; then
            display_failure " === Unable to determine your PostgreSQL version. This is usually caused by incorrect PuppetDB login credentials. === "
        elif  [ "${t_postgres_version?}" = "9.4" ]; then
            display "PostgreSQL 9.4 was detected. No database migration necessary."
        elif is_postgres && [ "${t_postgres_version?}" = "9.2" ]; then
            # PE-managed 9.2 instance: flag the backup/restore migration path.
            export t_backup_restore_pg="true"
        else
            # external postgres
            display_failure " === Your PostgreSQL instance is not being managed by Puppet Enterprise and appears to be ${t_postgres_version?}.  Please upgrade to PostgreSQL 9.4 before continuing the Puppet Enterprise upgrade. ==="
        fi
    fi

    # A pg upgrade will require some extra space on the system. Fail early if we don't have enough.
    if [ "${t_backup_restore_pg?}" = "true" ]; then
        # Size of the existing 9.2 data directory, in du's block units.
        # ($(...) replaces backticks; same commands, nestable and readable.)
        t_postgresql92_size=$(du -s /opt/puppet/var/lib/pgsql/9.2 | cut -f1)
        # Need roughly twice the data directory: the backup plus the restored
        # cluster. Shell arithmetic avoids forking 'expr'.
        t_required_backup_space=$((t_postgresql92_size * 2))
        t_free_drive_space=$(df /opt/puppet/var/lib | tail -n1 | tr -s ' ' | cut -d' ' -f4)
        if ! [ "${t_free_drive_space?}" -gt "${t_required_backup_space?}" ]; then
            display_failure  "!!! WARNING: Hard drive has insufficient space to perform the PostgreSQL upgrade. \
Please free at least ${t_required_backup_space?} bytes."
        fi

        # Fetch existing locale information so we don't attempt upgrading to a
        # different encoding.
        t_path_to_psql="$(postgres_bin_dir)/psql"
        display_newline
        display "Detecting Postgres Locale Settings..."
        # Exported for the later migration step; tr strips all whitespace
        # (and, as before, literal bracket characters) from psql's output.
        export t_existing_psql_encoding=$(su "${q_database_root_user}" -c "${t_path_to_psql?} -P tuples_only=on --command='SHOW server_encoding'" -s /bin/bash | tr -d '[[:space:]]')
        display "Encoding: ${t_existing_psql_encoding}"
        export t_existing_psql_ctype=$(su "${q_database_root_user}" -c "${t_path_to_psql?} -P tuples_only=on --command='SHOW LC_CTYPE'" -s /bin/bash | tr -d '[[:space:]]')
        display "Ctype: ${t_existing_psql_ctype}"
        export t_existing_psql_collate=$(su "${q_database_root_user}" -c "${t_path_to_psql?} -P tuples_only=on --command='SHOW LC_COLLATE'" -s /bin/bash | tr -d '[[:space:]]')
        display "Collate: ${t_existing_psql_collate}"
    fi

    #---[ Check for modified auth.conf ]------------------------------------

    if is_upgrade && is_puppetmaster; then
        # Detect if there is a custom auth.conf file and if there is do not
        # continue until the user confirms with the `q_exit_and_update_auth_conf`
        # answer to make it clear that there must be user intervention to
        # continue.

        if found_custom_auth_conf; then
            display_newline
            display_major_separator
            display_newline
            display "We found a custom auth.conf file. We're replacing the format of auth.conf, and strongly recommend you migrate over to the new format, as the old format will be going away in a future release. After the upgrade has finished, you can migrate by modifying the new auth.conf file in /etc/puppetlabs/puppetserver/conf.d/auth.conf. Optionally you may continue to use your existing auth.conf file to manage existing endpoints until it is completely removed in a future release."
            display "Please see https://docs.puppetlabs.com/pe/${PE_LINK_VER?}/install_upgrading_notes.html#changes-to-authconf-when-upgrading-to-puppet-enterprise-20153 for more information."
            display_newline
            ask q_migrate_auth_conf_after_upgrade "Would you like to migrate to the new auth.conf format after upgrading ('n' to continue using the existing auth.conf format)?" Yn
            if [ "${q_migrate_auth_conf_after_upgrade?}" = 'n' ]; then
                # On split installs we are unable to detect if the master has a
                # custom auth.conf from the console node. Currently the only
                # solution to getting the console this information is via an
                # answer that must be passed into the installer script.
                if is_puppetmaster && ! is_console; then
                    t_display_legacy_auth_conf_warning='y'
                fi

                display_auth_conf_diff
                display_newline
                display "Changes may be required to your auth.conf file before continuing with the upgrade. If you have not made these changes already, we strongly advise you exit the upgrader now to do so, as some functionality (e.g., console services) may not be available after upgrade if the endpoints aren't authorized."
                display "Please see https://docs.puppetlabs.com/pe/${PE_LINK_VER?}/install_upgrading_notes.html#changes-to-authconf-when-upgrading-to-puppet-enterprise-20153 for more information."
                display_newline
                ask q_exit_and_update_auth_conf "Would you like to exit the upgrade now to make the changes to your auth.conf ('n' if already completed)?" Yn
                if [ "${q_exit_and_update_auth_conf}" = "y" ]; then
                    display_failure "Upgrade canceled."
                fi
            fi
        fi
    fi

    #---[ Set file-sync service if needed ] --------------------------------
    if [ 'y' = "${q_puppetmaster_install?}" -o 'y' = "${q_puppet_enterpriseconsole_install?}" ]; then
        if [ -z "${q_puppetmaster_file_sync_service_enabled}" ]; then
            # We did not locate file-sync configuration and no answer was set
            if is_upgrade && [ "$(echo_vercmp 2015.3.0 "$CURRENT_PE_VERSION")" = "1" ]; then
                # Upgrading from pre-2015.3: keep file sync off by default.
                q_puppetmaster_file_sync_service_enabled='n'
            else
                q_puppetmaster_file_sync_service_enabled='y'
            fi
        fi

        # We've ensured that the answer has a value at this point
        # Now ask() to take advantage of the validation and typecasting
        # Will also leave a trail in the log about the answer state
        display_newline
        ask q_puppetmaster_file_sync_service_enabled "Enable the file sync service? (this will default to 'y' on an install and 'n' on an upgrade when upgrading from a version prior to 2015.3)" Yn
        if [ 'n' = "${q_all_in_one_install?}" -a 'y' = "${q_puppetmaster_install?}" ]; then
            display_comment "If you have overridden this in your answer file, be sure that you enter the same answer for file sync service on the console node's answer file..."
        fi
    fi

    #---[ Confirm ]---------------------------------------------------------

    display_step 'CONFIRM PLAN'

    render_plan

    #---[ Set OFFER_ROLL_BACK flag ]----------------------------------------------
    # Up until this point, we have made no changes to a system, so a failure
    # prior to this point would not require any sort of roll-back to restore
    # a system to a pristine state. After this point, we may have modified a
    # system, if only to save an answers file, so we can offer to roll
    # back using the uninstaller
    OFFER_ROLL_BACK='y'

    if is_upgrade; then
        if is_pe_service_install; then
            display "It is strongly recommended that you take a snapshot of this system before performing the upgrade."
            display_newline
        fi

        # PE 3.3 specifically: warn about user-installed gems not migrating.
        if [ "${CURRENT_PE_MAJOR?}" = "3" -a "${CURRENT_PE_MINOR?}" = "3" ]; then

            echo "If you've installed any additional gems beyond those installed by PE, they will not be migrated during the upgrade. If you have modules that depend on additional gems, you will need to reinstall them after you complete the upgrade process. Refer to http://docs.puppetlabs.com/pe/${PE_LINK_VER?}/release_notes_known_issues.html#updating-puppet-master-gems." | display_wrapped_text
            display_newline
        fi

        ask q_install 'Perform upgrade?' Yn
    else
        ask q_install 'Perform installation?' Yn
    fi

    if [ y = "${q_database_install}" ]; then
        #---[ Sanitize q_database_host for ssl ]-----------------------------
        # 'localhost' will not match the certificate presented for SSL
        # connections to the local postgres, so substitute this node's agent
        # certname. POSIX '=' replaces the bashism '==' for consistency with
        # every other [ ] comparison in this installer.
        if [ "${q_database_host}" = 'localhost' ]; then
            q_database_host=$q_puppetagent_certname
        fi
    fi

    if [ ! y = "${q_install?}" ]; then
        # User declined: save the interview answers for a later run and quit.
        display_newline
        display_major_separator
        display_newline
        display "!! Installation cancelled"
        display_newline
        display_major_separator
        do_save_answers
        quit 1
    else
        if ! is_upgrade; then
          # NOTE: the previous defaults were written as "${var:-'n'}"; inside
          # double quotes the single quotes are literal characters, so the
          # default could never equal "y" anyway. A bare n says what is meant.
          if [ "y" = "${q_backup_and_purge_old_configuration:-n}" ]; then
            run "mv /etc/puppetlabs{,.$(date '+%Y%m%d%H%M%S').bak}"
            run "./puppet-enterprise-uninstaller -py"
          fi

          if [ "y" = "${q_backup_and_purge_old_database_directory:-n}" ]; then
            run "mv /opt/puppetlabs/server/data/postgresql{,.$(date '+%Y%m%d%H%M%S').bak}"
          fi
        fi

        do_save_answers
        # BUG FIX: this previously tested q_master_install, a variable that is
        # never set anywhere in the installer (the real answer variable is
        # q_puppetmaster_install), so master-only nodes without a local
        # database skipped writing database_info to /etc.
        if [ y = "${q_database_install}" -o  y = "${q_puppetmaster_install}" ]; then
            save_database_info_etc
        fi
    fi

    #---[ Ignore q_upgrade_installation=y, #16091 ]------------------------
    ignore_duplicate 'upgrade'

    #---[ Export answers for use with erb ]---------------------------------

    # Export every q_* answer variable so child processes (the erb template
    # renders) can see them. $(...) replaces the legacy backticks; the names
    # produced by sed are single shell words, but quote the expansion anyway.
    for t_env_variable in $(set | ${PLATFORM_EGREP?} '^q_' | sed -n 's/^\(q_[^=][^=]*\).*$/\1/p'); do export "${t_env_variable}"; done
    # Export a non-q variable
    export PLATFORM_HOSTNAME

    # PLATFORM_HOSTNAME_SHORT is used by databases.erb
    export PLATFORM_HOSTNAME_SHORT

    # This is needed for puppet.conf
    export PLATFORM_NAME
    export PLATFORM_PUPPET_GROUP
    export PLATFORM_PUPPET_USER

    # These are used to make package repos
    export PE_VERSION
    export PLATFORM_TAG

    #---[ Stop services for upgrade ]---------------------------------------

    if is_upgrade ; then
        display_comment "Stopping Puppet Enterprise services for upgrade"

        # NOTE(review): echo_vercmp 4.0.0 "$CURRENT_PE_VERSION" = "1" appears
        # to select installs older than PE 4.0.0, which used the pe- prefixed
        # agent services — confirm against echo_vercmp's definition.
        if [ "$(echo_vercmp 4.0.0 "$CURRENT_PE_VERSION")" = "1" ]; then
            puppet_resource "service pe-puppet ensure=stopped" ||:
            puppet_resource "service pe-mcollective ensure=stopped" ||:
        else
            # puppet-agent services
            puppet_resource "service puppet ensure=stopped" ||:
            puppet_resource "service mcollective ensure=stopped" ||:
        fi
        if is_puppetmaster; then
            puppet_resource "service pe-activemq ensure=stopped"
            puppet_resource "service pe-puppetserver ensure=stopped"
            # Only stop pe-orchestration-services when the version comparison
            # against 2015.3.0 does not flag the install as older.
            if [ "$(echo_vercmp 2015.3.0 $CURRENT_PE_VERSION)" != "1" ]; then
                puppet_resource "service pe-orchestration-services ensure=stopped"
            fi
        fi
        if is_console; then
            puppet_resource "service pe-console-services ensure=stopped"
            # Older consoles ran Apache (pe-httpd) plus dashboard workers;
            # newer ones run nginx.
            if [ "$(echo_vercmp 4.0.0 "$CURRENT_PE_VERSION")" = "1" ]; then
                puppet_resource "service pe-httpd ensure=stopped"
                puppet_resource "service pe-puppet-dashboard-workers ensure=stopped"
            else
                puppet_resource "service pe-nginx ensure=stopped"
            fi
        fi
        if is_puppetdb; then
            puppet_resource "service pe-puppetdb ensure=stopped"
        fi
        # (PE-5583) If we have determined that this is an upgrade of a local
        # postgres instance, and postgres is at version 9.2, backup the data in
        # preparation for an upgrade. This must be done after other services
        # have been stopped, but while postgres is still running.
        if [ "${t_backup_restore_pg?}" = "true" ]; then
            # These are overridden by upgrading the pe-postgres package
            # so we need them in a safe location until the PG upgrade is complete
            run "cp -r /opt/puppet/bin /opt/puppet/var/lib/pgsql/9.2/bin"
            run "chown pe-postgres:pe-postgres /opt/puppet/var/lib/pgsql/9.2/bin"
            run "cp -r /opt/puppet/lib /opt/puppet/var/lib/pgsql/9.2/lib"
            run "chown pe-postgres:pe-postgres /opt/puppet/var/lib/pgsql/9.2/lib"
            run "mkdir -p /opt/puppet/var/lib/pgsql/9.2/share"
            run "cp -r /opt/puppet/share/postgresql /opt/puppet/var/lib/pgsql/9.2/share/postgresql"
            run "chown -R pe-postgres:pe-postgres /opt/puppet/var/lib/pgsql/9.2/share"

            # Backup the console database. Its name is looked up via the
            # pg_tablespace join below rather than hard-coded.
            t_pg_backup_dir="/opt/puppet/var/lib/pgsql/${CURRENT_PE_VERSION?}_backups"
            run "mkdir -p ${t_pg_backup_dir?}"
            run "chown pe-postgres:pe-postgres ${t_pg_backup_dir?}"
            run "chmod 700 ${t_pg_backup_dir?}"
            t_console_tablespace_name="pe-console"
            t_get_console_db_name_cmd="\
SELECT d.datname \
FROM pg_catalog.pg_database d \
JOIN pg_catalog.pg_tablespace t on d.dattablespace = t.oid \
WHERE t.spcname = '${t_console_tablespace_name?}';"
            q_puppet_enterpriseconsole_database_name=$(su - pe-postgres -c "$(postgres_bin_dir)/psql -c \"${t_get_console_db_name_cmd?}\" -t -A" -s /bin/bash)

            # In a split install, there will still be connections to the
            # console db in postgres from the console node.  So we are preventing
            # new connections from being made, and closing existing connections, before
            # backing up and dropping the console database.
            revoke_postgres_connection_permissions "${q_puppet_enterpriseconsole_database_name?}"
            stop_postgres_connections "${q_puppet_enterpriseconsole_database_name?}"
            # Only drop the old data if the backup succeeded.
            if backup_postgres_data "${t_pg_backup_dir?}" "${q_puppet_enterpriseconsole_database_name?}"; then
                drop_postgres_data "${t_console_tablespace_name?}" "${q_puppet_enterpriseconsole_database_name?}"
            fi
        fi
        if is_postgres; then
            puppet_resource "service pe-postgresql ensure=stopped"
        fi
    fi

    #---[ Migrate default PuppetDB var dir ]--------------------------------

    # When upgrading a pre-4.0.0 PuppetDB node, move its vardir from the old
    # /opt/puppet layout into SERVER_DATA_DIR — but only when the vardir is
    # still at the stock location; customized locations are left untouched.
    if is_upgrade && is_puppetdb; then
        if [ "$(echo_vercmp 4.0.0 "$CURRENT_PE_VERSION")" = "1" ]; then
            t_puppetdb_existing_confdir="/etc/puppetlabs/puppetdb/conf.d"
            t_puppetdb_existing_config="${t_puppetdb_existing_confdir}/config.ini"
            t_puppetdb_existing_vardir_setting="$(get_ini_field ${t_puppetdb_existing_config} vardir)"
            t_puppetdb_existing_vardir="/opt/puppet/share/puppetdb"

            # POSIX '=' instead of the bashism '==' for consistency with the
            # rest of this installer.
            if [ "${t_puppetdb_existing_vardir_setting}" = "${t_puppetdb_existing_vardir}" ]; then
                display_comment "Migrating ${t_puppetdb_existing_vardir} to ${SERVER_DATA_DIR}..."
                run "mkdir -p ${SERVER_DATA_DIR}"
                run "mv ${t_puppetdb_existing_vardir} ${SERVER_DATA_DIR}/"
            else
                display_error "Your PuppetDB vardir is set to an unsupported location (${t_puppetdb_existing_vardir_setting}). It will not be migrated."
            fi
        fi
    fi

    #---[ Enqueue our packages ]--------------------------------------------

    # NONPORTABLE
    if [ "${VENDOR_PACKAGE_OFFLINE?}" = "true" ]; then
        # If we've gotten this far, all of the vendor packages are installed,
        # so we need to unset the variable to allow the installation of all
        # rpms regardless of presence.
        unset PACKAGES_REQUIRED
    fi

    # Every node, regardless of role, gets the agent.
    enqueue_package 'puppet-agent'

    # Release info is necessary on all PE infrastructure nodes.
    if [ y = "${q_puppetmaster_install?}" -o y = "${q_puppetdb_install?}" -o y = "${q_puppet_enterpriseconsole_install?}" ]; then
        enqueue_package 'pe-puppet-enterprise-release'
    fi

    # Packages for the puppet master role.
    if [ y = "${q_puppetmaster_install?}" ]; then
        if is_upgrade && [ 'y' != "${q_all_in_one_install?}" -a "$(echo_vercmp 3.7.0 $CURRENT_PE_VERSION)" = "1" ] ; then
            # Upgrade these packages so we don't have to worry about
            # metapackages from earlier releases like 3.0.1
            # Also upgrade these before the master manifest is applied so
            # the version-check in the puppetserver package doesn't conflict
            enqueue_package 'pe-httpd'
        fi
        enqueue_package 'pe-java'
        enqueue_package 'pe-puppetserver'
        enqueue_package 'pe-license'
        enqueue_package 'pe-puppet-license-cli'
        enqueue_package 'pe-puppetdb-termini'
        enqueue_package 'pe-console-services-termini'
        enqueue_package 'pe-r10k'

        enqueue_package 'pe-postgresql'
        enqueue_package 'pe-orchestration-services'
        enqueue_package 'pe-client-tools'
    fi

    # Packages for the console role.
    if [ y = "${q_puppet_enterpriseconsole_install?}" ]; then
        enqueue_package 'pe-nginx'

        # PostgreSQL Packages for PE Console
        enqueue_package 'pe-postgresql'

        # Console Packages
        # pe-console-services only depends on pe-java, not a specific version
        # so if we are upgrading, we need to specifically include pe-java to
        # get the latest version of pe-java
        enqueue_package 'pe-java'
        enqueue_package 'pe-console-services'

        #FIXME Replace this with explicit use of pe-psql
        t_path_to_psql="${SERVER_BIN_DIR?}/psql"
    fi


    # This entire if block can be removed once we are managing postgresql with
    # the new puppet_enterprise module
    if [ y = "${q_puppetdb_install?}" ]; then
      # we are installing /opt/puppetlabs/server/bin/psql at this point
      # override any previously found version
      t_path_to_psql="${SERVER_BIN_DIR?}/psql"
      if [ y = "${q_database_install?}" ]; then
        enqueue_package 'pe-postgresql-server'
        enqueue_package 'pe-postgresql-contrib'
      fi

      enqueue_package 'pe-postgresql'
      enqueue_package 'pe-java'
      enqueue_package 'pe-puppetdb'
    fi

    #---[ Write out PE version ]-------------------------------------------

    # We only need to write pe_build on the server node. AIO agents all have
    # the same version, PE or not.
    if ! is_noop && is_pe_service_install; then
        run_suppress_stdout "mkdir -p /opt/puppetlabs && chown ${PLATFORM_ROOT_USER}:${PLATFORM_ROOT_GROUP} /opt/puppetlabs && chmod 755 /opt/puppetlabs"
        run_suppress_stdout "mkdir -p ${SERVER_DIR?} && cp ${INSTALLER_DIR}/VERSION ${SERVER_DIR?}/pe_build && chown ${PLATFORM_ROOT_USER}:${PLATFORM_ROOT_GROUP} ${SERVER_DIR?}/pe_build && chmod 644 ${SERVER_DIR?}/pe_build"
    fi

    #---[ Install support/uninstaller scripts ]------------------------------------------------

    # Copy the support and uninstaller scripts (plus the shared utilities
    # library they source) onto the node, root-owned and world-executable.
    if is_pe_service_install; then
        run_suppress_stdout "mkdir -p /opt/puppetlabs/bin && chown ${PLATFORM_ROOT_USER}:${PLATFORM_ROOT_GROUP} /opt/puppetlabs/bin && chmod 755 /opt/puppetlabs/bin"
        run_suppress_stdout "mkdir -p ${SERVER_SHARE_DIR?}/installer && chown ${PLATFORM_ROOT_USER}:${PLATFORM_ROOT_GROUP} ${SERVER_SHARE_DIR?} ${SERVER_SHARE_DIR?}/installer && chmod 755 ${SERVER_SHARE_DIR?} ${SERVER_SHARE_DIR?}/installer"
        run_suppress_stdout "cp ${INSTALLER_DIR}/puppet-enterprise-support ${INSTALLER_DIR}/puppet-enterprise-uninstaller /opt/puppetlabs/bin"
        run_suppress_stdout "cp ${INSTALLER_DIR}/utilities ${SERVER_SHARE_DIR?}/installer/utilities"
        run_suppress_stdout "chown ${PLATFORM_ROOT_USER}:${PLATFORM_ROOT_GROUP} /opt/puppetlabs/bin/puppet-enterprise-support /opt/puppetlabs/bin/puppet-enterprise-uninstaller ${SERVER_SHARE_DIR?}/installer/utilities"
        run_suppress_stdout "chmod 755 /opt/puppetlabs/bin/puppet-enterprise-support /opt/puppetlabs/bin/puppet-enterprise-uninstaller ${SERVER_SHARE_DIR?}/installer/utilities"
    fi

    #---[ Install packages ]------------------------------------------------

    display_step 'INSTALL PACKAGES'

    # Optionally refresh the rpm database first, when the answers request it.
    if [ 'y' = "${q_run_updtvpkg}" ] ; then
        display_comment 'Running /usr/sbin/updtvpkg to update rpm database...'
        run_suppress_output '/usr/sbin/updtvpkg'
    fi

    #---[ Migrate environments ]--------------------------------------------

    if is_upgrade; then
        if [ y = "${q_puppetmaster_install?}" ] && [ "$(echo_vercmp 4.0.0 "$CURRENT_PE_VERSION")" = "1" ]; then
            backup_puppet_conf
            # The environments.rake script MUST run from Puppet < 4, because Puppet 4 will
            # raise an error if it encounters legacy environment settings. This means that
            # we need to run this script PRIOR to upgrading packages.
            display_comment 'Perform directory environment migration (if required)'
            run "/opt/puppet/bin/rake -s -f '${INSTALLER_DIR}/environments.rake' environments:upgrade"
        fi
    fi

    # Backup Hiera.yaml before upgrading packages
    if is_upgrade && [ "$(echo_vercmp 4.0.0 $CURRENT_PE_VERSION)" = "1" ]; then
        backup_hiera_yaml
    fi

    # Move proxy.conf if we had mistakenly configured a vhost for the master
    # servername (PE-11501)
    if [ "true" = "${t_remove_invalid_console_proxy_vhost:-false}" ]; then
        # Use %S (seconds) rather than %s (epoch seconds): the original
        # format '%Y%m%d%H%M%s' appended a full epoch timestamp after the
        # minutes, unlike every other '%Y%m%d%H%M%S' backup suffix in this
        # installer.
        t_timestamp=$(date +'%Y%m%d%H%M%S')
        run "mv ${t_console_httpd_port_file?} ${t_console_httpd_port_file?}-${t_timestamp?}.bak"
    fi

    # Install everything enqueued above in one pass.
    install_queued_packages

    if [ y = "${q_database_install?}" ]; then
        # Make sure the en_US locale with the UTF8 charset is available:
        verify_en_us_utf8
    fi

    # On early versions of 3.7 for SLES 10 we installed pe-agent and rubygem-net-ssh, but they are no longer
    # needed, so we remove them here, if installed, to clean up.
    if [[ $PLATFORM_TAG =~ ${SLES_10_REGEX?} ]]; then
        if is_upgrade; then
            if is_package_installed 'pe-agent'; then
                run_suppress_output "rpm -e --allmatches pe-agent"
            fi
            if is_package_installed 'pe-rubygem-net-ssh'; then
                run_suppress_output "rpm -e --allmatches pe-rubygem-net-ssh"
            fi
        fi
    fi

    #---[ Check external postgres ]-----------------------------------------

    # If we couldn't verify postgres earlier (because it wasn't installed),
    # verify it now. The default is a plain n: the previous "${...:-'n'}"
    # form substituted the literal string 'n' with apostrophes included.
    if [ y = "${need_to_verify_db:-n}" ]; then
        verify_db
    fi

    #---[ Generate "puppet.conf" ]------------------------------------------

    # Fresh installs render puppet.conf from the erb template; upgrades from
    # pre-4.0.0 run the code migration path instead.
    if is_upgrade; then
        if [ "$(echo_vercmp 4.0.0 "$CURRENT_PE_VERSION")" = "1" ]; then
            perform_code_migration_and_update_puppet_conf
        fi
    else
        run "${PUPPET_BIN_DIR?}/erb -T - '${INSTALLER_DIR}/erb/puppet.conf.erb' > '/etc/puppetlabs/puppet/puppet.conf'"
    fi

    #---[ Setup packages ]--------------------------------------------------

    # All of these roles need local copies of the modules
    if is_pe_service_install; then
        install_puppet_modules
        # We just installed a bunch of modules, or upgraded them, which means
        # we may have swapped out a bunch of parser functions that had
        # already been loaded by puppet (I'm looking at you, pe_accounts and
        # create_resource). Removing the contents of the libdir will get rid
        # of all the crufty functions, and pluginsync will later repopulate it
        # with new versions of the modules.
        if is_upgrade; then
            run_suppress_stdout "rm -rf $(puppet_config_print libdir agent)"
        fi
    fi

    if [ y = "${q_puppetmaster_install?}" ]; then

        # On split (non all-in-one) installs, whitelist the console and
        # puppetdb hostnames for certificate autosigning.
        if [ n = "${q_all_in_one_install}" ]; then
            # Uses
            #  q_puppetmaster_enterpriseconsole_hostname
            #  q_puppetdb_hostname
            run "${PUPPET_BIN_DIR?}/erb -T - '${INSTALLER_DIR?}/erb/autosign.conf.erb' >> '/etc/puppetlabs/puppet/autosign.conf'"
        fi

        # Generate the master's SSL server certificate
        if ! is_noop && [ ! -e "$(puppet_config_print hostcert master)" ]; then
            run_suppress_stdout "${PUPPET_BIN_DIR?}/puppet cert --generate ${q_puppetmaster_certname?} --ca_name 'Puppet CA generated on ${q_puppetca_hostname?} at $(date '+%Y-%m-%d %H:%M:%S %z')' --dns_alt_names '${q_puppetmaster_dnsaltnames?}' --verbose --color=false || true"
        fi

        generate_certs "pe-internal-orchestrator"

        # Mcollective utilizes a shared public/private key between all users
        # as well as a private/public key pair for each mco client.
        # Every MCO server then needs a copy of that MCO client's public key.
        # There currently exists no ideal solution to this problem short of writing a new
        # mco security provider. For now we are forced to generate the public/private keypairs on
        # the master during initial install, and then send them as files in the catalog to whichever nodes get
        # classified with the correct mcollective classes.
        t_mco_shared_keypair_name="pe-internal-mcollective-servers"
        t_mco_peadmin_key_name="pe-internal-peadmin-mcollective-client"
        t_mco_console_key_name="pe-internal-puppet-console-mcollective-client"

        for certname in ${t_mco_shared_keypair_name?} ${t_mco_peadmin_key_name?} ${t_mco_console_key_name?}; do
          generate_certs $certname
        done

        # The shared mcollective credentials file is readable only by the
        # puppet user.
        display "Generating mcollective password..."
        gen_password "/etc/puppetlabs/mcollective/credentials"
        run_suppress_stdout "chown ${PLATFORM_PUPPET_USER}:${PLATFORM_PUPPET_GROUP} /etc/puppetlabs/mcollective/credentials"
        run_suppress_stdout "chmod 600 /etc/puppetlabs/mcollective/credentials"
    fi

    # Only generate agent credentials if no non-empty private key exists yet.
    if [ ! -s "/etc/puppetlabs/puppet/ssl/private_keys/${q_puppetagent_certname?}.pem" ]; then
        if [ "${q_puppetmaster_install?}" = y ]; then
            run_suppress_stdout "${PUPPET_BIN_DIR?}/puppet cert generate ${q_puppetagent_certname?} --color=false" || :
        else
            # Generate the agent credentials by attempting to contact the master
            run_suppress_stdout "${PUPPET_BIN_DIR?}/puppet certificate find ${q_puppetagent_certname?} --ca-location remote --ca_server ${q_puppetca_hostname?}" || :
        fi
    fi
    # Generate the certs for the console
    if [ y = "${q_puppet_enterpriseconsole_install?}" ]; then
        for certname in "pe-internal-dashboard" "pe-internal-classifier"; do
            generate_certs $certname
        done
    fi

    if [ y = "${q_puppetmaster_install?}" ]; then
        display_comment 'Setting up puppet master...'

        # Fresh installs get a site.pp rendered into the production
        # environment's manifests directory.
        if ! is_noop && ! is_upgrade; then
            t_environmentpath=$(${PUPPET_BIN_DIR?}/puppet config print environmentpath)
            run "${PUPPET_BIN_DIR?}/erb -T - '${INSTALLER_DIR}/erb/site.pp.erb' > '${t_environmentpath}/production/manifests/site.pp'"
        fi

        # Copy modules into place to be served via puppet's fileserver to
        # secondary masters
        create_module_mount

        # Upgrades from PE < 3.7 carry obsolete Apache/passenger httpd
        # configuration that must be cleaned out.
        if is_upgrade && [ "$(echo_vercmp 3.7.0 $CURRENT_PE_VERSION)" = "1" ] ; then

            display_comment 'Updating puppet.conf SSL settings'
            puppet_resource "pe_ini_setting /etc/puppetlabs/puppet/puppet.conf path=/etc/puppetlabs/puppet/puppet.conf section=master setting=ssl_client_header ensure=absent"
            puppet_resource "pe_ini_setting /etc/puppetlabs/puppet/puppet.conf path=/etc/puppetlabs/puppet/puppet.conf section=master setting=ssl_client_verify_header ensure=absent"

            # TODO: These files should be backed up
            display_comment 'Cleaning up old httpd configuration'
            t_httpd_confdir="/etc/puppetlabs/httpd/conf.d"
            run_suppress_stdout "rm -f ${t_httpd_confdir}/puppetmaster.conf"
            run_suppress_stdout "rm -f ${t_httpd_confdir}/headers.conf"
            run_suppress_stdout "rm -rf /var/opt/lib/pe-puppetmaster"

            if [ 'y' != "${q_all_in_one_install?}" ] ; then
                run_suppress_stdout "rm -f ${t_httpd_confdir}/passenger-extra.conf"
            else
                bounce_service "pe-httpd"
            fi
        fi

        create_package_repo

        # Uses
        #  q_puppetmaster_certname
        #  q_puppetmaster_dnsaltnames
        #  q_puppetmaster_enterpriseconsole_hostname
        #  q_puppetmaster_enterpriseconsole_certname
        #  q_puppet_enterpriseconsole_install
        #  q_puppetdb_hostname
        #  q_puppetmaster_external_node_terminus
        #  t_puppetserver_java_args
        apply_template_manifest "master.pp.erb"

        #Setup a repo for the simplified agent and future masters / amq nodes to use
        setup_pe_repo

        remove_package_repo

        if is_upgrade; then
            # These files are not used by PE anymore, they can be safely removed
            for file in send_cert_request.rb receive_signed_cert.rb; do
                if [ -f "/opt/puppet/bin/${file}" ]; then
                    run_suppress_stdout "rm -f /opt/puppet/bin/${file}"
                fi
            done

            display_comment 'Updating puppet.conf report processors'
            puppet_resource "pe_ini_subsetting /etc/puppetlabs/puppet/puppet.conf path=/etc/puppetlabs/puppet/puppet.conf section=master setting=reports subsetting=https subsetting_separator=, ensure=absent"
            puppet_resource "pe_ini_subsetting /etc/puppetlabs/puppet/puppet.conf path=/etc/puppetlabs/puppet/puppet.conf section=master setting=reports subsetting=http subsetting_separator=, ensure=absent"
            puppet_resource "pe_ini_setting /etc/puppetlabs/puppet/puppet.conf path=/etc/puppetlabs/puppet/puppet.conf section=master setting=reporturl ensure=absent"

            # Remove the MRI master-specific packages if this is a master-only
            # install from PE < 3.7. We don't remove them on an all-in-one
            # because they're still used by the console.
            if [ 'y' != "${q_all_in_one_install?}" -a "$(echo_vercmp 3.7.0 $CURRENT_PE_VERSION)" = "1" ] ; then
                puppet_package "pe-httpd" "purged"
                case "${PLATFORM_NAME?}" in
                    amazon | centos | rhel | sles)
                        puppet_package "pe-httpd-devel" "purged"
                        puppet_package "pe-mod_ssl" "purged"
                        puppet_package "pe-libapr" "purged"
                        puppet_package "pe-libaprutil" "purged"
                        puppet_package "pe-libldap" "purged"
                        ;;
                    ubuntu | debian)
                        puppet_package "pe-httpd-utils" "purged"
                        puppet_package "pe-httpd-common" "purged"
                        puppet_package "pe-httpd-bin" "purged"
                        ;;
                esac
                puppet_package "pe-passenger" "purged"
                puppet_package "pe-rubygem-rack" "purged"
            fi
        fi
        # Fail the install outright if the master never comes up.
        t_wait_for_puppetmaster_url="https://${q_puppetmaster_certname?}:8140"
        if ! is_noop && ! wait_for_service $t_wait_for_puppetmaster_url 120 ; then
            display_failure "The puppet master service failed to start within 120 seconds; unable to proceed"
        fi

        if ! is_noop && ! is_upgrade; then
            copy_codedir_to_staging_dir
            perform_file_sync_commit
        fi
    fi

    # Sanity-check that the agent can compute its own certname; failure here
    # indicates the host cannot resolve its own FQDN.
    display_comment 'Checking the agent certificate name detection...'
    if run_suppress_stdout "${PUPPET_BIN_DIR?}/puppet agent --configprint certname --color=false"; then
        # Kick off a first agent run on non-master infrastructure nodes, or
        # when the answers request a first run (the default).
        if [ ! y = "${q_puppetmaster_install?}" ] && [ y = "${q_puppet_enterpriseconsole_install?}" -o y = "${q_puppetdb_install?}" -o y = "${q_puppet_agent_first_run:-"y"}" ] ; then
          display_comment 'Setting up puppet agent...'
          run_suppress_stdout "${PUPPET_BIN_DIR?}/puppet agent --test --color=false || true"
        fi
    else
        display_failure 'The agent certificate name was incorrect; please check that your system can correctly resolve its own FQDN in DNS.'
    fi

    if [ y = "${q_database_install?}" ]; then
        display_comment 'Setting up the database...'

        if [ "${t_backup_restore_pg?}" = "true" ]; then
            display_comment 'This may take a long time because of the Postgres migration, depending on the size of your database and speed of the system (current timeouts allow for up to two hours)...'
        fi

        # Verify that we can set the password here.
        configure_postgresql_server
        # After a 9.2 -> 9.4 migration, relocate any upgraded tablespaces
        # from the old /opt/puppet tree into the new data directory and
        # repoint the pg_tblspc symlinks at them.
        if [ "${t_backup_restore_pg?}" = "true" ]; then
            t_postgres_dir="/opt/puppetlabs/server/data/postgresql"
            t_postgres_tblspc_dir="${t_postgres_dir?}/9.4/data/pg_tblspc"
            if [ -d "${t_postgres_tblspc_dir?}" ]; then
                puppet_resource "service pe-postgresql ensure=stopped"
                # If the upgrade didn't succeed this file won't contain any links
                # so it will not do anything
                # NOTE(review): iterating over $(ls ...) breaks on names with
                # whitespace; tablespace link names here appear to be OIDs,
                # but a glob loop would be safer.
                for link in $(ls "${t_postgres_tblspc_dir?}"); do
                    # read the link to the old tablespace in `/opt/puppet/var/lib/pgsql/9.2/*`
                    t_old_postgres_tablespace_dir=$(readlink "${t_postgres_tblspc_dir?}/${link?}")
                    t_postgres_tablespace_to_move=$(ls "${t_old_postgres_tablespace_dir?}" | grep '9.4')
                    t_new_postgres_tablespace_dir="${t_postgres_dir?}/$(basename ${t_old_postgres_tablespace_dir?})"
                    # Only move tablespaces that were not just created by configure_postgresql_server.
                    if [ ! -d "${t_new_postgres_tablespace_dir?}" ]; then
                        run "mkdir ${t_new_postgres_tablespace_dir?}"
                        # move the upgraded tablespace contents to the new location
                        run "mv \"${t_old_postgres_tablespace_dir?}/${t_postgres_tablespace_to_move?}\" \
    \"${t_new_postgres_tablespace_dir?}\""
                        run "chown -R pe-postgres:pe-postgres ${t_new_postgres_tablespace_dir?}"
                        # update the link so that PostgreSQL knows about the new location
                        run "ln -sfT ${t_new_postgres_tablespace_dir?} ${t_postgres_tblspc_dir?}/${link?}"
                        run "chown pe-postgres:pe-postgres ${t_postgres_tblspc_dir?}/${link?}"
                    fi
                done
                puppet_resource "service pe-postgresql ensure=running"
            fi
        fi

        # Now we've created databases and users, so if we roll back, we should run it with the -d flag
        ROLL_BACK_DBS='y'
    fi

    # If this is a fresh db install we should be setting the password to the desired password. If we are just setting the db up we
    # have already verified the credentials so we should be able to just set up the dbs.
    if [ y = "${q_puppet_enterpriseconsole_install?}" ]; then
        display_comment 'Setting up the console...'

        # If q_public_hostname is set by the answer file, use that. Otherwise,
        # it defaults to the detected hostname, unless we're on EC2 in which
        # case we use the public-hostname from the EC2 data.
        if [ -z "${q_public_hostname}" ]; then
            q_public_hostname="${PLATFORM_HOSTNAME?}"

            export q_public_hostname
        fi

        # Uses
        #   @certname         = ENV['q_puppetagent_certname']
        #   @database_host    = ENV['q_database_host']
        #   @database_port    = ENV['q_database_port']
        #   @master           = ENV['q_puppetagent_server']
        #   @master_certname  = ENV['q_puppetmaster_certname']
        #   @ca               = ENV['q_puppetca_hostname']
        #   @puppetdb         = ENV['q_puppetdb_hostname']
        #   @puppetdb_port    = ENV['q_puppetdb_port']
        #   @classifier_database_name     = ENV['q_classifier_database_name']
        #   @classifier_database_user     = ENV['q_classifier_database_user']
        #   @classifier_database_password = ENV['q_classifier_database_password'].gsub("'","")
        #   @rbac_database_name     = ENV['q_rbac_database_name']
        #   @rbac_database_user     = ENV['q_rbac_database_user']
        #   @rbac_database_password = ENV['q_rbac_database_password'].gsub("'","")
        #   @activity_database_name     = ENV['q_activity_database_name']
        #   @activity_database_user     = ENV['q_activity_database_user']
        #   @activity_database_password = ENV['q_activity_database_password'].gsub("'","")
        #   @manage_config    = ENV['IS_UPGRADE'] != 'y'
        #   t_console_services_java_args
        apply_template_manifest "console.pp.erb"
    fi

    if [ y = "${q_puppet_enterpriseconsole_install?}" ]; then
        display_comment "Starting http server for puppet console."

        # Start the http service if we're an enterprise console with our certs automatically signed.
        bounce_service 'pe-nginx'
        enable_service 'pe-nginx'
    fi

    # On the PuppetDB node, this will configure PuppetDB itself.
    if [ y = "${q_puppetdb_install?}" ]; then
        configure_puppetdb
    fi

    if [ y = "${q_puppet_enterpriseconsole_install?}" ]; then
        # Postgres could be restarted again after configuring PuppetDB, so we
        # wait for it to come back up before proceeding to run rake tasks which
        # modify the DB. If we're not noop.
        if ! is_noop && [ y = "${q_database_install?}" ] && ! wait_for_db 20; then
            display_failure "The PostgreSQL server failed to start; unable to proceed"
        fi

        if ! is_noop; then
            # We are assuming that the node classifier will be on the same node as
            # the console for 3.7
            t_wait_for_nc_url="https://${q_puppetagent_certname}:4433/classifier-api"
            display 'Waiting for Node Classifier to start...'
            if wait_for_nc $t_wait_for_nc_url 120; then
                display 'Applying configurations...'

                # Make update-classes call to ensure node classifier
                # has all of the classes from the puppet master before
                # classifying PE.
                export t_platform_puppet_class=$(platform_puppet_class) # set class from utilities for classification
                if run "${PUPPET_BIN_DIR?}/ruby ${INSTALLER_DIR}/pe-classification.rb"; then
                    display 'All default PE classifications were successfully added!'
                else
                    # Classification failure is a warning, not fatal; remember
                    # it so the summary can report it.
                    display "!!! WARNING: The node classifier was unable to add default PE classifications; please check the logs in '/var/log/puppetlabs/console-services/' for more information."
                    t_classification_failure='y'
                fi

            else
                display "!!! WARNING: The node classifier could not be reached; please check the logs in '/var/log/puppetlabs/console-services/' for more information."
                t_classification_failure='y'
            fi

            # Set the superuser password in RBAC to be the password supplied for dashboard auth.
            # This script depends on console-services being up and running. The wait_for_nc
            # function above is one way to ensure that RBAC will be available, so this script
            # should be after the if block above.
            if ! is_upgrade; then
                if run "${PUPPET_BIN_DIR?}/ruby ${INSTALLER_DIR}/update-superuser-password.rb"; then
                    echo "Updated Puppet Enterprise Console superuser 'admin' password"
                else
                    t_update_superuser_pass_failure='y'
                fi
            fi

        fi
    fi

    if ! is_noop; then
        # All-in-one: wait for PuppetDB, then do a first agent run. Plain
        # agent installs just get their command links ensured.
        if [ y = "${q_all_in_one_install?}" ]; then
            t_wait_for_puppetdb_url="https://${q_puppetdb_hostname?}:${q_puppetdb_port?}"
            if wait_for_service $t_wait_for_puppetdb_url 120; then
                run_suppress_stdout "${PUPPET_BIN_DIR?}/puppet agent --test --color=false || true"
            fi
        elif ! is_pe_service_install && [ y = "${q_puppetagent_install?}" ]; then
            ensure_agent_links
        fi
    fi

    #---[ Finish installation ]---------------------------------------------
    cron_enable
    prime_mcollective_facts
    # puppet.conf is owned by the puppet user on masters, root elsewhere;
    # either way it is private (0600).
    if is_puppetmaster; then
        run "chown ${PLATFORM_PUPPET_USER}:${PLATFORM_PUPPET_GROUP} /etc/puppetlabs/puppet/puppet.conf"
    else
        run "chown ${PLATFORM_ROOT_USER}:${PLATFORM_ROOT_GROUP} /etc/puppetlabs/puppet/puppet.conf"
    fi
    run "chmod 600 /etc/puppetlabs/puppet/puppet.conf"

    # Until we've finished upgrading all the PE nodes, agent runs will fail to
    # retrieve a fresh catalog. We need to delete the cached catalog in order
    # to avoid it reverting anything we've changed.
    t_puppet_client_datadir=$(puppet_config_print client_datadir)
    run_suppress_stdout "rm -f ${t_puppet_client_datadir?}/catalog/${q_puppetagent_certname?}.json"

    # If this is a master or console or puppetdb install, ignore the q_puppet_agent_first_run value
    if is_pe_service_install || [ y = "${q_puppet_agent_first_run:-"y"}" ] ; then
        # NONPORTABLE
        # Start and enable the puppet agent service using each platform
        # family's mechanism.
        case "${PLATFORM_NAME?}" in
            amazon | centos | rhel | sles | aix | eos)
                bounce_service 'puppet'
                enable_service 'puppet'
                ;;
            ubuntu | debian | cumulus)
                # Debian-family init reads /etc/default/puppet to decide
                # whether to start the daemon.
                run "printf \"START=true\nDAEMON_OPTS=''\n\" > /etc/default/puppet"
                bounce_service 'puppet'
                enable_service 'puppet'
                ;;
            solaris)
                if [ "${PLATFORM_RELEASE?}" = "10" ] ; then
                  if [ ! -d /etc/puppetlabs ]; then
                      run_suppress_stdout "mkdir /etc/puppetlabs"
                      run_suppress_stdout "chown ${PLATFORM_ROOT_USER}:${PLATFORM_ROOT_GROUP} /etc/puppetlabs"
                      run_suppress_stdout "chmod 755 /etc/puppetlabs"
                  fi
                  run_suppress_stdout "/usr/sbin/svccfg import /var/svc/manifest/network/puppet.xml"
                  run_suppress_stdout "/usr/sbin/svcadm enable svc:/network/puppet:default"
                else
                  # We deliver the manifest and puppet.conf as part of the
                  # puppet-agent package on Solaris 11, so no need to create their
                  # directories.  Service manifest import happens as part of
                  # package installation, so we only need to enable it.
                  run_suppress_stdout "/usr/sbin/svcadm enable svc:/network/puppet:default"
                fi
                ;;
            *)
                display_failure "Do not know how to start puppet agent service on this platform"
                ;;
        esac
    fi

    display_step 'DONE'
    # Closing banner differs between upgrades and fresh installs.
    if is_upgrade; then
        t_thanks_msg='Thanks for upgrading Puppet Enterprise!'
    else
        t_thanks_msg='Thanks for installing Puppet Enterprise!'
    fi
    display "${t_thanks_msg}"

    display_newline

    # On split (non all-in-one) installs, tell the user which role to set
    # up next, wording the message for an upgrade vs. a fresh install.
    if [ y != "${q_all_in_one_install}" ] ; then
        if is_upgrade ; then
            t_upgrade_or_install="upgrade"
        else
            t_upgrade_or_install="installation"
        fi

        t_next_step_msg=""
        if [ y = "${q_puppetdb_install?}" ] ; then
            t_next_step_msg="You have completed the ${t_upgrade_or_install?} of PuppetDB.  You should now complete your ${t_upgrade_or_install?} by installing or upgrading the Puppet Enterprise Console. See the documentation for more assistance: http://docs.puppetlabs.com/pe/latest"
        elif [ y = "${q_puppetmaster_install?}" ] ; then
            if is_upgrade; then
                t_next_step_msg="You have completed the upgrade of the puppet master, you should now proceed to upgrade PuppetDB. See the documentation for further assistance: http://docs.puppetlabs.com/pe/latest The PuppetDB node MUST be upgraded prior to installing the Console."
            else
                t_next_step_msg="You have completed the installation of the puppet master, you should now proceed to install PuppetDB on a unique node. See the documentation for further assistance: http://docs.puppetlabs.com/pe/latest The PuppetDB node MUST be installed prior to installing the Console."
            fi
        fi

        if [ -n "${t_next_step_msg}" ] ; then
            echo "${t_next_step_msg}" | display_wrapped_text
            display_newline
            display_newline
        fi
    fi

    # Getting-started pointers, shown on every install and upgrade.
    echo "To learn more and get started using Puppet Enterprise, refer to the Puppet Enterprise Quick Start Guide (http://docs.puppetlabs.com/pe/latest/quick_start.html) and the Puppet Enterprise Deployment Guide (http://docs.puppetlabs.com/guides/deployment_guide/index.html)." | display_wrapped_text
    display_newline
    display_newline

    # On an enterprise console install, show the URL where the console can
    # be reached, omitting the port when it is the default HTTPS port 443.
    if [ y = "${q_puppet_enterpriseconsole_install}" ]; then
        if [ '443' = "${q_puppet_enterpriseconsole_httpd_port?}" ]; then
            CONSOLE_URL="https://${q_public_hostname?}"
        else
            CONSOLE_URL="https://${q_public_hostname?}:${q_puppet_enterpriseconsole_httpd_port?}"
        fi
        display "   The console can be reached at the following URI:"
        display "    *  ${CONSOLE_URL?}"
        display_newline
    fi

    display_major_separator
    display_newline
    display_comment 'NOTES'
    display_newline
    # Fresh installs: remind where PE and its configuration live on disk.
    if ! is_upgrade; then
        echo 'Puppet Enterprise has been installed to "/opt/puppetlabs," and its configuration files are located in "/etc/puppetlabs".' | display_wrapped_text
        display_newline
        display_newline
    fi

    # Always record where this session's answers were written.
    echo "Answers from this session saved to '$(answer_file_to_save)'" | display_wrapped_text
    display_newline
    # When we created the databases ourselves, the generated credentials were
    # written to disk and are needed later to configure the console role.
    if [ "y" = "${q_database_install}" ] ; then
        echo "In addition, auto-generated database users and passwords have been saved to '/etc/puppetlabs/installer/database_info.*'" | display_wrapped_text
        display_newline
        echo "!!! WARNING: Do not discard these files! All auto-generated database users and passwords have been saved in them. You will need this information to configure the console role during installation." | display_wrapped_text
        display_newline
    fi
    display_newline

    # XXX Don't warn about ports for now. Too lazy to do anything about the console port.
    # Fresh installs still get the generic open-port warning.
    if ! is_upgrade; then
        warn_open_ports
        display_newline
    fi

    # Compile masters and ActiveMQ hubs/spokes in large environment
    # installations are upgraded separately; point upgraders at those docs.
    if is_upgrade; then
        echo "If you have installed compile masters and/or ActiveMQ hubs and spokes in a large environment installation, you will need to upgrade them separately per these docs: http://docs.puppetlabs.com/pe/${PE_LINK_VER?}/install_lei_upgrade.html" | display_wrapped_text
        display_newline
        display_newline
    fi

    # If the wait_for_nc function fails to get a class update, the installer won't be able to
    # classify PE. Make sure the user knows this.
    # Use POSIX '=' rather than the bash-only '==' inside [ ], matching the
    # comparison style used throughout the rest of this file.
    if [ "${t_classification_failure}" = 'y' ]; then
        echo "!!! WARNING: Installer failed to classify Puppet Enterprise. Puppet Enterprise will not be able to manage itself because of this. Check '/var/log/puppetlabs/console-services/' for more information." | display_wrapped_text
        display_newline
        display_newline
        # Bump the final exit code so callers can detect this partial failure.
        FINAL_EXIT_CODE=$((FINAL_EXIT_CODE + 3))
    fi

    # The admin password reset failed, leaving the default credentials in
    # place; warn loudly and stop the console service to limit exposure.
    # Use POSIX '=' rather than the bash-only '==' inside [ ], matching the
    # comparison style used throughout the rest of this file.
    if [ "${t_update_superuser_pass_failure}" = 'y' ]; then
        echo "!!! WARNING: Installer failed to update Puppet Enterprise Console superuser 'admin' password. This leaves your PE installation at risk. Check '/var/log/puppetlabs/console-services/' for more information. Log into the console (user: admin, password: admin) as soon as possible and change the admin users password through the console." | display_wrapped_text
        display_newline
        display_newline
        puppet_resource "service pe-console-services ensure=stopped"
    fi

    # Code migration failed; warn and bump the exit code so automation can
    # detect it. Fixes: POSIX '=' instead of bash-only '==' inside [ ], and
    # pipe the warning through display_wrapped_text like every other warning.
    if [ "${t_code_migration_failed}" = 'y' ]; then
        echo "!!! WARNING: The code migration step did not complete successfully. Please check your /etc/puppetlabs/puppet/puppet.conf settings, and refer to the documentation to ensure you have the correct settings." | display_wrapped_text
        display_newline
        display_newline
        FINAL_EXIT_CODE=$((FINAL_EXIT_CODE + 7))
    fi

    # Shown only when the retired pe-puppet-dashboard database was backed up
    # (rather than migrated) during the PostgreSQL upgrade earlier in the run.
    if [ "${t_backup_restore_pg?}" = "true" ]; then
        display "As part of the upgrade process, your Puppet data was migrated to the new database. \
The retired pe-puppet-dashboard database was not migrated but was backed up to '${t_pg_backup_dir?}' and can be removed. \
A full backup is also available at '/opt/puppet/var/lib/pgsql/9.2'."
        display_newline
        display_newline
    fi

    # Remind the user to carry the auth.conf migration answer forward when
    # they later upgrade PE on the console node.
    if [ "${t_display_legacy_auth_conf_warning}" = 'y' ]; then
        display "To ensure that the existing auth.conf file is used instead of the new auth.conf file please add 'q_migrate_auth_conf_after_upgrade=${q_migrate_auth_conf_after_upgrade?}' to your answers file when upgrading PE on the console node."
        display_newline
        display_newline
    fi

    # Remind the user to keep the orchestrator database credentials handy
    # for the database node install/upgrade.
    # Use POSIX '=' rather than the bash-only '==' inside [ ], matching the
    # comparison style used throughout the rest of this file.
    if [ "${t_alert_to_move_orchestrator_answers}" = 'y' ]; then
        display "To ensure that the Puppet master can access the orchestrator database, please find the orchestrator database name, user name, and password in /etc/puppetlabs/installer/database_info*, and have it available when you install or upgrade your database node."
        display_newline
        display_newline
    fi


    display_minor_separator
    display_newline

    # Clean up the stuff we exported
    unset q_public_hostname

    quit $FINAL_EXIT_CODE
fi

#===[ End ]=============================================================

# vim: tabstop=4:softtabstop=4:shiftwidth=4:expandtab
