#!/sbin/sh

#pragma ident "@(#)reconf_ener	1.151   97/10/08 SMI"

#
# 	Copyright (C) 1994 Sun Microsystems, Inc.
#

#
# reconf_ener - Energizer cluster reconfiguration program.
#

#
# XXX - TODO
#
# More robust handling of temporary files
#
# Cleanup file names and location of temporary files
#

# setenv TRACE_PDB to enable debugging traces on either stdout
# or the log file (depending on who/what invokes reconf_ener)

# Restrict PATH to the standard system directories; the cluster bin
# directory is appended later by init().
export PATH
PATH=/usr/sbin:/usr/bin

# Make the SUNWcluster shared libraries resolvable for the tools we run.
LD_LIBRARY_PATH=/opt/SUNWcluster/lib
export LD_LIBRARY_PATH

# Message-id prefix used on log_info/log_error syslog entries below.
pre="SUNWcluster.reconf"

# Emit a "function entered" marker on fd 3 when TRACE_PDB is set in the
# environment.  $cmd is the sub-command being executed; the arguments
# name the routine being traced.
log_trace() {
	[ -n "$TRACE_PDB" ] || return 0
	echo "# + $cmd: $*" >&3
}

# Emit a "function leaving" marker on fd 3 when TRACE_PDB is set;
# companion to log_trace above.
log_trace_end() {
	[ -n "$TRACE_PDB" ] || return 0
	echo "# - $cmd: $*" >&3
}

# Initialize global paths, cdb-derived configuration, the volume
# manager flavor (vm), the dlm flag (udlm), and the flag-file names
# used to communicate state between invocations.  Requires
# ${clustname} to have been set by the caller.
init() {
	log_trace init

	mybin=/opt/SUNWcluster/bin
	myetc=/etc/opt/SUNWcluster
	myvar=/var/opt/SUNWcluster

	SSACLI=${mybin}/pdbssa
	PATH=${PATH}:${mybin}

        logfile=${myvar}/pdbadmin.log		# needs to be in sync with pdbadmin

        cdbfile=${myetc}/conf/${clustname}.cdb
	cdbfilter=${myetc}/conf/cdb.filter
        tmpdir=`${mybin}/cdbmatch env.tmpdir ${cdbfile}`

	# applications configured for this cluster (matched against the
	# CVM/VxVM/OPS ids from the environment below)
	pdbapps=`${mybin}/cdbmatch cluster.pdbapps ${cdbfile}`
	admindir=${myvar}/admindir

	# volume manager flavor: "cvm", "vxvm", or unset when neither is used
	vm=`${mybin}/appmatch ${pdbapps} ${CVM}`
	if [ "${vm}" = "1" ]; then
		vm=cvm
	else
		vm=`${mybin}/appmatch ${pdbapps} ${VxVM}`
		if [ "${vm}" = "1" ]; then
			vm=vxvm
		fi
	fi
	# 1 when the Oracle unix dlm (OPS) is configured
	udlm=`${mybin}/appmatch ${pdbapps} ${OPS}`

	if [ -z "$tmpdir" ]; then tmpdir=${myvar}; fi

	# state files maintained by ccmd for interconnect net selection
	ccm_selected_net_file=`${mybin}/cdbmatch ccm.script.net.file ${cdbfile}`
	ccm_top_net_file=`${mybin}/cdbmatch ccm.script.topnet.file ${cdbfile}`

	 DOINGSTOPFLAG=${tmpdir}/didstopprogs			# hooks for execution of user cluster applications
	RESERVEDCTLS=${tmpdir}/ssa_is_reserved                  # control disk reservation
	FORCESTARTFLAG=${tmpdir}/do_not_use_ssa_reservations	# ok to continue without majority quorum?
	 ISRUNNINGFLAG=${tmpdir}/cluster_is_running		# is the cluster already online?
	  RSUMDRUNNING=${tmpdir}/rsumd_is_running		# is rsumd running

	#
	# On a two-node cluster, the quorum disk protocol interacts with CMM to
	# identify and remove stale nodes from the cluster. This is necessary
	# because CMM doesn't use majority voting to determine the most recent
	# reconfiguration sequence number. We use a modified algorithm that
	# requires a node to keep in stable storage the last sequence number
	# seen at the beginning of cmmstep1. When a node starts up, CMM resets
	# its reconfiguration sequence number to this value.
	#

	  CMMSEQNUM=`${mybin}/cdbmatch cmm.sequencefile ${cdbfile}`

	if [ -z "$CMMSEQNUM"   ]; then   CMMSEQNUM=/var/opt/SUNWcluster/cmm/reconf-seqnum.${clustname}; fi

	if [ ! -d $tmpdir ]; then mkdir -p $tmpdir; fi

	log_trace_end init
}

# This function verifies that the required env variables are set by clustd.
# Otherwise, it will issue appropriate clustm requests to set them.
validate_env_vars()
{
	# current membership (node id list)
	if [ -n "${CURRNODES}" ]; then
		currnodes=${CURRNODES}
	else
		currnodes=`${mybin}/clustm getcurrmembers $clustname`
	fi

	# this node's id
	if [ -n "${LOCALNODEID}" ]; then
		localnodeid=${LOCALNODEID}
	else
		localnodeid=`${mybin}/clustm getlocalnodeid $clustname`
	fi

	# current reconfiguration sequence number
	if [ -n "${SEQNUM}" ]; then
		seqnum=${SEQNUM}
	else
		seqnum=`${mybin}/clustm getseqnum $clustname`
	fi

	# default to restarting ccmd unless told otherwise
	if [ -z "${RESTART_CCMD}" ]; then
		RESTART_CCMD="Y"
	fi

	if [ -z "${VM_IMPORT}" ]; then
		VM_IMPORT=0
	fi

	# all configured nodes (members or not)
	if [ -n "${ALLNODES}" ]; then
		allnodes=${ALLNODES}
	else
		allnodes=`${mybin}/clustm getallnodes ${clustname}`
	fi
}


############################################################################

# Print the public usage summary on stdout and exit 2.  Everything
# after the first "exit 2" is intentionally unreachable: it documents
# the non-public entry points driven by the cluster membership monitor.
_usage() {
	echo "Usage: ${prog} [-a] [-f] startnode cluster_name"
	echo "       ${prog} [-a]      stopnode  cluster_name"
	echo "       ${prog} reldisks cluster_name"
	echo ""
	echo "       The [startnode|stopnode] sub-commands are invoked"
	echo "       manually by the system administrator or automatically"
	echo "       from /etc/rc3.d scripts via the 'pdbadmin' script."
	echo ""
	echo "       The reldisks sub-command is invoked manually by the"
	echo "       system administrator, usually by the 'pdbadmin' script."
	exit 2

	# The following are not public entrypoints

	echo ""
	echo "       ${prog} [cmmstart|cmmstop|cmmabort]  cluster_name"
	echo "       ${prog} [cmmstep0-9|cmmreturn]       cluster_name"
	echo ""
	echo "       The [cmmstart|cmmstepN|cmmstop|cmmabort|cmmreturn]"
	echo "       commands are invoked by the cluster membership"
	echo "       monitor during cluster state transitions."
	exit 2
}

# lookup a value in the pdb configuration file
enmatch() {
	# On lookup failure, log to syslog and stderr.  The "return 1" runs
	# inside the "( ... )" subshell, so the caller observes the group's
	# exit status 1 rather than an early return from its own function.
	${mybin}/cdbmatch $* ${cdbfile} || \
		(log_error "$pre.4001" "cdbmatch $* ${cdbfile} failed" 1>&2; return 1)
}

# wrapper for some unix commands that reports errors to syslog
unix_cmd() {
	# Run entirely in a subshell so the set +e/-e toggling and the
	# exit below cannot disturb the caller's shell.  stdout of the
	# command is captured (and discarded); stderr goes to a scratch
	# file so it can be included in the syslog message on failure.
	(
	set +e
	errfile=${tmpdir}/err.$$
	out=`$* 2>${errfile}`
	rc=$?
	if [ ${rc} = 0 ]; then
		/bin/rm -f ${errfile}
		set -e
		exit 0
	fi
	log_error "$pre.4003" "'$*' failed: `cat ${errfile}`"
	/bin/rm -f ${errfile}
	set -e
	exit 1
	)
}

# network utilities
# Determine this node's id (myid) from the cdb file, its first ha
# (logical host) address, the subnet mask, and the currently selected
# interconnect net read back from ccmd's state file.
# selected_net: 0/1 = that net is in use, -1 = no net in use,
# -2 = unknown (state file missing or holds garbage).
set_network_vars() {
	log_trace set_network_vars
	myuname=`uname -n`
	if [ `enmatch cluster.node.0.hostname` = ${myuname} ]; then
		myid=0
	elif [ `enmatch cluster.node.1.hostname` = ${myuname} ]; then
		myid=1
	else
		log_info "$pre.4010" "${clustname} node ${myuname} is not defined as part of this cluster in the ${clustname}.cdb file"
		exit 1
	fi

	# first ha (logical host) IP address for this node
	str="enmatch cluster.node.${myid}.hahost.0"
	set -- `eval $str`
	hahost_ipaddrs0=$1

	netmask=`enmatch cluster.subnet_mask`

	# -1 means no net is in use; -2 means we don't know ...
	if [ -r ${ccm_selected_net_file} ]; then
		selected_net=`cat ${ccm_selected_net_file}`
		if [ "X${selected_net}" = "X" ]; then
			/bin/rm -f ${ccm_selected_net_file}
			selected_net="-2";
		elif [ ${selected_net} -ne -1 -a \
			${selected_net} -ne 0 -a ${selected_net} -ne 1 ]; then
			# out-of-range value: discard the stale state file
			/usr/bin/rm -f ${ccm_selected_net_file}
			selected_net="-2"
		fi
	else
		selected_net="-2"
	fi
	log_trace_end set_network_vars
}


# start networks
# Plumb and configure both private interconnect interfaces with their
# physical addresses, then map the ha (logical host) address onto the
# selected link.  Persists the selected net id for ccmd.
start_networks() {
	log_trace start_networks

	set_network_vars

	# physical interface names and addresses for nets 0 and 1
	str="enmatch cluster.node.${myid}.if.0"
	set -- `eval $str`
	phost_interface0=$1
	str="enmatch cluster.node.${myid}.phost.0"
	set -- `eval $str`
	phost_ipaddrs0=$1

	str="enmatch cluster.node.${myid}.if.1"
	set -- `eval $str`
	phost_interface1=$1
	str="enmatch cluster.node.${myid}.phost.1"
	set -- `eval $str`
	phost_ipaddrs1=$1

	# if no history was available, try net zero
	if [ ${selected_net} = -1 -o ${selected_net} = -2 ]; then
		selected_net=0;
	fi

	# ha (logical host) interface names for nets 0 and 1
	str="enmatch cluster.node.${myid}.haiface.0"
	set -- `eval $str`
	hahost_interface0=$1
	str="enmatch cluster.node.${myid}.haiface.1"
	set -- `eval $str`
	hahost_interface1=$1

	str="enmatch cluster.node.${myid}.haiface.${selected_net}"
	set -- `eval $str`
	hahost_interface=$1

	unix_cmd /sbin/ifconfig $phost_interface0 plumb
	unix_cmd /sbin/ifconfig $phost_interface1 plumb

	# start from a known-down state on every interconnect interface
	unix_cmd /sbin/ifconfig $hahost_interface0 0.0.0.0 down
	unix_cmd /sbin/ifconfig $hahost_interface1 0.0.0.0 down

	unix_cmd /sbin/ifconfig $phost_interface0 0.0.0.0 down
	unix_cmd /sbin/ifconfig $phost_interface1 0.0.0.0 down

	unix_cmd /sbin/ifconfig $phost_interface0 $phost_ipaddrs0  \
		netmask $netmask broadcast + -trailers private up
	unix_cmd /sbin/ifconfig $phost_interface1 $phost_ipaddrs1  \
		netmask $netmask broadcast + -trailers private up

	#
	# Map the ha network on the first physical link. ccm will remap
	# the ha network if the link is found faulty.
	#
	unix_cmd /sbin/ifconfig $hahost_interface $hahost_ipaddrs0  \
		netmask $netmask broadcast + -trailers private up
	sleep 1

	# persist the selected net id for the next reconfiguration
	echo ${selected_net} > ${ccm_selected_net_file}
	${mybin}/fsync ${ccm_selected_net_file}

	# When starting to join, no net is up
	/usr/bin/rm -f ${ccm_top_net_file}

	ifvar=phost_interface${selected_net}
	ifname=`eval echo \\$$ifvar`
	# log_info "$pre.1020" "${clustname} net ${selected_net} ($ifname) selected (default)"

	log_trace_end start_networks
}

# stop networks
# Bring both ha and physical interconnect interfaces down and unplumb
# the physical interfaces; the inverse of start_networks.
stop_networks() {
        log_trace stop_networks

        set_network_vars

        str="enmatch cluster.node.${myid}.if.0"
        set -- `eval $str`
        phost_interface0=$1

        str="enmatch cluster.node.${myid}.if.1"
        set -- `eval $str`
        phost_interface1=$1

	# if no history was available, try net zero
	if [ ${selected_net} = -1 -o ${selected_net} = -2 ]; then
		selected_net=0;
	fi

        str="enmatch cluster.node.${myid}.haiface.0"
        set -- `eval $str`
        hahost_interface0=$1

        str="enmatch cluster.node.${myid}.haiface.1"
        set -- `eval $str`
        hahost_interface1=$1

        unix_cmd /sbin/ifconfig $hahost_interface0 0.0.0.0 down
        unix_cmd /sbin/ifconfig $hahost_interface1 0.0.0.0 down

        unix_cmd /sbin/ifconfig $phost_interface0 0.0.0.0 down
        unix_cmd /sbin/ifconfig $phost_interface1 0.0.0.0 down

        unix_cmd /sbin/ifconfig $phost_interface0 unplumb
        unix_cmd /sbin/ifconfig $phost_interface1 unplumb

        log_trace_end stop_networks
}

# select network "n"
select_network() {
	# Bring interconnect net $1 into service: configure its physical
	# interface, and configure (but not "up") the ha address on its ha
	# interface; the caller (ccmevent_cmd) brings the ha interface up.
	log_trace select_network $1
	netnumber=$1

	str="enmatch cluster.node.${myid}.haiface.${netnumber}"
	set -- `eval $str`
	hahost_interface=$1

	str="enmatch cluster.node.${myid}.if.${netnumber}"
	set -- `eval $str`
	phost_interface=$1
	str="enmatch cluster.node.${myid}.phost.${netnumber}"
	set -- `eval $str`
	phost_ipaddrs=$1

	# Leave a trail in case we are killed after ifconfig but before 
	# getting the chance to write the network id on the disk.
	echo -2 > ${ccm_selected_net_file}
	${mybin}/fsync ${ccm_selected_net_file}

	unix_cmd /sbin/ifconfig ${phost_interface} $phost_ipaddrs \
		netmask $netmask broadcast + private up

	unix_cmd /sbin/ifconfig ${hahost_interface} $hahost_ipaddrs0 \
			netmask $netmask broadcast +

	# record the now-active net id
	echo ${netnumber} > ${ccm_selected_net_file}
	${mybin}/fsync ${ccm_selected_net_file}

	str="enmatch cluster.node.${myid}.if.${netnumber}"
	set -- `eval $str`
	ifname=$1
	log_info "$pre.1030" "${clustname} net ${netnumber} ($ifname) selected"
	log_trace_end select_network $1
}


# deselect network "n"
deselect_network() {
	# Take interconnect net $1 out of service (bring its ha interface
	# down) and record $2 as the now-active net id in ccmd's state file.
	netnumber=$1
	newnetnumber=$2

	# fix: the trace markers used to say "select_network", which made
	# TRACE_PDB output indistinguishable from the real select path
	log_trace deselect_network $netnumber

	str="enmatch cluster.node.${myid}.haiface.${netnumber}"
	set -- `eval $str`
	hahost_interface=$1

	unix_cmd /sbin/ifconfig ${hahost_interface} 0.0.0.0 down

	echo ${newnetnumber} > ${ccm_selected_net_file}
	${mybin}/fsync ${ccm_selected_net_file}

	str="enmatch cluster.node.${myid}.if.${netnumber}"
	set -- `eval $str`
	ifname=$1

	log_info "$pre.5010" "${clustname} net ${netnumber} ($ifname) de-selected"
	log_trace_end deselect_network $netnumber
}

# Handle a ccmd network-event callback: decide which interconnect net
# should be in service, switch nets if needed, and persist the choice.
# Exits 8 when no interconnect net is usable, which forces CCM to
# trigger a cluster reconfiguration.
ccmevent_cmd () {
	log_trace ccmevent_cmd

	set_network_vars

	# Note: the variables below are passed to reconf_ener as environment
	# variables by ccmd.
	#
	# acthosts : hosts currently active in the cluster (presumably a
	#		count -- it is compared against 1 below).
	#
	# select :  The id of the net (if any) currently in use by the 
	#		other host.
	#
	# top    : The list of nets thru which I can reach the maximum
	#		number of hosts.  These nets are eligible for
	#		being selected.
	# failed : Any net which is not in the "top" list.
	# topnets: The number of nets in the "top" list.

	acthosts=${ACTHOSTS}
	select=${SELECT}
	top=${TOP}
	topnets=${TOPNETS}
	failed=${FAILED}

	# valid network state can only be determined if there is
	# more than one active node
	if [ ${acthosts} = 1 ]; then
		return
	fi

	# if a net is already in use by the other host and is eligible, that
	# is the one to pick.
	if [ ${select} != -1 ]; then
		for i in $failed; do
			if [ ${i} = ${select} ]; then
				# the other host's net has failed; ignore it
				select=-1
			fi
		done
	fi


	# if the current net can be used, don't change nets (keep using it).
	if [ ${select} = -1 ]; then
		for i in $top; do
			if [ ${i} = ${selected_net} ]; then
				select=${selected_net}
			fi
		done
	fi

	if [ ${select} = -1 ]; then	# nope, selected net has failed,
					# need to pick a new selected net
					# from any of the available networks
		if [ ${topnets} != 0 ]; then
			# pick the first in the list of good networks
			set -- $top
			select=$1
		else
			log_info "$pre.4020" "${clustname} no active interconnect networks"
			# A non-zero exit code will cause CCM to force a
			# reconfiguration.  If we are dealing with a 
			# persistent problem, after cmm.reconfig.max attempts
			# clustd will abort one of the nodes in the cluster
			# and try again.
			exit 8
		fi
	fi


	# Up/Down state reporting is done by the ccmd

	# There is a window in select_network where if the script is killed
	# after the ifconfig but before writing the net id to disk where
	# we really don't know which net is in use.  If that happens, we 
	# make sure that all nets except the selected one are put out of use.
	# The same holds if the file usually used to store the active net
	# id can not be located.
	# -1 means no net is in use; -2 means we don't know ...

	# bring down the old network if it is different

	desel1=-1
	desel2=-1

	if [ ! ${select} = ${selected_net} ]; then
		if [ ${selected_net} = -2 ]; then
			# unknown state: pick a non-selected net to deselect
			for i in $failed $top; do
				if [ ${select} != ${i} ]; then
					desel1=$i
					desel2=${select}
				fi
			done
		# only if the net was actually up
		elif [ ${selected_net} != -1 ]; then
			desel1=${selected_net}
			desel2=-1
			selected_net=-1
		fi
	fi

	# configure the new net
	if  [ ${select} != -1 -a ${select} != ${selected_net} ]; then
		select_network ${select}
	fi

	if [ ${desel1} != -1 -a ${desel1} != -2 ]; then
		deselect_network ${desel1} ${desel2}
	fi

	# bring up new net
	if  [ ${select} != -1 -a ${select} != ${selected_net} ]; then
		netnumber=${select}

		str="enmatch cluster.node.${myid}.haiface.${netnumber}"
		set -- `eval $str`
		hahost_interface=$1

		unix_cmd /sbin/ifconfig ${hahost_interface} $hahost_ipaddrs0 \
			netmask $netmask broadcast + private up

		# persist the final choice
		echo ${select} > ${ccm_selected_net_file}
		${mybin}/fsync ${ccm_selected_net_file}

	fi

	log_trace_end ccmevent_cmd
	exit 0
}

# if $1 != 0 kill ccmd
# else check to make sure that ccmd is alive.  If not, set RESTART_CCMD="Y".
#
# if $2 != 0 wait for ccmd to release all its resources
# otherwise, return immediately (needed to make sure return/stop/abort
#	transitions don't timeout).
kill_ccmd() {
        pfile=`enmatch ccm.pidfile`

	RESTART_CCMD="Y"

        if [ -f ${pfile} ]; then
                pid=`cat ${pfile}`
		set $1 $2 ${pid}
		if [ "$3" != "" -a "$3" != "0" ]; then
			if [ "$1" != "0" ]; then
				while /usr/bin/kill -USR2 -$3 1>/dev/null 2>&1
					do
					if [ "$2" = "0" ]; then break; fi
					sleep 1
					done
			elif [ ! /usr/bin/kill -0 $3 1>/dev/null 2>&1 ]; then
				RESTART_CCMD="N"
			fi
		fi
                /usr/bin/rm -f ${pfile}
        fi
        # log_info "$pre.2040" "${clustname} terminated ccmd"
}

# XXX - should we specify the reservation timeouts in the cdb file?

# reserve a quorum device
# Reserve the configured quorum device (if any) for this node using a
# bounded (8 second) pdbssa q_reserve attempt, with one retry.
reserve_quorum_dev() {
	log_trace reserve_quorum_dev
	quorum_dev=`eval enmatch ctlreserve.node.quorumdev`

	if [ -n "$quorum_dev" ]; then
                log_info "$pre.1010" "${clustname} reserving $quorum_dev as quorum device"
                RC=0
                ${mybin}/timed_run  8 ${SSACLI} q_reserve ${quorum_dev} 1>/dev/null 2>&1 || RC=$?
                if [ ${RC} -ne 0 ]; then
                  # we retry reserve again since the previous error
                  # could be due to a check condition not related to
                  # reservation conflict (especially in dual host
                  # sonoma where we have bus reset which results in reservation
                  # failure due to unit attention).
                  #
                  # NOTE(review): the "exit 1" below executes inside the
                  # "( ... )" group of the ||, so it only terminates that
                  # subshell, not the script -- confirm this is intended.
                  ${mybin}/timed_run  8 ${SSACLI} q_reserve ${quorum_dev} 1>/dev/null 2>&1 || ( \
                        log_info "$pre.4040"  "${clustname} failed to reserve $quorum_dev as quorum device"
                 echo "Warning: could not reserve the quorum device (${quorum_dev})" >&3
                  exit 1)
              fi 
        fi

	log_trace_end reserve_quorum_dev
}

# release a quorum device
release_quorum_dev() {
	log_trace release_quorum_dev
	quorum_dev=`eval enmatch ctlreserve.node.quorumdev`
	# Nothing configured: nothing to release.
	if [ -z "${quorum_dev}" ]; then
		log_trace_end release_quorum_dev
		return 0
	fi
	# A failed release is deliberately ignored (best effort).
	${SSACLI} release ${quorum_dev} 1>/dev/null 2>&1 || return 0
	log_trace_end release_quorum_dev
}

# reservations and release required for failure fencing

# failure_fencing reserve disk...  - reserve the given shared disks
# failure_fencing release          - release every disk recorded so far
#
# A running list of reserved disks is kept in ${myvar}/reserved.disks
# so that "release" can undo all reservations made across calls.
failure_fencing() {
	log_trace failure_fencing
	rr_flag=$1
	shift

	if [ "$rr_flag" = "reserve" -a $# -gt 0 ]; then
		disks=`echo $*`

		# if the reserved.disks file exists, remove all the devices from the
		# input list that are already in the file.

		if [ -s ${myvar}/reserved.disks ]; then
			newdisks=$disks
			disks=""
			for d in ${newdisks}
			do
				ret=""
				grep ${d} ${myvar}/reserved.disks > /dev/null 2>&1 || ret=`echo $?`
				# grep status 1 => not yet recorded; keep it.
				# (fix: this used to read "$ret}" -- the typo
				# made the test always fail, silently dropping
				# every not-yet-reserved disk from the list)
				if [ -n "${ret}" -a "${ret}" -eq 1 ]; then
					disks="$disks $d"
				fi
			done
		fi
		echo "$disks" | tr '\040' '\012' | sort -u >> ${myvar}/reserved.disks

	elif [ "$rr_flag" = "release" -a -s ${myvar}/reserved.disks ]; then
		disks=`cat ${myvar}/reserved.disks | sort -u`
	fi

	# best effort: an SSACLI failure is ignored (and skips the cleanup)
	${SSACLI} $rr_flag ${disks} 1>/dev/null 2>&1 || return 0

	if [ "$rr_flag" = "release" ]; then
		/bin/rm -f ${myvar}/reserved.disks
	fi

	log_trace_end failure_fencing
}

# reserve all shared devices

reserve_all_shared_devs() {
	log_trace reserve_all_shared_devs

	# No reservations are allowed if both nodes are in the cluster

	# Note, this command is not initiated by CMM.  Use clustm to get info.
	currnodes=`${mybin}/clustm getcurrmembers ${clustname}`
	localnodeid=`${mybin}/clustm getlocalnodeid ${clustname}`
	currstate=`${mybin}/clustm getstate ${clustname}`
	if [ "${currnodes}" != "${localnodeid}" ]; then
		log_info "$pre.4100" "Both nodes in cluster - cannot reserve disks"
		exit 1
	fi
	if [ "${currstate}" != "end" ]; then
		log_info "$pre.4200" "Reconfiguration in progress - cannot reserve disks"
		exit 1
	fi

	# We go through this hoopla of creating a temporary directory and
	# storing the pid of the 'pdbadmin resdisks' command in a file in that
	# directory to avoid the possibility of multiple commands running
	# at the same time
	
	/usr/bin/mkdir ${admindir} >/dev/null 2>&1
	if [ "$?" -ne 0 ]; then
		# another instance already holds ${admindir}; nothing to do
		exit 0
	fi
	echo $$ > ${admindir}/reserve.pid

	reserve_quorum_dev

	if [ "${vm}" = "cvm" ]; then
		# reserve every disk in all shared disk groups
		sdg=`/usr/sbin/vxdg list | grep shared | awk '{print $1}'`
		gather_disks ${sdg}
		failure_fencing reserve ${alldisks}

	elif [ "${vm}" = "vxvm" ]; then
		# reserve the disks of each node's cluster disk groups
		allnodes=`${mybin}/clustm getallnodes ${clustname}`
		for i in ${allnodes}
		do
			dg=`${mybin}/cdbmatch vm.node.${i}.cdg ${cdbfile}`
			gather_disks ${dg}
			failure_fencing reserve ${alldisks}
		done
	fi
	if [ -s ${admindir}/reserve.pid ]; then
		/bin/rm -fr ${admindir}
	fi

	log_trace_end reserve_all_shared_devs
}

# release all shared devices

release_all_shared_devs() {
	log_trace release_all_shared_devs
	# Drop the fenced data-disk reservations first, then give up the
	# quorum device reservation itself.
	failure_fencing release
	release_quorum_dev
	log_trace_end release_all_shared_devs
}


# check_cdbfile hostname ipaddr [hostname ipaddr ...]
#
# Compare our cdb file against each listed remote node's copy via
# checkrfile (filtered through ${cdbfilter}).  If the local host
# appears in the list, the whole check is skipped.  A mismatch
# (status 2, or status 1 while CLNODEUP != 0) triggers a cluster
# reconfiguration; status 1 while CLNODEUP = 0 aborts the transition.
check_cdbfile() {

	while [ ! -z "$*" ]; do
		if [ $1 = `uname -n` ]; then
			# our own entry: nothing to compare against
			return 0
		fi

		set +e
		${mybin}/timed_run -q 70 ${mybin}/checkrfile -p ${cdbfilter} -h $1 -i $2 -f ${cdbfile}
		status=$?
		set -e

		if [ ${status} = 2 ]
		then ${mybin}/clustm reconfigure ${clustname} >/dev/null 2>&1
		elif [ ${status} = 1 ] && [ ${CLNODEUP} = 0 ]
		then exit 1
		elif [ ${status} = 1 ]
		then ${mybin}/clustm reconfigure ${clustname} >/dev/null 2>&1
		fi
		shift; shift
	done
}

# Stop the rsumd daemon if our marker file says it is running, then
# remove the marker.  A failing rsumd_stop is tolerated.
stop_rsumd() {
	[ -r ${RSUMDRUNNING} ] || return 0
	set +e		# rsumd_stop may fail if the daemon already died
	${mybin}/rsumd_stop
	set -e
	/bin/rm -f ${RSUMDRUNNING}
}

# Launch the rsumd daemon (unless the marker file says it is already
# up) and record that it is running.
start_rsumd() {
	[ -r ${RSUMDRUNNING} ] && return 0
	${mybin}/rsumd -F ${cdbfilter}
	/usr/bin/touch ${RSUMDRUNNING}
}

# Collect into ${alldisks} the disk-media names of every disk in the
# given disk groups; groups that "vxdg list" does not recognize are
# silently skipped.
gather_disks() {
	log_trace gather_disks
	alldisks=""
	if [ $# -gt 0 ]; then
	   for g in $*
	   do
	      # vxdg.list becomes non-empty only when "vxdg list ${g}" failed
	      /usr/sbin/vxdg list ${g} 1>/dev/null 2>&1 || echo $? > ${myvar}/vxdg.list
	      if [ ! -s ${myvar}/vxdg.list ]; then
	         disks=`/usr/sbin/vxprint -g ${g} -d | grep dm | awk '{print $3}' | grep -v '-'`
		 alldisks="$alldisks $disks"
	      else
	         /bin/rm -f ${myvar}/vxdg.list
	      fi
	   done
	fi
	log_trace_end gather_disks
}

# Volume manager reconfiguration hook.  $1 is the CMM transition name
# (step1-4, start, stop, abort, return); for cvm, all arguments are
# passed straight through to vxclust.  Returns the status of the
# underlying volume manager operation.
cvm() {
	if [ "${vm}" = "cvm" ]; then

		if [ $1 = "step1" ]; then
			if [ "${currnodes}" != "${localnodeid}" ]; then
				# slave join - release all reservations
				failure_fencing release
			fi
		fi

		if [ $1 = "stop" ]; then
			# run the stop in the real-time scheduling class
			priocntl -e -c RT /usr/sbin/vxclust $*
		else
			/usr/sbin/vxclust $*
			cvmstatus=$?
		fi

		if [ $1 = "step3" ]; then
			if [ "${currnodes}" = "${localnodeid}" ]; then
				# sole member: fence all shared disk groups
				sdg=`/usr/sbin/vxdg list | grep shared | awk '{print $1}'`
				gather_disks ${sdg}
				failure_fencing reserve ${alldisks}
			fi
		fi

		# Failure fencing is released only for the abort transition
		# since stop is actually implemented as an abort.

		if [ $1 = "stop" -o $1 = "abort" ]; then
			if [ "${cvmstatus}" -eq 0 ]; then
				failure_fencing release
			fi
		fi
	fi

	if [ "${vm}" = "vxvm" ]; then
	 # I am going to set vmstatus to 0 just as the default return
	 # value from this part of the routine. If we need to pass
	 # the exit status from any of the steps below, then vmstatus
	 # should be set there.
         vmstatus=0

		if [ $1 = "step2" ]; then
		   if [ "${currnodes}" != "${localnodeid}" ]; then
		      # cluster includes both nodes - deport all non-local
		      # cluster disk groups and release all reservations

		      for i in ${currnodes}
		      do
		         if [ ${i} -ne ${localnodeid} ]; then
			    dg=`${mybin}/cdbmatch vm.node.${i}.cdg ${cdbfile}`
			    cvm_deport $1 ${dg}
			  fi
		      done
		      failure_fencing release
		   else
		      # only the local node is in the cluster - import all
		      # non-local cluster disk groups

		      for i in ${allnodes}
		      do
		         if [ ${i} -ne ${localnodeid} ]; then
			    dg=`${mybin}/cdbmatch vm.node.${i}.cdg ${cdbfile}`
			    cvm_import ${dg}
			    gather_disks ${dg}
			    failure_fencing reserve ${alldisks}
			 fi
		      done
		   fi
		elif [ $1 = "step3" ]; then
		   # import all local cluster disk groups only if the VM_IMPORT variable
		   # is 0. If it is 1, it means that the disk groups are busy on the
		   # other node.

		   if [ "${VM_IMPORT}" = 0 ]; then
		      for i in ${allnodes}
		      do
		         if [ ${i} -eq ${localnodeid} ]; then
			    dg=`${mybin}/cdbmatch vm.node.${i}.cdg ${cdbfile}`
			    cvm_import ${dg}
			    if [ "${currnodes}" = "${localnodeid}" ]; then
			       gather_disks ${dg}
			       failure_fencing reserve ${alldisks}
			    fi
			 fi
		      done
		   else
		      log_info "$pre.1190" "Primary disk groups busy on other node - not imported"
		      vmstatus=1
		   fi
		elif [ $1 = "stop" -o $1 = "abort" ]; then
		   # deport all currently imported cluster disk groups
		   for i in ${allnodes}
		   do
		      dg=`${mybin}/cdbmatch vm.node.${i}.cdg ${cdbfile}`
		      cvm_deport $1 ${dg}
		   done
		   failure_fencing release
		fi
	fi
	if [ "${vm}" = "cvm" ]; then
	 return $cvmstatus
	else
	 return $vmstatus
	fi
}

# deport of cluster disk groups

# cvm_deport current_step dg...
#
# Deport each of the given disk groups that is currently imported.  A
# "busy" deport (vxdg exit 31) is fatal except during step2, where the
# failure is recorded in ${myvar}/vxdg.deport.step2 for later use.
cvm_deport() {
	current_step=$1
	/bin/rm -f ${myvar}/vxdg.deport.${current_step}

	shift
	if [ $# -gt 0 ]; then
	   for g in $*
	   do
	      # vxdg.list becomes non-empty only when the group is unknown
	      /usr/sbin/vxdg list ${g} > /dev/null 2>&1 || echo $? > ${myvar}/vxdg.list

	      if [ ! -s ${myvar}/vxdg.list ]; then
		 log_info "$pre.1180" "deporting ${g}"
		 /usr/sbin/vxdg deport "${g}" || echo $? > ${myvar}/vxdg.deport.${current_step}.${g}

		 if [ -s ${myvar}/vxdg.deport.${current_step}.${g} ]; then
		    result=`/bin/cat ${myvar}/vxdg.deport.${current_step}.${g}`

		    # vxdg exit 31: disk group is busy
		    if [ "${result}" -eq 31 ]; then
		       log_info "$pre.5020" "Disk group ${g} busy. Deport failed"
			if [ "${current_step}" != "step2" ]; then
			   /bin/rm -f ${myvar}/vxdg.deport.${current_step}.${g}
			   exit 1
			else
			   /bin/mv -f ${myvar}/vxdg.deport.${current_step}.${g} ${myvar}/vxdg.deport.${current_step}
			fi
		    fi
		 fi
	      else
	         /bin/rm -f ${myvar}/vxdg.list
	      fi
	   done
	fi
}

# import of cluster disk groups

# cvm_import dg...
#
# Import (vxdg -Ct) each of the given disk groups that is not already
# imported, then start recovery of their volumes in the background.
# Import failures are logged but not fatal.
cvm_import() {
	if [ $# -gt 0 ]; then
	   for g in $*
	   do
	      # vxdg.list becomes non-empty when the group is not yet imported
	      /usr/sbin/vxdg list ${g} > /dev/null 2>&1 || echo $? > ${myvar}/vxdg.list

	      if [ -s ${myvar}/vxdg.list ]; then
	         log_info "$pre.1170" "importing ${g}"
		 /bin/rm -f ${myvar}/vxdg.list
		 /usr/sbin/vxdg -Ct import "${g}" || echo $? > ${myvar}/vxdg.import

		 if [ -s ${myvar}/vxdg.import ]; then
		    log_info "$pre.4030" "Import of disk group ${g} failed"
		    /bin/rm -f ${myvar}/vxdg.import
		 fi
	      fi
	   done
	   /usr/sbin/vxrecover -sb
	fi
}


# join the local node with an Energizer cluster
startnode_cmd() {
	log_trace startnode
	#
	# XXX - here we should do sanity checks to assure that the node
	# 	is well configured for joining the cluster.

	# check that cluster is not already running
	if [ -f ${ISRUNNINGFLAG} ]; then
		# check if clustd is really running
		# NOTE(review): the "exit 1" below runs inside the "( ... )"
		# subshell, so it does not terminate startnode_cmd -- confirm
		# whether a duplicate start is really meant to fall through.
		${mybin}/timed_run -q 3 ${mybin}/clustm getstate ${clustname} >/dev/null 2>&1 && \
		(
			echo "    This node is already running as part of the ${clustname} cluster" >&3
			exit 1
		)
	fi

	# when OPS is configured, lkmgr must be an executable (not a
	# directory left behind by a broken install)
	if [ ${udlm} -eq 1 -a \( -d ${mybin}/lkmgr -o ! -x ${mybin}/lkmgr \) ]; then
		log_info "$pre.4050" "${mybin}/lkmgr:  Oracle unix dlm is not installed."
		exit 1
	fi

	# find our node number by matching hostnames in the cdb file
	nodename=`eval /bin/uname -n`
	numnodes=`enmatch cmm.nodes`
	i=0;
	while [ $i -lt $numnodes ]; do
		if [ `enmatch cluster.node.$i.hostname` = ${nodename} ]; then
			break;
		fi
		i=`expr $i + 1`
	done
	log_info "$pre.1150" "Starting PDB; node $i ($nodename) joining the ${clustname} cluster."
	echo "Starting SPARCcluster PDB software - joining the ${clustname} cluster." >&3

	touch ${ISRUNNINGFLAG}

	# cleanup stale files
        pfile=`enmatch ccm.pidfile`
	if [ -f ${pfile} ]; then
		/bin/rm -f ${pfile}
	fi
	if [ -f ${ccm_selected_net_file} ]; then
		/bin/rm -f ${ccm_selected_net_file}
	fi

	start_networks

	# avoid confusion over stale sequence numbers
	/usr/bin/rm -f ${CMMSEQNUM}

	${mybin}/clustd -f ${cdbfile}	# stdout/err are redirected by caller to the logfile

	/bin/rm -f ${DOINGSTOPFLAG}

	# record whether this was a forced (-f) start so cmmstep1 can
	# skip the quorum device reservation
	if [ "${forcestart}" = 1 ]; then
		touch ${FORCESTARTFLAG}
		log_info "$pre.2030" "${clustname} cluster started with -f (force) option"
	else
		/bin/rm -rf ${FORCESTARTFLAG}
	fi


	# unless the '-a' flag was specified, wait for the node to do
	# the first reconfiguration.
	if [ "${async}" != 1 ]; then
		while [ -f  ${ISRUNNINGFLAG} ]; do
			# check if clustd is still running
			state=`${mybin}/timed_run -q 20 ${mybin}/clustm getstate ${clustname} 2>/dev/null` ||\
				 exit 1
			if [ "$state" = "end" ]; then
				break
			else
				sleep 3
			fi
		done
	fi
	# the flag disappears if an abort happened while we were waiting
	if [ ! -f  ${ISRUNNINGFLAG} ]; then
		exit 1
	# else
	#	 echo "This node is now running as part of the ${clustname} cluster." >&3
	fi

	log_trace_end startnode
}

# called from cluster membership monitor "start" transition
cmmstart_cmd() {

	validate_env_vars

	# if no log location is specified, assign a default
	if [ ${udlm} -eq 1 ]; then
		if [ "${LKMGR_LOG}X" = "X" ]; then
			export LKMGR_LOG
			LKMGR_LOG=${myvar}
		fi

		# start the unix dlm.  We can't do it before this point because
		# clustd has to determine the nodeid.
		if [ `/bin/ps -u 0 | /bin/grep lkmgr | \
		    /bin/grep -v 'grep' | /bin/wc -l` -ne 0 ]; then
			log_info "$pre.4060" \
				"${clustname} unix dlm already running"
		    exit 1
		fi

		log_info "$pre.1160" "${clustname} starting the unix dlm."
		/bin/priocntl -c `enmatch udlm.schedclass` -p `enmatch \
			udlm.schedpriority` -e ${mybin}/lkmgr -n 2 \
			-c ${cdbfile} -i ${localnodeid}  > /dev/console 2>&1
	fi

	# run the vm "start" step in the background and wait; a failure
	# status is recorded in ${myvar}/cvm.start
	(((cvm start $clustname ${cdbfile}) || echo $? >> /${myvar}/cvm.start)&
        wait)
        if [ -s /${myvar}/cvm.start ]
        then
                /bin/rm -f /${myvar}/cvm.start
                exit 1
        fi

	# restart rsumd fresh for the new incarnation
	stop_rsumd
	start_rsumd
}

# pdb cluster graceful shutdown
stopnode_cmd() {
	log_trace stopnode

	if [ ! -f ${ISRUNNINGFLAG} ]; then
		echo "The SPARCcluster PDB software is not currently running on this node." >&3
		# log_info "$pre.4060" "${clustname} cluster is not currently running on this node"
		exit 1	# for bugID 1166404
	fi

	echo "Stopping the SPARCcluster PDB software - leaving the ${clustname} cluster" >&3

	# this path is not driven by CMM, so query clustm directly
	currnodes=`${mybin}/clustm getcurrmembers ${clustname}`
	localnodeid=`${mybin}/clustm getlocalnodeid ${clustname}`
	allnodes=`${mybin}/clustm getallnodes ${clustname}`

	# stop the volume manager, then mark that an orderly stop is in
	# progress (cmmabort_cmd skips the vm abort when this flag exists)
        cvm stop $clustname ${cdbfile}
	touch ${DOINGSTOPFLAG}

	# kill ccmd without waiting for it to release its resources
	kill_ccmd 1 0

	${mybin}/clustm stop ${clustname} this

	# unless the '-a' flag was specified, wait for the node to shutdown
	if [ "${async}" != 1 ]; then
		while [ -f ${ISRUNNINGFLAG} ]; do
			sleep 1
		done
	fi
	#echo "This node is no longer running as part of the ${clustname} cluster." >&3
	if [ "$currnodes" = "$localnodeid" ]; then
		echo "The ${clustname} cluster has no active hosts." >&3
	fi
	# log_info "$pre.4070" "${clustname} cluster is stopped on this node"
	log_trace_end stopnode
}

# called from cluster membership monitor "abort" and "stop" transitions
cmmabort_cmd() {
	log_trace cmmabort_cmd

	validate_env_vars
	stop_rsumd

	if [ "${vm}" = "cvm" ]; then
		log_info "SUNWcluster.cvm.5010" "disabling cluster volume manager shared access mode"
	elif [ "${vm}" = "vxvm" ]; then
		log_info "SUNWcluster.vm.5010" "disabling volume manager"
	fi

	# run the vm abort and the dlm/GMS shutdowns in parallel; each
	# records its failure status in a file under ${myvar}
        (
	if [ ! -f ${DOINGSTOPFLAG} ]; then
		((cvm abort $clustname ${cdbfile}) || echo $? >> /${myvar}/cvm.abort)&
	fi
	if [ ${udlm} -eq 1 ]; then
		if [ -x ${mybin}/ogmsctl -a -x ${mybin}/ogms ]
		then
			log_info "$pre.2015" "${clustname} Stopping Oracle GMS"
			ORACLE_HOME=/opt/SUNWcluster
			OGMS_HOME=${myvar}/ogms_`/bin/hostname`
			export ORACLE_HOME
			export OGMS_HOME
			((${mybin}/ogmsctl abort > /dev/null 2>&1) \
				|| echo $? >> ${myvar}/ogmsctl.abort)
		fi
		((${mybin}/udlmctl abort $clustname \
		   `enmatch udlm.abort_timeout` \
               	   ) || echo $? >> /${myvar}/udlmctl.abort)&
	fi
        wait)

        if [ -s /${myvar}/udlmctl.abort ] || [ -s /${myvar}/cvm.abort ] \
		|| [ -s /${myvar}/ogmsctl.abort ]
        then
                /bin/rm -f /${myvar}/udlmctl.abort /${myvar}/cvm.abort \
			/${myvar}/ogmsctl.abort
                exit 1
        fi

	# give up the quorum device if we had reserved it
	if [ -f ${RESERVEDCTLS} ]; then
		release_quorum_dev
		/bin/rm -rf ${RESERVEDCTLS}
	fi

	/bin/rm -rf ${ISRUNNINGFLAG}

	# ccmd is killed after other components stop.  This is to take
	# care of net failures that may occur during stop/abort.
	kill_ccmd 1 0

	stop_networks

	# start up post-reconfiguration scripts (e.g. user defined scripts)
	priocntl -c TS -p 59 -e \
		${myvar}/system.scripts/${clustname}.reconfig.sys_script \
		${cmd} ${SEQNUM} `/usr/bin/date +\%Y\%m\%d\%H\%M\%S` &

	log_trace_end cmmabort_cmd
}

# called from cluster membership monitor "return" transition
cmmreturn_cmd() {
	# unix dlm does not do anything during the return transition

	# run the vm "return" step; a failure status is recorded in
	# ${myvar}/cvm.return
        (((cvm return $clustname ${cdbfile}) || echo $? >> \
		/${myvar}/cvm.return)&
		wait)
        if [ -s /${myvar}/cvm.return ]
        then
                /bin/rm -f /${myvar}/cvm.return
                exit 1
        fi

	# ccmd is killed after other components stop.  This is to take
	# care of net failures that may occur during stop/abort.
	kill_ccmd 1 0

	start_rsumd
}

# called from cluster membership monitor "step1" transition
cmmstep1_cmd() {
	validate_env_vars

	# translate member node ids to hostnames (only used by the
	# commented-out log messages below)
        set -- $currnodes
        names=""
        while [ ! -z "$*" ]; do
                thisname=`enmatch cluster.node.$1.hostname`
                  names="$names $thisname"
                shift;
        done

	# log_info "$pre.1110" "${clustname} reconfiguration sequence ${seqnum} started with nodes ${currnodes}"
	# log_info "$pre.1120" "${clustname} reconfiguration ${seqnum} started on ${names}"

	# stop ccmd and wait for its resources; when RESTART_CCMD is not
	# "Y", only check whether it is still alive
	if [ "${RESTART_CCMD}X" = "YX" ]; then
		kill_ccmd 1 1
	else
		kill_ccmd 0 1
	fi

	if [ "${currnodes}" = "${localnodeid}" ]; then
		# we are the only member: take the quorum device

		# We no longer can tell which nets are active
		/usr/bin/rm -f ${ccm_top_net_file}

		if [ -f ${FORCESTARTFLAG} ]; then
			log_info "$pre.1140" "${clustname} cluster bypassing reservation of quorum device"
		else
			reserve_quorum_dev
		fi
		touch ${RESERVEDCTLS}
	else
		# the cluster includes both nodes

		if [ -f ${RESERVEDCTLS} ]; then
			release_quorum_dev
			/bin/rm -rf ${RESERVEDCTLS}
		fi
	fi
	/bin/rm -rf ${FORCESTARTFLAG}

	# abort any 'pdbadmin reldisks' reservation still in progress
	if [ -f ${admindir}/reserve.pid ]; then
		pid=`cat ${admindir}/reserve.pid`
		kill -KILL ${pid} || echo $? > /dev/null 2>&1
		/bin/rm -fr ${admindir}
	fi
}

# called from cluster membership monitor "step2" transition
#
# The new instance of CCM is not started in step1 because synchronization
# can not be guaranteed within a single step.  In previous releases,
# when ccmd was re-started in step1 a new instance of ccmd could talk
# to an old instance of ccmd on the other node.  This would cause erroneous
# message to be logged.
#
cmmstep2_cmd() {
	validate_env_vars

	# the cluster includes both nodes
	if [ "${currnodes}" != "${localnodeid}" ]; then
		# restart ccmd in the background, but only when step1
		# decided it should be restarted (RESTART_CCMD=Y)
		if [ "${RESTART_CCMD}X" = "YX" ]; then
			${mybin}/ccmd -w -m ${localnodeid} -c \
				${cdbfile} ${currnodes} &
		fi
	fi
 
#
# Check the cdb file here from step one. This is because we want
# to use the ha interface and so we need ccmd to make sure its there.
# The names parameter that is passed to check_cdbfile is in the format
# of '<hostname> <ipaddress> ...'. 

	# build "<hostname> <hahost>" pairs for every remote node; the
	# local node is skipped.
	# NOTE(review): enmatch presumably resolves keys from the cdb
	# file -- confirm against its definition earlier in this file.
        set -- $currnodes
        names=""
        while [ ! -z "$*" ]; do
	   if [ "$1" != "${localnodeid}" ]; then
		thisname=`enmatch cluster.node.$1.hahost`
		thishostname=`enmatch cluster.node.$1.hostname`
		names="$names $thishostname"
		names="$names $thisname"
	   fi
	   shift;
        done

	check_cdbfile ${names}

}

# called from cluster membership monitor "step3" transition
#
# Runs the unix dlm "step1" and volume manager "step1" transitions in
# parallel inside a subshell; a non-zero exit status from either is
# appended to its marker file and turned into a failure after the wait.
cmmstep3_cmd() {
	validate_env_vars
	# the rsu monitor must not run during the dlm/vm transitions
	stop_rsumd
        (
	if [ ${udlm} -eq 1 ]; then
		# the dlm step is pointless if the lock manager died
		if [ `/bin/ps -u 0 | /bin/grep lkmgr | \
			/bin/grep -v grep | /bin/wc -l` -eq 0 ]; then
				log_info "$pre.4061" \
				    "${clustname} unix dlm no longer running"
			exit 1
	   	fi

		((${mybin}/udlmctl step1 $clustname \
			`enmatch udlm.step1_timeout`) \
			|| echo $? >> /${myvar}/udlmctl.step1) &
	fi

        ((cvm step1 $clustname ${cdbfile}) || echo $? >> /${myvar}/cvm.step1) &
        wait)
	# a non-empty marker file means the corresponding step failed
        if [ -s /${myvar}/udlmctl.step1 ] || [ -s /${myvar}/cvm.step1 ]
        then
                /bin/rm -f /${myvar}/udlmctl.step1 /${myvar}/cvm.step1
                exit 1
        fi
}

# called from cluster membership monitor "step4" transition
#
# Runs the unix dlm "step2" and volume manager "step2" transitions in
# parallel; failures are reported through the marker-file protocol
# (see cmmstep3_cmd).
cmmstep4_cmd() {
	validate_env_vars
        (
	if [ ${udlm} -eq 1 ]; then
		((${mybin}/udlmctl step2 $clustname \
			`enmatch udlm.step2_timeout`) \
                	|| echo $? >> /${myvar}/udlmctl.step2) &
	fi
        ((cvm step2 $clustname ${cdbfile}) || echo $? >> /${myvar}/cvm.step2) &
        wait)
	# a non-empty marker file means the corresponding step failed
        if [ -s /${myvar}/udlmctl.step2 ] || [ -s /${myvar}/cvm.step2 ]; then
                /bin/rm -f /${myvar}/udlmctl.step2 /${myvar}/cvm.step2
                exit 1
        fi
}

# called from cluster membership monitor "step5" transition
#
# Runs the unix dlm "step3" and volume manager "step3" transitions in
# parallel; failures are reported through the marker-file protocol
# (see cmmstep3_cmd).
cmmstep5_cmd() {
	validate_env_vars
	(
	if [ ${udlm} -eq 1 ]; then
		((${mybin}/udlmctl step3 $clustname \
			`enmatch udlm.step3_timeout`) \
                	|| echo $? >> /${myvar}/udlmctl.step3) &
	fi
        ((cvm step3 $clustname ${cdbfile}) || echo $? >> /${myvar}/cvm.step3)&
        wait)
	# a non-empty marker file means the corresponding step failed
        if [ -s /${myvar}/udlmctl.step3 ] || [ -s /${myvar}/cvm.step3 ]; then
                /bin/rm -f /${myvar}/udlmctl.step3 /${myvar}/cvm.step3
                exit 1
        fi
}

# called from cluster membership monitor "step6" transition
#
# Runs the unix dlm "step4" and volume manager "step4" transitions in
# parallel; failures are reported through the marker-file protocol
# (see cmmstep3_cmd).
cmmstep6_cmd() {
	validate_env_vars
	(
	if [ ${udlm} -eq 1 ]; then
		((${mybin}/udlmctl step4 $clustname \
			`enmatch udlm.step4_timeout`) \
                	|| echo $? >> /${myvar}/udlmctl.step4) &
	fi
        ((cvm step4 $clustname ${cdbfile}) || echo $? >> /${myvar}/cvm.step4) &
        wait)
	# a non-empty marker file means the corresponding step failed
        if [ -s /${myvar}/udlmctl.step4 ] || [ -s /${myvar}/cvm.step4 ]; then
                /bin/rm -f /${myvar}/udlmctl.step4 /${myvar}/cvm.step4
                exit 1
        fi
}

# called from cluster membership monitor "step7" transition
#
# Logs the volume manager mode for this node, then runs the unix dlm
# "step5" transition (in the foreground, unlike steps 3-6).
cmmstep7_cmd() {
	validate_env_vars

	name=`/bin/uname -n`
	if [ "${vm}" = "cvm" ]; then
		log_info "SUNWcluster.cvm.6010" "cluster volume manager shared access mode enabled"
		# ask the cluster volume manager whether this node is the
		# master or a slave (informational only)
	        case `/usr/sbin/vxdctl -c mode 2>/dev/null` in
	          *MASTER*) vm_on_node="master"
	          log_info "SUNWcluster.cvm.1010" "- node ${name} vm_on_node is $vm_on_node" ;;
	          *SLAVE*)  vm_on_node="slave"
	          log_info "SUNWcluster.cvm.1020" "- node ${name} vm_on_node is $vm_on_node" ;;
	        esac  
	elif [ "${vm}" = "vxvm" ]; then
		log_info "SUNWcluster.vm.6010" "volume manager enabled"
	fi

	if [ ${udlm} -eq 1 ]; then
		((${mybin}/udlmctl step5 $clustname \
			`enmatch udlm.step5_timeout`) \
                	|| echo $? >> /${myvar}/udlmctl.step5)
	fi

	# a non-empty marker file means the dlm step failed
        if [ -s /${myvar}/udlmctl.step5 ]; then
                /bin/rm -f /${myvar}/udlmctl.step5
                exit 1
        fi
}

# called from cluster membership monitor "step8" transition
#
# If the unix dlm is configured and the Oracle GMS binaries are
# installed, make sure the Oracle Group Membership Service daemon is
# running: start it via priocntl and, if the start command failed, poll
# "ogmsctl status" until the daemon answers or the retries are
# exhausted.  Always returns 0; GMS start failure is only logged.
cmmstep8_cmd() {

	validate_env_vars

	if [ ${udlm} -eq 1 ]; then
		# only attempt anything when both the control program and
		# the daemon binary are installed and executable
		if [ -x ${mybin}/ogmsctl -a -x ${mybin}/ogms ]; then
			if [ `/bin/ps -u 0 | /bin/grep ogms | \
				/bin/grep -v grep | /bin/wc -l` -ne 0 ]; then
				log_info "$pre.1017" \
				    "${clustname} Oracle GMS already running"
			else
				log_info "$pre.1015" \
				    "${clustname} Starting Oracle GMS"
				ORACLE_HOME=/opt/SUNWcluster
				OGMS_HOME=${myvar}/ogms_`/bin/hostname`
				export ORACLE_HOME
				export OGMS_HOME
				# remove stale GMS state from a previous
				# incarnation
				if [ -d /tmp/.ogms ]; then
					/bin/rm -rf /tmp/.ogms
				fi
				# probe the lock manager; exit status is
				# deliberately ignored
				((${mybin}/lktest > /dev/null 2>&1) \
					|| echo $? > /dev/null)
				# run ogms one priority level above the udlm
				ogmscls=`enmatch udlm.schedclass`
				udlmpri=`enmatch udlm.schedpriority`
				ogmspri=`expr $udlmpri + 1`
				((/usr/bin/priocntl -c ${ogmscls} \
					-p ${ogmspri} -e ${mybin}/ogmsctl \
					start > /dev/null 2>&1) || \
					echo $? >> ${myvar}/ogmsctl.step8)
				# a non-empty marker file means "ogmsctl
				# start" failed; poll until the daemon
				# answers a status query or we give up
				if [ -s ${myvar}/ogmsctl.step8 ]; then
					/bin/rm -f ${myvar}/ogmsctl.step8
					ogms_retry=0
					ogms_retry_intvl=2
					ogms_max_retries=90
					ogms_up=255
					# ogms_up is empty when "ogmsctl
					# status" succeeds, else holds its
					# exit status.  BUGFIX: the original
					# used the integer tests -ne/-eq
					# against "", which are invalid and
					# made this retry loop dead code;
					# use the string tests -n/-z.
					while [ -n "${ogms_up}" \
						-a ${ogms_retry} -lt \
						${ogms_max_retries} ]; do
						ogms_up=`((${mybin}/ogmsctl status > /dev/null 2>&1) || echo $?)`
						if [ -z "${ogms_up}" ]; then
							break;
						fi
						log_info "$pre.1018" "${clustname} Oracle GMS not up yet, polling"
						ogms_retry=`expr $ogms_retry + 1`
						echo "OGMS down \"$ogms_up\", retry ${ogms_retry} of ${ogms_max_retries} sleeping for ${ogms_retry_intvl} seconds"
						sleep ${ogms_retry_intvl}
					done
					if [ -n "${ogms_up}" ]; then
						log_info "$pre.1019" "${clustname} Oracle GMS failed to start, try starting manually"
					fi
				fi
			fi
		fi
	fi

	return 0
}

# called from cluster membership monitor "step9" transition
#
# Final step: log membership status for every configured node and kick
# off the user post-reconfiguration script in the background.
cmmstep9_cmd() {
	# tolerate command failures while iterating/logging; -e is
	# restored at the end of the function
	set +e
	validate_env_vars

	numnodes=`enmatch cmm.nodes`
	_thisnode=`expr $numnodes - 1`	# 0 .. n-1
	# for each configured node id, report whether it appears in the
	# current membership list ${currnodes}
	while [ ${_thisnode} -ge 0 ]; do
	 	set -- $currnodes
	 	_foundit="NO"
	 	while [ ! -z "$*" ]; do
	 		if [ $1 -eq ${_thisnode} ]; then
				_foundit="YES"
			fi
	 		shift;
	 	done
	 	name=`enmatch cluster.node.${_thisnode}.hostname`
	 	if [ "NO" = "${_foundit}" ]; then
	 		log_info "$pre.1930" "${clustname} node ${_thisnode} (${name}) is not a cluster member"
		else
			log_info "$pre.1920" "${clustname} node ${_thisnode} (${name}) is a cluster member"
	 	fi
	 	_thisnode=`expr ${_thisnode} - 1`
	done

	# start up post-reconfiguration scripts (e.g. user defined scripts)
	# NOTE(review): lower-case ${seqnum} here vs ${SEQNUM} in the
	# stop/abort path -- confirm which one validate_env_vars exports.
	priocntl -c TS -p 59 -e \
		${myvar}/system.scripts/${clustname}.reconfig.sys_script \
		up ${seqnum} `/usr/bin/date +\%Y\%m\%d\%H\%M\%S` ${CLNODEUP} &

	log_info "$pre.1940" "${clustname} cluster reconf #${seqnum} finished"
	set -e
}
#
# end of shell functions
#

#####################################################################################

# get program options
# NOTE(review): legacy getopt(1) output parsing -- arguments containing
# whitespace would break, but only boolean flags are defined here.
# -a: asynchronous invocation, -f: force start without majority quorum.
set -- `getopt af $*`
if [ $? != 0 ]; then
	_usage
fi
for i in  $*; do
	case $i in
	-a) async=1; shift ;;
	-f) forcestart=1; shift ;;
	--) shift; break;;
	esac
done

# remaining arguments: <command> <cluster name>
prog=$0
cmd=$1
clustname=$2
# Cluster Application Bit Assignment. This is done here
# so that we can use symbolic application names throughout the
# reconfiguration script.
# OPS = bit 0
# Informix = bit 1
# Sybase = bit 2
# CVM = bit 3
# VxVM = bit 4
# SDS = bit5

OPS=0
Informix=1
Sybase=2
CVM=3
VxVM=4
SDS=5

# resolve paths, cdb-derived settings and flag-file names (see init)
init $*

set -e				# exit shell script on error

#
# Dispatch the call.
#
# We are carefully passing file descriptor 2 from "startnode" to clustd
# and then from clustd to the "cmm???" calls.
#
# file descriptor #3 is the original stdout if you need to send
# messages to the interactive user.
#
case ${cmd} in
	# interactive commands
	startnode)		startnode_cmd    3>&1 1>>${logfile} 2>&1 ;;
	stopnode)		stopnode_cmd     3>&1 1>>${logfile} 2>&1 ;;

	# async commands - not interactive
	cmmstop | cmmabort)	cmmabort_cmd     3>&1 1>>${logfile} 2>&1 ;;
	cmmstart | cmmreturn | cmmstep[1-9]) \
				eval ${cmd}_cmd  3>&1 1>>${logfile} 2>&1 ;;
	ccmevent)		ccmevent_cmd     3>&1 1>>${logfile} 2>&1 ;;
	resdisks)		reserve_all_shared_devs 3>&1 1>>${logfile} 2>&1 ;;
	reldisks)		release_all_shared_devs 3>&1 1>>${logfile} 2>&1 ;;

	*)			_usage				    ;;
esac
exit 0
