Sun Cluster 
------------------------------------------

bash-2.05# scstat -q

-- Quorum Summary --

Quorum votes possible: 3
Quorum votes needed: 2
Quorum votes present: 3


-- Quorum Votes by Node --

Node Name Present Possible Status
--------- ------- -------- ------
Node votes: node14 1 1 Online
Node votes: node18 1 1 Online


-- Quorum Votes by Device --

Device Name Present Possible Status
----------- ------- -------- ------
Device votes: /dev/did/rdsk/d3s2 1 1 Online

bash-2.05# scinstall -pv
Sun Cluster 3.1u3 for Solaris 9 sparc
SUNWscr: 3.1.0,REV=2003.03.25.13.14, 113801-12 117949-08 115364-10 115571- 03
SUNWscu: 3.1.0,REV=2003.03.25.13.14, 113801-12 117949-08 115364-10 115571- 03
SUNWscsck: 3.1.0,REV=2003.09.10.18.59, 115953-03
SUNWscnm: 3.1.0,REV=2004.10.02.17.53
SUNWscdev: 3.1.0,REV=2003.03.25.13.14, 113801-12 117949-08
SUNWscgds: 3.1.0,REV=2003.03.25.13.14, 115061-01
SUNWscman: 3.1.0,REV=2003.03.25.13.14, 113801-12 117949-08
SUNWscsal: 3.1.0,REV=2003.03.25.13.14, 113801-12 117949-08
SUNWscsam: 3.1.0,REV=2003.03.25.13.14, 113801-12
SUNWscvm: 3.1.0,REV=2003.03.25.13.14, 113801-12
SUNWmdm: 3.1.0,REV=2003.03.25.13.14, 115053-03
SUNWjdmk-runtime: 5.1,REV=34
SUNWjdmk-runtime-jmx: 5.1,REV=34
SUNWcacaocfg: 1.0,REV=25
SUNWcacao: 1.0,REV=25
SUNWscmasa: 3.1.0,REV=2004.10.02.17.53, 118627-01 115364-10
SUNWscva: 3.1.0,REV=2003.03.25.13.14, 115055-01
SUNWscspm: 3.1.0,REV=2004.10.02.17.53
SUNWscspmu: 3.1.0,REV=2004.10.02.17.53, 115364-10
SUNWscspmr: 3.1.0,REV=2004.10.02.17.53
SUNWexplo: 4.3.1,REV=2004.06.25.07.21

------------------------------------------
bash-2.05# lustatus
Boot Environment Is Active Active Can Copy
Name Complete Now On Reboot Delete Status
-------------------------- -------- ------ --------- ------ ----------
s9be yes yes yes no -
s10be yes no no yes -
bash-2.05# cat /etc/lutab
# DO NOT EDIT THIS FILE BY HAND. This file is not a public interface.
# The format and contents of this file are subject to change.
# Any user modification to this file may result in the incorrect
# operation of Live Upgrade.
1:s9be:C:0
1:/:/dev/dsk/c0t1d0s0:1
1:boot-device:/dev/dsk/c0t1d0s0:2
2:s10be:C:0
2:/:/dev/dsk/c0t2d0s0:1
2:boot-device:/dev/dsk/c0t2d0s0:2

-------------------------------------------------------------
bash-2.05# cat /etc/lutab
# DO NOT EDIT THIS FILE BY HAND. This file is not a public interface.
# The format and contents of this file are subject to change.
# Any user modification to this file may result in the incorrect
# operation of Live Upgrade.
1:s9be:C:0
1:/:/dev/dsk/c0t1d0s0:1
1:boot-device:/dev/dsk/c0t1d0s0:2
2:s10be:C:0
2:/:/dev/dsk/c0t2d0s0:1
2:boot-device:/dev/dsk/c0t2d0s0:2
bash-2.05# cd /etc/lu
bash-2.05# cat ICF.*
s9be:-:/dev/dsk/c0t1d0s1:swap:4194828
s9be:/:/dev/dsk/c0t1d0s0:ufs:20974140
s10be:-:/dev/dsk/c0t2d0s1:swap:4194828
s10be:/:/dev/dsk/c0t2d0s0:ufs:20974140
s10be:/global/.devices/node@1:/dev/dsk/c0t1d0s6:ufs:1048707
-------------------------------------------------------------
cd /net/sunray/export/seif_unzipped/opt/ses/software/Solaris10U3/
nohup luupgrade -u -n s10be -s /net/sunray/export/seif_unzipped/opt/ses/software/Solaris10U3/
----------------------------------------------------------------
cd /net/sunray/export/seif_unzipped/opt/ses/software/Solaris10U3/Solaris_10/Tools/Installers/
----------------------------------------------------------------
# scstat -W

-- Cluster Transport Paths --

Endpoint Endpoint Status
-------- -------- ------
Transport path: node14:qfe1 node18:qfe1 Path online
Transport path: node14:qfe2 node18:qfe2 faulted
----------------------------------------------------------------
bash-2.05# cat /etc/cluster/nodeid
1
----------------------------------------------------------------
bash-2.05# cd /usr/cluster/lib/rgm/
bash-2.05# ls
rt rtreg rtsupport
bash-2.05# find .
-------------------------------------------------------------------
bash-2.05# lumount -n s10be /mnt
/mnt
bash-2.05# ./installer -noconsole -nodisplay -altroot /mnt -state /var/tmp/stateFile
bash-2.05# cd ..
bash-2.05# cd Solaris_sparc/
bash-2.05# cd Product/sun_cluster/Solaris_10/Tools/
bash-2.05# ls
defaults dot.release lib locale scinstall
bash-2.05# ./scinstall -R /mnt/ -u update


.
./rt
./rt/haevent
./rt/haevent/event_monitor_check
./rt/haevent/event_monitor_start
./rt/haevent/event_monitor_stop
./rt/haevent/event_probe
./rt/haevent/event_svc_start
./rt/haevent/event_svc_stop
./rt/haevent/event_update
./rt/haevent/event_validate
./rt/hafoip
./rt/hafoip/hafoip_ipmp_callback
./rt/hafoip/hafoip_monitor_check
./rt/hafoip/hafoip_monitor_start
./rt/hafoip/hafoip_monitor_stop
./rt/hafoip/hafoip_prenet_start
./rt/hafoip/hafoip_retry
./rt/hafoip/hafoip_start
./rt/hafoip/hafoip_stop
./rt/hafoip/hafoip_update
./rt/hafoip/hafoip_validate
./rt/hascip
./rt/hascip/hascip_boot
./rt/hascip/hascip_fini
./rt/hascip/hascip_init
./rt/hascip/hascip_ipmp_callback
./rt/hascip/hascip_monitor_check
./rt/hascip/hascip_monitor_start
./rt/hascip/hascip_monitor_stop
./rt/hascip/hascip_prenet_start
./rt/hascip/hascip_retry
./rt/hascip/hascip_start
./rt/hascip/hascip_stop
./rt/hascip/hascip_update
./rt/hascip/hascip_validate
./rt/hastorage
./rt/hastorage/hastorage_monitor_check
./rt/hastorage/hastorage_monitor_start
./rt/hastorage/hastorage_monitor_stop
./rt/hastorage/hastorage_prenet_start
./rt/hastorage/hastorage_start
./rt/hastorage/hastorage_stop
./rt/hastorage/hastorage_validate
./rt/hastorageplus
./rt/hastorageplus/hastorageplus_monitor_check
./rt/hastorageplus/hastorageplus_monitor_start
./rt/hastorageplus/hastorageplus_monitor_stop
./rt/hastorageplus/hastorageplus_postnet_stop
./rt/hastorageplus/hastorageplus_prenet_start
./rt/hastorageplus/hastorageplus_prenet_start_private
./rt/hastorageplus/hastorageplus_start
./rt/hastorageplus/hastorageplus_stop
./rt/hastorageplus/hastorageplus_update
./rt/hastorageplus/hastorageplus_update_private
./rt/hastorageplus/hastorageplus_validate
./rt/hastorageplus/hastorageplus_validate_private
./rt/rgoffload
./rt/rgoffload/rgofl_monitor_start
./rt/rgoffload/rgofl_monitor_stop
./rt/rgoffload/rgofl_probe
./rt/rgoffload/rgofl_svc_start
./rt/rgoffload/rgofl_svc_stop
./rt/rgoffload/rgofl_update
./rt/rgoffload/rgofl_validate
./rt/hamasa
./rt/hamasa/cmas_service_ctrl_check
./rt/hamasa/cmas_service_ctrl_start
./rt/hamasa/cmas_service_ctrl_stop
./rt/hamasa/scmasa_monitor_check
./rt/hamasa/scmasa_monitor_start
./rt/hamasa/scmasa_monitor_stop
./rt/hamasa/scmasa_probe
./rt/hamasa/scmasa_svc_start
./rt/hamasa/scmasa_svc_stop
./rt/hamasa/scmasa_update
./rt/hamasa/scmasa_validate
./rtreg
./rtreg/SUNW.Event
./rtreg/SUNW.HAStorage
./rtreg/SUNW.HAStoragePlus
./rtreg/SUNW.LogicalHostname
./rtreg/SUNW.RGOffload
./rtreg/SUNW.SharedAddress
./rtreg/SUNW.gds
./rtreg/SUNW.scmasa
./rtsupport
-----------------------------------------------------------------------------
bash-2.05# pwd
/usr/cluster/lib/rgm/rt
bash-2.05# ls
haevent hamasa hastorage rgoffload
hafoip hascip hastorageplus

bash-2.05# cd /etc/cluster/ccr/
bash-2.05# ls
dcs_service_1 dcs_service_keys
dcs_service_1.bak dcs_service_keys.bak
dcs_service_10 did_instances
dcs_service_10.bak did_instances.bak
dcs_service_2 did_types
dcs_service_2.bak did_types.bak
dcs_service_3 directory
dcs_service_3.bak directory.bak
dcs_service_4 dpm_status_table
dcs_service_4.bak dpm_status_table.bak
dcs_service_5 epoch
dcs_service_5.bak epoch.bak
dcs_service_6 infrastructure
dcs_service_6.bak infrastructure.bak
dcs_service_7 rgm_rt_SUNW.LogicalHostname
dcs_service_7.bak rgm_rt_SUNW.LogicalHostname:2
dcs_service_8 rgm_rt_SUNW.LogicalHostname:2.bak
dcs_service_8.bak rgm_rt_SUNW.SharedAddress
dcs_service_9 rgm_rt_SUNW.SharedAddress:2
dcs_service_9.bak rgm_rt_SUNW.SharedAddress:2.bak
dcs_service_classes rgm_vp_version
dcs_service_classes.bak rgm_vp_version.bak
-----------------------------------------------------------------------------
scrgadm -a -t SUNW.gds
clrt register rgs
-----------------------------------------------------------------------------
scrgadm -r -t gds
-----------------------------------------------------------------------------
scdidadm -L
metadb -a -c 3 -f

root( c0t1d0 )
metadb -a -c 3 -f c0t1d0s7

root@blade-02 # scdidadm -L
1 u60-10:/dev/rdsk/c0t0d0 /dev/did/rdsk/d1
2 u60-10:/dev/rdsk/c0t1d0 /dev/did/rdsk/d2
3 u60-10:/dev/rdsk/c5t9d0 /dev/did/rdsk/d3
3 blade-02:/dev/rdsk/c4t9d0 /dev/did/rdsk/d3
4 u60-10:/dev/rdsk/c5t10d0 /dev/did/rdsk/d4
4 blade-02:/dev/rdsk/c4t10d0 /dev/did/rdsk/d4
5 u60-10:/dev/rdsk/c5t11d0 /dev/did/rdsk/d5
5 blade-02:/dev/rdsk/c4t11d0 /dev/did/rdsk/d5
6 blade-02:/dev/rdsk/c0t1d0 /dev/did/rdsk/d6
7 blade-02:/dev/rdsk/c0t2d0 /dev/did/rdsk/d7

root@blade-02 # scdidadm -L | grep c4t11d0
5 blade-02:/dev/rdsk/c4t11d0 /dev/did/rdsk/d5


root@blade-02 # cat /etc/hosts
#
# Internet host table
#
::1 localhost
127.0.0.1 localhost
192.168.217.108 blade-02 loghost blade-02.
192.168.217.120 u60-10

root@blade-02 # scdidadm -L | grep c4t11d0

root@blade-02 # metaset -s Z1-DS -a -h blade-02 u60-10
root@blade-02 # metaset -s Z1-DS -a -m blade-02 u60-10
root@blade-02 # metaset -s Z1-DS -a /dev/did/rdsk/d5
root@blade-02 # metaset -s Z1-DS

Set name = Z1-DS, Set number = 1

Host Owner
blade-02 Yes
u60-10

Mediator Host(s) Aliases
blade-02
u60-10

Driv Dbase

d5 Yes

root@blade-02 # metainit -s Z1-DS d1 1 1 /dev/did/rdsk/d5s0
Z1-DS/d1: Concat/Stripe is setup

root@blade-02 # newfs /dev/md/Z1-DS/dsk/d1
root@blade-02 # mount /dev/md/Z1-DS/dsk/d1 /share/

----------------------------------------------------------


1. Create diskset
# metaset -s Z1-DS -a -h NODE1 NODE2
2. Add the same nodes as diskset mediators to each diskset.
# metaset -s Z1-DS -a -m NODE1 NODE2
3. Add the disk(s) chosen previously to the Z1-DS diskset.
# metaset -s Z1-DS -a /dev/did/rdsk/dx [/dev/did/rdsk/dy]
4. Verify the status of the new disksets.
# metaset -s Z1-DS
# medstat -s Z1-DS
5. #check metaset disk for a slice 0 and put it into a metadevice
# metainit -s Z1-DS d1 1 1 /dev/did/rdsk/d#s0
6. Add a filesystem to the metadevice
# newfs ...
7. On both nodes, create mountpoints and vfstab entries
/etc/vfstab
/dev/md/Z1-DS/dsk/d1 /dev/md/Z1-DS/rdsk/d1 /Mountpoint ufs 2 no -



----------------------------------------------------------

root@blade-02 # cluster show| grep -i name
----------------------------------------------------------
root@u60-02 # eeprom | grep local
local-mac-address?=true

root@u60-02 # cat /etc/hostname.hme0
u60-02 group sc_ipmp0 -failover

root@u60-02 # cat /etc/hosts
#
# Internet host table
#
::1 localhost
127.0.0.1 localhost
192.168.217.112 u60-02 loghost u60-02.
192.168.217.122 u60-12
------------------------------------------------------------
root@blade-02 # cat /etc/hostname.*
eri0-test deprecated -failover group sc_ipmp0 up
addif blade-02 up
qfe0-test deprecated -failover group sc_ipmp0 up
------------------------------------------------------------

root@u60-10 # cldg list
Z1-DS

root@u60-10 # cldg show

=== Device Groups ===

Device Group Name: Z1-DS
Type: SVM
failback: false
Node List: blade-02, u60-10
preferenced: true
numsecondaries: 1
diskset name: Z1-DS


root@u60-10 # cldg switch -n u60-10 Z1-DS
------------------------------------------------------------
root@blade-02 # clresourcetype register SUNW.HAStoragePlus
root@blade-02 # clresourcegroup create Z1-DS
root@blade-02 # clresource create -g Z1-DS -t SUNW.HAStoragePlus -p FilesystemMountPoints=/zone01 zone01
root@blade-02 # clrg show

=== Resource Groups and Resources ===

Resource Group: Z1-DS
RG_description: <NULL>
RG_mode: Failover
RG_state: Unmanaged
Failback: False
Nodelist: u60-10 blade-02

--- Resources for Group Z1-DS ---

Resource: zone01
Type: SUNW.HAStoragePlus:6
Type_version: 6
Group: Z1-DS
R_description:
Resource_project_name: default
Enabled{u60-10}: True
Enabled{blade-02}: True
Monitored{u60-10}: True
Monitored{blade-02}: True

root@blade-02 # clresourcegroup online -M Z1-DS

root@blade-02 # clrg show

=== Resource Groups and Resources ===

Resource Group: Z1-DS
RG_description: <NULL>
RG_mode: Failover
RG_state: Managed
Failback: False
Nodelist: u60-10 blade-02

--- Resources for Group Z1-DS ---

Resource: zone01
Type: SUNW.HAStoragePlus:6
Type_version: 6
Group: Z1-DS
R_description:
Resource_project_name: default
Enabled{u60-10}: True
Enabled{blade-02}: True
Monitored{u60-10}: True
Monitored{blade-02}: True

root@blade-02 # cldg switch -n blade-02 Z1-DS
root@blade-02 # clresource status

=== Cluster Resources ===

Resource Name Node Name State Status Message
------------- --------- ----- --------------
zone01 u60-10 Online Online
blade-02 Offline Offline


root@blade-02 # cldg show

=== Device Groups ===

Device Group Name: Z1-DS
Type: SVM
failback: false
Node List: blade-02, u60-10
preferenced: true
numsecondaries: 1
diskset name: Z1-DS

-----------------------------------------------------------

root@blade-02 # clrg switch -n blade-02 Z1-DS
------------------------------------------------------------
182 cd /mnt/Solaris_sparc/Product/sun_cluster_agents/Solaris_10/Packages/
183 ls -la | grep zone
184 pkgadd -d . SUNWsczone
---------------------------------------------------------------
root@u60-10 # clrt register SUNW.gds
root@u60-10 # clrt list
SUNW.LogicalHostname:3
SUNW.SharedAddress:2
SUNW.HAStoragePlus:6
SUNW.gds:6
--------------------------------------------------------------
root@u60-10 # ls /opt/SUNWsczone/sczbt/bin/
clear_zone functions probe_sczbt start_sczbt stop_sczbt validate_sczbt
--------------------------------------------------------------
root@u60-10 # cd /opt/SUNWsczone/sczbt/util/
root@u60-10 # cp sczbt_config sczbt_config.bak
--------------------------------------------------------------
root@u60-10 # ./sczbt_register -f /zone01/paramdir/sczbt_config
sourcing /zone01/paramdir/sczbt_config
Registration of resource zone01-rs succeeded.
Validation of resource zone01-rs succeeded.
---------------------------------------------------------------
root@u60-10 # cat /zone01/paramdir/sczbt_config | grep -v "^#"


RS=zone01-rs
RG=Z1-DS
PARAMETERDIR=/zone01/paramdir
SC_NETWORK=false
SC_LH=
FAILOVER=true
HAS_RS=zone01


Zonename="zone01"
Zonebrand="native"
Zonebootopt=""
Milestone="multi-user-server"
LXrunlevel="3"
SLrunlevel="3"
Mounts=""
-----------------------------------------------------------------
root@u60-10 # scstat -g

-- Resource Groups and Resources --

Group Name Resources
---------- ---------
Resources: Z1-DS zone01 zone01-rs


-- Resource Groups --

Group Name Node Name State Suspended
---------- --------- ----- ---------
Group: Z1-DS u60-10 Online No
Group: Z1-DS blade-02 Offline No


-- Resources --

Resource Name Node Name State Status Message
------------- --------- ----- --------------
Resource: zone01 u60-10 Online Online
Resource: zone01 blade-02 Offline Offline

Resource: zone01-rs u60-10 Offline Offline
Resource: zone01-rs blade-02 Offline Offline
-------------------------------------------------------------------
root@u60-10 # clrs enable zone01-rs
root@u60-10 # scstat -g

-- Resource Groups and Resources --

Group Name Resources
---------- ---------
Resources: Z1-DS zone01 zone01-rs


-- Resource Groups --

Group Name Node Name State Suspended
---------- --------- ----- ---------
Group: Z1-DS u60-10 Online No
Group: Z1-DS blade-02 Offline No


-- Resources --

Resource Name Node Name State Status Message
------------- --------- ----- --------------
Resource: zone01 u60-10 Online Online
Resource: zone01 blade-02 Offline Offline

Resource: zone01-rs u60-10 Online Online
Resource: zone01-rs blade-02 Offline Offline
----------------------------------------------------------------------------
root@u60-10 # bootadm update-archive
updating /platform/sun4u/boot_archive
-------------------------------------------------------------------



[ add comment ]   |  [ 0 trackbacks ]   |  permalink
Unique GPU benchmarking tool, the Unigine Tropics Tech Demo 
http://unigine.com/download/

[ add comment ]   |  [ 0 trackbacks ]   |  permalink
Setting up LVM on three SCSI disks with striping 
# pvcreate /dev/sda
# pvcreate /dev/sdb
# pvcreate /dev/sdc

# vgcreate my_volume_group /dev/sda /dev/sdb /dev/sdc

# vgdisplay

# lvcreate -i3 -I4 -L1G -nmy_logical_volume my_volume_group


[ 6 comments ]   |  [ 0 trackbacks ]   |  permalink
Some RedHat cluster commands unsorted 
Web frontend for cluster (when xen cluster kernel)
yum install luci ricci
luci_admin init
service luci restart


GTK+ tool for cluster configuration (/etc/cluster/cluster.conf)
yum install system-config-cluster
system-config-cluster


Resource Group Manager
yum install cman rgmanager
rpm -qil cman | grep ccsd


Global Filesystem
yum list \*gfs\*
yum install gfs2-utils kmod-gfs2-xen
mkfs.gfs2 -j 2 -p lock_dlm -t new_cluster:gfs /dev/sdb1


Cluster monitor
service cman start
clusvcadm status
clusvcadm -l
cman status
cman_tool status
clustat
service dlm status
cman_tool nodes


Cluster config XML
<?xml version="1.0" ?>
<cluster config_version="3" name="new_cluster">
<fence_daemon post_fail_delay="0" post_join_delay="3"/>
<clusternodes>
<clusternode name="172.16.50.15" nodeid="1" votes="1">
<fence/>
</clusternode>
<clusternode name="172.16.50.25" nodeid="2" votes="1">
<fence/>
</clusternode>
</clusternodes>
<cman expected_votes="1" two_node="1"/>
<fencedevices>
<fencedevice agent="fence_manual" name="cloveck"/>
</fencedevices>
<rm>
<failoverdomains/>
<resources/>
<vm autostart="1" name="node1" path="/etc/xen/node1"/>
<vm autostart="1" name="node2" path="/etc/xen/node2"/>
</rm>
</cluster>


Cluster reference (RH)

[ add comment ]   |  [ 0 trackbacks ]   |  permalink
Postfix: send a copy of the e-mails sent or received by a specific user 
To back up all mail from a specific sender to user1@domain.tld: edit /etc/postfix/main.cf and add:
sender_bcc_maps = hash:/etc/postfix/sender_bcc

Then edit or create /etc/postfix/sender_bcc in the following format:
user1@domain.tld copy@domain.tld

and run postmap /etc/postfix/sender_bcc and postfix reload. The same applies when backing up a recipient's mail:
recipient_bcc_maps = hash:/etc/postfix/recipient_bcc




[ 6 comments ]   |  [ 0 trackbacks ]   |  permalink
Lionhead Milo Project - Project Natal - Xbox 360 (E3 2009) 


[ add comment ]   |  [ 0 trackbacks ]   |  permalink
Linux Find SCSI Hard Disk Model, Serial Number, Size, and Total Sectors Information 
sdparm RPM packages for Red Hat, CentOS and Fedora

To list common mode parameters of a disk, enter:
# sdparm /dev/sda
To list the designators within the device identification VPD page of a disk:
# sdparm --inquiry /dev/sdb
To see all parameters for the caching mode page:
# sdparm --page=ca /dev/sdc
To set the "Writeback Cache Enable" bit in the current values page:
# sdparm --set=WCE /dev/sda


taken: http://www.cyberciti.biz/tips/sdparm-li ... ibute.html

[ add comment ]   |  [ 0 trackbacks ]   |  permalink
Grub bootloader setup after replacing a failed drive - software RAID1 
Afterwards we must install the GRUB bootloader on the second hard drive /dev/sdb:

grub

On the GRUB shell, type in the following commands:

root (hd0,0)

grub> root (hd0,0)
Filesystem type is ext2fs, partition type 0x83

grub>

setup (hd0)

grub> setup (hd0)
Checking if "/boot/grub/stage1" exists... no
Checking if "/grub/stage1" exists... yes
Checking if "/grub/stage2" exists... yes
Checking if "/grub/e2fs_stage1_5" exists... yes
Running "embed /grub/e2fs_stage1_5 (hd0)"... 15 sectors are embedded.
succeeded
Running "install /grub/stage1 (hd0) (hd0)1+15 p (hd0,0)/grub/stage2 /grub/menu.lst"... succeeded
Done.

grub>

root (hd1,0)

grub> root (hd1,0)
Filesystem type is ext2fs, partition type 0xfd

grub>

setup (hd1)

grub> setup (hd1)
Checking if "/boot/grub/stage1" exists... no
Checking if "/grub/stage1" exists... yes
Checking if "/grub/stage2" exists... yes
Checking if "/grub/e2fs_stage1_5" exists... yes
Running "embed /grub/e2fs_stage1_5 (hd1)"... 15 sectors are embedded.
succeeded
Running "install /grub/stage1 (hd1) (hd1)1+15 p (hd1,0)/grub/stage2 /grub/menu.lst"... succeeded
Done.

grub>

quit


taken: http://www.howtoforge.com/software-raid ... an-etch-p2

Grub got deleted:

find /boot/grub/stage1 (optional)
root (hdX,Y)
setup (hd0)
quit


http://www.dedoimedo.com/computers/grub ... ocId976410

[ 6 comments ]   |  [ 0 trackbacks ]   |  permalink
Replacing a failed drive - software RAID1 
Copy partition layout from a disk to file somewhere:
sfdisk -d /dev/sda > /raidinfo/partitions.sda

Mark all the parts on the disk which will be replaced as failed:
mdadm --manage /dev/md0 --fail /dev/sdb1

Then remove all the related partitions from the raid:
mdadm --manage /dev/md0 --remove /dev/sdb1

Power down system:
shutdown -h now

After hard disk is swapped, boot the system and create the same partitioning on a new disk.
sfdisk /dev/sda < /raidinfo/partitions.sda

Then add /dev/sdb1 to /dev/md0 and other partitions as well:
mdadm --manage /dev/md0 --add /dev/sdb1

Next, install Grub boot loader on replaced disk.

[ add comment ]   |  [ 0 trackbacks ]   |  permalink
DomU Nagios plugin check 
#!/bin/bash
# Nagios plugin: check the state of a Xen domU via 'xm list'.
#
# Usage:   check_domu <domU-name>
# Exits with Nagios convention: 0=OK, 1=WARNING, 2=CRITICAL.
# Requires sudo rights for /usr/sbin/xm (see sudoers notes below).

DOMU=$1

XM_CMD="/usr/sbin/xm"

# Query 'xm list' ONCE and keep the matching line; the original re-ran
# the sudo/xm/grep pipeline for every single field.
INFO=$(sudo "$XM_CMD" list | grep -- "$DOMU")
if [ $? -ne 0 ]; then
  echo "CRITICAL: The domU $DOMU seems to be down!"
  exit 2
fi

# Column layout of 'xm list': Name ID Mem VCPUs State Time
MEM=$(echo "$INFO" | awk '{print $3}')
VCPU=$(echo "$INFO" | awk '{print $4}')
STATE=$(echo "$INFO" | awk '{print $5}' | sed 's/-//g')
TIME=$(echo "$INFO" | awk '{print $6}')

if [ "$STATE" == "d" ]; then
  echo "WARNING: $DOMU seems to be dying (Time=$TIME) (MEM=$MEM) (VCPU=$VCPU) (STATE=$STATE)"
  # BUGFIX: original exited 2 (CRITICAL) for a WARNING message; Nagios WARNING is 1.
  exit 1
elif [ "$STATE" == "p" ]; then
  echo "WARNING: $DOMU seems to be paused (Time=$TIME) (MEM=$MEM) (VCPU=$VCPU) (STATE=$STATE)"
  exit 1
elif [ "$STATE" == "c" ]; then
  echo "CRITICAL: $DOMU seems to be crashed (Time=$TIME) (MEM=$MEM) (VCPU=$VCPU) (STATE=$STATE)"
  exit 2
else
  echo "OK: $DOMU seems to be up (Time=$TIME) (MEM=$MEM) (VCPU=$VCPU) (STATE=$STATE)"
fi
exit 0
# NOTE: the original ended with a stray 'done' with no matching loop — a
# syntax error that prevented the script from running at all; removed.


the check above is running under the nagios nrpe as nagios user. therefore we need to allow nagios user to execute xm commands.
# #Defaults    requiretty <-- disable this!
### XEN
Cmnd_Alias XEN = /usr/sbin/xm

## Allows members of the users group to shutdown this system
# %users localhost=/sbin/shutdown -h now
%nagios ALL = NOPASSWD: XEN



There is no need for disabling requiretty globally:

Defaults requiretty

Much safer and tighter is to disable requiretty only for the user nagios runs as:

Defaults:nagios !requiretty


[ 7 comments ]   |  [ 0 trackbacks ]   |  permalink

<<First <Back | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | Next> Last>>