Copy the partition layout from sdb to sdc (and likewise for the remaining disks):
# sfdisk -d /dev/sdb | sfdisk /dev/sdc
Let the kernel probe the new partitions:
# partprobe
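You can check that the kernel now sees the copied partitions:
# grep 'sd[bcde]' /proc/partitions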
Create RAID5 from sd[bcde]1. The -a yes instructs mdadm to create the device file if needed, possibly allocating an unused minor number:
# mdadm -C -a yes /dev/md0 --level=5 --raid-devices=4 /dev/sdb1 \
/dev/sdc1 /dev/sdd1 /dev/sde1
Or create RAID0 (just change the --level):
# mdadm --create /dev/md0 --level=raid0 --raid-devices=4 /dev/sdb1 \
/dev/sdc1 /dev/sdd1 /dev/sde1
Add a disk to the existing raid5 array. The disk will be added as a spare:
# mdadm --add /dev/md1 /dev/sda8
To make the disk a regular part of the array (it will no longer be a spare), grow the array:
# mdadm --grow /dev/md1 --raid-devices=4
Resize the filesystem:
# resize2fs /dev/md1
Check the status of the newly created RAID:
# cat /proc/mdstat
Detailed statistics about your RAID:
# mdadm --misc --detail --test /dev/md0
Run a background monitor of your RAID setup with mdadm. Add the RAID monitoring to /etc/rc.local; the Linux box will then mail you a message when something goes wrong.
/sbin/mdadm --monitor --mail=root@localhost \
--delay=300 /dev/md0 /dev/md1 &
Or enable monitoring via the mdmonitor daemon:
# chkconfig mdmonitor on
# echo "MAILADDR root@mydomain.tld" >> /etc/mdadm.conf
# /etc/init.d/mdmonitor start
To test whether the background monitoring works, generate a test alert:
# mdadm --monitor /dev/md0 --test
Remove failed disk from the array:
# mdadm /dev/md1 -r /dev/sdc2
Put the disk back:
# mdadm /dev/md1 -a /dev/sdc2
Check /etc/mdadm.conf after building the RAID. If /etc/mdadm.conf is empty you can regenerate it:
# mdadm --detail --scan > /etc/mdadm.conf
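The regenerated file contains one ARRAY line per array, roughly like this (the UUID below is just an illustrative placeholder; exact fields vary with the mdadm version):
ARRAY /dev/md0 level=raid5 num-devices=4 UUID=xxxxxxxx:xxxxxxxx:xxxxxxxx:xxxxxxxx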
If the RAID is not visible after reboot:
# mdadm --assemble /dev/md0 /dev/sd[e-h]
or
# mdadm -A -s (-s scan)
Stop the array (e.g. an array that is rebuilding):
# mdadm --manage -S /dev/md0
Detailed info about /dev/md0:
# mdadm --detail /dev/md0
If you want mirrored root disks to be able to boot even when only the second mirror is available, you have to write the boot loader to the second disk as well. Some distros do not do that.
# cat /proc/mdstat
Personalities : [raid1]
md1 : active raid1 hdd2[1] hdc2[0]
2048192 blocks [2/2] [UU]
md2 : active raid1 hdd3[1] hdc3[0]
16546880 blocks [2/2] [UU]
md0 : active raid1 hdd1[1] hdc1[0]
20482752 blocks [2/2] [UU]
# cat /etc/grub.conf | grep "root (" | uniq
root (hd0,0)
Then run "grub" and install the boot loader on the second disk:
grub> root (hd1,0)
grub> setup (hd1)
Zero the superblock:
# mdadm --zero-superblock /dev/sdb
Filesystem benchmarking tool: bonnie++ on SourceForge; there is also a bonnie++ RPM for CentOS.
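A typical bonnie++ run against a filesystem mounted on the new array might look like this (the mount point and user are only examples):
# bonnie++ -d /mnt/raid -s 4096 -u nobody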
Scan for RAID:
[root@xen stopped_xen]# mdadm --detail --scan -vv
/dev/md0:
Version : 00.90.03
Creation Time : Fri Mar 27 16:36:52 2009
Raid Level : raid6
Array Size : 1953535744 (1863.04 GiB 2000.42 GB)
Used Dev Size : 488383936 (465.76 GiB 500.11 GB)
Raid Devices : 6
Total Devices : 6
Preferred Minor : 0
Persistence : Superblock is persistent
Update Time : Sun Mar 29 20:46:28 2009
State : active
Active Devices : 6
Working Devices : 6
Failed Devices : 0
Spare Devices : 0
Chunk Size : 64K
UUID : 78f01f82:67630b8b:5eaaf972:4ecf197a
Events : 0.17
Number Major Minor RaidDevice State
0 8 33 0 active sync /dev/sdc1
1 8 49 1 active sync /dev/sdd1
2 8 65 2 active sync /dev/sde1
3 8 81 3 active sync /dev/sdf1
4 8 97 4 active sync /dev/sdg1
5 8 113 5 active sync /dev/sdh1
Remove a RAID array (fail each member disk, remove it, then stop the array):
# mdadm --manage /dev/mdfoo --fail /dev/sdfoo
# mdadm --manage /dev/mdfoo --remove /dev/sdfoo
# mdadm --manage --stop /dev/mdfoo
An example session tearing down the raid6 array shown above:
# mdadm --detail --scan -vv
# mdadm --manage /dev/md0 --fail /dev/sdc1
# mdadm --manage /dev/md0 --fail /dev/sdd1
# mdadm --manage /dev/md0 --fail /dev/sde1
# mdadm --manage /dev/md0 --fail /dev/sdf1
# mdadm --manage /dev/md0 --fail /dev/sdg1
# mdadm --manage /dev/md0 --fail /dev/sdh1
# mdadm --detail --scan -vv
# mdadm --manage --stop /dev/md0
# mdadm --detail --scan -vv
# reboot
# cat /proc/mdstat
# ls -la /dev/md0
# mdadm --detail --scan -vv
# mdadm --remove /dev/md0
# cat /proc/mdstat
# fdisk /dev/sdd    (t82)
# fdisk /dev/sde    (t82)
# fdisk /dev/sdf    (t82)
# fdisk /dev/sdg    (t82)
# fdisk /dev/sdh    (t82)
# mdadm --zero-superblock /dev/sdc
# mdadm --zero-superblock /dev/sdc1
# mdadm --zero-superblock /dev/sdd1
# mdadm --zero-superblock /dev/sde1
# mdadm --zero-superblock /dev/sdf1
# mdadm --zero-superblock /dev/sdg1
# mdadm --zero-superblock /dev/sdh1
Minicom configuration for a serial console (9600 8N1) to a remote Solaris box:
# cat /etc/minirc.dfl
# remote Solaris
pr port /dev/ttyS0
pu baudrate 9600
pu bits 8
pu parity N
pu stopbits 1
pu minit
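With this default profile in place, starting minicom with no arguments should attach to the Solaris console on /dev/ttyS0 at 9600 8N1:
# minicom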
http://www.swim-city.com/
Here we can see a strange 'empty' filename and a filename that appears to be just 'w' (both names actually begin with the non-printable character \177):
# ls -la
-rw-r--r-- 1 root root 9538 Nov 28 10:10
-rw-r--r-- 1 root root 596 Oct 10 13:53w
Get their i-nodes:
# ls -lbi
845042 -rw-r--r-- 1 root root 9538 Nov 28 10:10 \177
845038 -rw-r--r-- 1 root root 596 Oct 10 13:53 \177w
Delete files through find:
# find . -inum 845042 -exec rm -i {} \;
# find . -inum 845038 -exec rm -i {} \;
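Alternatively, in bash the \177 character can be typed directly with ANSI-C quoting, so the same two files could be removed with:
# rm -i ./$'\177' ./$'\177'w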
The best links I've found so far:
http://www.ibm.com/developerworks/library/l-rpm1/
http://www.ibm.com/developerworks/library/l-rpm2/

Photo (image not included). Exposure: f/4.5, 4 s at ISO 400 (Sigma 15-30 at 28 mm, EOS 400D)
Mount an ISO image on Solaris using a lofi device:
# lofiadm -a /path/to/cd.iso
/dev/lofi/1
# mount -o ro -F hsfs /dev/lofi/1 /mnt
Unmount:
# umount /mnt
# lofiadm -d /dev/lofi/1
Mail servers usually have no way to verify which servers are authorized to send email for a given domain, unless the domain uses the SPF record mechanism. SPF stands for "Sender Policy Framework" (originally "Sender Permitted From"); an SPF record says who is responsible for SENDING mail for a specific domain. (Note the MX record is primarily designed to inform the world who is responsible for RECEIVING mail for a domain.)
How do we tell the world which mail servers are authorized to send for our domain? Via DNS.
The example below says: if mail claims to be from someone in ourdomain.org, it must be sent from one of the domain's MX servers or from the host listed with the a: mechanism (use a: to authorize senders other than the hosts in the regular MX records). The trailing -all means those are all the allowed servers, no exceptions.
ourdomain.org.    IN TXT "v=spf1 mx a:yetanother.mailserver.com -all"
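Once published, the record can be checked with dig; it should return the TXT record above:
# dig +short TXT ourdomain.org
"v=spf1 mx a:yetanother.mailserver.com -all"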
Some nice short reading about SPF:
http://www.openspf.org/Introduction
Official:
http://www.openspf.org/
To test SPF settings:
http://www.kitterman.com/spf/validate.html
Note: SPF data should also be published in DNS as the dedicated SPF record type. This is new and most implementations do not support it yet.
Please also consider checking SPF on your mail server when receiving mail.
Find out on which host the diskgroup is imported:
vxdisk -s list
Start all volumes in a diskgroup:
vxvol -g <dg> startall
Import/deport a diskgroup:
vxdg deport <dg>       # deport the diskgroup from this host
vxdg -C import <dg>    # -C clears the hostid
vxdg -f import <dg>    # -f forces the import
List disks/diskgroup/details
vxdisk list # shows disks
vxdg list # shows dg
vxinfo -g <dg> # show volumes in the group