Monday, December 6, 2010
Nginx (Load balancer)
I installed Nginx on Debian. I’ve written the installation document below. Please give me advice/suggestions on it. I would appreciate any kind of advice/suggestion!
= Nginx Load balancer on Debian
> aptitude install nginx
> vi /etc/nginx/sites-enabled/default
## ADD upstream for load balancer ##
upstream backend {
server 192.168.0.10 weight=5;
server 192.168.0.11 weight=2;
}
server {
listen 80;
server_name localhost;
access_log /var/log/nginx/localhost.access.log;
location / {
#root /var/www/nginx-default; ##comment out
#index index.html index.htm; ##comment out
proxy_pass http://backend; ##ADD
}
}
> /etc/init.d/nginx restart
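Before restarting, it is worth validating the file with "nginx -t". If you also want Nginx to drop a dead backend automatically, the upstream servers take failure parameters; here is a minimal sketch (the max_fails/fail_timeout values and the 192.168.0.12 backup host are assumptions, not part of the setup above):
upstream backend {
server 192.168.0.10 weight=5 max_fails=3 fail_timeout=30s;
server 192.168.0.11 weight=2 max_fails=3 fail_timeout=30s;
server 192.168.0.12 backup; ## assumed spare, used only when the others are down ##
}
> nginx -t
> /etc/init.d/nginx restart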
Windows Domain Auth with LDAP, LDAP Replication + Samba, PDC, BDC
I set up Windows domain authentication with LDAP, LDAP replication, and Samba as PDC/BDC. I’ve written the installation document below. Please give me advice/suggestions on it. I would appreciate any kind of advice/suggestion!
Windows Domain Authentication (LDAP, LDAP Replication + Samba, PDC, BDC) on Debian
Host configuration
vim /etc/hosts
127.0.0.1 ldap1.com #
192.168.24.71 ldap1.com mail.ldap1.com
192.168.24.72 ldap2.com mail.ldap2.com
The LDAP and Samba configuration below is for the PDC only.
Install the packages:
aptitude install slapd ldap-utils libldap-dev
aptitude install samba smbclient swat smbldap-tools samba-doc
aptitude install migrationtools
zcat /usr/share/doc/samba-doc/examples/LDAP/samba.schema.gz > /etc/ldap/schema/samba.schema
Apply the MD5 hash generated below to rootpw in slapd.conf.
ldap1:/etc/ldap# slappasswd -s PASSWORD -h {MD5}
{MD5}MZ9NJuPFNrXdhxuyxS4xeA==
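Note: {MD5} is an unsalted hash; slappasswd can also emit a salted {SSHA} hash, which goes into rootpw the same way (optional, not required by the steps below):
ldap1:/etc/ldap# slappasswd -s PASSWORD -h {SSHA}
(paste the resulting {SSHA}... string into rootpw)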
vim /etc/ldap/slapd.conf
include /etc/ldap/schema/samba.schema #add
suffix "dc=ldap1,dc=com" #change
rootdn "cn=admin,dc=ldap1,dc=com" #change
rootpw {MD5}k3pcIXcHEYMlTicw3RGw7w== #add
# indexes for searching
index objectClass eq
index uid,uidNumber,gidNumber,memberUid eq
index cn,mail,surname,givenname eq,subinitial
index sambaSID eq
index sambaPrimaryGroupSID eq
index sambaDomainName eq
# Access restrictions; comment out the other access directives
access to attrs=userPassword,sambaNTPassword,sambaLMPassword
by self write
by dn="cn=admin,dc=ldap1,dc=com" write
by anonymous auth
by * none
access to *
by dn="cn=admin,dc=ldap1,dc=com" write
by self write
by * read
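Once a user exists (sakai is created later in this document), these ACLs can be verified with ldapsearch: an anonymous bind must not return userPassword, while a bind as the user may read it. A quick check, assuming the sakai entry:
# anonymous: userPassword should be absent from the result
ldapsearch -x -b "dc=ldap1,dc=com" "(uid=sakai)" userPassword
# as the user: the attribute should be visible
ldapsearch -x -D "uid=sakai,ou=People,dc=ldap1,dc=com" -W -b "dc=ldap1,dc=com" "(uid=sakai)" userPassword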
Configure the LDAP server that ldapsearch talks to; when setting up the BDC, point this at the BDC's LDAP server.
vim /etc/ldap/ldap.conf
BASE dc=ldap1, dc=com #change
URI ldap://ldap1.com #change
Start LDAP:
/etc/init.d/slapd restart
Samba configuration:
vim /etc/samba/smb.conf
[global]
workgroup = MYDOMAIN
dos charset = CP932
unix charset = UTF-8
display charset = UTF-8
netbios name = PDC
security = user
domain logons = yes
domain master = yes
local master = yes
os level = 64 # Keep this high because this is the PDC; set the BDC below this value
preferred master = yes
wins support = yes
logon path =
logon home =
# Samba admin user
admin users = Administrator
passdb backend = ldapsam:ldap://ldap1.com/
ldap suffix = dc=ldap1, dc=com
ldap admin dn = cn=admin,dc=ldap1,dc=com
ldap user suffix = ou=People
ldap group suffix = ou=Groups
ldap machine suffix = ou=Computers
ldap idmap suffix = ou=People
# Synchronize Linux and Windows passwords
ldap passwd sync = yes
passwd program = /usr/sbin/smbldap-passwd %u
passwd chat = *New*password* %n\n *Retype*new*password* %n\n *all*authentication*tokens*updated*
# Needed so changes can be made from Windows management tools
add user script = /usr/sbin/smbldap-useradd -m "%u"
ldap delete dn = Yes
delete user script = /usr/sbin/smbldap-userdel "%u"
add machine script = /usr/sbin/smbldap-useradd -w "%u"
add group script = /usr/sbin/smbldap-groupadd -p "%g"
delete group script = /usr/sbin/smbldap-groupdel "%g"
add user to group script = /usr/sbin/smbldap-groupmod -m "%u" "%g"
delete user from group script = /usr/sbin/smbldap-groupmod -x "%u" "%g"
set primary group script = /usr/sbin/smbldap-usermod -g "%g" "%u"
interfaces = 192.168.24.0/24 127.0.0.1 eth0
guest account = nobody
# Place for logon scripts (e.g. syncing the clock at login)
[netlogon]
comment = Network Logon Service
path = /home/samba/netlogon
guest ok = yes
writable = no
share modes = no
# Where user profiles are stored
[profiles]
comment = Users profiles
path = /home/samba/profiles
guest ok = no
browseable = no
create mask = 0600
directory mask = 0700
#[printers]
# comment = All Printers
# browseable = no
# path = /var/spool/samba
# printable = yes
# public = no
# writable = no
# create mode = 0700
#[print$]
# comment = Printer Drivers
# path = /var/lib/samba/printers
# browseable = yes
# read only = yes
# guest ok = no
[homes]
comment = Home Directories
path = %H/samba #change
writable = yes #change
browseable = no
vfs objects = recycle #add recycle-bin (trash) settings
recycle:repository = .recycle #add
recycle:keeptree = no #add
recycle:versions = yes #add
recycle:touch = no #add
recycle:maxsize = 0 #add
recycle:exclude = *.tmp ~$* #add
load printers = no # printers are not needed
disable spoolss = yes
mkdir -p /home/samba/netlogon
mkdir -p /home/samba/profiles
chown -R nobody /home/samba
chmod 1777 /home/samba/profiles
Check the configuration with:
testparm
Create the samba directory for existing users:
mkdir /etc/skel/samba
vi mkhomedir.sh
for user in `ls /home`
do
id $user > /dev/null 2>&1
[ $? -eq 0 ] && \
[ ! -d /home/$user/samba ] && \
mkdir /home/$user/samba && \
chown $user:$user /home/$user/samba && \
echo "/home/$user/samba create"
done
sh mkhomedir.sh
vi /etc/cron.weekly/recyclewatch
#!/bin/bash
for user in `ls /home/`
do
if [ -d /home/$user/samba/.recycle ]; then
tmpwatch -f 720 /home/$user/samba/.recycle/
fi
done
chmod +x /etc/cron.weekly/recyclewatch
# Work around printer errors
touch /etc/printcap
/etc/init.d/samba start
Set up the tools that manage Samba and LDAP authentication passwords:
zcat /usr/share/doc/smbldap-tools/examples/smbldap.conf.gz > /etc/smbldap-tools/smbldap.conf
cp /usr/share/doc/smbldap-tools/examples/smbldap_bind.conf /etc/smbldap-tools/smbldap_bind.conf
Apply the SID obtained below to smbldap.conf:
net getlocalsid
vim /etc/smbldap-tools/smbldap.conf
SID="S-1-5-21-3869316386-1369744062-3351931823" #change, get sid from command as "net getlocalsid"
sambaDomain="MYDOMAIN" # change
ldapTLS="0" # change
#verify="require"
#cafile="/etc/opt/IDEALX/smbldap-tools/ca.pem"
#clientcert="/etc/opt/IDEALX/smbldap-tools/smbldap-tools.pem"
#clientkey="/etc/opt/IDEALX/smbldap-tools/smbldap-tools.key"
usersdn="ou=People,${suffix}"
hash_encrypt="CRYPT" #change
#defaultMaxPasswordAge="45"
suffix="dc=ldap1,dc=com" #change
sambaUnixIdPooldn="sambaDomainName=${sambaDomain},${suffix}" #change
userSmbHome="\\PDC\%U" #change
userProfile="\\PDC\profiles\%U" #change
userHomeDrive="Z:" #change network drive letter
mailDomain="ldap1.com" #change
If you are setting up a PDC and BDC, set slaveLDAP and masterLDAP above to the respective server IPs.
vim /etc/smbldap-tools/smbldap_bind.conf
slaveDN="cn=admin,dc=ldap1,dc=com"
slavePw="PASSWORD"
masterDN="cn=admin,dc=ldap1,dc=com"
masterPw="PASSWORD"
Populate LDAP with Samba's initial entries:
smbldap-populate
Set up the Samba Administrator. Check the admin and domain groups with:
getent passwd
getent group
Then run:
smbldap-populate -a Administrator -k 998 -m 512
smbldap-passwd Administrator
Set the Samba admin password:
smbpasswd -w PASSWORD
Register a user as follows (-a: add a Samba account, -m: create the home directory):
smbldap-useradd -a -m sakai
smbldap-passwd sakai
The user can be deleted together with their directory using -r:
smbldap-userdel -r sakai
Make the system resolve LDAP users (verify with id etc.):
apt-get install libnss-ldap
vim /etc/libnss-ldap.conf
vim /etc/nsswitch.conf
passwd: compat ldap
group: compat ldap
shadow: compat ldap
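A quick check that NSS now resolves LDAP users (sakai is the test user added earlier):
getent passwd sakai
id sakai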
If you also want to manage SSH logins etc. through LDAP, configure PAM:
apt-get install libpam-ldap
vim /etc/pam_ldap.conf
vim /etc/pam.d/common-auth
auth sufficient pam_ldap.so
vim /etc/pam.d/common-account
account sufficient pam_ldap.so
On Windows, right-click My Computer, click Change next to Domain, enter Administrator and the password you set, and join the machine to the domain.
After rebooting, log in with the username (sakai) and its password.
== LDAP replication
- slave
ssh 192.168.24.72
rm -rf /var/lib/ldap/*
vim /etc/ldap/slapd.conf
suffix "dc=ldap1,dc=com" #change
rootdn "cn=admin,dc=ldap1,dc=com" #change
rootpw PASSWORD #change
updatedn cn=admin,dc=ldap1,dc=com
updateref ldap://ldap1.com
- master
ssh 192.168.24.71
/etc/init.d/slapd stop
Copy the LDAP data to the slave:
scp /var/lib/ldap/* 192.168.24.72:/var/lib/ldap/.
vim /etc/ldap/slapd.conf
replogfile /var/lib/ldap/replog
replica uri=ldap://ldap2.com:389
binddn="cn=admin,dc=ldap1,dc=com"
bindmethod=simple credentials=PASSWORD
- slave
chown -R openldap:openldap /var/lib/ldap/
apt-get install libnss-ldap
/etc/libnss-ldap.conf
base dc=ldap1,dc=com
uri ldap://ldap2.com/ # point at the slave, for the BDC
ldap_version 3
rootbinddn cn=admin,dc=ldap1,dc=com
- master and slave
/etc/init.d/slapd start
== BDC configuration
- slave
Copy the config file from the master:
scp 192.168.24.71:/etc/samba/smb.conf /etc/samba/.
Change the following:
vim /etc/samba/smb.conf
passdb backend = ldapsam:ldap://ldap2.com/ # point at the slave
domain master = no
os level = 33 # Keep this below the PDC; Windows defaults to 32, so around 33 is fine
wins support = no
wins server = 192.168.24.71
smbpasswd -w PASSWORD
Point it at its own LDAP server:
vim /etc/ldap/ldap.conf
BASE dc=ldap1, dc=com
URI ldap://ldap2.com
Shut down the PDC, check from Windows that you can still log into the domain, and confirm in the log files here that the BDC was promoted:
cd /var/log/samba/
=============================
Other LDAP commands
The tool below migrates existing users into LDAP:
vim /usr/share/migrationtools/migrate_common.ph
$DEFAULT_MAIL_DOMAIN = "ldap1.com"; #add
$DEFAULT_BASE = "dc=ldap1,dc=com"; #add
$DEFAULT_MAIL_HOST = "mail.ldap1.com"; #add
Register the basic LDAP entries:
vi base.ldif
dn: dc=ldap1,dc=com
objectClass: dcObject
objectClass: organization
o: ldap1 Organization
dc: ldap1

dn: cn=admin,dc=ldap1,dc=com
objectClass: organizationalRole
cn: admin

dn: ou=People,dc=ldap1,dc=com
objectClass: organizationalUnit
ou: People

dn: ou=Group,dc=ldap1,dc=com
objectClass: organizationalUnit
ou: Group
ldapadd -h localhost -x -D "cn=admin,dc=ldap1,dc=com" -W -f base.ldif
ldapsearch -x -b 'dc=test,dc=com' uid=test1
ldapmodify -x -Dcn=admin,dc=test,dc=com -W -f add.ldif
vim add.ldif
dn: uid=test1,ou=People,dc=test,dc=com
changetype: modify
add: userPassword
userPassword: PASSWORD
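A password set this way can be verified independently of Samba by binding as the user; ldapwhoami returns the bound DN on success (assumes the test1 entry above):
ldapwhoami -x -D "uid=test1,ou=People,dc=test,dc=com" -w PASSWORD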
=============================
== Subversion with Apache Basic Authentication against LDAP
aptitude install subversion libapache2-svn
mkdir /var/svn
svnadmin create --fs-type fsfs /var/svn
chown -R www-data:513 /var/svn
cd /tmp
svn checkout http://localhost/svn
ln -s /etc/apache2/mods-available/authnz_ldap.load /etc/apache2/mods-enabled/authnz_ldap.load
ln -s /etc/apache2/mods-available/ldap.load /etc/apache2/mods-enabled/ldap.load
vim /etc/apache2/mods-enabled/dav_svn.conf
<Location /svn>
DAV svn
SVNPath /var/svn
AuthType Basic
AuthName "LDAP Auth"
AuthBasicProvider ldap
AuthzLDAPAuthoritative off
AuthLDAPURL ldap://test.com/ou=People,dc=test,dc=com?uid?sub?(objectclass=posixAccount)
Require valid-user
</Location>
svn checkout http://localhost/svn
svn import http://localhost/svn -m "init"
cd svn
touch test.txt
svn add test.txt
svn commit -m "up test.txt"
svn checkout http://localhost/svn/
Authentication realm: LDAP Auth
Password for 'root':
The following also works; you can add an SSH public key to authorized_keys to avoid typing a password:
svn checkout svn+ssh://localhost/var/svn
== CVS
apt-get install cvs
mkdir /var/cvs
chown -R root:513 /var/cvs
export CVSROOT=/var/cvs
cvs init
You can add an SSH public key to authorized_keys to avoid typing a password:
export CVSROOT=:ext:sakai@192.168.24.71:/var/cvs
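With CVSROOT pointing at the repository over SSH, a typical first session looks like this (myproject is an assumed module name, not something created above):
mkdir myproject && cd myproject
cvs import -m "initial import" myproject vendor start
cd .. && cvs checkout myproject && cd myproject
touch test.txt
cvs add test.txt
cvs commit -m "add test.txt"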
Labels: LDAP
Lustre
I installed Lustre. I've written the installation document below. Please give me advice/suggestions on it. I would appreciate any kind of advice/suggestion!
= Lustre on CentOS 5.3
              192.168.0.10
                [client]
                    |
     ----------------------------
     |              |            |
   [MDT]         [OST1]       [OST2]
192.168.0.11  192.168.0.12  192.168.0.13
> rpm -ivh kernel-lustre-smp-2.6.18-128.1.6.el5_lustre.1.8.0.1.x86_64.rpm
> rpm -ivh lustre-modules-1.8.0.1-2.6.18_128.1.6.el5_lustre.1.8.0.1smp.x86_64.rpm
> rpm -ivh lustre-1.8.0.1-2.6.18_128.1.6.el5_lustre.1.8.0.1smp.x86_64.rpm
> rpm -ivh lustre-client-1.8.0.1-2.6.18_128.1.6.el5_lustre.1.8.0.1smp.x86_64.rpm
> rpm -ivh lustre-client-modules-1.8.0.1-2.6.18_128.1.6.el5_lustre.1.8.0.1smp.x86_64.rpm
> rpm -ivh lustre-ldiskfs-3.0.8-2.6.18_128.1.6.el5_lustre.1.8.0.1smp.x86_64.rpm
> vim /etc/grub.conf
default=0 ########## Change to 0 ###########
timeout=5
splashimage=(hd0,0)/grub/splash.xpm.gz
hiddenmenu
title CentOS (2.6.18-128.1.6.el5_lustre.1.8.0.1smp)
root (hd0,0)
kernel /vmlinuz-2.6.18-128.1.6.el5_lustre.1.8.0.1smp ro root=/dev/VolGroup00/LogVol00
initrd /initrd-2.6.18-128.1.6.el5_lustre.1.8.0.1smp.img
title CentOS (2.6.18-128.2.1.el5)
root (hd0,0)
kernel /vmlinuz-2.6.18-128.2.1.el5 ro root=/dev/VolGroup00/LogVol00
initrd /initrd-2.6.18-128.2.1.el5.img
title CentOS (2.6.18-128.el5)
root (hd0,0)
kernel /vmlinuz-2.6.18-128.el5 ro root=/dev/VolGroup00/LogVol00
initrd /initrd-2.6.18-128.el5.img
> reboot
> vim /etc/hosts
192.168.0.11 lustre1
192.168.0.12 lustre2
192.168.0.13 lustre3
= MDT Server
> mkfs.lustre --fsname=test --mgs --mdt --device-size=262144 /tmp/mdt0
> mkdir -p /mnt/mdt0
> mount -t lustre -o loop /tmp/mdt0 /mnt/mdt0
= OST Server 1
> mkfs.lustre --fsname=test --ost --mgsnode=lustre1 --device-size=1048576 /tmp/ost0
> mkfs.lustre --fsname=test --ost --mgsnode=lustre1 --device-size=1048576 /tmp/ost1
> mkdir -p /mnt/ost0 /mnt/ost1
> mount -t lustre -o loop /tmp/ost0 /mnt/ost0
> mount -t lustre -o loop /tmp/ost1 /mnt/ost1
= OST Server 2
> mkfs.lustre --fsname=test --ost --mgsnode=lustre1 --device-size=1048576 /tmp/ost2
> mkfs.lustre --fsname=test --ost --mgsnode=lustre1 --device-size=1048576 /tmp/ost3
> mkdir -p /mnt/ost2 /mnt/ost3
> mount -t lustre -o loop /tmp/ost2 /mnt/ost2
> mount -t lustre -o loop /tmp/ost3 /mnt/ost3
= Client
> mkdir -p /mnt/test
> mount -t lustre lustre1:/test /mnt/test
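Once the client mount succeeds, the filesystem can be sanity-checked from the client with the lfs tool; lfs df lists the MDT and OSTs, and lfs setstripe/getstripe control and show striping. A small sketch (the striped_dir name and stripe count of 2 are arbitrary examples):
> lfs df -h
> mkdir /mnt/test/striped_dir
> lfs setstripe -c 2 /mnt/test/striped_dir
> dd if=/dev/zero of=/mnt/test/striped_dir/testfile bs=1M count=10
> lfs getstripe /mnt/test/striped_dir/testfile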
Labels: Distribute Storage
IOmeter on Linux
I installed IOmeter. I’ve written the installation document below. Please give me advice/suggestions on it. I would appreciate any kind of advice/suggestion!
= iometer install into CentOS5.2 64bit.
windows, iometer GUI, 192.168.1.20
linux, dynamo, 192.168.1.30
- linux
> yum install kernel-devel
> yum install gcc gcc-c++
> unzip iometer-2006_07_27.common-src.zip
> cd iometer-2006_07_27.linux.i386-bin/src/iomtr_kstat
> cp Makefile-Linux.x86_64 Makefile
> make
### ./dynamo -i iometer_computer_name -m manager_computer_name ###
> ./dynamo -i 192.168.1.20 -m 192.168.1.30
- windows
Install Iometer on Windows and run the GUI;
you can then see the Linux client in Iometer.
= iometer install into CentOS5.2 32bit
> tar xvf iometer-2006_07_27.linux.i386-bin.tgz
> cd iometer-2006_07_27.linux.i386-bin/src/iomtr_kstat
> cp Makefile-Linux26 Makefile
> vi Makefile
#KERNELSRC = /tmp/tmpwork/linux-2.6.0.xscale/
KERNELSRC = /lib/modules/`uname -r`/build/
> make
> cd ../
### ./dynamo -i iometer_computer_name -m manager_computer_name ###
> ./dynamo -i 192.168.1.20 -m 192.168.1.30
Install Iometer on Windows and run the GUI;
you can then see the Linux client in Iometer.
Labels: Analystic Tool
FreeRADIUS with LDAP
I installed FreeRADIUS with LDAP. I’ve written the installation document below. Please give me advice/suggestions on it. I would appreciate any kind of advice/suggestion!
== FreeRadius + LDAP
apt-get install freeradius freeradius-ldap
vim /etc/freeradius/radiusd.conf
modules {
ldap {
server = "ldap1.com"
#basedn = "dc=ldap1,dc=com"
basedn = "ou=People,dc=ldap1,dc=com"
filter = "(&(objectclass=posixAccount)(uid=%{Stripped-User-Name:-%{User-Name}}))"
#access_attr = "dialupAccess"
}
}
authorize {
ldap # uncomment this entry
}
authenticate {
Auth-Type LDAP {
ldap
}
}
Edit the following:
vim /etc/freeradius/users
DEFAULT Auth-Type = LDAP
Fall-Through = 1
vim /etc/freeradius/clients.conf
client 127.0.0.1/24 {
secret = testing123
shortname = localhost
}
/etc/init.d/freeradius restart
radtest sakai PASSWORD localhost 0 testing123
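radtest only exercises the localhost client; real access points each need their own client entry with a shared secret. A minimal sketch for clients.conf, assuming the APs live on 192.168.0.0/24:
client 192.168.0.0/24 {
secret = SHARED_SECRET
shortname = access-points
}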
== TLS
For TLS or TTLS, Debian's apt-get packages do not include the modules, so build from source.
apt-get install build-essential
apt-get install apt-src
apt-src update
mkdir ~/build_freeradius
cd ~/build_freeradius
apt-src install freeradius
vim ~/build_freeradius/freeradius-1.1.3/debian/rules
#buildssl=--without-rlm_eap_peap --without-rlm_eap_tls --without-rlm_eap_ttls --without-rlm_otp --without-rlm_sql_postgresql --without-snmp
#modulelist=krb5 ldap sql_mysql sql_iodbc
buildssl=--with-rlm_sql_postgresql_lib_dir=`pg_config --libdir` --with-rlm_sql_postgresql_include_dir=`pg_config --includedir`
modulelist=krb5 ldap sql_mysql sql_iodbc sql_postgresql
vim ~/build_freeradius/freeradius-1.1.3/debian/control
Source: freeradius
Build-Depends: debhelper (>= 5), libltdl3-dev, libpam0g-dev, libmysqlclient15-dev | libmysqlclient-dev, libgdbm-dev,
libldap2-dev, libsasl2-dev, libiodbc2-dev, libkrb5-dev, snmp, autotools-dev, dpatch (>= 2),
libperl-dev, libtool, dpkg-dev (>= 1.13.19), libssl-dev, libpq-dev
Build-Conflicts:
cd ~/build_freeradius/freeradius-1.1.3/debian
cat control.postgresql >> control
Add the following:
vim ~/build_freeradius/freeradius-1.1.3/debian/changelog
freeradius (1.1.3-3tls) unstable; urgency=low
* Add TLS. Closes: #403389.
-- Jun Sakai Sat, 16 Dec 2006 20:45:11 +0000
# cd ~/build_freeradius
# apt-src build freeradius
dpkg -i freeradius_1.1.3-3tls_i386.deb freeradius-ldap_1.1.3-3tls_i386.deb
Generate a private key and a certificate signing request:
% openssl req -new -newkey rsa:2048 -keyout rad-privkey.pem -out rad-req.pem
Process (sign) the certificate request:
# openssl ca -out rad-cert.pem -infiles rad-req.pem
Remove the passphrase from the private key:
# openssl rsa -in rad-privkey.pem -out rad-priv.pem
Create the DH parameter file (generated here via dsaparam):
# openssl dsaparam -out dh2048.pem 2048
The CA certificate is created when you set up the CA with OpenSSL.
Create a DER-format copy of the certificate so Windows XP can import it:
% openssl x509 -in cacert.pem -out cacert.der -outform DER
vim /etc/freeradius/eap.conf
eap {
#default_eap_type = md5
default_eap_type = peap
tls {
private_key_password = 88390LPP
private_key_file = /usr/local/RADIUS/rad-priv.pem
# If Private key & Certificate are located in
# the same file, then private_key_file &
# certificate_file must contain the same file
# name.
certificate_file = /usr/local/RADIUS/rad-cert.pem
# Trusted Root CA list
CA_file = /usr/local/CA/cacert.pem
dh_file = /usr/local/RADIUS/dh2048.pem
random_file = /dev/urandom
}
peap {
default_eap_type = mschapv2
}
mschapv2 {
}
}
Labels: RADIUS
OCFS2 + DRBD8 on CentOS 5.3
I tried the OCFS2 + DRBD8 on CentOS 5.3. I’ve written the document below.
Please give me advice/suggestions on it. I would appreciate any kind of advice/suggestion!
= OCFS2
Download the RPM packages from the OCFS2 site.
> yum -y install vte
> rpm -ivh ocfs2-tools-1.4.2-1.el5.x86_64.rpm
> rpm -ivh ocfs2console-1.4.2-1.el5.x86_64.rpm
> rpm -ivh ocfs2-2.6.18-128.2.1.el5-1.4.2-1.el5.x86_64.rpm
> vi /etc/sysconfig/o2cb
O2CB_ENABLED=true
> vi /etc/ocfs2/cluster.conf
node:
ip_port = 7777
ip_address = 10.0.0.74
number = 0
name = ocfs21
cluster = ocfs2
node:
ip_port = 7777
ip_address = 10.0.0.75
number = 1
name = ocfs22
cluster = ocfs2
cluster:
node_count = 2
name = ocfs2
== DRBD8
> yum -y install drbd82 kmod-drbd82
> vim /etc/drbd.conf
global {
usage-count yes;
}
common {
syncer {
rate 100M;
al-extents 257;
}
}
resource r0 {
protocol C;
startup {
become-primary-on both; ### For Primary/Primary ###
degr-wfc-timeout 60;
wfc-timeout 30;
}
disk {
on-io-error detach;
}
net {
allow-two-primaries; ### For Primary/Primary ###
cram-hmac-alg sha1;
shared-secret "FooFunFactory";
after-sb-0pri discard-zero-changes;
after-sb-1pri violently-as0p;
after-sb-2pri violently-as0p;
}
on ocfs21 {
device /dev/drbd0;
disk /dev/sdc;
address 10.0.0.81:7788;
meta-disk /dev/sdb[0];
}
on ocfs22 {
device /dev/drbd0;
disk /dev/sdc;
address 10.0.0.82:7788;
meta-disk /dev/sdb[0];
}
}
=Create Metadata
> dd if=/dev/zero of=/dev/sda3 bs=1M count=1024
> drbdadm create-md r0
> /etc/init.d/drbd stop
> /etc/init.d/drbd start
=Make them Primary/Primary
> drbdsetup /dev/drbd0 primary -o
> cat /proc/drbd
version: 8.3.0 (api:88/proto:86-89)
GIT-hash: 9ba8b93e24d842f0dd3fb1f9b90e8348ddb95829 build by ivoks@ubuntu, 2009-01-17 07:49:56
0: cs:Connected ro:Primary/Primary ds:UpToDate/Diskless C r---
ns:0 nr:0 dw:0 dr:0 al:0 bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:b oos:4883760
> mkfs.ocfs2 /dev/drbd0
> /etc/init.d/o2cb start
> mount -t ocfs2 /dev/drbd0 /data
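The cluster is only doing its job once both nodes have the volume mounted. A quick check on the second node, assuming /data exists on both ocfs21 and ocfs22:
### on ocfs22 ###
> /etc/init.d/o2cb status
> mkdir -p /data
> mount -t ocfs2 /dev/drbd0 /data
> touch /data/hello-from-ocfs22   ### should appear on ocfs21 as well ###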
Labels: Cluster
Parascale for huge storage
I installed Parascale. I’ve written the installation document below. Please give me advice/suggestions on it. I would appreciate any kind of advice/suggestion!
Console server : 192.168.0.10, 172.16.0.10 + 1LUN
Storage server1 : 192.168.0.1, 172.16.0.1 + 1LUN
Storage server2 : 192.168.0.2, 172.16.0.2 + 1LUN
Storage server3 : 192.168.0.3, 172.16.0.3 + 1LUN
Virtual ip: 192.168.0.10
External pool: 192.168.10.20-23
Internal Pool: 172.16.0.
= install console server
> mkdir /tmp/pscl
> cd /tmp/pscl
> tar zxvf pcs_v1.3.0-r4788-64bit.tgz
> ./pcsinstall -c
> vi /root/.bash_profile
export PATH=$PATH:/opt/pscl/vsn/bin
export MANPATH=$MANPATH:/opt/pscl/vsn/man
> source /root/.bash_profile
= install storage server
> mkdir /tmp/pscl
> mv pcs_v1.3.0-r4788-64bit.tgz /tmp/pscl/
> cd /tmp/pscl
> tar zxvf pcs_v1.3.0-r4788-64bit.tgz
> ./pcsinstall -s
= Maintenance
access http://<console server address>/admin/
Labels: Distribute Storage
HAproxy
I installed HAproxy on Debian. I’ve written the installation document below. Please give me advice/suggestions on it. I would appreciate any kind of advice/suggestion!
== HAproxy on debian
> aptitude install haproxy
> cp /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.org
> vi /etc/haproxy/haproxy.cfg
global
log 127.0.0.1 local0
log 127.0.0.1 local1 notice
#log loghost local0 info
maxconn 4096
#chroot /usr/share/haproxy
user haproxy
group haproxy
daemon
#debug
#quiet
defaults
log global
mode http
option httplog
option dontlognull
retries 3
option redispatch
maxconn 2000
contimeout 5000
clitimeout 50000
srvtimeout 50000
# enable web-stats at /haproxy?stats
stats enable
listen webfarm 192.168.0.2:80
cookie SERVERID rewrite
balance roundrobin
# mode http
# stats enable
# stats auth someuser:somepassword
# balance roundrobin
# cookie JSESSIONID prefix
option httpclose
# option forwardfor
# option httpchk HEAD /check.txt HTTP/1.0
server webA 192.168.0.10:80 cookie A check inter 2000 rise 2 fall 5
server webB 192.168.0.11:80 cookie B check inter 2000 rise 2 fall 5
# server webA 192.168.0.10:80 cookie A
# server webB 192.168.0.11:80 cookie B
> vi /etc/default/haproxy
# Set ENABLED to 1 if you want the init script to start haproxy.
ENABLED=1
# Add extra flags here.
#EXTRAOPTS="-de -m 16"
/etc/init.d/haproxy start
## Status check
http://192.168.0.2/haproxy?stats
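The commented "option httpchk" line is worth enabling so HAProxy checks the web application itself rather than just the TCP port. A minimal sketch, assuming each backend can serve a static /check.txt:
## on each backend web server
> echo ok > /var/www/check.txt
## in /etc/haproxy/haproxy.cfg, inside the webfarm listener
option httpchk HEAD /check.txt HTTP/1.0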
Labels: Load balancer
Install GeoDjango on CentOS 64bit
== Install Packages
>vi /etc/yum.repos.d/CentOS-Base.repo
[dag]
name=Dag RPM Repository for Red Hat Enterprise Linux
baseurl=http://apt.sw.be/redhat/el$releasever/en/$basearch/dag
>wget http://dag.wieers.com/packages/RPM-GPG-KEY.dag.txt
>rpm --import RPM-GPG-KEY.dag.txt
>yum -y install python-devel gcc gcc-c++ swig ruby yum-utils libxml2 libxml2-devel
>yum -y install postgresql84 postgresql84-devel postgresql84-server postgresql84-server-python
>yum -y install gettext python-setuptools python-devel python-tz python-curl mod_python openssl-devel python-ctypes
>yum -y install gcc gcc-c++ fonts-japanese expect sudo subversion rpm-build rpm-devel
>yum -y install httpd httpd-devel mod_ssl apr-devel zlib zlib-devel python-setuptools python-psycopg2
>yum -y install ntp libevent postfix
=GEOS
>wget http://download.osgeo.org/geos/geos-3.2.2.tar.bz2
>tar -xjvf geos-3.2.2.tar.bz2
>cd geos-3.2.2
>./configure
>make
>make install
>cd ..
=Proj4
>wget http://download.osgeo.org/proj/proj-4.7.0.tar.gz
>wget http://download.osgeo.org/proj/proj-datumgrid-1.5.zip
>tar xzf proj-4.7.0.tar.gz
>cd proj-4.7.0/nad
>unzip ../../proj-datumgrid-1.5.zip
>cd ..
>./configure
>make
>make install
>cd ..
=Postgis
>wget http://postgis.refractions.net/download/postgis-1.5.1.tar.gz
>tar xzf postgis-1.5.1.tar.gz
>cd postgis-1.5.1
>./configure
>make
>make install
>cd ..
=GDAL
>wget http://download.osgeo.org/gdal/gdal-1.7.2.tar.gz
>tar xzf gdal-1.7.2.tar.gz
>cd gdal-1.7.2
>./configure
>make
>make install
>cd ..
= SQLite
>wget http://www.sqlite.org/sqlite-amalgamation-3.6.22.tar.gz
>tar xzf sqlite-amalgamation-3.6.22.tar.gz
>cd sqlite-3.6.22
>CFLAGS="-DSQLITE_ENABLE_RTREE=1" ./configure
>make
>make install
>cd ..
= SPATIALITE
>wget http://www.gaia-gis.it/spatialite/libspatialite-amalgamation-2.3.1.tar.gz
>wget http://www.gaia-gis.it/spatialite/spatialite-tools-2.3.1.tar.gz
>tar xzf libspatialite-amalgamation-2.3.1.tar.gz
>tar xzf spatialite-tools-2.3.1.tar.gz
>cd libspatialite-amalgamation-2.3.1
>./configure
>make
>make install
>cd ..
>cd spatialite-tools-2.3.1
>./configure
>make
>make install
>cd ..
= PYSQLITE2
>wget http://pysqlite.googlecode.com/files/pysqlite-2.6.0.tar.gz
>tar xzf pysqlite-2.6.0.tar.gz
>cd pysqlite-2.6.0
>vi setup.cfg
[build_ext]
#define=
include_dirs=/usr/local/include
library_dirs=/usr/local/lib
libraries=sqlite3
#define=SQLITE_OMIT_LOAD_EXTENSION
>python setup.py install
== Postgres
>service postgresql initdb
>/etc/init.d/postgresql start
>su - postgres
>wget http://geodjango.org/docs/create_template_postgis-1.5.sh
>ldd -d /usr/lib64/pgsql/postgis-1.5.so
>ln -s /usr/local/lib/libgeos_c.so.1.6.2 /usr/lib64/libgeos_c.so.1
>ln -s /usr/local/lib/libproj.so.0 /usr/lib64/libproj.so.0
>ln -s /usr/local/lib/libgdal.so.1 /usr/lib64/libgdal.so.1
>sh create_template_postgis-1.5.sh
>createdb -T template_postgis geodjango
>createuser --createdb geo
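Before building anything on top of the database, it is worth confirming PostGIS really made it into the new DB (run as the postgres user; postgis_full_version() is part of PostGIS itself):
>psql -d geodjango -c "SELECT postgis_full_version();"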
= Mod WSGI
>wget http://modwsgi.googlecode.com/files/mod_wsgi-3.2.tar.gz
>tar zxvf mod_wsgi-3.2.tar.gz
>cd mod_wsgi-3.2
>make
>make install
>cd ..
== Httpd.conf
>vi httpd.conf
LoadModule wsgi_module modules/mod_wsgi.so
ServerAdmin jun@127.0.0.1
ServerName 192.168.1.1
Alias /media "/home/html/geodjango/media"
WSGIScriptAlias / /home/html/geo.wsgi
DocumentRoot /home/html/geodjango
CustomLog /home/html/logs/access_log common
ErrorLog /home/html/logs/error_log
<Location "/media">
SetHandler None
ExpiresActive On
ExpiresDefault "access plus 1 week"
ExpiresByType image/gif "access plus 1 week"
ExpiresByType image/jpeg "access plus 1 week"
ExpiresByType image/png "access plus 1 week"
ExpiresByType text/css "access plus 1 week"
ExpiresByType application/x-javascript "access plus 1 week"
</Location>
== WSGI
>vi /home/html/geo.wsgi
#WSGI
import sys
import os
sys.path.append('/home/html')
os.environ['DJANGO_SETTINGS_MODULE'] = 'geodjango.settings'
os.environ['PYTHON_EGG_CACHE'] = '/tmp/.python-eggs'
import django.core.handlers.wsgi
application = django.core.handlers.wsgi.WSGIHandler()
= PG_HBA
>vi /var/lib/pgsql/data/pg_hba.conf
local all all trust
# IPv4 local connections:
host all all 127.0.0.1/32 trust
# IPv6 local connections:
host all all ::1/128 trust
>python manage.py sqlall world
>python manage.py syncdb
>python manage.py shell
=Django
>django-admin.py startproject geodjango
>cd geodjango
>python manage.py startapp world
>vim settings.py
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'geodjango',
'USER': 'geo',
}
}
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'django.contrib.gis',
'world'
)
>mkdir world/data
>cd world/data
>wget http://thematicmapping.org/downloads/TM_WORLD_BORDERS-0.3.zip
>unzip TM_WORLD_BORDERS-0.3.zip
>cd ../..
>ogrinfo world/data/TM_WORLD_BORDERS-0.3.shp
>ogrinfo -so world/data/TM_WORLD_BORDERS-0.3.shp TM_WORLD_BORDERS-0.3
= Model of Django
>vi /home/html/geodjango/world/models.py
from django.contrib.gis.db import models
class WorldBorders(models.Model):
# Regular Django fields corresponding to the attributes in the
# world borders shapefile.
name = models.CharField(max_length=50)
area = models.IntegerField()
pop2005 = models.IntegerField('Population 2005')
fips = models.CharField('FIPS Code', max_length=2)
iso2 = models.CharField('2 Digit ISO', max_length=2)
iso3 = models.CharField('3 Digit ISO', max_length=3)
un = models.IntegerField('United Nations Code')
region = models.IntegerField('Region Code')
subregion = models.IntegerField('Sub-Region Code')
lon = models.FloatField()
lat = models.FloatField()
# GeoDjango-specific: a geometry field (MultiPolygonField), and
# overriding the default manager with a GeoManager instance.
mpoly = models.MultiPolygonField()
objects = models.GeoManager()
# So the model is pluralized correctly in the admin.
class Meta:
verbose_name_plural = "World Borders"
# Returns the string representation of the model.
def __unicode__(self):
return self.name
= Python Shell
import os
from geodjango import world
world_shp = os.path.abspath(os.path.join(os.path.dirname(world.__file__), 'data/TM_WORLD_BORDERS-0.3.shp'))
from django.contrib.gis.gdal import *
ds = DataSource(world_shp)
print ds
print len(ds)
lyr = ds[0]
print lyr
print len(lyr)
srs = lyr.srs
print srs
print lyr.fields
[fld.__name__ for fld in lyr.field_types]
for feat in lyr:
print feat.get('NAME'), feat.geom.num_points
lyr[0:2]
feat = lyr[234]
print feat.get('NAME')
geom = feat.geom
print geom.wkt
= load.py
>vi /home/html/geodjango/world/load.py
import os
from django.contrib.gis.utils import LayerMapping
from models import WorldBorders
world_mapping = {
'fips' : 'FIPS',
'iso2' : 'ISO2',
'iso3' : 'ISO3',
'un' : 'UN',
'name' : 'NAME',
'area' : 'AREA',
'pop2005' : 'POP2005',
'region' : 'REGION',
'subregion' : 'SUBREGION',
'lon' : 'LON',
'lat' : 'LAT',
'mpoly' : 'MULTIPOLYGON',
}
world_shp = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data/TM_WORLD_BORDERS-0.3.shp'))
def run(verbose=True):
lm = LayerMapping(WorldBorders, world_shp, world_mapping,
transform=False, encoding='iso-8859-1')
lm.save(strict=True, verbose=verbose)
from world import load
load.run()
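After load.run() completes, the imported borders are queryable with GeoDjango's spatial lookups. A small sketch from python manage.py shell; the coordinates are just an example point (lon, lat for Tokyo):
from django.contrib.gis.geos import Point
from world.models import WorldBorders
pnt = Point(139.69, 35.69) # example point (lon, lat)
# find the border polygon that contains this point
print WorldBorders.objects.filter(mpoly__intersects=pnt)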
= Insert Geo information
>python manage.py ogrinspect world/data/TM_WORLD_BORDERS-0.3.shp WorldBorders --srid=4326 --mapping --multi
= admin.py
>vi /home/html/geodjango/world/admin.py
from django.contrib.gis import admin
from models import WorldBorders
admin.site.register(WorldBorders, admin.GeoModelAdmin)
= url.py
>vi /home/html/geodjango/urls.py
from django.conf.urls.defaults import *
from django.contrib.gis import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^admin/', include(admin.site.urls)),
)
= access admin page
http://localhost/admin
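The admin site needs a Django superuser; if syncdb did not prompt you to create one, you can add one at any time (you choose the username and password):
>python manage.py createsuperuser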
>vi /etc/yum.repos.d/CentOS-Base.repo
[dag]
name=Dag RPM Repository for Red Hat Enterprise Linux
baseurl=http://apt.sw.be/redhat/el$releasever/en/$basearch/dag
>wget http://dag.wieers.com/packages/RPM-GPG-KEY.dag.txt
>rpm --import RPM-GPG-KEY.dag.txt
>yum -y install python-devel gcc gcc-c++ swig ruby yum-utils libxml2 libxml2-devel
>yum -y install postgresql84 postgresql84-devel postgresql84-server postgresql84-server-python
>yum -y install gettext python-setuptools python-devel python-tz python-curl mod_python openssl-devel python-ctypes
>yum -y install gcc gcc-c++ fonts-japanese expect sudo subversion rpm-build rpm-devel
>yum -y install httpd httpd-devel mod_ssl apr-devel zlib zlib-devel python-setuptools python-psycopg2
>yum -y install ntp libevent postfix
=GEOS
>wget wget http://download.osgeo.org/geos/geos-3.2.2.tar.bz2
>tar -xjvf geos-3.2.2.tar.bz2
>cd geos-3.2.2
>./configure
>make
>make install
>cd ..
=Proj4
>wget http://download.osgeo.org/proj/proj-4.7.0.tar.gz
>wget http://download.osgeo.org/proj/proj-datumgrid-1.5.zip
>tar xzf proj-4.7.0.tar.gz
>cd proj-4.7.0/nad
>unzip ../../proj-datumgrid-1.5.zip
>cd ..
>./configure
>make
>make install
>cd ..
=Postgis
>wget http://postgis.refractions.net/download/postgis-1.5.1.tar.gz
>tar xzf postgis-1.5.1.tar.gz
>cd postgis-1.5.1
>./configure
>make
>make install
>cd ..
=GDAL
>wget http://download.osgeo.org/gdal/gdal-1.7.2.tar.gz
>tar xzf gdal-1.7.2.tar.gz
>cd gdal-1.7.2
>./configure
>make
>make install
>cd ..
= SQLite
>wget http://www.sqlite.org/sqlite-amalgamation-3.6.22.tar.gz
>tar xzf sqlite-amalgamation-3.6.22.tar.gz
>cd sqlite-3.6.22
CFLAGS="-DSQLITE_ENABLE_RTREE=1" ./configure
>make
>make install
>cd ..
= SPATIALITE
>wget http://www.gaia-gis.it/spatialite/libspatialite-amalgamation-2.3.1.tar.gz
>wget http://www.gaia-gis.it/spatialite/spatialite-tools-2.3.1.tar.gz
>tar xzf libspatialite-amalgamation-2.3.1.tar.gz
>tar xzf spatialite-tools-2.3.1.tar.gz
>cd libspatialite-amalgamation-2.3.1
>./configure
>make
>make install
>cd ..
>cd spatialite-tools-2.3.1
>./configure
>make
>make install
>cd ..
= PYSQLITE2
>wget http://pysqlite.googlecode.com/files/pysqlite-2.6.0.tar.gz
>tar xzf pysqlite-2.6.0.tar.gz
>cd pysqlite-2.6.0
>vi setup.cfg
[build_ext]
#define=
include_dirs=/usr/local/include
library_dirs=/usr/local/lib
libraries=sqlite3
#define=SQLITE_OMIT_LOAD_EXTENSION
>python setup.py install
== Postgres
>service postgresql initdb
>/etc/init.d/postgresql start
>su - postgres
>wget http://geodjango.org/docs/create_template_postgis-1.5.sh
>ldd -d /usr/lib64/pgsql/postgis-1.5.so
>ln -s /usr/local/lib/libgeos_c.so.1.6.2 /usr/lib64/libgeos_c.so.1
>ln -s /usr/local/lib/libproj.so.0 /usr/lib64/libproj.so.0
>ln -s /usr/local/lib/libgdal.so.1 /usr/lib64/libgdal.so.1
>sh create_template_postgis-1.5.s
>createdb -T template_postgis geodjango
>createuser --createdb geo
= Mod WSGI
>wget http://modwsgi.googlecode.com/files/mod_wsgi-3.2.tar.gz
>tar zxvf mod_wsgi-3.2.tar.gz
>cd mod_wsgi-3.2
>make
>make install
>cd ..
== Httpd.conf
>vi httpd.conf
LoadModule wsgi_module modules/mod_wsgi.so
ServerAdmin jun@127.0.0.1
ServerName 192.168.1.1
Alias /media "/home/html/geodjango/media"
WSGIScriptAlias / /home/html/geo.wsgi
DocumentRoot /home/html/geodjango
CustomLog /home/html/logs/access_log common
ErrorLog /home/html/logs/error_log
SetHandler None
SetHandler None
ExpiresActive On
ExpiresDefault "access plus 1 week"
ExpiresByType image/gif "access plus 1 week"
ExpiresByType image/jpeg "access plus 1 week"
ExpiresByType image/png "access plus 1 week"
ExpiresByType text/css "access plus 1 week"
ExpiresByType application/x-javascript "access plus 1 week"
== WSGI
>vi /home/html/geo.wsgi
#WSGI
import sys
import os
sys.path.append('/home/html')
os.environ['DJANGO_SETTINGS_MODULE'] = 'geodjango.settings'
os.environ['PYTHON_EGG_CACHE'] = '/tmp/.python-eggs'
import django.core.handlers.wsgi
application = django.core.handlers.wsgi.WSGIHandler()
= PG_HBA
>vi /var/lib/pgsql/data/pg_hba.conf
local all all trust
# IPv4 local connections:
host all all 127.0.0.1/32 trust
# IPv6 local connections:
host all all ::1/128 trust
>python manage.py sqlall world
>python manage.py syncdb
>python manage.py shell
=Django
>django-admin.py startproject geodjango
>cd geodjango
>python manage.py startapp world
>vim settings.py
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'geodjango',
'USER': 'geo',
}
}
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'django.contrib.gis',
'world'
)
>mkdir world/data
>cd world/data
>wget http://thematicmapping.org/downloads/TM_WORLD_BORDERS-0.3.zip
>unzip TM_WORLD_BORDERS-0.3.zip
>cd ../..
>ogrinfo world/data/TM_WORLD_BORDERS-0.3.shp
>ogrinfo -so world/data/TM_WORLD_BORDERS-0.3.shp TM_WORLD_BORDERS-0.3
= Model of Django
>vi /home/html/geodjango/world/model.py
from django.contrib.gis.db import models
class WorldBorders(models.Model):
# Regular Django fields corresponding to the attributes in the
# world borders shapefile.
name = models.CharField(max_length=50)
area = models.IntegerField()
pop2005 = models.IntegerField('Population 2005')
fips = models.CharField('FIPS Code', max_length=2)
iso2 = models.CharField('2 Digit ISO', max_length=2)
iso3 = models.CharField('3 Digit ISO', max_length=3)
un = models.IntegerField('United Nations Code')
region = models.IntegerField('Region Code')
subregion = models.IntegerField('Sub-Region Code')
lon = models.FloatField()
lat = models.FloatField()
# GeoDjango-specific: a geometry field (MultiPolygonField), and
# overriding the default manager with a GeoManager instance.
mpoly = models.MultiPolygonField()
objects = models.GeoManager()
# So the model is pluralized correctly in the admin.
class Meta:
verbose_name_plural = "World Borders"
# Returns the string representation of the model.
def __unicode__(self):
return self.name
= Python Shell
import os
from geodjango import world
world_shp = os.path.abspath(os.path.join(os.path.dirname(world.__file__), 'data/TM_WORLD_BORDERS-0.3.shp'))
from django.contrib.gis.gdal import *
ds = DataSource(world_shp)
print ds
print len(ds)
lyr = ds[0]
print lyr
print len(lyr)
print srs
print lyr.fields
[fld.__name__ for fld in lyr.field_types]
for feat in lyr:
print feat.get('NAME'), feat.geom.num_points
lyr[0:2]
feat = lyr[234]
print feat.get('NAME')
geom = feat.geom
print geom.wkt
= load.py
>vi /home/html/geodjango/world/load.py
import os
from django.contrib.gis.utils import LayerMapping
from models import WorldBorders
world_mapping = {
'fips' : 'FIPS',
'iso2' : 'ISO2',
'iso3' : 'ISO3',
'un' : 'UN',
'name' : 'NAME',
'area' : 'AREA',
'pop2005' : 'POP2005',
'region' : 'REGION',
'subregion' : 'SUBREGION',
'lon' : 'LON',
'lat' : 'LAT',
'mpoly' : 'MULTIPOLYGON',
}
world_shp = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data/TM_WORLD_BORDERS-0.3.shp'))
def run(verbose=True):
lm = LayerMapping(WorldBorders, world_shp, world_mapping,
transform=False, encoding='iso-8859-1')
lm.save(strict=True, verbose=verbose)
from world import load
load.run()
= Insert Geo information
>python manage.py ogrinspect world/data/TM_WORLD_BORDERS-0.3.shp WorldBorders --srid=4326 --mapping --multi
= admin.poy
>vi /home/html/geodjango/world/admin.py
from django.contrib.gis import admin
from models import WorldBorders
admin.site.register(WorldBorders, admin.GeoModelAdmin)
= url.py
>vi /home/html/geodjango/url.py
from django.conf.urls.defaults import *
from django.contrib.gis import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^admin/', include(admin.site.urls)),
)
= access admin page
http://localhost/admin
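If the load worked, a quick spatial query from the Django shell confirms the data; the WKT point (roughly Tokyo) is only an illustrative value:
> python manage.py shell
>>> from world.models import WorldBorders
>>> WorldBorders.objects.count()
>>> WorldBorders.objects.filter(mpoly__contains='POINT(139.69 35.68)')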
Labels: Geo
Cloudstore on CentOS5.3
== CloudStore on CentOS5.3
== Install Package on All server
> yum -y install gcc gcc-c++ zlib-devel e2fsprogs-devel openssl-devel xfsprogs-devel boost boost-devel
> yum -y groupinstall "Java" "Java Development"
> rpm -ivh http://dag.wieers.com/rpm/packages/log4cpp/log4cpp-0.3.5-0.rc3.el5.rf.x86_64.rpm
> rpm -ivh http://dag.wieers.com/rpm/packages/log4cpp/log4cpp-devel-0.3.5-0.rc3.el5.rf.x86_64.rpm
> rpm -ivh http://dl.atrpms.net/all/cmake-2.6.4-7.el5.x86_64.rpm
> wget http://downloads.sourceforge.net/project/kosmosfs/kosmosfs/kfs-0.3/kfs-0.3.tar.gz
> mkdir /home/kfs
> tar xzf kfs-0.3.tar.gz
> cd kfs-0.3
> mkdir build
> cd build
> cmake -D CMAKE_BUILD_TYPE=RelWithDebInfo -DCMAKE_INSTALL_PREFIX=/home/kfs/cloudstore ../
> gmake
> gmake install
> vi /etc/hosts
192.168.0.1 kfs1
192.168.0.2 kfs2
192.168.0.3 kfs3
192.168.0.4 kfs4
== On meta server
= Send public key to the other servers
> ssh-keygen -t rsa
> scp /root/.ssh/id_rsa.pub kfs1:/root/.ssh/authorized_keys
> scp /root/.ssh/id_rsa.pub kfs2:/root/.ssh/authorized_keys
> scp /root/.ssh/id_rsa.pub kfs3:/root/.ssh/authorized_keys
> scp /root/.ssh/id_rsa.pub kfs4:/root/.ssh/authorized_keys
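An equivalent shortcut, assuming openssh's ssh-copy-id is available; unlike the scp above, it appends to authorized_keys rather than overwriting it:
> for h in kfs1 kfs2 kfs3 kfs4; do ssh-copy-id root@$h; done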
= Configuration
> vi /root/work/kfs-0.3/scripts/machines.cfg
# KFS Machine configuration file
[metaserver]
node: kfs1
clusterkey: kfs-test-cluster
rundir: /home/kfs/meta
baseport: 20000
loglevel: INFO
numservers: 2
[chunkserver_defaults]
rundir: /home/kfs/chunk
chunkDir: /home/kfs/chunk/bin/kfschunk
baseport: 30000
space: 5 G
loglevel: INFO
= Configure the chunk server list
> vi /root/work/kfs-0.3/scripts/machines.txt
kfs2
kfs3
kfs4
> cp machines.txt ../webui/all-machines.txt
= Distribute setting file to other server
> python kfssetup.py -f machines.cfg -m machines.txt -b /home/kfs/cloudstore -w ../webui
= Start all services from the meta server
> cd /home/kfs/meta/scripts/
> python kfslaunch.py -f machines.cfg -m machines.txt -s
= Check the status
> /home/kfs/cloudstore/bin/tools/kfsping -m -s kfs1 -p 20000
= Stop Service
> python kfslaunch.py -f machines.cfg -m machines.txt -S
= Put Data
> echo "jun" > /root/test/jun.txt
> /home/kfs/cloudstore/bin/tools/cptokfs -s kfs1 -p 20000 -d /root/test/jun.txt -k jun.txt
= Check
> /home/kfs/cloudstore/bin/tools/kfsshell -s kfs1 -p 20000
KfsShell> ls -al
dumpster/ Aug 24 07:57 (dir)
jun.txt Aug 24 08:57 4
= Read Data
> cd /tmp
> /home/kfs/cloudstore/bin/tools/cpfromkfs -s kfs1 -p 20000 -d ./jun.txt -k jun.txt
> cat jun.txt
jun
= Confirm chunk data
> /home/kfs/cloudstore/bin/tools/kfsfileenum -s kfs1 -p 20000 -f jun.txt
0 6
216.69.70.84 30000 4
216.69.70.82 30000 4
216.69.70.83 30000 4
File size computed from chunksizes: 4
=Mount Cloudstore as FUSE
> vi /etc/yum.repos.d/CentOS-Base.repo
[dag]
name=Dag RPM Repository for Red Hat Enterprise Linux
baseurl=http://ftp.riken.jp/Linux/dag/redhat/el$releasever/en/$basearch/dag
> wget http://dag.wieers.com/packages/RPM-GPG-KEY.dag.txt
> rpm --import RPM-GPG-KEY.dag.txt
> yum -y install fuse
> vi /root/work/kfs-0.3/CMakeLists.txt
SET(Fuse_LIBRARY_DIR "/usr/lib64")
SET(Fuse_INCLUDE_DIR "/usr/include")
> cd /root/work/kfs-0.3/build
> make install
> vi /home/kfs/cloudstore/bin/kfs.prp
metaServer.name = kfs1
metaServer.port = 20000
> mkdir /tmp/kfs-fuse
> cd /home/kfs/cloudstore/bin/
> ./kfs_fuse /tmp/kfs-fuse -f &
> cd /tmp/kfs-fuse
> echo "jun2" > jun2.txt
> cat jun2.txt
jun2
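When finished, the FUSE mount can be detached (fusermount ships with the fuse package):
> cd /
> fusermount -u /tmp/kfs-fuse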
      -----------------------------------------
      |            |            |             |
 192.168.0.1  192.168.0.2  192.168.0.3  192.168.0.4
[Meta Server]   [Chunk]      [Chunk]      [Chunk]
Labels: Distribute Storage
GFS2+DRBD on Ubuntu 9.04 64Bit
I installed GFS2 and DRBD. I've written the installation document below. Please give me advice/suggestions on it. I would appreciate any kind of advice/suggestion!
== on GFS1 and GFS2
> vim /etc/hosts
10.0.0.71 GFS1
10.0.0.72 GFS2
= install DRBD8 from source
> aptitude install drbd8-utils
> aptitude install build-essential autoconf automake libtool flex libncurses-dev linux-source
> aptitude install linux-source-2.6.28
> cd /usr/src/
> tar jxvf linux-source-2.6.28.tar.bz2
> cd linux-source-2.6.28
> make mrproper
> cp /boot/config-2.6.28-13-server /usr/src/linux-source-2.6.28/.config
> make menuconfig
exit
> make prepare
> wget http://oss.linbit.com/drbd/8.3/drbd-8.3.1.tar.gz
> tar xf drbd-8.3.1.tar.gz
> cd drbd-8.3.1
> make KDIR=/usr/src/linux-source-2.6.28
> make install
> mv /lib/modules/2.6.28.9/kernel/drivers/block/drbd.ko /lib/modules/2.6.28-13-server/kernel/drivers/block/
> modprobe drbd
> echo 'drbd' >> /etc/modules
> update-rc.d drbd defaults
> lsmod | grep drbd
= Edit drbd.conf
> vim /etc/drbd.conf
global {
usage-count yes;
}
common {
syncer {
rate 100M;
al-extents 257;
}
}
resource r0 {
protocol C;
startup {
become-primary-on both; ### For Primary/Primary ###
degr-wfc-timeout 60;
wfc-timeout 30;
}
disk {
on-io-error detach;
}
net {
allow-two-primaries; ### For Primary/Primary ###
cram-hmac-alg sha1;
shared-secret "FooFunFactory";
after-sb-0pri discard-zero-changes;
after-sb-1pri violently-as0p;
after-sb-2pri violently-as0p;
}
on GFS1 {
device /dev/drbd0;
disk /dev/sda4;
address 216.69.70.71:7788;
meta-disk /dev/sda3[0];
}
on GFS2 {
device /dev/drbd0;
disk /dev/sda4;
address 216.69.70.72:7788;
meta-disk /dev/sda3[0];
}
}
=Create Metadata
> dd if=/dev/zero of=/dev/sda3 bs=1M count=256
> drbdadm create-md r0
> /etc/init.d/drbd stop
> /etc/init.d/drbd start
=Make them Primary/Primary
> drbdsetup /dev/drbd0 primary -o
> cat /proc/drbd
version: 8.3.0 (api:88/proto:86-89)
GIT-hash: 9ba8b93e24d842f0dd3fb1f9b90e8348ddb95829 build by ivoks@ubuntu, 2009-01-17 07:49:56
0: cs:Connected ro:Primary/Primary ds:UpToDate/Diskless C r---
ns:0 nr:0 dw:0 dr:0 al:0 bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:b oos:4883760
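While the initial sync runs, progress can be monitored with, e.g.:
> watch -n1 cat /proc/drbd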
= Install GFS package
> aptitude install gfs-tools cman clvm
= Configure GFS cluster. Change name= and IPADDR=.
> vim /etc/cluster/cluster.conf
<?xml version="1.0"?>
<cluster name="cluster1" config_version="3">
<cman two_node="1" expected_votes="1"/>
<clusternodes>
<clusternode name="GFS1" votes="1" nodeid="1">
<fence>
<method name="single">
<device name="manual" ipaddr="10.0.0.71"/>
</method>
</fence>
</clusternode>
<clusternode name="GFS2" votes="1" nodeid="2">
<fence>
<method name="single">
<device name="manual" ipaddr="10.0.0.72"/>
</method>
</fence>
</clusternode>
</clusternodes>
<fence_daemon clean_start="1" post_fail_delay="0" post_join_delay="3"/>
<fencedevices>
<fencedevice name="manual" agent="fence_manual"/>
</fencedevices>
</cluster>
= Change the lock type, depending on the system environment.
> vim /etc/lvm/lvm.conf
#locking_type = 1
#locking_dir = "/var/lock/lvm"
#library_dir = "/lib/lvm2"
locking_library = "liblvm2clusterlock.so"
locking_type = 2
library_dir = "/lib/lvm2"
= restart service
> /etc/init.d/cman stop
> /etc/init.d/cman start
> /etc/init.d/clvm stop
> /etc/init.d/clvm start
= Format drbd0 to GFS
> gfs2_mkfs -p lock_dlm -t cluster1:gfs -j 2 /dev/drbd0
> gfs2_fsck /dev/drbd0
= mount GFS
> mkdir /data
> mount -t gfs2 /dev/drbd0 /data   # run on both GFS1 and GFS2
> vim /etc/rc.local
sleep 5
mount -t gfs2 /dev/drbd0 /data
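As an alternative to mounting from rc.local, an fstab entry may work, assuming the boot order on your system brings drbd and cman up before local mounts; a sketch:
/dev/drbd0 /data gfs2 _netdev,defaults 0 0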
== GFS check
= GFS1
> i=0;while true; do echo aaaaaaaa,$i >> test.log ;i=`expr $i + 1`;done
> tail -f test.log
= GFS2
> i=0;while true; do echo bbbbbbbb,$i >> test.log ;i=`expr $i + 1`;done
> tail -f test.log
= If split brain happens and the nodes do not sync
#host 1 (surviving/main side)
> drbdadm connect r0
#host 2 (side whose data is discarded)
> umount /data
> drbdadm secondary r0
> drbdadm -- --discard-my-data connect r0
> drbdsetup /dev/drbd0 primary -o
Labels: Cluster
Pound with HA
== Pound with heartbeat on 64bit CentOS 5
== Install heartbeat
Run yum twice:
yum -y install heartbeat
yum -y install heartbeat
cp /usr/share/doc/heartbeat-2.1.3/ha.cf /etc/ha.d/.
cp /usr/share/doc/heartbeat-2.1.3/haresources /etc/ha.d/.
cp /usr/share/doc/heartbeat-2.1.3/authkeys /etc/ha.d/.
Assuming either the outer or the inner cable could be unplugged, monitor both interfaces with ucast.
[# test1] vi /etc/ha.d/ha.cf
logfacility local0
ucast eth0 10.1.1.3 #<------- other server's IP
ucast eth1 192.168.2.242 #<------- other server's IP
auto_failback on
node test1.com
node test2.com
respawn hacluster /usr/lib64/heartbeat/ipfail
[# test2] vi /etc/ha.d/ha.cf
logfacility local0
ucast eth0 10.1.1.2 #<------- other server's IP
ucast eth1 192.168.2.241 #<------- other server's IP
auto_failback on
node test1.com
node test2.com
respawn hacluster /usr/lib64/heartbeat/ipfail
chmod 600 /etc/ha.d/authkeys
vi /etc/ha.d/authkeys
auth 2
2 sha1 PASSWORD
Set test1.com below to whichever server should be the primary.
vi /etc/ha.d/haresources
test1.com IPaddr::10.1.1.1/24/eth0 IPaddr::192.168.2.240/24/eth1 pound
== Pound
vi /etc/yum.repos.d/CentOS-Base.repo
[dag]
name=Dag RPM Repository for Red Hat Enterprise Linux
baseurl=http://ftp.riken.jp/Linux/dag/redhat/el$releasever/en/$basearch/dag
wget http://dag.wieers.com/packages/RPM-GPG-KEY.dag.txt
rpm --import RPM-GPG-KEY.dag.txt
yum -y install pound
vi /etc/pound.cfg
User "nobody"
Group "nobody"
#RootJail "/var/pound/jail"
#Alive 60
ListenHTTP
Address 10.1.1.10
Port 80
End
Service
HeadRequire "Host: .*test.com.*"
BackEnd
Address 192.168.2.236
Port 80
Priority 5
End
BackEnd
Address 192.168.2.237
Port 80
Priority 5
End
BackEnd
Address 192.168.2.238
Port 80
Priority 5
End
End
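A quick check from the client, assuming a hostname matching the HeadRequire pattern above; repeating it should show requests spread across the backends:
curl -H 'Host: www.test.com' http://10.1.1.10/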
Heartbeat starts Pound, so leave Pound disabled at boot.
# chkconfig --list | grep pound
pound 0:off 1:off 2:off 3:off 4:off 5:off 6:off
# chkconfig --list | grep heartbeat
heartbeat 0:off 1:off 2:on 3:on 4:on 5:on 6:off
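If pound is still enabled for any runlevel, disable it so heartbeat alone controls it:
chkconfig pound off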
Start it with:
/etc/rc.d/init.d/heartbeat start
Check the IPs assigned to the interfaces with:
#ip addr show
After a configuration change, /etc/rc.d/init.d/pound reload drops existing sessions.
For the inner servers to reach the outside via NAT through the Pound hosts, the following masquerade settings are required.
*filter
:INPUT ACCEPT [0:0]
:FORWARD ACCEPT [53:4708]
:OUTPUT ACCEPT [706:49104]
-A INPUT -i lo -j ACCEPT
-A INPUT -p icmp -m icmp --icmp-type any -j ACCEPT
-A INPUT -p esp -j ACCEPT
-A INPUT -p ah -j ACCEPT
-A INPUT -d 224.0.0.251 -p udp -m udp --dport 5353 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 80 -j ACCEPT
-A INPUT -p udp -m udp --dport 631 -j ACCEPT
-A INPUT -p tcp -m tcp --dport 631 -j ACCEPT
-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
-A INPUT -j REJECT --reject-with icmp-host-prohibited
COMMIT
*nat
:PREROUTING ACCEPT [26:3695]
:POSTROUTING ACCEPT [288:17280]
:OUTPUT ACCEPT [289:17372]
-A POSTROUTING -o eth0 -j MASQUERADE
COMMIT
[client] 10.1.1.24
        |
 10.1.1.10 (VIP for web)
        |                       |
 10.1.1.2 (eth0)         10.1.1.3 (eth0)
     [Pound1]                [Pound2]
 192.168.2.241 (eth1)    192.168.20.109 (eth1)
        |                       |
 192.168.20.240 (VIP of pound gw)
        |                     |                     |
 192.168.2.236 (eth0)  192.168.2.237 (eth0)  192.168.2.238 (eth0)
     [web1]                [web2]                [web3]
Labels: Load balancer
Ultra Monkey L7 (L7 load balancer)
I installed Ultra Monkey L7. I’ve written the installation document below. Please give me advice/suggestions on it. I would appreciate any kind of advice/suggestion!
== Ultra Monkey L7 (heartbeat, l7directord)
[client] 10.1.1.24
        |
 10.1.1.10 (VIP for web)
        |                       |
 10.1.1.2 (eth0)         10.1.1.3 (eth0)
     [LVS1]                  [LVS2]
 192.168.2.241 (eth1)    192.168.20.109 (eth1)
        |                       |
 192.168.2.240 (VIP for VRRP)
        |                     |                     |
 192.168.2.236 (eth0)  192.168.2.237 (eth0)  192.168.2.238 (eth0)
     [web1]                [web2]                [web3]
 192.168.2.240 (GW)    192.168.2.240 (GW)    192.168.2.240 (GW)
== Install heartbeat
Run yum twice:
yum -y install heartbeat
yum -y install heartbeat
cp /usr/share/doc/heartbeat-2.1.3/ha.cf /etc/ha.d/.
cp /usr/share/doc/heartbeat-2.1.3/haresources /etc/ha.d/.
cp /usr/share/doc/heartbeat-2.1.3/authkeys /etc/ha.d/.
Assuming either the outer or the inner cable could be unplugged, monitor both interfaces with ucast.
[# test1] vi /etc/ha.d/ha.cf
logfacility local0
ucast eth0 10.1.1.3 #<------- other server's IP
ucast eth1 192.168.2.242 #<------- other server's IP
auto_failback on
node test1.com
node test2.com
respawn hacluster /usr/lib64/heartbeat/ipfail
[# test2] vi /etc/ha.d/ha.cf
logfacility local0
ucast eth0 10.1.1.2 #<------- other server's IP
ucast eth1 192.168.2.241 #<------- other server's IP
auto_failback on
node test1.com
node test2.com
respawn hacluster /usr/lib64/heartbeat/ipfail
chmod 600 /etc/ha.d/authkeys
vi /etc/ha.d/authkeys
auth 2
2 sha1 PASSWORD
Set test1.com below to whichever server should be the primary.
vi /etc/ha.d/haresources
test1.com IPaddr::10.1.1.10/24/eth0 IPaddr::192.168.2.240/24/eth1 l7vsd l7directord
== UltraMonkey-L7
yum -y install apr*
cpan install Crypt::SSLeay
wget http://mirror.jimbojay.com/apache/logging/log4cxx/0.10.0/apache-log4cxx-0.10.0.tar.gz
cd apache-log4cxx-0.10.0
./autogen.sh
./configure --prefix=/usr --libdir=/usr/lib64
make
make install
tar zxvf ultramonkey-l7-2.0.0-0.tar.gz
cd ultramonkey-l7-2.0.0-0
./configure
automake --add-missing --copy
./configure
make
make install
cat /etc/ha.d/conf/l7directord.cf
checktimeout=3
checkinterval=1
autoreload=yes
logfile="/var/log/l7directord.log"
quiescent=yes
virtual=10.1.1.10:80
real=192.168.2.236:80 masq 1
real=192.168.2.237:80 masq 1
real=192.168.2.238:80 masq 1
module=cinsert --cookie-name 'monkey'
scheduler=wrr
checktype=negotiate
request="index.html"
receive="ok"
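The negotiate check above expects index.html to return the string "ok"; a minimal sketch of preparing each real server, assuming the default docroot /var/www/html:
echo ok > /var/www/html/index.html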
# chkconfig --list | grep heart
heartbeat 0:off 1:off 2:on 3:on 4:on 5:on 6:off
Start it with:
/etc/rc.d/init.d/heartbeat start
Check connection status with:
l7vsadm -l
If the inner servers need to reach the outside via NAT, configure masquerading in /etc/rc.d/init.d/iptables as follows.
*nat
:PREROUTING ACCEPT [60:3408]
:POSTROUTING ACCEPT [0:0]
:OUTPUT ACCEPT [3:243]
-A POSTROUTING -o eth0 -j MASQUERADE
COMMIT
*filter
:INPUT ACCEPT [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [301:35162]
-A INPUT -i lo -j ACCEPT
-A INPUT -p icmp -m icmp --icmp-type any -j ACCEPT
-A INPUT -p esp -j ACCEPT
-A INPUT -p ah -j ACCEPT
-A INPUT -d 224.0.0.251 -p udp -m udp --dport 5353 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 80 -j ACCEPT
-A INPUT -p udp -m udp --dport 631 -j ACCEPT
-A INPUT -p tcp -m tcp --dport 631 -j ACCEPT
-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
-A INPUT -j REJECT --reject-with icmp-host-prohibited
COMMIT
== stone (SSL Acceleration)
wget http://www.gcd.org/sengoku/stone/stone-2.3e.tar.gz
tar zxvf stone-2.3e.tar.gz
cd stone-2.3d-2.3.2.7
make linux-ssl
mv stone /usr/local/bin/stone
mkdir /etc/stone/
cd /etc/stone/
openssl req -new -nodes -x509 -keyout key.pem -out cert.pem -days 36500
stone -z sid_ctx='test.com:443' -z key=/etc/stone/key.pem -z cert=/etc/stone/cert.pem 10.1.1.10:80 443/ssl
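To verify the SSL front end, assuming stone runs on the host holding the 10.1.1.10 VIP (-k skips verification of the self-signed certificate):
curl -k https://10.1.1.10/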
Labels: Load balancer
OCFS2 + DRBD8 on CentOS 5.3
I tried the OCFS2 + DRBD8 on CentOS 5.3. I’ve written the document below.
Please give me advice/suggestions on it. I would appreciate any kind of advice/suggestion!
= OCFS2
Download the RPM packages from the OCFS2 site.
> yum -y install vte
> rpm -ivh ocfs2-tools-1.4.2-1.el5.x86_64.rpm
> rpm -ivh ocfs2console-1.4.2-1.el5.x86_64.rpm
> rpm -ivh ocfs2-2.6.18-128.2.1.el5-1.4.2-1.el5.x86_64.rpm
> vi /etc/sysconfig/o2cb
O2CB_ENABLED=true
> vi /etc/ocfs2/cluster.conf
node:
ip_port = 7777
ip_address = 10.0.0.74
number = 0
name = ocfs21
cluster = ocfs2
node:
ip_port = 7777
ip_address = 10.0.0.75
number = 1
name = ocfs22
cluster = ocfs2
cluster:
node_count = 2
name = ocfs2
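With cluster.conf in place on both nodes, the o2cb stack can be loaded and checked (the init script ships with ocfs2-tools):
> /etc/init.d/o2cb load
> /etc/init.d/o2cb online ocfs2
> /etc/init.d/o2cb status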
== DRBD8
> yum -y install drbd82 kmod-drbd82
> vim /etc/drbd.conf
global {
usage-count yes;
}
common {
syncer {
rate 100M;
al-extents 257;
}
}
resource r0 {
protocol C;
startup {
become-primary-on both; ### For Primary/Primary ###
degr-wfc-timeout 60;
wfc-timeout 30;
}
disk {
on-io-error detach;
}
net {
allow-two-primaries; ### For Primary/Primary ###
cram-hmac-alg sha1;
shared-secret "FooFunFactory";
after-sb-0pri discard-zero-changes;
after-sb-1pri violently-as0p;
after-sb-2pri violently-as0p;
}
on ocfs21 {
device /dev/drbd0;
disk /dev/sdc;
address 10.0.0.81:7788;
meta-disk /dev/sdb[0];
}
on ocfs22 {
device /dev/drbd0;
disk /dev/sdc;
address 10.0.0.82:7788;
meta-disk /dev/sdb[0];
}
}
=Create Metadata
> dd if=/dev/zero of=/dev/sdb bs=1M count=1024   # zero the meta-disk (/dev/sdb in the config above)
> drbdadm create-md r0
> /etc/init.d/drbd stop
> /etc/init.d/drbd start
=Make them Primary/Primary
> drbdsetup /dev/drbd0 primary -o
> cat /proc/drbd
version: 8.3.0 (api:88/proto:86-89)
GIT-hash: 9ba8b93e24d842f0dd3fb1f9b90e8348ddb95829 build by ivoks@ubuntu, 2009-01-17 07:49:56
0: cs:Connected ro:Primary/Primary ds:UpToDate/Diskless C r---
ns:0 nr:0 dw:0 dr:0 al:0 bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:b oos:4883760
> mkfs.ocfs2 /dev/drbd0
> /etc/init.d/o2cb start
> mkdir /data
> mount -t ocfs2 /dev/drbd0 /data
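Repeat the mount on the second node; the mounts can then be confirmed with mounted.ocfs2, which ships with ocfs2-tools:
> mounted.ocfs2 -f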
Labels: Cluster
CTDB, Samba, Lustre on CentOS 5.3
I tried CTDB and Samba with Lustre on CentOS 5.3. I’ve written the document below.
Please give me advice/suggestions on it. I would appreciate any kind of advice/suggestion!
=== CTDB SAMBA Lustre on CentOS 5.3
== Lustre
> rpm -ivh kernel-lustre-smp-2.6.18-128.1.6.el5_lustre.1.8.0.1.x86_64.rpm
> rpm -ivh lustre-modules-1.8.0.1-2.6.18_128.1.6.el5_lustre.1.8.0.1smp.x86_64.rpm
> rpm -ivh lustre-1.8.0.1-2.6.18_128.1.6.el5_lustre.1.8.0.1smp.x86_64.rpm
> rpm -ivh lustre-client-1.8.0.1-2.6.18_128.1.6.el5_lustre.1.8.0.1smp.x86_64.rpm
> rpm -ivh lustre-client-modules-1.8.0.1-2.6.18_128.1.6.el5_lustre.1.8.0.1smp.x86_64.rpm
> rpm -ivh lustre-ldiskfs-3.0.8-2.6.18_128.1.6.el5_lustre.1.8.0.1smp.x86_64.rpm
> vim /etc/grub.conf
default=0 ########## Change to 0 ###########
timeout=5
splashimage=(hd0,0)/grub/splash.xpm.gz
hiddenmenu
title CentOS (2.6.18-128.1.6.el5_lustre.1.8.0.1smp)
root (hd0,0)
kernel /vmlinuz-2.6.18-128.1.6.el5_lustre.1.8.0.1smp ro root=/dev/VolGroup00/LogVol00
initrd /initrd-2.6.18-128.1.6.el5_lustre.1.8.0.1smp.img
title CentOS (2.6.18-128.2.1.el5)
root (hd0,0)
kernel /vmlinuz-2.6.18-128.2.1.el5 ro root=/dev/VolGroup00/LogVol00
initrd /initrd-2.6.18-128.2.1.el5.img
title CentOS (2.6.18-128.el5)
root (hd0,0)
kernel /vmlinuz-2.6.18-128.el5 ro root=/dev/VolGroup00/LogVol00
initrd /initrd-2.6.18-128.el5.img
> reboot
> vim /etc/hosts
192.168.0.11 ctdb1
192.168.0.12 ctdb2
192.168.0.31 lustre1
192.168.0.32 lustre2
192.168.0.33 lustre3
= MDT Server
> mkfs.lustre --fsname=test --mgs --mdt --device-size=262144 /tmp/mdt0
> mkdir -p /mnt/mdt0
> mount -t lustre -o loop /tmp/mdt0 /mnt/mdt0
= OST Server 1
> mkfs.lustre --fsname=test --ost --mgsnode=lustre1 --device-size=1048576 /tmp/ost0
> mkfs.lustre --fsname=test --ost --mgsnode=lustre1 --device-size=1048576 /tmp/ost1
> mkdir -p /mnt/ost0 /mnt/ost1
> mount -t lustre -o loop /tmp/ost0 /mnt/ost0
> mount -t lustre -o loop /tmp/ost1 /mnt/ost1
= OST Server 2
> mkfs.lustre --fsname=test --ost --mgsnode=lustre1 --device-size=1048576 /tmp/ost2
> mkfs.lustre --fsname=test --ost --mgsnode=lustre1 --device-size=1048576 /tmp/ost3
> mkdir -p /mnt/ost2 /mnt/ost3
> mount -t lustre -o loop /tmp/ost2 /mnt/ost2
> mount -t lustre -o loop /tmp/ost3 /mnt/ost3
= on ctdb1, ctdb2
> mkdir -p /mnt/test
> mount -t lustre lustre1:/test /mnt/test
== samba
> yum -y install samba
> vi /etc/samba/smb.conf
[global]
clustering = yes
idmap backend = tdb2
private dir=/data/ctdb
fileid:mapping = fsname
use mmap = no
nt acl support = yes
ea support = yes
[share]
comment = Public Stuff
path = /data/share
public = yes
writeable = yes
only guest = yes
> smbpasswd -a root
> mkdir /data/share
> chmod 777 /data/share
== CTDB
> mkdir /data/ctdb
> rsync -avz samba.org::ftp/unpacked/ctdb .
> cd ctdb
> ./autogen.sh
> ./configure
> make
> make install
> cp config/ctdb.sysconfig /etc/sysconfig/ctdb
> cp config/ctdb.init /etc/rc.d/init.d/ctdb
> vim /etc/sysconfig/ctdb
CTDB_RECOVERY_LOCK="/data/ctdb/storage"
CTDB_PUBLIC_INTERFACE=eth0
CTDB_PUBLIC_ADDRESSES=/etc/ctdb/public_addresses
CTDB_MANAGES_SAMBA=yes
ulimit -n 10000
CTDB_NODES=/etc/ctdb/nodes
CTDB_LOGFILE=/var/log/log.ctdb
CTDB_DEBUGLEVEL=2
CTDB_PUBLIC_NETWORK="192.168.0.0/24"
CTDB_PUBLIC_GATEWAY="192.168.0.1"
> ln -s /usr/local/etc/ctdb/ /etc/ctdb
> ln -s /usr/local/bin/ctdb /usr/bin/ctdb
> ln -s /usr/local/sbin/ctdbd /usr/sbin/ctdbd
> vi /etc/ctdb/public_addresses
192.168.0.21/24
192.168.0.22/24
> vi /etc/ctdb/nodes
192.168.0.11
192.168.0.12
> vim /etc/ctdb/events.d/11.route
#!/bin/sh
. /etc/ctdb/functions
loadconfig ctdb
cmd="$1"
shift
case $cmd in
takeip)
# we ignore errors from this, as the route might be up already when we're grabbing
# a 2nd IP on this interface
/sbin/ip route add $CTDB_PUBLIC_NETWORK via $CTDB_PUBLIC_GATEWAY dev $1 2> /dev/null
;;
esac
exit 0
> chmod 755 /etc/ctdb/events.d/11.route
> /etc/rc.d/init.d/ctdb start
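Once ctdb is running on both nodes, check cluster health and public IP assignment:
> ctdb status
> ctdb ip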
 VIP 192.168.0.21        VIP 192.168.0.22
 192.168.0.11            192.168.0.12
   [CTDB1]                 [CTDB2]
      |                       |
      -------------------------
      |           |           |
    [MDT]       [OST1]      [OST2]
 192.168.0.31  192.168.0.32  192.168.0.33
Labels: Cluster
LVS+KeepAlive+VRRP+DSR (Load balancer)
I installed LVS. I’ve written the installation document below. Please give me advice/suggestions on it. I would appreciate any kind of advice/suggestion!
== Basic Setting of LVS
[client] 192.168.10.24
        |
 192.168.10.11 (eth1)
   [LVS] 192.168.10.100 (VIP)
 192.168.20.108 (eth0)
        |
        -----------------------------------
        |                                 |
 192.168.20.110 (eth0)           192.168.20.111 (eth1)
     [web1]                          [web2]
Install ipvsadm and check the version:
yum -y install ipvsadm
ipvsadm -v
ipvsadm v1.24 2003/06/07 (compiled with popt and IPVS v1.2.0)
Add the virtual IP on LVS1:
ip addr add 192.168.10.100 label eth1:100 dev eth1
Reset the configuration first:
ipvsadm -C
Add the VIP; the lc (least-connection) scheduler balances across the real servers:
ipvsadm -A -t 192.168.10.100:80 -s lc
To delete it:
ipvsadm -D -t 192.168.10.100:80
Add the load-balanced real servers in NAT mode:
ipvsadm -a -t 192.168.10.100:80 -r 192.168.20.110 -m
ipvsadm -a -t 192.168.10.100:80 -r 192.168.20.111 -m
To delete one:
ipvsadm -d -t 192.168.10.100:80 -r 192.168.20.110
-g --gatewaying     Direct Server Return (DSR)
-i --ipip           IPIP encapsulation (tunneling)
-m --masquerading   NAT
Verify:
ipvsadm -Ln
Enable packet forwarding:
echo '1' > /proc/sys/net/ipv4/ip_forward
Make it persistent across reboots:
vi /etc/sysctl.conf
net.ipv4.ip_forward = 1
sysctl -p
Point the web servers' gateway at the LVS inner address, 192.168.20.108.
From the client 192.168.10.24, check the load balancing with curl:
curl http://192.168.10.100
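To watch traffic being distributed while the client sends requests:
watch -n1 ipvsadm -Ln --stats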
To remove a real server from the service:
ipvsadm -d -t 192.168.10.100:80 -r 192.168.20.110
To set the weight to 0:
ipvsadm -e -t 192.168.10.100:80 -r 192.168.20.111 -m -w 0
== Keepalived
[client] 192.168.10.24
        |
 192.168.10.11 (eth1)
   [LVS] 192.168.10.100 (VIP)
 192.168.20.108 (eth0)
        |
        ----------------------------------------------------------
        |                         |                              |
 192.168.20.110 (eth0)   192.168.20.111 (eth1)          192.168.20.113
     [web1]                   [web2]                        [sorry]
Install keepalived:
wget http://www.keepalived.org/software/keepalived-1.1.15.tar.gz
tar zxvf keepalived-1.1.15.tar.gz
cd keepalived-1.1.15
./configure
The IPVS options come out as No:
Keepalived configuration
------------------------
Keepalived version : 1.1.15
Compiler : gcc
Compiler flags : -g -O2
Extra Lib : -lpopt -lssl -lcrypto
Use IPVS Framework : No
IPVS sync daemon support : No
Use VRRP Framework : Yes
Use LinkWatch : No
Use Debug flags : No
Install the following, then configure again:
yum install kernel-devel
wget http://www.kernel.org/pub/linux/kernel/v2.6/linux-2.6.18.8.tar.gz
tar zxvf linux-2.6.18.8.tar.gz
cd linux-2.6.18.8
cp net/core/* /usr/src/kernels/2.6.18-8.el5-i686/net/core/
Run configure again:
./configure --with-kernel-dir=/usr/src/kernels/2.6.18-8.el5-i686
Keepalived configuration
------------------------
Keepalived version : 1.1.15
Compiler : gcc
Compiler flags : -g -O2
Extra Lib : -lpopt -lssl -lcrypto
Use IPVS Framework : Yes
IPVS sync daemon support : Yes
Use VRRP Framework : Yes
Use LinkWatch : Yes
Use Debug flags : No
make
make install
cp /usr/local/etc/rc.d/init.d/keepalived /etc/rc.d/init.d/.
ln -s /usr/local/sbin/keepalived /usr/sbin/.
mkdir /etc/keepalived/
cp /usr/local/etc/keepalived/keepalived.conf /etc/keepalived/.
[root@localhost sysconfig]# cat /etc/keepalived/keepalived.conf
global_defs {
notification_email {
sakai@aplogics.com
}
notification_email_from sakai@aplogics.com
smtp_server localhost
smtp_connect_timeout 30
lvs_id LVS_DEVEL
}
vrrp_instance VI_1 {
state MASTER
interface eth0
virtual_router_id 51
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.10.100
}
}
virtual_server 192.168.10.100 80 {
delay_loop 3
lb_algo lc
lb_kind NAT
nat_mask 255.255.255.0
protocol TCP
virtualhost test.com
sorry_server 192.168.20.113 80
real_server 192.168.20.110 80 {
weight 1
inhibit_on_failure
HTTP_GET {
url {
path /index.html
status_code 200
}
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
real_server 192.168.20.111 80 {
weight 1
inhibit_on_failure
HTTP_GET {
url {
path /index.html
status_code 200
}
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
}
cp /usr/local/etc/sysconfig/keepalived /etc/sysconfig/.
cat /etc/sysconfig/keepalived
KEEPALIVED_OPTIONS="-C"
Clear IPVS with ipvsadm -C, then start keepalived:
/etc/rc.d/init.d/keepalived start
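Confirm that keepalived has populated IPVS and brought up the VIP:
ipvsadm -Ln
ip addr show eth0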
== VRRP ==
[client] 192.168.10.24
        |
 192.168.10.100 (VIP for web1,2 and VRRP)
 192.168.10.101 (VIP for web3 and VRRP)
        |                          |
 192.168.10.108 (eth1)      192.168.10.109 (eth1)
     [LVS1]                     [LVS2]
 192.168.20.108 (eth0)      192.168.20.109 (eth0)
        |                          |
 192.168.20.200 (VIP for VRRP)
        |                     |                     |
 192.168.20.110 (eth0)  192.168.20.111 (eth1)  192.168.20.112
     [web1]                 [web2]                 [web3]
Set the client's gateway as follows:
route add -host 192.168.10.100 gw 192.168.10.10
[root@localhost ~]# cat /etc/sysconfig/keepalived
KEEPALIVED_OPTIONS="-D -C -P"
- Master-side LVS
[root@server1 keepalived]# cat /etc/keepalived/keepalived.conf
global_defs {
notification_email {
sakai@aplogics.com
}
notification_email_from sakai@aplogics.com
smtp_server localhost
smtp_connect_timeout 30
lvs_id LVS_DEVEL
}
vrrp_instance VI_1 {
state MASTER
interface eth0
virtual_router_id 51
priority 101 <---- raise the priority
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.10.100/24 dev eth1 label eth1:100
192.168.10.101/24 dev eth1 label eth1:101
192.168.20.200/24 dev eth0
}
}
virtual_server 192.168.10.100 80 {
delay_loop 3
lb_algo lc
lb_kind NAT
nat_mask 255.255.255.0
protocol TCP
virtualhost test.com
#sorry_server 192.168.20.109 80
real_server 192.168.20.110 80 {
weight 1
inhibit_on_failure
HTTP_GET {
url {
path /index.html
status_code 200
}
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
real_server 192.168.20.111 80 {
weight 1
inhibit_on_failure
HTTP_GET {
url {
path /index.html
status_code 200
}
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
}
virtual_server 192.168.10.101 80 {
delay_loop 3
lb_algo lc
lb_kind NAT
nat_mask 255.255.255.0
protocol TCP
virtualhost test.com
#sorry_server 192.168.20.109 80
real_server 192.168.20.112 80 {
weight 1
inhibit_on_failure
HTTP_GET {
url {
path /index.html
status_code 200
}
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
}
-- Backup-side LVS
[root@server2 keepalived]# cat /etc/keepalived/keepalived.conf
global_defs {
notification_email {
sakai@aplogics.com
}
notification_email_from sakai@aplogics.com
smtp_server localhost
smtp_connect_timeout 30
lvs_id LVS_DEVEL
}
vrrp_instance VI_1 {
state MASTER
interface eth0
virtual_router_id 51
priority 100 <---- lower the priority
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.10.100/24 dev eth1 label eth1:100
192.168.10.101/24 dev eth1 label eth1:101
192.168.20.200/24 dev eth0
}
}
virtual_server 192.168.10.100 80 {
delay_loop 3
lb_algo lc
lb_kind NAT
nat_mask 255.255.255.0
protocol TCP
virtualhost test.com
#sorry_server 192.168.20.109 80
real_server 192.168.20.110 80 {
weight 1
inhibit_on_failure
HTTP_GET {
url {
path /index.html
status_code 200
}
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
real_server 192.168.20.111 80 {
weight 1
inhibit_on_failure
HTTP_GET {
url {
path /index.html
status_code 200
}
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
}
virtual_server 192.168.10.101 80 {
delay_loop 3
lb_algo lc
lb_kind NAT
nat_mask 255.255.255.0
protocol TCP
virtualhost test.com
#sorry_server 192.168.20.109 80
real_server 192.168.20.112 80 {
weight 1
inhibit_on_failure
HTTP_GET {
url {
path /index.html
status_code 200
}
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
}
/etc/rc.d/init.d/keepalived start
Check the IPs with:
ip addr show eth1
If the inner servers need to reach the outside via NAT, configure masquerading in /etc/rc.d/init.d/iptables as follows.
*nat
:PREROUTING ACCEPT [60:3408]
:POSTROUTING ACCEPT [0:0]
:OUTPUT ACCEPT [3:243]
-A POSTROUTING -o eth0 -j MASQUERADE
COMMIT
*filter
:INPUT ACCEPT [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [301:35162]
-A INPUT -i lo -j ACCEPT
-A INPUT -p icmp -m icmp --icmp-type any -j ACCEPT
-A INPUT -p esp -j ACCEPT
-A INPUT -p ah -j ACCEPT
-A INPUT -d 224.0.0.251 -p udp -m udp --dport 5353 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 80 -j ACCEPT
-A INPUT -p udp -m udp --dport 631 -j ACCEPT
-A INPUT -p tcp -m tcp --dport 631 -j ACCEPT
-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
-A INPUT -j REJECT --reject-with icmp-host-prohibited
COMMIT
Session behavior when changing the configuration:
Setting the weight to 0 as below keeps existing sessions alive, but new sessions will no longer connect:
ipvsadm -e -t 10.1.1.10:80 -r 192.168.2.236:80 -m -w 0
After changing the configuration, apply it with:
/etc/rc.d/init.d/keepalived reload
Adding or removing servers other than those holding active sessions does not break the sessions.
However, deleting the VIP service or a real server as below drops sessions:
ipvsadm -D -t 10.1.1.10:80 -s lc
ipvsadm -d -t 10.1.1.10:80 -r 192.168.2.236:80
== DSR
route add -host 192.168.10.100 gw 192.168.10.200
- LVS
cat /etc/keepalived/keepalived.conf
virtual_ipaddress {
#192.168.10.100/24 dev eth1 label eth1:100
#192.168.10.101/24 dev eth1 label eth1:101
192.168.10.200/24 dev eth1
192.168.20.200/24 dev eth0
}
lb_algo rr
lb_kind DR
Configure netfilter to set mark 1 on packets arriving at the LVS destined for 192.168.10.100:
iptables -t mangle -A PREROUTING -d 192.168.10.100 -j MARK --set-mark 1
Verify with iptables -t mangle -n -L.
Delete with:
iptables -t mangle -D PREROUTING -d 192.168.10.100 -j MARK --set-mark 1
Make packets with mark 1 use routing table 100:
ip rule add prio 100 fwmark 1 table 100
Verify with ip rule.
Delete with:
ip rule del prio 100 fwmark 1 table 100
This does not persist across reboots.
Add a route to table 100 that sends all packets to the local device:
ip route add local 0/0 dev lo table 100
Verify with ip route list table 100.
Delete with:
ip route del local 0/0 dev lo table 100
This does not persist across reboots.
Disable rp_filter. If it stays enabled, the kernel's source address verification filters out the reply packets from the real servers.
echo 0 > /proc/sys/net/ipv4/conf/eth0/rp_filter
vi /etc/sysctl.conf
net.ipv4.conf.default.rp_filter = 0
sysctl -p
/etc/rc.d/init.d/keepalived start
- WEB Server
The destination address is rewritten to the server's own IP and the packet is handed to the local device:
iptables -t nat -A PREROUTING -d 192.168.10.100 -j REDIRECT
Verify with iptables -L -t nat.
Delete with:
iptables -t nat -D PREROUTING -d 192.168.10.100 -j REDIRECT
Point the gateway at the client:
cat /etc/sysconfig/network
NETWORKING=yes
NETWORKING_IPV6=no
HOSTNAME=node1
GATEWAY=192.168.10.24
tcpdump -n -i eth1 port 80 and host 192.168.10.24
Save the iptables settings above with:
/etc/rc.d/init.d/iptables save
== If you balance within the subnet on the same interface as the VIP, rather than across the router
echo "1" >/proc/sys/net/ipv4/ip_forward
ipvsadm -C
ipvsadm -A -t 216.69.70.80:21 -s wlc
ipvsadm -a -t 216.69.70.80:21 -r 216.69.70.81 -m
ipvsadm -a -t 216.69.70.80:21 -r 216.69.70.82 -m
ipvsadm -Ln
Point the balanced servers' gateway at the load balancer's VIP,
or use DSR as described above.
== Basic Setting of LVS
[client] 192.168.10.24 | | | 192.168.10.11 (eth1) [LVS] 192.168.10.100 (仮想IP) | 192.168.20.108 (eth0) | | | ----------------------------------- | | | | |192.168.20.110(eth0) | 192.168.20.111(eth1) [web1] [web2]yum -y install ipvsadm
ipvsadm -v
ipvsadm -v
ipvsadm v1.24 2003/06/07 (compiled with popt and IPVS v1.2.0)
LV1に仮想IPを加える。
ip addr add 192.168.10.100 label eth1:100 dev eth1
いったん設定をリセット
ipvsadm -C
VIPを追加する。 lcオプションはリアルサーバーにバランスする。
ipvsadm -A -t 192.168.10.100:80 -s lc
削除したい時は以下
ipvsadm -D -t 192.168.10.100:80
NATモードで不可分散サーバーを設定
ipvsadm -a -t 192.168.10.100:80 -r 192.168.20.110 -m
ipvsadm -a -t 192.168.10.100:80 -r 192.168.20.111 -m
削除したい時は以下
ipvsadm -d -t 192.168.10.100:80 -r 192.168.20.110
-g, --gatewaying    Direct Server Return (DSR)
-i, --ipip          IPIP encapsulation (tunneling)
-m, --masquerading  NAT
Verify:
ipvsadm -Ln
Enable packet forwarding:
echo '1' > /proc/sys/net/ipv4/ip_forward
Make it persistent across reboots:
vi /etc/sysctl.conf
net.ipv4.ip_forward = 1
sysctl -p
Set the web (real) servers' default gateway to the LVS inner address 192.168.20.108 (in NAT mode the replies must return through the director).
From the client 192.168.10.24, confirm the load balancing with curl:
curl http://192.168.10.100
To remove a real server from the virtual service:
ipvsadm -d -t 192.168.10.100:80 -r 192.168.20.110
To set a server's weight to 0 (stop new sessions going to it):
ipvsadm -e -t 192.168.10.100:80 -r 192.168.20.111 -m -w 0
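To confirm that requests are really being distributed, a quick check from the client helps. A minimal sketch, assuming web1 and web2 serve an index.html that identifies each host (that page content is an assumption, not part of the setup above):
# On the client (192.168.10.24): hit the VIP repeatedly and count
# which backend answered (assumes each backend's page names itself)
for i in $(seq 1 10); do curl -s http://192.168.10.100/; done | sort | uniq -c
# On the LVS: per-real-server packet/connection statistics
ipvsadm -Ln --stats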
== Keepalived
[client] 192.168.10.24
        |
192.168.10.11 (eth1)
[LVS]   192.168.10.100 (virtual IP)
192.168.20.108 (eth0)
        |
        +---------------------+---------------------+
        |                     |                     |
192.168.20.110 (eth0)  192.168.20.111 (eth1)  192.168.20.113
[web1]                 [web2]                 [sorry]
Install keepalived:
wget http://www.keepalived.org/software/keepalived-1.1.15.tar.gz
tar zxvf keepalived-1.1.15.tar.gz
cd keepalived-1.1.15
./configure
With a plain ./configure, IPVS support ends up disabled (No):
Keepalived configuration
------------------------
Keepalived version : 1.1.15
Compiler : gcc
Compiler flags : -g -O2
Extra Lib : -lpopt -lssl -lcrypto
Use IPVS Framework : No
IPVS sync daemon support : No
Use VRRP Framework : Yes
Use LinkWatch : No
Use Debug flags : No
Install the kernel sources so that IPVS support can be built, then configure again:
yum install kernel-devel
wget http://www.kernel.org/pub/linux/kernel/v2.6/linux-2.6.18.8.tar.gz
tar zxvf linux-2.6.18.8.tar.gz
cd linux-2.6.18.8
cp net/core/* /usr/src/kernels/2.6.18-8.el5-i686/net/core/
Run configure again, pointing at the kernel sources:
./configure --with-kernel-dir=/usr/src/kernels/2.6.18-8.el5-i686
Keepalived configuration
------------------------
Keepalived version : 1.1.15
Compiler : gcc
Compiler flags : -g -O2
Extra Lib : -lpopt -lssl -lcrypto
Use IPVS Framework : Yes
IPVS sync daemon support : Yes
Use VRRP Framework : Yes
Use LinkWatch : Yes
Use Debug flags : No
make
make install
cp /usr/local/etc/rc.d/init.d/keepalived /etc/rc.d/init.d/.
ln -s /usr/local/sbin/keepalived /usr/sbin/.
mkdir /etc/keepalived/
cp /usr/local/etc/keepalived/keepalived.conf /etc/keepalived/.
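To start keepalived at boot with the init script copied above (standard chkconfig steps on this RHEL-style system):
chkconfig --add keepalived
chkconfig keepalived on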
[root@localhost sysconfig]# cat /etc/keepalived/keepalived.conf
global_defs {
    notification_email {
        sakai@aplogics.com
    }
    notification_email_from sakai@aplogics.com
    smtp_server localhost
    smtp_connect_timeout 30
    lvs_id LVS_DEVEL
}
vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.10.100
    }
}
virtual_server 192.168.10.100 80 {
    delay_loop 3
    lb_algo lc
    lb_kind NAT
    nat_mask 255.255.255.0
    protocol TCP
    virtualhost test.com
    sorry_server 192.168.20.113 80
    real_server 192.168.20.110 80 {
        weight 1
        inhibit_on_failure
        HTTP_GET {
            url {
                path /index.html
                status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.20.111 80 {
        weight 1
        inhibit_on_failure
        HTTP_GET {
            url {
                path /index.html
                status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
cp /usr/local/etc/sysconfig/keepalived /etc/sysconfig/.
cat /etc/sysconfig/keepalived
KEEPALIVED_OPTIONS="-C"
The -C option runs only the health-check subsystem (no VRRP failover). Clear the IPVS table with ipvsadm -C, then start keepalived:
/etc/rc.d/init.d/keepalived start
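To watch the HTTP_GET health check and inhibit_on_failure react, stop the web server on one real server and check the IPVS table. A minimal sketch, assuming the real servers run httpd as an init service:
# on web1: simulate a backend failure
/etc/rc.d/init.d/httpd stop
# on the LVS: with inhibit_on_failure the failed server stays listed
# but its weight drops to 0; check the table and the syslog
ipvsadm -Ln
tail /var/log/messages
# on web1: recover, then confirm the weight is restored on the LVS
/etc/rc.d/init.d/httpd start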
== VRRP
[client] 192.168.10.24
        |
        +----------------------------------------------+
        |  192.168.10.100 (VIP for web1,2 and VRRP)    |
        |  192.168.10.101 (VIP for web3 and VRRP)      |
        |                                              |
192.168.10.108 (eth1)                   192.168.10.109 (eth1)
[LVS1]                                  [LVS2]
192.168.20.108 (eth0)                   192.168.20.109 (eth0)
        |       192.168.20.200 (VIP for VRRP)          |
        +---------------------+------------------------+
        |                     |                     |
192.168.20.110 (eth0)  192.168.20.111 (eth1)  192.168.20.112
[web1]                 [web2]                 [web3]
Set the client's GW as follows:
route add -host 192.168.10.100 gw 192.168.10.10
[root@localhost ~]# cat /etc/sysconfig/keepalived
KEEPALIVED_OPTIONS="-D -C -P"
- Master-side LVS
[root@server1 keepalived]# cat /etc/keepalived/keepalived.conf
global_defs {
    notification_email {
        sakai@aplogics.com
    }
    notification_email_from sakai@aplogics.com
    smtp_server localhost
    smtp_connect_timeout 30
    lvs_id LVS_DEVEL
}
vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 51
    priority 101 <---- raise the priority (higher than the backup)
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.10.100/24 dev eth1 label eth1:100
        192.168.10.101/24 dev eth1 label eth1:101
        192.168.20.200/24 dev eth0
    }
}
virtual_server 192.168.10.100 80 {
    delay_loop 3
    lb_algo lc
    lb_kind NAT
    nat_mask 255.255.255.0
    protocol TCP
    virtualhost test.com
    #sorry_server 192.168.20.109 80
    real_server 192.168.20.110 80 {
        weight 1
        inhibit_on_failure
        HTTP_GET {
            url {
                path /index.html
                status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.20.111 80 {
        weight 1
        inhibit_on_failure
        HTTP_GET {
            url {
                path /index.html
                status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
virtual_server 192.168.10.101 80 {
    delay_loop 3
    lb_algo lc
    lb_kind NAT
    nat_mask 255.255.255.0
    protocol TCP
    virtualhost test.com
    #sorry_server 192.168.20.109 80
    real_server 192.168.20.112 80 {
        weight 1
        inhibit_on_failure
        HTTP_GET {
            url {
                path /index.html
                status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
- Backup-side LVS (note both nodes are set to state MASTER; the node with the higher priority wins the VRRP election and the other falls back to backup)
[root@server2 keepalived]# cat /etc/keepalived/keepalived.conf
global_defs {
    notification_email {
        sakai@aplogics.com
    }
    notification_email_from sakai@aplogics.com
    smtp_server localhost
    smtp_connect_timeout 30
    lvs_id LVS_DEVEL
}
vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 51
    priority 100 <---- lower the priority (below the master)
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.10.100/24 dev eth1 label eth1:100
        192.168.10.101/24 dev eth1 label eth1:101
        192.168.20.200/24 dev eth0
    }
}
virtual_server 192.168.10.100 80 {
    delay_loop 3
    lb_algo lc
    lb_kind NAT
    nat_mask 255.255.255.0
    protocol TCP
    virtualhost test.com
    #sorry_server 192.168.20.109 80
    real_server 192.168.20.110 80 {
        weight 1
        inhibit_on_failure
        HTTP_GET {
            url {
                path /index.html
                status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.20.111 80 {
        weight 1
        inhibit_on_failure
        HTTP_GET {
            url {
                path /index.html
                status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
virtual_server 192.168.10.101 80 {
    delay_loop 3
    lb_algo lc
    lb_kind NAT
    nat_mask 255.255.255.0
    protocol TCP
    virtualhost test.com
    #sorry_server 192.168.20.109 80
    real_server 192.168.20.112 80 {
        weight 1
        inhibit_on_failure
        HTTP_GET {
            url {
                path /index.html
                status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
/etc/rc.d/init.d/keepalived start
Check the assigned IPs with:
ip addr show eth1
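To verify the failover itself, stop keepalived on the master and watch the VIPs move. A quick test sketch:
# on the master LVS: simulate a failure
/etc/rc.d/init.d/keepalived stop
# on the backup LVS: the VIPs should appear within a few advert_int periods
ip addr show eth1
ip addr show eth0
# from the client: the VIP should keep answering
curl http://192.168.10.100/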
If the servers on the inside need to reach the outside via NAT, configure masquerading as follows in the iptables rules file (/etc/sysconfig/iptables, which /etc/rc.d/init.d/iptables loads):
*nat
:PREROUTING ACCEPT [60:3408]
:POSTROUTING ACCEPT [0:0]
:OUTPUT ACCEPT [3:243]
-A POSTROUTING -o eth0 -j MASQUERADE
COMMIT
*filter
:INPUT ACCEPT [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [301:35162]
-A INPUT -i lo -j ACCEPT
-A INPUT -p icmp -m icmp --icmp-type any -j ACCEPT
-A INPUT -p esp -j ACCEPT
-A INPUT -p ah -j ACCEPT
-A INPUT -d 224.0.0.251 -p udp -m udp --dport 5353 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 80 -j ACCEPT
-A INPUT -p udp -m udp --dport 631 -j ACCEPT
-A INPUT -p tcp -m tcp --dport 631 -j ACCEPT
-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
-A INPUT -j REJECT --reject-with icmp-host-prohibited
COMMIT
Session behavior when the configuration is changed:
Setting a real server's weight to 0, as below, keeps existing sessions alive, but no new sessions are directed to it:
ipvsadm -e -t 10.1.1.10:80 -r 192.168.2.236:80 -m -w 0
After changing the configuration, apply it with:
/etc/rc.d/init.d/keepalived reload
Adding or removing servers other than the one holding a session also leaves existing sessions intact.
However, deleting the VIP (the whole virtual service) or the real server itself, as below, cuts the sessions:
ipvsadm -D -t 10.1.1.10:80 -s lc
ipvsadm -d -t 10.1.1.10:80 -r 192.168.2.236:80
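Putting the above together, a graceful way to take a real server out of service is to drain it first. A minimal sketch with the same addresses as above:
# stop new sessions while keeping existing ones
ipvsadm -e -t 10.1.1.10:80 -r 192.168.2.236:80 -m -w 0
# wait for ActiveConn on that server to reach zero
watch ipvsadm -Ln
# then remove it without cutting anyone off
ipvsadm -d -t 10.1.1.10:80 -r 192.168.2.236:80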
== DSR
[client] 192.168.10.24
        |
        +----------------------------------------------------+
        |        192.168.10.200 (VIP for VRRP)               |
        |                                                    |
192.168.10.108 (eth1)                  192.168.10.109 (eth1) |
[LVS1]                                 [LVS2]                |
192.168.20.108 (eth0)                  192.168.20.109 (eth0) |
        |       192.168.20.200 (VIP for VRRP)                |
        +--------------------+                               |
        |                    |                               |
192.168.20.110 (eth0)  192.168.20.111 (eth0)                 |
[web1]                 [web2]                                |
192.168.10.110 (eth1)  192.168.10.111 (eth1)                 |
        |                    |                               |
        +--------------------+-------------------------------+
(the web servers' eth1 side returns replies directly to the client segment)
Change the client's GW as follows (needed because the LVS does not answer ARP broadcasts for the VIP on the same segment):
route add -host 192.168.10.100 gw 192.168.10.200
- LVS
cat /etc/keepalived/keepalived.conf
virtual_ipaddress {
#192.168.10.100/24 dev eth1 label eth1:100
#192.168.10.101/24 dev eth1 label eth1:101
192.168.10.200/24 dev eth1
192.168.20.200/24 dev eth0
}
lb_algo rr
lb_kind DR
Configure netfilter so that packets arriving at the LVS for 192.168.10.100 get mark value 1:
iptables -t mangle -A PREROUTING -d 192.168.10.100 -j MARK --set-mark 1
Check with iptables -t mangle -n -L. Delete with:
iptables -t mangle -D PREROUTING -d 192.168.10.100 -j MARK --set-mark 1
Make packets carrying mark 1 use routing table 100:
ip rule add prio 100 fwmark 1 table 100
Check with ip rule. Delete with:
ip rule del prio 100 fwmark 1 table 100
This setting is lost after a reboot.
Add an entry to table 100 that routes all packets to the local device:
ip route add local 0/0 dev lo table 100
Check with ip route list table 100. Delete with:
ip route del local 0/0 dev lo table 100
This, too, is lost after a reboot.
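Because the mangle rule, the ip rule, and the local route are all lost on reboot, one option is to re-apply them at boot time, for example from /etc/rc.d/rc.local. A minimal sketch under that assumption:
# /etc/rc.d/rc.local -- runs once at the end of boot
# mark packets addressed to the DSR VIP
iptables -t mangle -A PREROUTING -d 192.168.10.100 -j MARK --set-mark 1
# send marked packets through routing table 100
ip rule add prio 100 fwmark 1 table 100
# table 100 delivers everything to the local device
ip route add local 0/0 dev lo table 100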
Disable rp_filter. If it stays enabled, the kernel's source address verification kicks in and filters out the reply packets from the real servers:
echo 0 > /proc/sys/net/ipv4/conf/eth0/rp_filter
vi /etc/sysctl.conf
net.ipv4.conf.default.rp_filter = 0
sysctl -p
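Note that the echo above changes only eth0, and net.ipv4.conf.default.rp_filter only affects interfaces created afterwards. To be thorough, the flag can be cleared on every interface:
# disable source-address verification on all current interfaces
for f in /proc/sys/net/ipv4/conf/*/rp_filter; do
  echo 0 > "$f"
done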
/etc/rc.d/init.d/keepalived start
- WEB Server
The destination address is rewritten to the server's own IP and the packet is handed to the local device:
iptables -t nat -A PREROUTING -d 192.168.10.100 -j REDIRECT
Check with iptables -L -t nat. Delete with:
iptables -t nat -D PREROUTING -d 192.168.10.100 -j REDIRECT
Point the GW at the client:
cat /etc/sysconfig/network
NETWORKING=yes
NETWORKING_IPV6=no
HOSTNAME=node1
GATEWAY=192.168.10.24
Watch the replies going straight back to the client with:
tcpdump -n -i eth1 port 80 and host 192.168.10.24
Save the iptables settings above with:
/etc/rc.d/init.d/iptables save
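To confirm that the REDIRECT rule is matching, the per-rule counters are useful (run on the web server while the client accesses the VIP):
# -v shows packet/byte counters; they should grow as requests
# for 192.168.10.100 arrive
iptables -t nat -L PREROUTING -v -n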
== Using the subnet on the same interface as the VIP (instead of load balancing to servers behind a router)
echo "1" >/proc/sys/net/ipv4/ip_forward
ipvsadm -C
ipvsadm -A -t 216.69.70.80:21 -s wlc
ipvsadm -a -t 216.69.70.80:21 -r 216.69.70.81 -m
ipvsadm -a -t 216.69.70.80:21 -r 216.69.70.82 -m
ipvsadm -Ln
Point the load-balanced servers' GW at the load balancer's VIP, or alternatively use the DSR setup described above.
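As a sketch of the first option (addresses from the example above; the real servers send replies back through the director so the NAT can be reversed):
# on each real server (216.69.70.81 / 216.69.70.82)
route add default gw 216.69.70.80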
Labels: Load balancer