rac 10.2 change public IP, VIP, nic device [ vip evtf.log ( clsrevtf: Invalid resource attr ora.node1.vip ) ]
최길호
2008-09-16 05:08:39
update: 080930
vip, nic device change
============================================================
eth0 : 192.168.100.0 [ none ] => [ vip ]
eth1 : 100.100.100.0 [ vip ] => [ node1-priv ]
eth2 : 10.10.10.0 [ priv ] => [ none ]
current status [ public: 100.100.100.0 ]
------------------------------------------------------------
/etc/hosts
127.0.0.1 localhost.localdomain localhost
192.168.100.11 n1 localhost #eth0
192.168.100.12 n2
100.100.100.11 node1 #eth1
100.100.100.12 node2
100.100.100.21 node1-vip
100.100.100.22 node2-vip
100.100.100.254 gw
10.10.10.11 node1-priv #eth2
10.10.10.12 node2-priv
[root@node1 :/root]# oifcfg iflist
eth0 192.168.100.0
eth1 100.100.100.0
eth2 10.0.0.0
[root@node1 :/root]# oifcfg getif
eth1 100.100.100.0 global public
eth2 10.10.10.0 global cluster_interconnect
[oracle@node1 :/oracle/product/10g/backup/crs]$ srvctl config nodeapps -n node1 -a
VIP exists.: /node1-vip/100.100.100.21/255.255.255.0/eth1
[oracle@node1 :/oracle/product/10g/backup/crs]$ srvctl config nodeapps -n node2 -a
VIP exists.: /node2-vip/100.100.100.22/255.255.255.0/eth1
[root@node1 :/root]# crs_stat -t
Name Type Target State Host
------------------------------------------------------------
ora....SM1.asm application ONLINE ONLINE node1
ora....E1.lsnr application ONLINE ONLINE node1
ora.node1.gsd application ONLINE ONLINE node1
ora.node1.ons application ONLINE ONLINE node1
ora.node1.vip application ONLINE ONLINE node1
ora....SM2.asm application ONLINE ONLINE node2
ora....E2.lsnr application ONLINE ONLINE node2
ora.node2.gsd application ONLINE ONLINE node2
ora.node2.ons application ONLINE ONLINE node2
ora.node2.vip application ONLINE ONLINE node2
ora.rac.db application ONLINE ONLINE node1
ora....c1.inst application ONLINE ONLINE node1
ora....c2.inst application ONLINE ONLINE node2
[root@node1 :/root]#
[root@node1 :/root]# crs_stop -all
...
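Before touching the network it is worth keeping a copy of the current CRS network settings; a small sketch (the backup directory is the one used throughout this note, file names are arbitrary):
mkdir -p /oracle/product/10g/backup/crs && cd /oracle/product/10g/backup/crs
oifcfg getif                        >  oifcfg.getif.before
srvctl config nodeapps -n node1 -a  >  nodeapps.before
srvctl config nodeapps -n node2 -a  >> nodeapps.before
crs_stat -p ora.node1.vip           >  ora.node1.vip.before
crs_stat -p ora.node2.vip           >  ora.node2.vip.before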
change eth0 [ 192.168.100.0 : none ] => [ 192.168.100.0 : VIP ]
eth1 [ 100.100.100.0 : VIP ] => [ 100.100.100.0 : Private ]
eth2 [ 10.10.10.0 : Private ] => remove
service: 100.100.100.0 [eth1] => 192.168.100.0 [eth0]
inter-connect: 10.10.10.0 [eth2] => 100.100.100.0 [eth1]
------------------------------------------------------------
vi /etc/hosts
---------------------------------------------------------
127.0.0.1 localhost.localdomain localhost
192.168.100.11 node1 localhost #eth0
192.168.100.12 node2
100.100.100.11 node1-priv #eth1
100.100.100.12 node2-priv
192.168.100.21 node1-vip
192.168.100.22 node2-vip
192.168.100.254 gw
#10.10.10.11 node1-priv #eth2
#10.10.10.12 node2-priv
ping node1 #192.168.100.11
ping node2 #192.168.100.12
ping node1-vip #192.168.100.21
ping node2-vip #192.168.100.22
ping node1-priv #100.100.100.11
ping node2-priv #100.100.100.12
ping gw #192.168.100.254
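The same reachability checks as one loop (a sketch; hostnames assumed to match the /etc/hosts entries above, and the *-vip names will only answer after the VIPs are re-registered and started):
for h in node1 node2 node1-vip node2-vip node1-priv node2-priv gw; do
    ping -c 1 -W 2 $h > /dev/null 2>&1 && echo "OK   $h" || echo "FAIL $h"
done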
vi /etc/sysconfig/network-scripts/ifcfg-eth0
ONBOOT=yes
DEVICE=eth0
BOOTPROTO=static
BROADCAST=192.168.100.255
IPADDR=192.168.100.11
NETMASK=255.255.255.0
NETWORK=192.168.100.0
GATEWAY=192.168.100.254
vi /etc/sysconfig/network-scripts/ifcfg-eth1
#GATEWAY=100.100.100.254   # comment out: the default gateway now lives on eth0
[root@node1 :/root]# service network restart
[root@node1 :/root]# route
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
100.100.100.0 * 255.255.255.0 U 0 0 0 eth1
192.168.100.0 * 255.255.255.0 U 0 0 0 eth0
169.254.0.0 * 255.255.0.0 U 0 0 0 eth2
10.0.0.0 * 255.0.0.0 U 0 0 0 eth2
default gw 0.0.0.0 UG 0 0 0 eth0
[oracle@node1 :/oracle/product/10g/backup/crs]$ crs_stat -p ora.node1.vip > ora.node1.vip.cap
[oracle@node1 :/oracle/product/10g/backup/crs]$ crs_stat -p ora.node2.vip > ora.node2.vip.cap
[oracle@node1 :/oracle/product/10g/backup/crs]$ grep VIP *.cap
ora.node1.vip.cap:DESCRIPTION=CRS application for VIP on a node
ora.node1.vip.cap:USR_ORA_VIP=100.100.100.21
ora.node2.vip.cap:DESCRIPTION=CRS application for VIP on a node
ora.node2.vip.cap:USR_ORA_VIP=100.100.100.22
[oracle@node1 :/oracle/product/10g/backup/crs]$ vi *.cap
USR_ORA_IF=eth0
USR_ORA_VIP=192.168.100.21
USR_ORA_IF=eth0
USR_ORA_VIP=192.168.100.22
[oracle@node1 :/oracle/product/10g/backup/crs]$ grep VIP *.cap
ora.node1.vip.cap:DESCRIPTION=CRS application for VIP on a node
ora.node1.vip.cap:USR_ORA_VIP=192.168.100.21
ora.node2.vip.cap:DESCRIPTION=CRS application for VIP on a node
ora.node2.vip.cap:USR_ORA_VIP=192.168.100.22
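The same profile edit can be done non-interactively; a sketch assuming the two .cap files exported above are in the current directory:
# point both VIP profiles at eth0 and the new 192.168.100.x addresses
sed -i 's/^USR_ORA_IF=.*/USR_ORA_IF=eth0/' ora.node1.vip.cap ora.node2.vip.cap
sed -i 's/^USR_ORA_VIP=.*/USR_ORA_VIP=192.168.100.21/' ora.node1.vip.cap
sed -i 's/^USR_ORA_VIP=.*/USR_ORA_VIP=192.168.100.22/' ora.node2.vip.cap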
# note: despite the oracle@node1 prompt, the id output shows uid=0 — the crs_register / srvctl updates below were run as root
[oracle@node1 :/oracle/product/10g/backup/crs]$ id
uid=0(root) gid=0(root) groups=0(root),1(bin),2(daemon),3(sys),4(adm),6(disk),10(wheel)
[oracle@node1 :/oracle/product/10g/backup/crs]$ crs_register ora.node1.vip -dir . -u
[oracle@node1 :/oracle/product/10g/backup/crs]$ crs_register ora.node2.vip -dir . -u
[oracle@node1 :/oracle/product/10g/backup/crs]$ crs_stat -p ora.node2.vip
USR_ORA_IF=eth0
...
USR_ORA_VIP=192.168.100.22
[oracle@node1 :/oracle/product/10g/backup/crs]$ srvctl modify nodeapps -n node1 -A 192.168.100.21/255.255.255.0/eth0
[oracle@node1 :/oracle/product/10g/backup/crs]$ srvctl modify nodeapps -n node2 -A 192.168.100.22/255.255.255.0/eth0
[oracle@node1 :/oracle/product/10g/backup/crs]$ srvctl config nodeapps -n node1 -a
VIP exists.: /node1-vip/192.168.100.21/255.255.255.0/eth0
[oracle@node1 :/oracle/product/10g/backup/crs]$ srvctl config nodeapps -n node2 -a
VIP exists.: /node2-vip/192.168.100.22/255.255.255.0/eth0
[oracle@node1 :/oracle/product/10g/backup/crs]$ oifcfg iflist
eth0 192.168.100.0
eth1 100.100.100.0
eth2 10.0.0.0
[oracle@node1 :/oracle/product/10g/backup/crs]$ oifcfg getif
eth1 100.100.100.0 global public
eth2 10.10.10.0 global cluster_interconnect
[oracle@node1 :/oracle/product/10g/backup/crs]$ oifcfg delif -global eth1
[oracle@node1 :/oracle/product/10g/backup/crs]$ oifcfg delif -global eth2
[oracle@node1 :/oracle/product/10g/backup/crs]$ oifcfg getif
[oracle@node1 :/oracle/product/10g/backup/crs]$ oifcfg setif -global eth0/192.168.100.0:public
[oracle@node1 :/oracle/product/10g/backup/crs]$ oifcfg setif -global eth1/100.100.100.0:cluster_interconnect
[oracle@node1 :/oracle/product/10g/backup/crs]$ oifcfg getif
eth0 192.168.100.0 global public
eth1 100.100.100.0 global cluster_interconnect
[oracle@node1 :/oracle/product/10g/backup/crs]$ crs_start -all
[oracle@node1 :/oracle/product/10g/backup/crs]$ crs_stat -t -v
Name Type R/RA F/FT Target State Host
----------------------------------------------------------------------
ora....SM1.asm application 0/5 0/0 ONLINE ONLINE node1
ora....E1.lsnr application 0/5 0/0 ONLINE ONLINE node1
ora.node1.gsd application 0/5 0/0 ONLINE ONLINE node1
ora.node1.ons application 0/3 0/0 ONLINE ONLINE node1
ora.node1.vip application 0/0 0/0 ONLINE ONLINE node1
ora....SM2.asm application 0/5 0/0 ONLINE ONLINE node2
ora....E2.lsnr application 0/5 0/0 ONLINE ONLINE node2
ora.node2.gsd application 0/5 0/0 ONLINE ONLINE node2
ora.node2.ons application 0/3 0/0 ONLINE ONLINE node2
ora.node2.vip application 0/0 0/0 ONLINE ONLINE node2
ora.rac.db application 0/1 0/1 ONLINE ONLINE node2
ora....c1.inst application 0/5 0/0 ONLINE ONLINE node1
ora....c2.inst application 0/5 0/0 ONLINE ONLINE node2
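With the stack back up it is worth confirming which interconnect the instances actually registered; a minimal check (a sketch, assuming OS authentication as the oracle user):
sqlplus -s / as sysdba <<'EOF'
-- NAME/IP_ADDRESS should now show eth1 / 100.100.100.x with IS_PUBLIC = NO
select inst_id, name, ip_address, is_public from gv$cluster_interconnects;
EOF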
0809
================================================================================
service stop
--------------------------------------------------------------------------------
crs_stop -all
Edit IP [ OS ]
--------------------------------------------------------------------------------
/etc/hosts
/etc/sysconfig/network-scripts/ifcfg-eth1
listener.ora
update crs_register
--------------------------------------------------------------------------------
crs_stat -p ora.node1.vip > ora.node1.vip.cap
vi ora.node1.vip.cap
# change ip : USR_ORA_VIP=100.100.200.21
crs_register ora.node1.vip -dir . -u
srvctl config check
--------------------------------------------------------------------------------
[oracle@node1 :/oracle/product/10g/crs]$ srvctl config nodeapps -n node1 -a
VIP exists.: /node1-vip/100.100.200.21/255.255.255.0/eth1
# srvctl modify nodeapps -n node1 -A 100.100.200.21/255.255.255.0/eth1
# srvctl modify nodeapps -n node2 -A 100.100.200.22/255.255.255.0/eth1
update oifcfg
--------------------------------------------------------------------------------
[oracle@node1 :/oracle/product/10g/crs]$ oifcfg getif
eth1 100.100.100.0 global public -- old ip
eth2 10.0.0.0 global cluster_interconnect
[oracle@node1 :/oracle/product/10g/crs]$ oifcfg iflist
eth0 192.168.100.0
eth1 100.100.200.0 -- new ip
eth2 10.0.0.0
oifcfg delif -global eth1
oifcfg setif -global eth1/100.100.200.0:public
# oifcfg delif -global eth2
# oifcfg setif -global eth2/10.0.0.0:cluster_interconnect
oifcfg getif
[oracle@node1 :/oracle/product/10g/crs]$ oifcfg getif
eth1 100.100.200.0 global public
eth2 10.0.0.0 global cluster_interconnect
crs/log/node1/racg/evtf.log
--------------------------------------------------------------------------------------------------------------
2008-09-15 10:07:57.293: [ RACG][3076387680] [5563][3076387680][default]: clsrevtf: Invalid resource attr ora.node1.vip
cause: the gateway (100.100.200.254) interface was up with the class-A default broadcast/mask; it needs the /24
- gw inet addr:100.100.200.254 Bcast:100.255.255.255 Mask:255.0.0.0
=> gw inet addr:100.100.200.254 Bcast:100.100.200.255 Mask:255.255.255.0
gw [ 100.100.200.254 / 255.255.255.0 ]
--------------------------------------------------------------------------------
eth1 Link encap:Ethernet HWaddr 00:0C:29:62:50:68
[x] inet addr:100.100.200.254 Bcast:100.255.255.255 Mask:255.0.0.0 (wrong)
[o] inet addr:100.100.200.254 Bcast:100.100.200.255 Mask:255.255.255.0 (correct)
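Because the VIP check relies on the default gateway answering through the public interface, the gateway box itself must sit on the correct /24; a sketch of fixing it in place on the gateway host (make the same change persistent in its ifcfg file):
ifconfig eth1 100.100.200.254 netmask 255.255.255.0 broadcast 100.100.200.255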
service down log
----------------------------------------------------------------------------------------------------------------
Listener completed notification to CRS on start
16-SEP-2008 02:07:12 * (CONNECT_DATA=(CID=(PROGRAM=)(HOST=node1)(USER=oracle))(COMMAND=status)(ARGUMENTS=64)(SERVICE=LISTENER_NODE1)(VERSION=169869568)) * status * 0
16-SEP-2008 02:59:52 * service_died * rac1 * 12537
16-SEP-2008 02:59:54 * service_died * +ASM1 * 12537
No longer listening on: (DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=100.100.200.21)(PORT=1521)))
No longer listening on: (DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=100.100.200.11)(PORT=1521)))
No longer listening on: (DESCRIPTION=(ADDRESS=(PROTOCOL=ipc)(KEY=EXTPROC)))
Listener completed notification to CRS on stop
16-SEP-2008 02:59:58 * (CONNECT_DATA=(CID=(PROGRAM=)(HOST=node1)(USER=oracle))(COMMAND=stop)(ARGUMENTS=64)(SERVICE=LISTENER_NODE1)(VERSION=169869568)) * stop * 0
----------------------------------------------------------------------------------------------------------------
Tue Sep 16 02:59:52 2008
Shutting down instance (abort)
racg/evtf.log
----------------------------------------------------------------------------------------------------------------
2008-09-16 02:59:45.927: [ RACG][3076395872] [14887][3076395872][default]: clsrevtf: Invalid resource attr ora.node1.vip
2008-09-16 02:59:46.531: [ RACG][3076395872] [14916][3076395872][default]: clsrevtf: Invalid resource attr ora.node2.vip
racg/ora.rac.db.log
----------------------------------------------------------------------------------------------------------------
2008-09-16 02:59:53.945: [ RACG][3076408160] [15114][3076408160][ora.rac.db]: CLSR-0519: No instance found
racg/ora.node1.vip.log
----------------------------------------------------------------------------------------------------------------
2008-09-16 02:59:41.630: [ RACG][3076404064] [14491][3076404064][ora.node1.vip]: Interface eth1 checked failed (host=node1)
Invalid parameters, or failed to bring up VIP (host=node1)
2008-09-16 02:59:41.640: [ RACG][3076404064] [14491][3076404064][ora.node1.vip]: clsrcexecut: env ORACLE_CONFIG_HOME=/oracle/product/10g/crs
2008-09-16 02:59:41.640: [ RACG][3076404064] [14491][3076404064][ora.node1.vip]: clsrcexecut: cmd = /oracle/product/10g/crs/bin/racgeut -e _USR_ORA_DEBUG=0 54 /oracle/product/10g/crs/bin/racgvip check node1
2008-09-16 02:59:41.640: [ RACG][3076404064] [14491][3076404064][ora.node1.vip]: clsrcexecut: rc = 1, time = 6.480s
2008-09-16 02:59:41.640: [ RACG][3076404064] [14491][3076404064][ora.node1.vip]: end for resource = ora.node1.vip, action = check, status = 1, time = 6.590s
racg/ora.node2.vip.log
----------------------------------------------------------------------------------------------------------------
2008-09-16 02:59:42.140: [ RACG][3076391776] [14536][3076391776][ora.node2.vip]: Interface eth1 checked failed (host=node1)
Invalid parameters, or failed to bring up VIP (host=node1)
2008-09-16 02:59:42.140: [ RACG][3076391776] [14536][3076391776][ora.node2.vip]: clsrcexecut: env ORACLE_CONFIG_HOME=/oracle/product/10g/crs
2008-09-16 02:59:42.140: [ RACG][3076391776] [14536][3076391776][ora.node2.vip]: clsrcexecut: cmd = /oracle/product/10g/crs/bin/racgeut -e _USR_ORA_DEBUG=0 54 /oracle/product/10g/crs/bin/racgvip check node2
2008-09-16 02:59:42.140: [ RACG][3076391776] [14536][3076391776][ora.node2.vip]: clsrcexecut: rc = 1, time = 6.530s
2008-09-16 02:59:42.140: [ RACG][3076391776] [14536][3076391776][ora.node2.vip]: end for resource = ora.node2.vip, action = check, status = 1, time = 6.560s
[oracle@node1 :/oracle/product/10g/crs/bin]$ racgvip check node2
cannot execute /oracle/product/10g/crs/bin/crs_stat or cannot find _CAA_NAME (host=node1)
Invalid parameters, or failed to bring up VIP (host=node1)
# racgvip is normally invoked by CRS with the resource attributes already in its environment;
# to run the check by hand, export them first:
export _USR_ORA_VIP=100.100.200.22
export _USR_ORA_NETMASK=255.255.255.0
export _USR_ORA_IF=Public
export _CAA_NAME=ora.node2.vip
racgvip check node2
2008-09-16 05:18:13
Additional info
srvctl modify nodeapps -n node1 -A 100.100.100.21/255.255.255.0/eth1
ora.node2.vip.log [ down info ]
=============================================================================
Oracle Database 10g CRS Release 10.2.0.1.0 Production Copyright 1996, 2005 Oracle. All rights reserved.
2008-09-27 18:17:15.735: [ RACG][3076383584] [30974][3076383584][ora.node2.vip]: Interface eth1 checked failed (host=node2)
Invalid parameters, or failed to bring up VIP (host=node2)
2008-09-27 18:17:15.736: [ RACG][3076383584] [30974][3076383584][ora.node2.vip]: clsrcexecut: env ORACLE_CONFIG_HOME=/oracle/product/10g/crs
2008-09-27 18:17:15.736: [ RACG][3076383584] [30974][3076383584][ora.node2.vip]: clsrcexecut: cmd = /oracle/product/10g/crs/bin/racgeut -e _USR_ORA_DEBUG=0 54 /oracle/product/10g/crs/bin/racgvip check node2
2008-09-27 18:17:15.736: [ RACG][3076383584] [30974][3076383584][ora.node2.vip]: clsrcexecut: rc = 1, time = 6.510s
2008-09-27 18:17:15.736: [ RACG][3076383584] [30974][3076383584][ora.node2.vip]: end for resource = ora.node2.vip, action = check, status = 1, time = 6.540s
=============================================================================
Verification
=============================================================================
[oracle@node1 :/oracle/product/10g/crs/log/node1/racg]$ oifcfg iflist
eth0 192.168.100.0
eth1 100.100.200.0
eth2 10.0.0.0
[oracle@node1 :/oracle/product/10g/crs/log/node1/racg]$ oifcfg getif
eth1 100.100.200.0 global public
eth2 10.0.0.0 global cluster_interconnect
Remedy [ 100.100.200.0 => 100.100.100.0 ]
=============================================================================
-- OS
vi /etc/hosts
vi listener.ora
-- CRS
emctl stop dbconsole
emctl stop agent
crs_stop -all
-- change VIP
oifcfg delif -global eth1/100.100.200.0
oifcfg setif -global eth1/100.100.100.0:public
oifcfg getif
-- crs update
crs_stat -p ora.node1.vip > ora.node1.vip.cap
vi ora.node1.vip.cap
crs_register ora.node1.vip -dir . -u
crs_stat -p ora.node1.vip
-- srvctl update
srvctl config nodeapps -n node1 -a
srvctl config nodeapps -n node2 -a
-- change VIP
srvctl modify nodeapps -n node1 -A 100.100.100.21/255.255.255.0/eth1
srvctl modify nodeapps -n node2 -A 100.100.100.22/255.255.255.0/eth1
-- service start
crs_start -all
emctl start dbconsole
emctl start agent
[oracle@node1 :/oracle/product/10g/crs/log]$ crsctl check crs
CSS appears healthy
CRS appears healthy
EVM appears healthy
[oracle@node1 :/oracle/product/10g/crs/log]$ crsctl start resources   # or: crs_start -all
Starting resources.
Successfully started CRS resources
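The remedy above, pulled together as a single root-run sketch (same commands and addresses as the checklist; stop dbconsole/agent and edit /etc/hosts and listener.ora by hand first):
crs_stop -all
oifcfg delif -global eth1
oifcfg setif -global eth1/100.100.100.0:public
crs_stat -p ora.node1.vip > ora.node1.vip.cap
crs_stat -p ora.node2.vip > ora.node2.vip.cap
sed -i 's/^USR_ORA_VIP=.*/USR_ORA_VIP=100.100.100.21/' ora.node1.vip.cap
sed -i 's/^USR_ORA_VIP=.*/USR_ORA_VIP=100.100.100.22/' ora.node2.vip.cap
crs_register ora.node1.vip -dir . -u
crs_register ora.node2.vip -dir . -u
srvctl modify nodeapps -n node1 -A 100.100.100.21/255.255.255.0/eth1
srvctl modify nodeapps -n node2 -A 100.100.100.22/255.255.255.0/eth1
crs_start -all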
Other info
=============================================================================
emctl stop dbconsole
emctl stop agent
crs_stop -all
-- change VIP
oifcfg iflist
oifcfg getif
oifcfg delif -global eth1/100.100.200.0
oifcfg setif -global eth1/100.100.100.0:public
oifcfg delif -global eth2
oifcfg setif -global eth2/10.10.10.0:cluster_interconnect
oifcfg getif
-- check
srvctl config nodeapps -n node1 -a
srvctl config nodeapps -n node2 -a
-- change VIP
srvctl modify nodeapps -n node1 -A 100.100.100.21/255.255.255.0/eth1
srvctl modify nodeapps -n node2 -A 100.100.100.22/255.255.255.0/eth1
-- service start
crs_start -all
emctl start dbconsole
emctl start agent
[oracle@node1 :/oracle/product/10g/crs/log]$ crsctl check crs
CSS appears healthy
CRS appears healthy
EVM appears healthy
[oracle@node1 :/oracle/product/10g/crs/log]$ crsctl start resources   # or: crs_start -all
Starting resources.
Successfully started CRS resources
http://kr.forums.oracle.com/forums/thread.jspa?threadID=580406
http://www.pythian.com/blogs/424/oracle-clusterware-install-invalid-parameters-or-failed-to-bring-up-vip-after-interface-is-not-public
2008-09-29 08:32:55
/etc/sysconfig/network-scripts/ifcfg-eth1
ONBOOT=yes
DEVICE=eth1
BOOTPROTO=static
BROADCAST=100.100.100.255
IPADDR=100.100.100.11
NETMASK=255.255.255.0
NETWORK=100.100.100.0
GATEWAY=100.100.100.254
2008-09-29 08:37:21
/etc/hosts
=================================================
127.0.0.1 localhost.localdomain localhost
192.168.100.11 n1 localhost #eth0
192.168.100.12 n2
100.100.100.11 node1 #eth1
100.100.100.12 node2
100.100.100.21 node1-vip
100.100.100.22 node2-vip
100.100.100.254 gw
10.10.10.11 node1-priv #eth2
10.10.10.12 node2-priv
node1
===============================================================================
[root@node1 :/root]# netstat -rn
Kernel IP routing table
Destination Gateway Genmask Flags MSS Window irtt Iface
100.100.100.0 0.0.0.0 255.255.255.0 U 0 0 0 eth1
192.168.100.0 0.0.0.0 255.255.255.0 U 0 0 0 eth0
169.254.0.0 0.0.0.0 255.255.0.0 U 0 0 0 eth2
10.0.0.0 0.0.0.0 255.0.0.0 U 0 0 0 eth2
0.0.0.0 100.100.100.254 0.0.0.0 UG 0 0 0 eth1
node2
===============================================================================
[root@node2 :/root]# netstat -rn
Kernel IP routing table
Destination Gateway Genmask Flags MSS Window irtt Iface
100.100.100.0 0.0.0.0 255.255.255.0 U 0 0 0 eth1
192.168.100.0 0.0.0.0 255.255.255.0 U 0 0 0 eth0
169.254.0.0 0.0.0.0 255.255.0.0 U 0 0 0 eth0
10.0.0.0 0.0.0.0 255.0.0.0 U 0 0 0 eth2
0.0.0.0 192.168.100.254 0.0.0.0 UG 0 0 0 eth0
[oracle@node1 :/oracle/product/10g]$ crs_stat -t -v
Name Type R/RA F/FT Target State Host
----------------------------------------------------------------------
ora....SM1.asm application 0/5 0/0 ONLINE ONLINE node1
ora....E1.lsnr application 0/5 0/0 ONLINE ONLINE node1
ora.node1.gsd application 0/5 0/0 ONLINE ONLINE node1
ora.node1.ons application 0/3 0/0 ONLINE ONLINE node1
ora.node1.vip application 0/0 0/0 ONLINE ONLINE node1
ora....SM2.asm application 0/5 0/0 ONLINE ONLINE node2
ora....E2.lsnr application 0/5 0/0 ONLINE ONLINE node2
ora.node2.gsd application 0/5 0/0 ONLINE ONLINE node2
ora.node2.ons application 0/3 0/0 ONLINE ONLINE node2
ora.node2.vip application 0/0 0/0 ONLINE ONLINE node2
ora.rac.db application 0/1 0/1 ONLINE ONLINE node1
ora....c1.inst application 0/5 0/0 ONLINE ONLINE node1
ora....c2.inst application 0/5 0/0 ONLINE ONLINE node2
one node down...
2008-09-29 16:47:06
register 100.100.100.254 as gw
register the internet gateway on eth0
route add default gw 192.168.100.254 eth0
route add default gw 100.100.100.254 eth1
2008-09-30 08:19:53
if the GATEWAY is wrong, the VIP won't come up
vi /etc/sysconfig/network-scripts/ifcfg-eth1
#GATEWAY=100.100.100.254 <<< comment out or remove
cat /etc/sysconfig/network
NETWORKING=yes
HOSTNAME=node1
GATEWAY=192.168.0.1 <<<
# service network stop
# service network start
[node1 :/oracle]$ route
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
192.168.0.0 * 255.255.255.0 U 0 0 0 eth1
10.10.10.0 * 255.255.255.0 U 0 0 0 eth0
169.254.0.0 * 255.255.0.0 U 0 0 0 eth1
default 192.168.0.1 0.0.0.0 UG 0 0 0 eth1 <<<
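A quick way to confirm which gateway and interface the VIP check will rely on (racgvip depends on a working default route out the public NIC, as noted above):
route -n | awk '$1 == "0.0.0.0" { print "default gw " $2 " via " $8 }'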
2009-12-24 21:58:12