Step 1) Back up all files and directories stored on the ACFS filesystem.
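A simple way to do this (assuming /u02/acfs01_backup is a hypothetical staging directory on a filesystem with enough free space; any backup method that preserves ownership and permissions will do) is:
# mkdir -p /u02/acfs01_backup
# cp -a /acfs01/. /u02/acfs01_backup/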
Step 2) Obtain information about the current ACFS filesystem and ADVM volume:
[opc@said-dbvm01 ~]$ sudo su -
Last login: Thu Jan 11 08:31:39 +06 2024 on pts/1
Last login: Thu Jan 11 08:46:30 +06 2024 on pts/0
[root@said-dbvm01 ~]# su - grid
Last login: Thu Jan 11 08:43:44 +06 2024
Last login: Thu Jan 11 08:46:33 +06 2024 on pts/0
[grid@said-dbvm01 ~]$ asmcmd
ASMCMD> volinfo --all
Diskgroup Name: DATAC1
Volume Name: ACFSVOL01
Volume Device: /dev/asm/acfsvol01-390
State: ENABLED
Size (MB): 1150976
Resize Unit (MB): 64
Redundancy: HIGH
Stripe Columns: 8
Stripe Width (K): 1024
Usage: ACFS
Mountpath: /acfs01
ASMCMD>
[grid@said-dbvm01 ~]$ df -m /acfs01/
Filesystem 1M-blocks Used Available Use% Mounted on
/dev/asm/acfsvol01-390 1150976 8427 1142550 1% /acfs01
[grid@said-dbvm01 ~]$
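It is also worth recording the current ownership and permissions of the mount point so they can be compared after the relocation, for example:
[grid@said-dbvm01 ~]$ ls -ld /acfs01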
Step 3) Stop and dismount the associated filesystem clusterwide as follows (as root user):
[root@said-dbvm01 ~]# /u01/app/19.0.0.0/grid/bin/crsctl stat res -t
--------------------------------------------------------------------------------
Name Target State Server State details
--------------------------------------------------------------------------------
Local Resources
--------------------------------------------------------------------------------
ora.DATAC1.ACFSVOL01.advm
ONLINE ONLINE dr01v-dbvm01 STABLE
ONLINE ONLINE dr01v-dbvm02 STABLE
ora.LISTENER.lsnr
ONLINE ONLINE dr01v-dbvm01 STABLE
ONLINE ONLINE dr01v-dbvm02 STABLE
ora.chad
ONLINE ONLINE dr01v-dbvm01 STABLE
ONLINE ONLINE dr01v-dbvm02 STABLE
ora.datac1.acfsvol01.acfs
ONLINE ONLINE dr01v-dbvm01 mounted on /acfs01,S
TABLE
ONLINE ONLINE dr01v-dbvm02 mounted on /acfs01,S
TABLE
ora.net1.network
ONLINE ONLINE dr01v-dbvm01 STABLE
ONLINE ONLINE dr01v-dbvm02 STABLE
ora.ons
ONLINE ONLINE dr01v-dbvm01 STABLE
ONLINE ONLINE dr01v-dbvm02 STABLE
ora.proxy_advm
ONLINE ONLINE dr01v-dbvm01 STABLE
ONLINE ONLINE dr01v-dbvm02 STABLE
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.ASMNET1LSNR_ASM.lsnr(ora.asmgroup)
1 ONLINE ONLINE dr01v-dbvm01 STABLE
2 ONLINE ONLINE dr01v-dbvm02 STABLE
ora.DATAC1.dg(ora.asmgroup)
1 ONLINE ONLINE dr01v-dbvm01 STABLE
2 ONLINE ONLINE dr01v-dbvm02 STABLE
ora.LISTENER_SCAN1.lsnr
1 ONLINE ONLINE dr01v-dbvm01 STABLE
ora.LISTENER_SCAN2.lsnr
1 ONLINE ONLINE dr01v-dbvm02 STABLE
ora.LISTENER_SCAN3.lsnr
1 ONLINE ONLINE dr01v-dbvm01 STABLE
ora.RECOC1.dg(ora.asmgroup)
1 ONLINE ONLINE dr01v-dbvm01 STABLE
2 ONLINE ONLINE dr01v-dbvm02 STABLE
ora.asm(ora.asmgroup)
1 ONLINE ONLINE dr01v-dbvm01 Started,STABLE
2 ONLINE ONLINE dr01v-dbvm02 Started,STABLE
ora.asmnet1.asmnetwork(ora.asmgroup)
1 ONLINE ONLINE dr01v-dbvm01 STABLE
2 ONLINE ONLINE dr01v-dbvm02 STABLE
ora.badgcc.bacoredb_pdb1.paas.oracle.com.svc
1 OFFLINE OFFLINE STABLE
2 OFFLINE OFFLINE STABLE
ora.badgcc.db
1 OFFLINE OFFLINE Instance Shutdown,ST
ABLE
2 OFFLINE OFFLINE Instance Shutdown,ST
ABLE
ora.cvu
1 ONLINE ONLINE dr01v-dbvm02 STABLE
ora.dr01v-dbvm01.vip
1 ONLINE ONLINE dr01v-dbvm01 STABLE
ora.dr01v-dbvm02.vip
1 ONLINE ONLINE dr01v-dbvm02 STABLE
ora.qosmserver
1 ONLINE ONLINE dr01v-dbvm02 STABLE
ora.scan1.vip
1 ONLINE ONLINE dr01v-dbvm01 STABLE
ora.scan2.vip
1 ONLINE ONLINE dr01v-dbvm02 STABLE
ora.scan3.vip
1 ONLINE ONLINE dr01v-dbvm01 STABLE
--------------------------------------------------------------------------------
[root@dr01v-dbvm01 ~]#
[root@said-dbvm01 ~]# /u01/app/19.0.0.0/grid/bin/srvctl status filesystem -d /dev/asm/acfsvol01-390
ACFS file system /acfs01 is mounted on nodes dr01v-dbvm01,dr01v-dbvm02
[root@said-dbvm01 ~]#
[root@said-dbvm01 ~]# /u01/app/19.0.0.0/grid/bin/srvctl stop filesystem -d /dev/asm/acfsvol01-390
[root@said-dbvm01 ~]# /u01/app/19.0.0.0/grid/bin/srvctl status filesystem -d /dev/asm/acfsvol01-390
ACFS file system /acfs01 is not mounted
[root@said-dbvm01 ~]#
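If the stop command reports that the filesystem is busy, a quick way to see which processes are still holding it open (run as root on each node) is:
# fuser -vm /acfs01
# lsof /acfs01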
Step 4) Disable the associated volume on every node:
[grid@said-dbvm01 ~]$ asmcmd voldisable -G DATAC1 ACFSVOL01
[grid@said-dbvm01 ~]$
[opc@said-dbvm02 ~]$ sudo su -
Last login: Thu Jan 11 08:49:21 +06 2024
Last login: Thu Jan 11 08:56:44 +06 2024 on pts/0
[root@said-dbvm02 ~]# su - grid
Last login: Thu Jan 11 08:49:21 +06 2024
Last login: Thu Jan 11 08:56:48 +06 2024 on pts/0
[grid@said-dbvm02 ~]$ asmcmd voldisable -G DATAC1 ACFSVOL01
[grid@said-dbvm02 ~]$
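To confirm the volume is now disabled on both nodes, the volinfo command from Step 2 can be reused; the State field should read DISABLED:
[grid@said-dbvm01 ~]$ asmcmd volinfo -G DATAC1 ACFSVOL01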
Step 5) Once the volume is disabled on every node, delete it from node #1. The delete only needs to be run once; attempting it again from node #2 simply returns ORA-15466 because the volume no longer exists:
[grid@said-dbvm01 ~]$ asmcmd voldelete -G DATAC1 ACFSVOL01
[grid@said-dbvm01 ~]$
[grid@said-dbvm02 ~]$ asmcmd voldelete -G DATAC1 ACFSVOL01
ORA-15032: not all alterations performed
ORA-15466: volume 'ACFSVOL01' in disk group 'DATAC1' does not exist (DBD ERROR: OCIStmtExecute)
[grid@said-dbvm02 ~]$
Step 6) Remove the associated ACFS and ADVM CRS resources as follows (as the root user from node #1). Because the underlying volume was already deleted in Step 5, advmutil can no longer query the device, so the command reports the error shown below:
[root@said-dbvm01 ~]# /u01/app/19.0.0.0/grid/bin/srvctl remove filesystem -d /dev/asm/acfsvol01-390 -force
PRCT-1011 : Failed to run "advmutil". Detailed error: advmutil: ADVM-03168: Internal error: clscrs_res_get_attr(). attrName=CANONICAL_VOLUME_DEVICE 4,advmutil: ADVM-03180: Unable to obtain ASM volume device information for '/dev/asm/acfsvol01-390'
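To check whether the resources are actually gone despite the advmutil error, list the cluster resources again and confirm that no acfsvol entries remain, e.g.:
# /u01/app/19.0.0.0/grid/bin/crsctl stat res -t | grep -i acfsvol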
Step 7) Recreate the ADVM volume in the target diskgroup. The generic syntax is:
SQL> ALTER DISKGROUP <target_diskgroup> ADD VOLUME <volume_name> SIZE <size>;
In this example the volume is recreated as ACFSVOL01 in the RECOC1 diskgroup:
[grid@said-dbvm01 ~]$ sqlplus
SQL*Plus: Release 19.0.0.0.0 - Production on Thu Jan 11 09:03:25 2024
Version 19.20.0.0.0
Copyright (c) 1982, 2022, Oracle. All rights reserved.
Enter user-name: /as sysasm
Connected to:
Oracle Database 19c Enterprise Edition Release 19.0.0.0.0 - Production
Version 19.20.0.0.0
SQL> ALTER DISKGROUP RECOC1 ADD VOLUME ACFSVOL01 SIZE 600G;
Diskgroup altered.
SQL>
Step 8) Verify the new volume and note its new volume device name:
[grid@said-dbvm01 ~]$ asmcmd volinfo --all
Diskgroup Name: RECOC1
Volume Name: ACFSVOL01
Volume Device: /dev/asm/acfsvol01-178
State: ENABLED
Size (MB): 614400
Resize Unit (MB): 64
Redundancy: HIGH
Stripe Columns: 8
Stripe Width (K): 1024
Usage:
Mountpath:
[grid@said-dbvm01 ~]$
Step 9) Create an ACFS filesystem in the new volume:
# /sbin/mkfs -t acfs /dev/asm/acfsvol01-178
[root@dr01v-dbvm01 ~]# /sbin/mkfs -t acfs /dev/asm/acfsvol01-178
mkfs.acfs: version = 19.0.0.0.0
mkfs.acfs: on-disk version = 46.0
mkfs.acfs: volume = /dev/asm/acfsvol01-178
mkfs.acfs: volume size = 644245094400 ( 600.00 GB )
mkfs.acfs: Format complete.
[root@dr01v-dbvm01 ~]#
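After the format completes, volinfo should report the Usage field as ACFS for the new volume, e.g.:
[grid@said-dbvm01 ~]$ asmcmd volinfo -G RECOC1 ACFSVOL01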
Step 10) Create the CRS resource associated with the new ACFS filesystem as follows (as the root user from node #1). The generic syntax is:
# srvctl add filesystem -d <volume_device> -g '<diskgroup>' -v <volume_name> -m <mount_point> -u <user>
[root@said-dbvm01 ~]# /u01/app/19.0.0.0/grid/bin/srvctl add filesystem -d /dev/asm/acfsvol01-178 -g 'RECOC1' -v ACFSVOL01 -m /acfs01 -u grid
[root@dr01v-dbvm01 ~]#
Step 11) Finally, start and mount the filesystem as follows (as root user from node #1):
# srvctl start filesystem -d <volume_device>
[root@said-dbvm01 ~]# /u01/app/19.0.0.0/grid/bin/srvctl start filesystem -d /dev/asm/acfsvol01-178
[root@said-dbvm01 ~]#
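The same status check used in Step 3 confirms the new resource is up, e.g.:
# /u01/app/19.0.0.0/grid/bin/srvctl status filesystem -d /dev/asm/acfsvol01-178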
Step 12) Verify the new ACFS filesystem is mounted on all the nodes:
[root@said-dbvm01 ~]# df -m /acfs01/
Filesystem 1M-blocks Used Available Use% Mounted on
/dev/asm/acfsvol01-178 614400 1832 612569 1% /acfs01
[root@said-dbvm01 ~]#
[grid@said-dbvm02 ~]$ df -m /acfs01/
Filesystem 1M-blocks Used Available Use% Mounted on
/dev/asm/acfsvol01-178 614400 1832 612569 1% /acfs01
[grid@said-dbvm02 ~]$ df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 252G 0 252G 0% /dev
tmpfs 504G 209M 504G 1% /dev/shm
tmpfs 252G 11M 252G 1% /run
tmpfs 252G 0 252G 0% /sys/fs/cgroup
/dev/mapper/VGExaDb-LVDbSys1 15G 7.7G 7.4G 51% /
/dev/sda1 509M 118M 391M 24% /boot
/dev/mapper/VGExaDb-LVDbKdump 20G 33M 20G 1% /crashfiles
/dev/mapper/VGExaDbDisk.u01.20.img-LVDBDisk 20G 1.9G 19G 10% /u01
/dev/mapper/VGExaDbDisk.grid19.0.0.0.230718.img-LVDBDisk 50G 12G 39G 23% /u01/app/19.0.0.0/grid
/dev/mapper/VGExaDb-LVDbVar1 5.0G 1.8G 3.3G 36% /var
/dev/mapper/VGExaDbDisk.u02_extra.img-LVDBDisk 57G 15G 40G 27% /u02
/dev/mapper/VGExaDb-LVDbHome 4.0G 45M 4.0G 2% /home
/dev/mapper/VGExaDb-LVDbTmp 3.0G 33M 3.0G 2% /tmp
/dev/mapper/VGExaDb-LVDbVarLog 18G 466M 18G 3% /var/log
/dev/mapper/VGExaDb-LVDbVarLogAudit 3.0G 161M 2.9G 6% /var/log/audit
tmpfs 51G 0 51G 0% /run/user/1001
tmpfs 51G 0 51G 0% /run/user/1000
tmpfs 51G 0 51G 0% /run/user/0
tmpfs 51G 0 51G 0% /run/user/2000
/dev/asm/acfsvol01-178 600G 1.8G 599G 1% /acfs01
[grid@said-dbvm02 ~]$
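For more detail on the new filesystem (volume device, size, free space, nodes where it is mounted), acfsutil can also be queried as root, e.g.:
# /sbin/acfsutil info fs /acfs01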
Step 13) Copy the files and directories back from the backup taken in Step 1 (the transient staging location) to the new ACFS filesystem ("/acfs01").
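A minimal restore sketch, assuming the hypothetical staging directory used in Step 1:
# cp -a /u02/acfs01_backup/. /acfs01/
Afterwards, compare ownership and permissions against the original filesystem (e.g. with ls -l) and adjust if needed.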