Showing posts with label LVM in Linux 7. Show all posts

Wednesday 5 February 2020

Resize lvm (Logical Volume) in Linux



[root@ncbsdb admin]# df -h
Filesystem           Size  Used Avail Use% Mounted on
devtmpfs             252G     0  252G   0% /dev
tmpfs                252G     0  252G   0% /dev/shm
tmpfs                252G   13M  252G   1% /run
tmpfs                252G     0  252G   0% /sys/fs/cgroup
/dev/mapper/ol-root  200G  5.0G  195G   3% /
/dev/sda2           1014M  222M  793M  22% /boot
/dev/mapper/ol-tmp    50G   42M   50G   1% /tmp
/dev/mapper/ol-var    50G  2.6G   48G   6% /var
/dev/mapper/ol-d01   688G  5.3G  683G   1% /d01
tmpfs                 51G   12K   51G   1% /run/user/42
tmpfs                 51G     0   51G   0% /run/user/54321
tmpfs                 51G     0   51G   0% /run/user/0
/dev/mapper/u01-u01  500G  5.3G  495G   2% /u01
/dev/mapper/u07-u07   99G   33M   99G   1% /u07
/dev/mapper/u02-u02  1.3T   34M  1.3T   1% /u02
/dev/mapper/u04-u04  500G   33M  500G   1% /u04
/dev/mapper/u05-u05  600G   34M  600G   1% /u05
/dev/mapper/u06-u06  400G   33M  400G   1% /u06
/dev/mapper/u09-u09  200G   33M  200G   1% /u09
/dev/mapper/u03-u03  484G   33M  484G   1% /u03

Here we want to resize the mount point /u03 from 484 GB to 499 GB

[root@ncbsdb admin]# pvs
  PV                  VG  Fmt  Attr PSize   PFree
  /dev/mapper/mpatha1 u02 lvm2 a--   <1.30t        0
  /dev/mapper/mpathe1 u01 lvm2 a--  499.98g        0
  /dev/mapper/mpathf1 u03 lvm2 a--  499.98g   15.98g
  /dev/mapper/mpathh1 u07 lvm2 a--   99.98g 1008.00m
  /dev/sda3           ol  lvm2 a--    1.09t    4.00m

Here we can see that the physical volume for u03 still has 15.98 GB free

Now we will resize the logical volume:

[root@ncbsdb admin]# lvresize -L+15G /dev/u03/u03

Now check the size of the /u03 mount point — it still shows 484 GB:

[root@ncbsdb admin]# df -h
Filesystem           Size  Used Avail Use% Mounted on
devtmpfs             252G     0  252G   0% /dev
tmpfs                252G     0  252G   0% /dev/shm
tmpfs                252G   13M  252G   1% /run
tmpfs                252G     0  252G   0% /sys/fs/cgroup
/dev/mapper/ol-root  200G  5.0G  195G   3% /
/dev/sda2           1014M  222M  793M  22% /boot
/dev/mapper/ol-tmp    50G   42M   50G   1% /tmp
/dev/mapper/ol-var    50G  2.6G   48G   6% /var
/dev/mapper/ol-d01   688G  5.3G  683G   1% /d01
tmpfs                 51G   12K   51G   1% /run/user/42
tmpfs                 51G     0   51G   0% /run/user/54321
tmpfs                 51G     0   51G   0% /run/user/0
/dev/mapper/u01-u01  500G  5.3G  495G   2% /u01
/dev/mapper/u07-u07   99G   33M   99G   1% /u07
/dev/mapper/u02-u02  1.3T   34M  1.3T   1% /u02
/dev/mapper/u04-u04  500G   33M  500G   1% /u04
/dev/mapper/u05-u05  600G   34M  600G   1% /u05
/dev/mapper/u06-u06  400G   33M  400G   1% /u06
/dev/mapper/u09-u09  200G   33M  200G   1% /u09
/dev/mapper/u03-u03  484G   33M  484G   1% /u03

Now execute the fsadm command — a utility to resize or check the filesystem on a device:

[root@ncbsdb admin]# fsadm resize /dev/mapper/u03-u03
meta-data=/dev/mapper/u03-u03    isize=256    agcount=16, agsize=7929856 blks
         =                       sectsz=4096  attr=2, projid32bit=1
         =                       crc=0        finobt=0 spinodes=0
data     =                       bsize=4096   blocks=126877696, imaxpct=25
         =                       sunit=128    swidth=128 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal               bsize=4096   blocks=61952, version=2
         =                       sectsz=4096  sunit=1 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
data blocks changed from 126877696 to 130809856
[root@ncbsdb admin]#

Remount the /u03 mount point and verify the new size:

[root@ncbsdb admin]# mount /dev/mapper/u03-u03 /u03
[root@ncbsdb admin]# df -h
Filesystem           Size  Used Avail Use% Mounted on
devtmpfs             252G     0  252G   0% /dev
tmpfs                252G     0  252G   0% /dev/shm
tmpfs                252G   13M  252G   1% /run
tmpfs                252G     0  252G   0% /sys/fs/cgroup
/dev/mapper/ol-root  200G  5.0G  195G   3% /
/dev/sda2           1014M  222M  793M  22% /boot
/dev/mapper/ol-tmp    50G   42M   50G   1% /tmp
/dev/mapper/ol-var    50G  2.6G   48G   6% /var
/dev/mapper/ol-d01   688G  5.3G  683G   1% /d01
tmpfs                 51G   12K   51G   1% /run/user/42
tmpfs                 51G     0   51G   0% /run/user/54321
tmpfs                 51G     0   51G   0% /run/user/0
/dev/mapper/u01-u01  500G  5.3G  495G   2% /u01
/dev/mapper/u07-u07   99G   33M   99G   1% /u07
/dev/mapper/u02-u02  1.3T   34M  1.3T   1% /u02
/dev/mapper/u04-u04  500G   33M  500G   1% /u04
/dev/mapper/u05-u05  600G   34M  600G   1% /u05
/dev/mapper/u06-u06  400G   33M  400G   1% /u06
/dev/mapper/u09-u09  200G   33M  200G   1% /u09
/dev/mapper/u03-u03  499G   33M  499G   1% /u03
[root@ncbsdb admin]#

Friday 7 September 2018

Step by step : How to add space to an existing file system from a Physical Volume (LVM)

Step 1. Check Physical Volume and Volume Group  

[root@oracledb ~]# pvs
  PV         VG   Fmt  Attr PSize  PFree
  /dev/sda2  rhel lvm2 a--  19.05g  4.00m
  /dev/sdc3       lvm2 a--  11.72g 11.72g
[root@oracledb ~]# vgs
  VG   #PV #LV #SN Attr   VSize  VFree
  rhel   1   4   0 wz--n- 19.05g 4.00m
 
Step 2. Now add 11.72 GB to rhel VG
 
[root@oracledb ~]# vgextend rhel /dev/sdc3
  Volume group "rhel" successfully extended
 
[root@oracledb ~]# vgs
  VG   #PV #LV #SN Attr   VSize  VFree
  rhel   2   4   0 wz--n- 30.77g 11.72g
 
Step 3. Check file system
 
[root@oracledb ~]# df -h
Filesystem             Size  Used Avail Use% Mounted on
/dev/mapper/rhel-root  7.9G  6.5G  1.4G  83% /
devtmpfs               909M     0  909M   0% /dev
tmpfs                  918M   84K  918M   1% /dev/shm
tmpfs                  918M  9.1M  909M   1% /run
tmpfs                  918M     0  918M   0% /sys/fs/cgroup
/dev/mapper/rhel-home  497M   47M  451M  10% /home
/dev/sda1              497M  122M  376M  25% /boot
/dev/mapper/rhel-u01   8.8G  6.9G  2.0G  78% /u01
/dev/sr0               3.5G  3.5G     0 100% /run/media/root/RHEL-7.0 Server.x86_64

Step 4. Check Logical Volume Status

[root@oracledb ~]# lvs
  LV   VG   Attr       LSize   Pool Origin Data%  Move Log Cpy%Sync Convert
  home rhel -wi-ao---- 500.00m
  root rhel -wi-ao----   7.81g
  swap rhel -wi-ao----   1.95g
  u01  rhel -wi-ao----   8.79g
[root@oracledb ~]# vgs
  VG   #PV #LV #SN Attr   VSize  VFree
  rhel   2   4   0 wz--n- 30.77g 11.72g
 
Step 5. Now merge the added 11.72 GB of space into /dev/rhel/u01
 
[root@oracledb ~]# lvextend /dev/rhel/
/dev/rhel/home  /dev/rhel/root  /dev/rhel/swap  /dev/rhel/u01
[root@oracledb ~]# lvextend /dev/rhel/u01 /dev/sdc3
  Extending logical volume u01 to 20.51 GiB
  Logical volume u01 successfully resized
[root@oracledb ~]#

Step 6. df still shows the previous size for /dev/mapper/rhel-u01, but the lvs output already shows the added space

[root@oracledb ~]# df -h
Filesystem             Size  Used Avail Use% Mounted on
/dev/mapper/rhel-root  7.9G  6.5G  1.4G  83% /
devtmpfs               909M     0  909M   0% /dev
tmpfs                  918M   84K  918M   1% /dev/shm
tmpfs                  918M  9.1M  909M   1% /run
tmpfs                  918M     0  918M   0% /sys/fs/cgroup
/dev/mapper/rhel-home  497M   47M  451M  10% /home
/dev/sda1              497M  122M  376M  25% /boot
/dev/mapper/rhel-u01   8.8G  6.9G  2.0G  78% /u01
/dev/sr0               3.5G  3.5G     0 100% /run/media/root/RHEL-7.0 Server.x86_64
[root@oracledb ~]# lvs
  LV   VG   Attr       LSize   Pool Origin Data%  Move Log Cpy%Sync Convert
  home rhel -wi-ao---- 500.00m
  root rhel -wi-ao----   7.81g
  swap rhel -wi-ao----   1.95g
  u01  rhel -wi-ao----  20.51g
[root@oracledb ~]# cat /etc/fstab

#
# /etc/fstab
# Created by anaconda on Fri Mar 11 23:15:03 2016
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/rhel-root   /                       xfs     defaults        1 1
UUID=c6708dec-5103-402c-84f3-9bd7f39686ef /boot                   xfs     defaults        1 2
/dev/mapper/rhel-home   /home                   xfs     defaults        1 2
/dev/mapper/rhel-u01    /u01                    xfs     defaults        1 2
/dev/mapper/rhel-swap   swap                    swap    defaults        0 0
192.168.184.128:/netshared      /soft   nfs     _netdev 0       0

Step 7.  Now extend /u01 filesystem size

[root@oracledb ~]# resize2fs /dev/mapper/rhel-u01
resize2fs 1.42.9 (28-Dec-2013)
resize2fs: Bad magic number in super-block while trying to open /dev/mapper/rhel-u01
Couldn't find valid filesystem superblock.

Note: The likely reason is that the mounted filesystem is an XFS filesystem. resize2fs won't work for an XFS filesystem; we should use xfs_growfs instead of resize2fs.

[root@oracledb ~]# resize
resize2fs   resizecons  resizepart
[root@oracledb ~]# xf
xfreerdp       xfs_copy       xfs_estimate   xfs_growfs     xfs_io         xfs_metadump   xfs_quota      xfs_rtcp
xfs_admin      xfs_db         xfs_freeze     xfs_info       xfs_logprint   xfs_mkfile     xfs_repair
xfs_bmap       xfsdump        xfs_fsr        xfsinvutil     xfs_mdrestore  xfs_ncheck     xfsrestore
[root@oracledb ~]# xfs_growfs /dev/mapper/rhel-u01
meta-data=/dev/mapper/rhel-u01   isize=256    agcount=4, agsize=576000 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=0
data     =                       bsize=4096   blocks=2304000, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=0
log      =internal               bsize=4096   blocks=2560, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
data blocks changed from 2304000 to 5376000

[root@oracledb ~]# df -h
Filesystem             Size  Used Avail Use% Mounted on
/dev/mapper/rhel-root  7.9G  6.5G  1.4G  83% /
devtmpfs               909M     0  909M   0% /dev
tmpfs                  918M   84K  918M   1% /dev/shm
tmpfs                  918M  9.1M  909M   1% /run
tmpfs                  918M     0  918M   0% /sys/fs/cgroup
/dev/mapper/rhel-home  497M   47M  451M  10% /home
/dev/sda1              497M  122M  376M  25% /boot
/dev/mapper/rhel-u01    21G  6.9G   14G  34% /u01
/dev/sr0               3.5G  3.5G     0 100% /run/media/root/RHEL-7.0 Server.x86_64
[root@oracledb ~]#


That's it!

Tuesday 26 January 2016

Step by step to create LVM (PV, VG and LV) in Linux 7

Scenario : Here we will use two HDDs (10 GB and 5 GB) to create 3 LVs (lv1, lv2 and lv3), sized 2 GB, 3 GB and 3.5 GB respectively, which will be mounted as /data1, /data2 and /data3 accordingly.


Solution:

Step 1: Check that 2 HDD is attached or not 

[root@rhel7 ~]# fdisk -l

Disk /dev/sda: 21.5 GB, 21474836480 bytes, 41943040 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk label type: dos
Disk identifier: 0x000aab0f

   Device Boot      Start         End      Blocks   Id  System
/dev/sda1   *        2048     1026047      512000   83  Linux
/dev/sda2         1026048    21506047    10240000   83  Linux
/dev/sda3        21506048    25700351     2097152   82  Linux swap / Solaris
/dev/sda4        25700352    41943039     8121344    5  Extended
/dev/sda5        25702400    40038399     7168000   83  Linux

Disk /dev/sdb: 10.7 GB, 10737418240 bytes, 20971520 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk label type: dos
Disk identifier: 0x44245958

   Device Boot      Start         End      Blocks   Id  System

Disk /dev/sdc: 5368 MB, 5368709120 bytes, 10485760 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes


[root@rhel7 ~]#

 Step 2: Now we prepare these HDDs for Linux LVM

 [root@rhel7 ~]# fdisk /dev/sdb
Welcome to fdisk (util-linux 2.23.2).

Changes will remain in memory only, until you decide to write them.
Be careful before using the write command.


Command (m for help): m
Command action
   a   toggle a bootable flag
   b   edit bsd disklabel
   c   toggle the dos compatibility flag
   d   delete a partition
   g   create a new empty GPT partition table
   G   create an IRIX (SGI) partition table
   l   list known partition types
   m   print this menu
   n   add a new partition
   o   create a new empty DOS partition table
   p   print the partition table
   q   quit without saving changes
   s   create a new empty Sun disklabel
   t   change a partition's system id
   u   change display/entry units
   v   verify the partition table
   w   write table to disk and exit
   x   extra functionality (experts only)

Command (m for help): n    [n for new partition]
Partition type:
   p   primary (0 primary, 0 extended, 4 free)
   e   extended
Select (default p):                 [default p for primary partition]
Using default response p
Partition number (1-4, default 1):          [default 1 for 1st partition]
First sector (2048-20971519, default 2048):
Using default value 2048
Last sector, +sectors or +size{K,M,G} (2048-20971519, default 20971519): +6G          [Partition size 6 GB]
Partition 1 of type Linux and of size 6 GiB is set

Command (m for help): p

Disk /dev/sdb: 10.7 GB, 10737418240 bytes, 20971520 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk label type: dos
Disk identifier: 0x44245958

   Device Boot      Start         End      Blocks   Id  System
/dev/sdb1            2048    12584959     6291456   83  Linux                        [Here partition is created for Linux now we have to change it for Linux LVM]

Command (m for help): t
Selected partition 1
Hex code (type L to list all codes): 8e
Changed type of partition 'Linux' to 'Linux LVM'

Command (m for help): p

Disk /dev/sdb: 10.7 GB, 10737418240 bytes, 20971520 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk label type: dos
Disk identifier: 0x44245958

   Device Boot      Start         End      Blocks   Id  System
/dev/sdb1            2048    12584959     6291456   8e  Linux LVM

Command (m for help): w                              [w for write and save]
The partition table has been altered!

Calling ioctl() to re-read partition table.
Syncing disks.

 Step 3: Now we can check whether the /dev/sdb1 partition is prepared for Linux LVM

[root@rhel7 ~]# fdisk -l

Disk /dev/sda: 21.5 GB, 21474836480 bytes, 41943040 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk label type: dos
Disk identifier: 0x000aab0f

   Device Boot      Start         End      Blocks   Id  System
/dev/sda1   *        2048     1026047      512000   83  Linux
/dev/sda2         1026048    21506047    10240000   83  Linux
/dev/sda3        21506048    25700351     2097152   82  Linux swap / Solaris
/dev/sda4        25700352    41943039     8121344    5  Extended
/dev/sda5        25702400    40038399     7168000   83  Linux

Disk /dev/sdb: 10.7 GB, 10737418240 bytes, 20971520 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk label type: dos
Disk identifier: 0x44245958

   Device Boot      Start         End      Blocks   Id  System
/dev/sdb1            2048    12584959     6291456   8e  Linux LVM

Disk /dev/sdc: 5368 MB, 5368709120 bytes, 10485760 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes

[root@rhel7 ~]#

Step 4: Now we will create another two partitions (2 GB and 1 GB) for Linux LVM using the same process as above

[root@rhel7 ~]# fdisk -l

Disk /dev/sda: 21.5 GB, 21474836480 bytes, 41943040 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk label type: dos
Disk identifier: 0x000aab0f

   Device Boot      Start         End      Blocks   Id  System
/dev/sda1   *        2048     1026047      512000   83  Linux
/dev/sda2         1026048    21506047    10240000   83  Linux
/dev/sda3        21506048    25700351     2097152   82  Linux swap / Solaris
/dev/sda4        25700352    41943039     8121344    5  Extended
/dev/sda5        25702400    40038399     7168000   83  Linux

Disk /dev/sdb: 10.7 GB, 10737418240 bytes, 20971520 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk label type: dos
Disk identifier: 0x44245958

   Device Boot      Start         End      Blocks   Id  System
/dev/sdb1            2048    12584959     6291456   8e  Linux LVM
/dev/sdb2        12584960    14682111     1048576   8e  Linux LVM

Disk /dev/sdc: 5368 MB, 5368709120 bytes, 10485760 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk label type: dos
Disk identifier: 0x6cba2f17

   Device Boot      Start         End      Blocks   Id  System
/dev/sdc1            2048     4196351     2097152   8e  Linux LVM
[root@rhel7 ~]#


Step 5: Now we will create three PVs — /dev/sdb1 and /dev/sdc1 in one command, and /dev/sdb2 in another

[root@rhel7 ~]# pvcreate /dev/sdb1 /dev/sdc1
  Physical volume "/dev/sdb1" successfully created
  Physical volume "/dev/sdc1" successfully created
[root@rhel7 ~]# pvcreate /dev/sdb2
  Physical volume "/dev/sdb2" successfully created

[root@rhel7 ~]# pvdisplay
  "/dev/sdb1" is a new physical volume of "6.00 GiB"
  --- NEW Physical volume ---
  PV Name               /dev/sdb1
  VG Name
  PV Size               6.00 GiB
  Allocatable           NO
  PE Size               0
  Total PE              0
  Free PE               0
  Allocated PE          0
  PV UUID               dntVQt-iu1N-S3aD-PnCm-di0p-UPwd-QDkAJy

  "/dev/sdc1" is a new physical volume of "2.00 GiB"
  --- NEW Physical volume ---
  PV Name               /dev/sdc1
  VG Name
  PV Size               2.00 GiB
  Allocatable           NO
  PE Size               0
  Total PE              0
  Free PE               0
  Allocated PE          0
  PV UUID               FDZxdu-fGSk-PPgx-k5in-OJ8X-uTBO-RkKNsq

  "/dev/sdb2" is a new physical volume of "1.00 GiB"
  --- NEW Physical volume ---
  PV Name               /dev/sdb2
  VG Name
  PV Size               1.00 GiB
  Allocatable           NO
  PE Size               0
  Total PE              0
  Free PE               0
  Allocated PE          0
  PV UUID               MqHd3I-QbR0-vVS9-qmJ0-jPc7-oiHE-nMdH8R

[root@rhel7 ~]#

Step 6: Now we will create a VG named linux and add the three PVs to it

[root@rhel7 ~]# vgcreate -s 16M linux /dev/sdb1 /dev/sdb2 /dev/sdc1
  Volume group "linux" successfully created

[root@rhel7 ~]# vgdisplay
  --- Volume group ---
  VG Name               linux
  System ID
  Format                lvm2
  Metadata Areas        3
  Metadata Sequence No  1
  VG Access             read/write
  VG Status             resizable
  MAX LV                0
  Cur LV                0
  Open LV               0
  Max PV                0
  Cur PV                3
  Act PV                3
  VG Size               8.95 GiB
  PE Size               16.00 MiB
  Total PE              573
  Alloc PE / Size       0 / 0
  Free  PE / Size       573 / 8.95 GiB
  VG UUID               HLlZVq-29Wg-FpcI-0P3R-9NI1-aTKE-EJswOC

Note: Here the physical extent (PE) size is 16 MB; the default is 4 MB

Step 7: Now we will create three LVs (lv1, lv2 and lv3) in the VG linux

[root@rhel7 ~]# lvcreate --name lv1 --size 2G linux
WARNING: ext4 signature detected on /dev/linux/lv1 at offset 1080. Wipe it? [y/n] y
  Wiping ext4 signature on /dev/linux/lv1.
  Logical volume "lv1" created

[root@rhel7 ~]# lvcreate --name lv2 --size 3G linux
WARNING: ext4 signature detected on /dev/linux/lv2 at offset 1080. Wipe it? [y/n] y
  Wiping ext4 signature on /dev/linux/lv2.
  Logical volume "lv2" created

[root@rhel7 ~]# lvcreate --name lv3 --size 3.5G linux
  Logical volume "lv3" created
[root@rhel7 ~]#

[root@rhel7 ~]# lvdisplay
  --- Logical volume ---
  LV Path                /dev/linux/lv1
  LV Name                lv1
  VG Name                linux
  LV UUID                ZhsY0L-LVEY-8RIS-fZue-wWaf-TMNK-7mPivm
  LV Write Access        read/write
  LV Creation host, time rhel7, 2016-01-26 16:22:47 +0600
  LV Status              available
  # open                 0
  LV Size                2.00 GiB
  Current LE             128
  Segments               1
  Allocation             inherit
  Read ahead sectors     auto
  - currently set to     8192
  Block device           253:0

  --- Logical volume ---
  LV Path                /dev/linux/lv2
  LV Name                lv2
  VG Name                linux
  LV UUID                FmRcHL-2GfR-dBFQ-VYP1-NaA1-J1Mw-dbxxBU
  LV Write Access        read/write
  LV Creation host, time rhel7, 2016-01-26 16:23:09 +0600
  LV Status              available
  # open                 0
  LV Size                3.00 GiB
  Current LE             192
  Segments               1
  Allocation             inherit
  Read ahead sectors     auto
  - currently set to     8192
  Block device           253:1

  --- Logical volume ---
  LV Path                /dev/linux/lv3
  LV Name                lv3
  VG Name                linux
  LV UUID                ez17be-QV8v-KG8b-mcTp-g8rx-sm36-tctaX1
  LV Write Access        read/write
  LV Creation host, time rhel7, 2016-01-26 16:23:27 +0600
  LV Status              available
  # open                 0
  LV Size                3.50 GiB
  Current LE             224
  Segments               3
  Allocation             inherit
  Read ahead sectors     auto
  - currently set to     8192
  Block device           253:2

[root@rhel7 ~]#

Step 8: Now create file system (ext4)

[root@rhel7 ~]# mkfs.ext4 /dev/linux/lv1
mke2fs 1.42.9 (28-Dec-2013)
Filesystem label=
OS type: Linux
Block size=4096 (log=2)
Fragment size=4096 (log=2)
Stride=0 blocks, Stripe width=0 blocks
131072 inodes, 524288 blocks
26214 blocks (5.00%) reserved for the super user
First data block=0
Maximum filesystem blocks=536870912
16 block groups
32768 blocks per group, 32768 fragments per group
8192 inodes per group
Superblock backups stored on blocks:
        32768, 98304, 163840, 229376, 294912

Allocating group tables: done
Writing inode tables: done
Creating journal (16384 blocks): done
Writing superblocks and filesystem accounting information: done

[root@rhel7 ~]# mkfs.ext4 /dev/linux/lv2
[root@rhel7 ~]# mkfs.ext4 /dev/linux/lv3
[root@rhel7 ~]#

Step 9: Now create three directories (/data1, /data2 and /data3) on which to mount the three LVs

[root@rhel7 ~]# mkdir /data1
[root@rhel7 ~]# mkdir /data2
[root@rhel7 ~]# mkdir /data3

Step 10: Now mount the three LVs

[root@rhel7 /]# mount /dev/linux/lv1 /data1
[root@rhel7 /]# mount /dev/linux/lv2 /data2
[root@rhel7 /]# mount /dev/linux/lv3 /data3
[root@rhel7 /]# df -h
Filesystem             Size  Used Avail Use% Mounted on
/dev/sda2              9.8G  7.3G  2.6G  75% /
devtmpfs               486M     0  486M   0% /dev
tmpfs                  494M   80K  494M   1% /dev/shm
tmpfs                  494M  7.1M  487M   2% /run
tmpfs                  494M     0  494M   0% /sys/fs/cgroup
/dev/sda5              6.9G   33M  6.8G   1% /data
/dev/sda1              497M  118M  380M  24% /boot
/dev/mapper/linux-lv1  2.0G  6.0M  1.8G   1% /data1
/dev/mapper/linux-lv2  2.9G  9.0M  2.8G   1% /data2
/dev/mapper/linux-lv3  3.4G   14M  3.2G   1% /data3
[root@rhel7 /]#