[root@node2 dockerfile-sample1]# mkdir dockerfile-sample2 [root@node2 dockerfile-sample1]# cd dockerfile-sample2/ [root@node2 dockerfile-...

Udemy docker images and volume

[root@node2 dockerfile-sample1]# mkdir dockerfile-sample2
[root@node2 dockerfile-sample1]# cd dockerfile-sample2/
[root@node2 dockerfile-sample1]# ll -lrth
[root@node2 dockerfile-sample1]# vim Dockerfile
[root@node2 dockerfile-sample1]# vim index.html
[root@node2 dockerfile-sample1]# docker image build -t nginx-with-html .
[root@node2 dockerfile-sample1]# docker container run -p 80:80 --rm nginx-with-html

vim Dockerfile
# Build a stock nginx image that serves our own landing page.
FROM nginx:latest
# nginx's default document root — subsequent relative paths resolve here.
WORKDIR /usr/share/nginx/html
# Overwrite the default index page with the one from the build context.
COPY index.html index.html


===================================================================================

[root@node2 ~]# docker container run -d --name mysql -e MYSQL_ALLOW_EMPTY_PASSWORD=True mysql
[root@node2 ~]# docker container inspect mysql
[root@node2 ~]# docker volume ls
[root@node2 ~]# docker container run -d --name mysql2 -e MYSQL_ALLOW_EMPTY_PASSWORD=True -v mysql-db:/var/lib/mysql mysql
[root@node2 ~]# docker volume ls
DRIVER              VOLUME NAME
local               ac6eef37956b3d3db63f0ac8d8f7ff3a4b4ac140467d37dac4304e3d1113bbf1
local               mysql-db

[root@node2 ~]# docker volume inspect mysql-db
[
    {
        "CreatedAt": "2019-12-21T09:12:26-05:00",
        "Driver": "local",
        "Labels": null,
        "Mountpoint": "/var/lib/docker/volumes/mysql-db/_data",
        "Name": "mysql-db",
        "Options": null,
        "Scope": "local"
    }
]
[root@node2 ~]#
[root@node2 ~]# docker container inspect mysql2
[root@node2 ~]# docker volume create ravi
ravi
[root@node2 ~]# docker volume ls
DRIVER              VOLUME NAME
local               ac6eef37956b3d3db63f0ac8d8f7ff3a4b4ac140467d37dac4304e3d1113bbf1
local               mysql-db
local               ravi
[root@node2 ~]#

[root@node2 dockerfile-sample2]# docker container run -d --name nginx -p 80:80 -v $(pwd):/usr/share/nginx/html nginx
[root@node2 ~]# docker container run -d --name psql -v psql:/var/lib/postgresql/data postgres:9.6.1
[root@node2 ~]# docker container logs psql
[root@node2 ~]# docker container stop psql
[root@node2 ~]# docker container run -d --name psql2 -v psql:/var/lib/postgresql/data postgres:9.6.2
[root@node2 ~]# docker container logs psql2
[root@node2 ~]# docker run -p 80:4000 -v $(pwd):/site bretfisher/jekyll-serve

0 comentários:

Note: only a member of this blog may post a comment.

[root@node2 ~]# docker container run -p 80:80 --name webhost -d nginx 6608c2fdab1ea4252d239341b96161b438d669dd3687ffed1d4504587f050ecf [r...

Udemy docker networking

[root@node2 ~]# docker container run -p 80:80 --name webhost -d nginx
6608c2fdab1ea4252d239341b96161b438d669dd3687ffed1d4504587f050ecf
[root@node2 ~]#
[root@node2 ~]# docker container port webhost
80/tcp -> 0.0.0.0:80
[root@node2 ~]# docker container inspect --format '{{ .NetworkSettings.IPAddress }}' webhost
172.17.0.5
[root@node2 ~]#
[root@node2 ~]# docker network ls
NETWORK ID          NAME                DRIVER              SCOPE
a71c392d30d7        bridge              bridge              local
b22e6d7fcb00        host                host                local
652f2a01662d        none                null                local
[root@node2 ~]# docker network inspect bridge
[root@node2 ~]# docker network create my_app_net
[root@node2 ~]# docker container run -d --name new_nginx --network my_app_net nginx
195e2b4dc3d7522764f39623eda9be906ab22a262e461f9fa7df319b2c9f5413
[root@node2 ~]# docker network connect 74522dcf77e7 6608c2fdab1e
[root@node2 ~]# docker network disconnect 74522dcf77e7 6608c2fdab1e

[root@node2 ~]# docker container exec -it my_nginx /bin/bash
[root@node2 ~]# docker container exec -it my_nginx ping new_nginx


[root@node2 ~]# conatiner -d --net dude --net-alias search elasticsearch:2
[root@node2 ~]# docker conatiner run -d --net dude --net-alias search elasticsearch:2
[root@node2 ~]# docker container run -d --net dude --net-alias search elasticsearch:2
[root@node2 ~]# docker container ls
[root@node2 ~]# docker container run --rm --net dude alpine nslookup search
[root@node2 ~]# docker container run --rm --net dude centos curl -s search:9200

0 comentários:

Note: only a member of this blog may post a comment.

[root@node2 ~]# docker container run --publish 80:80 nginx [root@node2 ~]# docker container run --publish 80:80 --detach nginx [root@node...

Udemy docker

[root@node2 ~]# docker container run --publish 80:80 nginx
[root@node2 ~]# docker container run --publish 80:80 --detach nginx
[root@node2 ~]# docker container run --publish 80:80 --detach --name webhost nginx
[root@node2 ~]# docker container logs webhost
[root@node2 ~]# docker container top webhost
[root@node2 ~]# docker container rm 1bd 4f9 d7a
[root@node2 ~]# docker container rm -f 1bd
[root@node2 ~]# docker run --name mongo -d mongo
[root@node2 ~]# docker top mongo
[root@node2 ~]# ps -aux |grep mongo
polkitd   3010  1.3  8.6 1576112 87776 ?       Ssl  08:41   0:00 mongod --bind_ip_all
root      3114  0.0  0.0 112660   972 pts/0    S+   08:42   0:00 grep --color=auto mongo
[root@node2 ~]# docker stop mongo
[root@node2 ~]# ps -aux |grep mongo
root      3158  0.0  0.0 112660   968 pts/0    R+   08:43   0:00 grep --color=auto mongo
[root@node2 ~]# docker start mongo
[root@node2 ~]# docker container run -d --name nginx nginx
[root@node2 ~]# docker container run -d --name mysql -e MYSQL_RANDOM_ROOT_PASSWORD=true mysql
[root@node2 ~]# docker container top mysql
UID                 PID                 PPID                C                   STIME               TTY                 TIME                CMD
polkitd             7599                7582                2                   08:57               ?                   00:00:01            mysqld
[root@node2 ~]# docker container top nginx
UID                 PID                 PPID                C                   STIME               TTY                 TIME                CMD
root                7462                7446                0                   08:54               ?                   00:00:00            nginx: master process nginx -g daemon off;
101                 7493                7462                0                   08:54               ?                   00:00:00            nginx: worker process
[root@node2 ~]#
[root@node2 ~]# docker container inspect mysql
[root@node2 ~]# docker container inspect nginx
[root@node2 ~]# docker container stats
CONTAINER ID        NAME                CPU %               MEM USAGE / LIMIT     MEM %               NET I/O             BLOCK I/O           PIDS
488be6106b2c        mysql               0.64%               369.7MiB / 992.4MiB   37.26%              648B / 0B           506MB / 966MB       38
58987bf1b3d3        nginx               0.00%               1.367MiB / 992.4MiB   0.14%               648B / 0B           0B / 0B             2
b1c486b813e1        mongo               0.27%               67.63MiB / 992.4MiB   6.81%               648B / 0B           34.9MB / 839kB      32

[root@node2 ~]# docker container run -it --name proxy nginx bash
[root@node2 ~]# docker container ls
CONTAINER ID        IMAGE               COMMAND                  CREATED             STATUS              PORTS                 NAMES
488be6106b2c        mysql               "docker-entrypoint.s…"   7 minutes ago       Up 7 minutes        3306/tcp, 33060/tcp   mysql
58987bf1b3d3        nginx               "nginx -g 'daemon of…"   10 minutes ago      Up 10 minutes       80/tcp                nginx
b1c486b813e1        mongo               "docker-entrypoint.s…"   22 minutes ago      Up 20 minutes       27017/tcp             mongo
[root@node2 ~]# docker container ls -a
CONTAINER ID        IMAGE               COMMAND                  CREATED              STATUS                      PORTS                 NAMES
66636011299a        nginx               "bash"                   About a minute ago   Exited (0) 11 seconds ago                         proxy
488be6106b2c        mysql               "docker-entrypoint.s…"   7 minutes ago        Up 7 minutes                3306/tcp, 33060/tcp   mysql
58987bf1b3d3        nginx               "nginx -g 'daemon of…"   10 minutes ago       Up 10 minutes               80/tcp                nginx
b1c486b813e1        mongo               "docker-entrypoint.s…"   22 minutes ago       Up 20 minutes               27017/tcp             mongo
[root@node2 ~]#

[root@node2 ~]# docker container run -it --name ubuntu ubuntu
Unable to find image 'ubuntu:latest' locally
latest: Pulling from library/ubuntu
Digest: sha256:6e9f67fa63b0323e9a1e587fd71c561ba48a034504fb804fd26fd8800039835d
Status: Downloaded newer image for ubuntu:latest
root@44d60cf2443a:/#
root@44d60cf2443a:/# apt-get update
Get:1 http://security.ubuntu.com/ubuntu bionic-security InRelease [88.7 kB]
Get:2 http://archive.ubuntu.com/ubuntu bionic InRelease [242 kB]
Get:3 http://security.ubuntu.com/ubuntu bionic-security/universe amd64 Packages [795 kB]
Get:4 http://archive.ubuntu.com/ubuntu bionic-updates InRelease [88.7 kB]
Get:5 http://archive.ubuntu.com/ubuntu bionic-backports InRelease [74.6 kB]     
Get:6 http://archive.ubuntu.com/ubuntu bionic/main amd64 Packages [1344 kB]
Get:7 http://security.ubuntu.com/ubuntu bionic-security/main amd64 Packages [761 kB]
Get:8 http://security.ubuntu.com/ubuntu bionic-security/multiverse amd64 Packages [6781 B]
Get:9 http://security.ubuntu.com/ubuntu bionic-security/restricted amd64 Packages [19.2 kB]
Get:10 http://archive.ubuntu.com/ubuntu bionic/restricted amd64 Packages [13.5 kB]
Get:11 http://archive.ubuntu.com/ubuntu bionic/universe amd64 Packages [11.3 MB]
Get:12 http://archive.ubuntu.com/ubuntu bionic/multiverse amd64 Packages [186 kB]           
Get:13 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 Packages [1322 kB]
Get:14 http://archive.ubuntu.com/ubuntu bionic-updates/multiverse amd64 Packages [10.5 kB]
Get:15 http://archive.ubuntu.com/ubuntu bionic-updates/restricted amd64 Packages [32.7 kB]
Get:16 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 Packages [1057 kB]
Get:17 http://archive.ubuntu.com/ubuntu bionic-backports/main amd64 Packages [2496 B]
Get:18 http://archive.ubuntu.com/ubuntu bionic-backports/universe amd64 Packages [4244 B]
Fetched 17.4 MB in 5s (3761 kB/s)                         
Reading package lists... Done
root@44d60cf2443a:/#
root@44d60cf2443a:/# apt-get install curl -y
root@44d60cf2443a:/# curl google.com
<HTML><HEAD><meta http-equiv="content-type" content="text/html;charset=utf-8">
<TITLE>301 Moved</TITLE></HEAD><BODY>
<H1>301 Moved</H1>
The document has moved
<A HREF="http://www.google.com/">here</A>.
</BODY></HTML>
root@44d60cf2443a:/#

[root@node2 ~]# docker container start -ai ubuntu
root@44d60cf2443a:/# curl google.com
<HTML><HEAD><meta http-equiv="content-type" content="text/html;charset=utf-8">
<TITLE>301 Moved</TITLE></HEAD><BODY>
<H1>301 Moved</H1>
The document has moved
<A HREF="http://www.google.com/">here</A>.
</BODY></HTML>
root@44d60cf2443a:/#
[root@node2 ~]# docker container  exec -it mysql bash

[root@node2 ~]# docker container ls
CONTAINER ID        IMAGE               COMMAND                  CREATED             STATUS              PORTS                 NAMES
488be6106b2c        mysql               "docker-entrypoint.s…"   19 minutes ago      Up 19 minutes       3306/tcp, 33060/tcp   mysql
58987bf1b3d3        nginx               "nginx -g 'daemon of…"   22 minutes ago      Up 22 minutes       80/tcp                nginx
b1c486b813e1        mongo               "docker-entrypoint.s…"   34 minutes ago      Up 32 minutes       27017/tcp             mongo
[root@node2 ~]#
[root@node2 ~]# docker container run -it alpine bash
docker: Error response from daemon: OCI runtime create failed: container_linux.go:346: starting container process caused "exec: \"bash\": executable file not found in $PATH": unknown.
[root@node2 ~]# docker container run -it alpine sh
/ #

0 comentários:

Note: only a member of this blog may post a comment.

[root@test test1]# echo $HOME /root [root@test test1]# echo $USER root [root@test test1]# [root@test test1]# cat sayH.sh echo ...

Shell Script

[root@test test1]# echo $HOME
/root
[root@test test1]# echo $USER
root
[root@test test1]#


[root@test test1]# cat sayH.sh
# Prompt the user for a first name and echo it back.
echo " Type your first name"
# -r stops read from mangling backslashes in the typed name.
read -r fname
# Quote the expansion so names with spaces/globs are printed verbatim.
echo "MY first name is" "$fname"
[root@test test1]#



How to change a color in shell script

# ANSI color codes for terminal output.
# NOTE: the original wrapped each code in \[ \] — those are readline markers
# that are only meaningful inside PS1; with `echo -e` they print literally.
# Plain escape sequences are used here instead.
Black="\033[0;30m"        # Black
Red="\033[0;31m"          # Red
Green="\033[0;32m"        # Green
Yellow="\033[0;33m"       # Yellow
Blue="\033[0;34m"         # Blue
Purple="\033[0;35m"       # Purple
Cyan="\033[0;36m"         # Cyan
White="\033[0;37m"        # White

# Bug fix: the original reassigned Red twice, silently clobbering the plain
# red code; give the variants their own names.
BoldRed="\033[1;31m"      # Bold Red
UnderlineRed="\033[4;31m" # Underline in Red

# Bug fix: ${NC} was used below but never defined, so colors leaked into
# all following terminal output. Define it as the ANSI reset code.
NC="\033[0m"              # No Color (reset)

echo -e "\n\n ${Red}.................Copying user.sh to /etc/profile.d. This will set timestamp format for command history .....${NC}"
echo -e "\n\n ${Green}.................Copying user.sh to /etc/profile.d. This will set timestamp format for command history .....${NC}"
echo -e "\n\n ${Yellow}.................Copying user.sh to /etc/profile.d. This will set timestamp format for command history .....${NC}"
echo -e "\n\n ${Blue}.................Copying user.sh to /etc/profile.d. This will set timestamp format for command history .....${NC}"
echo -e "\n\n ${Purple}.................Copying user.sh to /etc/profile.d. This will set timestamp format for command history .....${NC}"
echo -e "\n\n ${Cyan}.................Copying user.sh to /etc/profile.d. This will set timestamp format for command history .....${NC}"
echo -e "\n\n ${White}.................Copying user.sh to /etc/profile.d. This will set timestamp format for command history .....${NC}"


How to change line and output

[root@test test1]# echo -n "Do not output the trailing new line"
Do not output the trailing new line[root@test test1]#

[root@test test1]# echo -e "Do not output the trailing new line"
Do not output the trailing new line
[root@test test1]#

[root@test test1]# echo -e "\a Do not output the trailing new line"
 Do not output the trailing new line
[root@test test1]#

[root@test test1]# echo -e "\b Do not output the trailing new line"                                                                                                           Do not output the trailing new line
[root@test test1]#

[root@test test1]# echo -e "\c Do not output the trailing new line"
[root@test test1]#

[root@test test1]# echo -e "\n Do not output the trailing new line"

 Do not output the trailing new line
[root@test test1]#

[root@test test1]# echo -e "\r Do not output the trailing new line"
 Do not output the trailing new line
[root@test test1]#

[root@test test1]# echo -e "\t Do not output the trailing new line"
Do not output the trailing new line
[root@test test1]#

[root@test test1]# echo -e "An apple a day keeps away \a\t\tdoctor\n"
An apple a day keeps away doctor

How to use expr :-
==================================================================
[root@test test1]#

[root@test test1]# expr 1 + 3
4
[root@test test1]# expr 2 - 1
1
[root@test test1]# expr 10 / 2
5
[root@test test1]# expr 20 % 3
2
[root@test test1]# expr 10 \* 3
30
[root@test test1]# echo `expr 6 + 3`
9
[root@test test1]#

====================================================================

[root@test test1]# cat nestedif.sh
#!/bin/sh
# Script to demonstrate a nested if (if within if): menu-driven OS choice.

echo "1. Unix (Sun Os)"
echo "2. Linux (Red Hat)"
echo -n "Select your os choice [1 or 2]? "
# -r prevents backslash interpretation in the typed answer.
read -r osch

# classify_choice: print the message for a given menu selection.
# Bug fix: the original compared with `-eq` on an unquoted variable, so an
# empty or non-numeric answer made `[` emit an error. String comparison on
# a quoted value handles any input safely.
classify_choice() {
  if [ "$1" = "1" ]; then
    echo "You Pick up Unix (Sun Os)"
  else #### nested if i.e. if within if ######
    if [ "$1" = "2" ]; then
      echo "You Pick up Linux (Red Hat)"
    else
      echo "What you don't like Unix/Linux OS."
    fi
  fi
}

classify_choice "$osch"
[root@test test1]#

======================================================================

[root@test test1]# cat elf.sh
#!/bin/sh
# Script to test if..elif...else
# Prints whether the first argument is positive, negative, or zero.
# Bug fix: the original had a bare `#` line BEFORE the shebang, so the
# shebang was never honored; it must be the first line of the script.

# classify_number: validate $1 as an (optionally signed) integer, then
# report its sign. Validation up front stops `[ -gt ]` from printing
# "integer expression expected" errors on junk input.
classify_number() {
  case $1 in
    ''|-|*[!0-9-]*|?*-*)
      # empty, a lone dash, non-digit chars, or a dash not at the front
      echo "Opps! $1 is not number, give number"
      return 1
      ;;
  esac
  if [ "$1" -gt 0 ]; then
    echo "$1 is positive"
  elif [ "$1" -lt 0 ]; then
    echo "$1 is negative"
  else
    echo "$1 is zero"
  fi
}

classify_number "$1"
[root@test test1]#

=======================================================================

[root@test test1]# cat mtable.sh
#!/bin/sh
#
# Script to print the multiplication table for a given number.
#

# print_table: emit "n * i = product" for i in 1..10.
# Uses POSIX arithmetic expansion $(( )) instead of forking `expr`
# once per iteration (same output, no subprocess per line).
print_table() {
  n=$1
  for i in 1 2 3 4 5 6 7 8 9 10; do
    echo "$n * $i = $((n * i))"
  done
}

if [ $# -eq 0 ]; then
  # typo fix: "form" -> "from"
  echo "Error - Number missing from command line argument"
  echo "Syntax : $0 number"
  echo "Use to print multiplication table for given number"
  exit 1
fi
print_table "$1"
[root@test test1]#

========================================================================

0 comentários:

Note: only a member of this blog may post a comment.

Q:1 Why to use NFS ? Ans: A Network File System (NFS) allows remote machine to mount file systems over a network and interact with those ...

NFS Interview Questions

Q:1 Why to use NFS ?

Ans: A Network File System (NFS) allows remote machine to mount file systems over a network and interact with those file systems as though they are mounted locally. This enables system administrators to consolidate resources onto centralized servers over the network.

Q:2 What is the default port of NFS server ?

Ans: By default NFS uses 2049 TCP port.

Q:3 What are different versions of NFS Server ?

Ans: Currently, there are three versions of NFS. NFS version 2 (NFSv2) is older and widely supported. NFS version 3 (NFSv3) supports safe asynchronous writes and is more robust at error handling than NFSv2; it also supports 64-bit file sizes and offsets, allowing clients to access more than 2Gb of file data.

NFS version 4 (NFSv4) works through firewalls and on the Internet, no longer requires an rpcbind service, supports ACLs, and utilizes stateful operations. Red Hat Enterprise Linux 6.X & Centos 6.X supports NFSv2,NFSv3, and NFSv4 clients. When mounting a file system via NFS, Red Hat Enterprise Linux uses NFSv4 by default, if the server supports it.

Q:4 What are configuration files of NFS server ?

Ans: ‘/etc/exports’ is the main configuration file that controls which file systems are exported to remote hosts and specifies options.
‘/etc/sysconfig/nfs‘ is the file through which we can fix ports for RQUOTAD_PORT, MOUNTD_PORT, LOCKD_TCPPORT, LOCKD_UDPPORT and STATD_PORT

Q:5 What are different options used in /etc/exports file ?

Ans: Below are list of options used in /etc/exports file :

ro: The directory is shared read only; the client machine will not be able to write to it. This is the default.
rw: The client machine will have read and write access to the directory.
root_squash: By default, any file request made by user root on the client machine is treated as if it is made by user nobody on the server. (Exactly which UID the request is mapped to depends on the UID of user “nobody” on the server, not the client.)
no_root_squash : if this option is used , then root on the client machine will have the same level of access to the files on the system as root on the server. This can have serious security implications, although it may be necessary if you want to perform any administrative work on the client machine that involves the exported directories. You should not specify this option without a good reason.
no_subtree_check : If only part of a volume is exported, a routine called subtree checking verifies that a file that is requested from the client is in the appropriate part of the volume. If the entire volume is exported, disabling this check will speed up transfers.
sync : Replies to the NFS request only after all data has been written to disk. This is much safer than async, and is the default in all nfs-utils versions after 1.0.0.
async : Replies to requests before the data is written to disk. This improves performance, but results in lost data if the server goes down.
no_wdelay : NFS has an optimization algorithm that delays disk writes if NFS deduces a likelihood of a related write request soon arriving. This saves disk writes and can speed performance
wdelay : Negation of no_wdelay , this is default
nohide : Normally, if a server exports two filesystems one of which is mounted on the other, then the client will have to mount both filesystems explicitly to get access to them. If it just mounts the parent, it will see an empty directory at the place where the other filesystem is mounted. That filesystem is “hidden”. Setting the nohide option on a filesystem causes it not to be hidden, and an appropriately authorised client will be able to move from the parent to that filesystem without noticing the change.
hide : Negation of nohide This is the default
Q:6 How to list available nfs share on local machine & remote machine ?

Ans: ‘showmount -e localhost’ : Shows the available shares on your local machine
‘showmount -e <Remote-server-ip or hostname>‘: Lists the available shares at the remote server

Q:7 What is pNFS ?

Ans: Parallel NFS (pNFS) as part of the NFS v4.1 standard is available as of Red Hat Enterprise Linux 6.4. The pNFS architecture improves the scalability of NFS, with possible improvements to performance. That is, when a server implements pNFS as well, a client is able to access data through multiple servers concurrently. It supports three storage protocols or layouts: files, objects, and blocks.

Q:8 What is the difference between Hard mount & Soft mount in nfs ?

Ans:  Difference between soft mount and hard mount is listed below :

 Soft Mount : Consider we have mounted a NFS share using ‘soft mount’ . When a program or application requests a file from the NFS filesystem, NFS client daemons will try to retrieve the data from the NFS server. But, if it doesn’t get any response from the NFS server (due to any crash or failure of NFS server), the NFS client will report an error to the process on the client machine requesting the file access. The advantage of this mechanism is “fast responsiveness” as it doesn’t wait for the NFS server to respond. But, the main disadvantage of this method is data corruption or loss of data. So, this is not a recommended option to use.
Hard Mount : Suppose we have mounted the NFS share using hard mount, it will repeatedly retry to contact the server. Once the server is back online the program will continue to execute undisturbed from the state where it was during server crash. We can use the mount option “intr” which allows NFS requests to be interrupted if the server goes down or cannot be reached. Hence the recommended settings are hard and intr options.
Q:9 How to check iostat of nfs mount points ?

Ans: Using command ‘nfsiostat‘ we can list iostat of nfs mount points. Use the below command :
# nfsiostat <interval> <count> <mount_point>

<interval> : specifies the amount of time in seconds between each report. The first report contains statistics for the time since each file system was mounted. Each subsequent report contains statistics collected during the interval since the previ-ous report.

<count> : If the <count> parameter is specified, the value of <count> determines the number of reports generated at seconds apart. if the interval parameter is specified without the <count> parameter, the command generates reports continuously.

<mount_point> : If one or more <mount point> names are specified, statistics for only these mount points will be displayed. Otherwise, all NFS mount points on the client are listed.

Q:10 How to check nfs server version ?

Ans: ‘nfsstat -o all’ command shows all information about active versions of NFS.

Q:11 What is portmap?

Ans: The portmapper keeps a list of what services are running on what ports. This list is used by a connecting machine to see what ports it wants to talk to access certain services.

Q:12 How to reexport all the directories of ‘/etc/exports’ file ?

Ans: Using the command ‘ exportfs -r ‘ , we can reexport or refresh entries of ‘/etc/exports’ file without restarting nfs service

0 comentários:

Note: only a member of this blog may post a comment.

User Management Interview Questions: 1. How to create a user ? Brief with full syntax.. /usr/sbin/useradd -u uid -g gid -c " User De...

User and Group managent interview questions

User Management Interview Questions:
1. How to create a user ? Brief with full syntax..
/usr/sbin/useradd -u uid -g gid -c " User Descriptions" -m -d "Home Directory Path" -s "Shell" username
Example :
#useradd -u 535 -g unix -c "Unixrock blog" -m -d /export/home/unixrock -s /usr/bin/bash unixrock

–u UID ( From 0 to 65535 ) , 0 is reserved for root
-g Primary Group
-m Force to Create Home Directory Specified by –d and copy default skeleton files in /etc/skel folder
-d User Home Directory Path
-c User Descriptions
-s Shell Path
2. What are the important files for User Management task?
/etc/passwd
/etc/shadow
/etc/group
/etc/default/passwd
/etc/default/login

3. Describe the /etc/passwd fields ?
/etc/passwd file is having 7 fields
username:password:uid:gid:comment:home-directory-path:login-shell
Example :
unixrock:x:535:121:Unixrock blog:/export/home/unixrock:/usr/bin/bash

4. Describe the /etc/shadow file fields ?
/etc/shadow file is having 9 fields
loginID:password:lastchg:min:max:warn:inactive:expire:reserved
Example:
unixrock:R1EbI61VDyM2I:15995:7:91:7:::

5. Describe the /etc/group file fields ?
/etc/group file is having 4 fields
groupname:group-pwd:GID:user-list
Example:
unixrock::121:unixrock, raj

6. What is the different between "su  UserID" and "su - UserID" ?
"su UserID"     -  Doesn't check the PATH and Current Working Directory
"su - UserID"   -  Load the User's Profiles  (PATH/Current Working Directory)

7. Default permission of Passwd/Shadow/Group files ?
/etc/passwd 644
/etc/shadow 400
/etc/group    644
bash-3.00# ls -ld /etc/passwd /etc/shadow /etc/group
-rw-r--r--   1 root     sys          459 Dec 10 16:32 /etc/group
-rw-r--r--   1 root     sys        18498 Dec 11 09:09 /etc/passwd
-r--------   1 root     sys        10334 Dec 11 09:35 /etc/shadow
bash-3.00#

8. Default permission of file and Directory ?
Default permission of file is 644  (666 - 022 (umask value))
Default permission of directory is 755  (777 - 022 (umask value))

9. How to view the list of users who currently logged in the system ?
"who" command will show the users who logged in the current system.
The command refers /var/adm/utmpx to obtain the information.
bash-3.00# who
root       pts/2        Sep 10 22:11    (192.168.10.22)
root       pts/3        Dec 11 22:55    (192.168.10.22)
bash-3.00#
bash-3.00# ls -ld /var/adm/utmpx
-rw-r--r--   1 root     bin         2976 Dec 11 22:55 /var/adm/utmpx
bash-3.00# file /var/adm/utmpx
/var/adm/utmpx: data
bash-3.00#

10. How to view the User's login and logout details ?
"last " command will show the users login and logout details.
The command refers /var/adm/wtmpx to obtain the information.
bash-3.00# last
root      pts/3        192.168.10.22    Wed Dec 11 22:55   still logged in
root      sshd         192.168.10.22    Wed Dec 11 22:55   still logged in
root      pts/2        192.168.10.22    Tue Sep 10 22:11   still logged in
root      sshd         192.168.10.22    Tue Sep 10 22:11 - 22:55 (92+00:43)
reboot    system boot                   Tue Sep 10 22:03
reboot    system down                   Fri Sep  6 17:59
wtmp begins Tue Aug 13 01:32
bash-3.00#
bash-3.00# ls -ld /var/adm/wtmpx
-rw-r--r--   1 adm      adm        68820 Dec 11 22:55 /var/adm/wtmpx
bash-3.00# file /var/adm/wtmpx
/var/adm/wtmpx: data
bash-3.00#

11. How to view details information about the User?
"finger username" will show the details about the user.
bash-3.00# finger unixrock
Login name: unixrock
Directory: /home/unixrock               Shell: /bin/sh
Never logged in.
No unread mail
No Plan.
bash-3.00#

12. Describe about SETUID/SETGID/StickyBIT ?
SETUID/SETGID/StickyBIT

13. How to check Primary and Secondary Group of One User ?
"id -a username" will show the user's Primary and Secondary groups.
FYI, one user can be added to up to 15 secondary groups, but only one primary group.
bash-3.00# id -a unixrock
uid=100(unixrock) gid=1(other) groups=1(other)
bash-3.00#
gid - Primary Group
groups - Secondary Group

14. How to rename the existing User ID ?
# usermod -l <newname> <oldname>
bash-3.00# usermod -l unixrock_new unixrock
UX: usermod: unixrock name too long.
bash-3.00# grep -i unixrock /etc/passwd
unixrock_new:x:100:1::/home/unixrock:/bin/sh
bash-3.00#

15. How to lock the User Account ?
# passwd -l UserID
bash-3.00# passwd -s unixrock
unixrock  PS
bash-3.00# passwd -l unixrock
passwd: password information changed for unixrock
bash-3.00# passwd -s unixrock
unixrock  LK
bash-3.00#

16. How to unlock the User Account?
# passwd -u <UserID>
bash-3.00# passwd -s unixrock
unixrock  LK
bash-3.00# passwd -u unixrock
passwd: password information changed for unixrock
bash-3.00# passwd -s unixrock
unixrock  PS
bash-3.00#

17. How to make the user account non-expiring ?
# passwd -x -1 <userID>
bash-3.00# passwd -s unixrock
unixrock  PS    12/11/13     7    91     7
bash-3.00#
bash-3.00# passwd -x -1 unixrock
passwd: password information changed for unixrock
bash-3.00#
bash-3.00# passwd -s unixrock
unixrock  PS
bash-3.00#

18. How do we set force passwd change for User's first login ?
# passwd -f  <UserID>
bash-3.00# passwd -s unixrock
unixrock  PS    12/11/13     7    91     7
bash-3.00#
bash-3.00# passwd -f unixrock
passwd: password information changed for unixrock
bash-3.00#
bash-3.00# passwd -s unixrock
unixrock  PS    00/00/00     7    91     7
bash-3.00#

19. How to delete the User ID ?
# userdel <UserID> or # userdel -r <UserID>

-r option will delete the User's Home directory too.

20. Type of SHELLs ? What is initialization file for those SHELLS ?

/bin/bash  - Bourne Again shell
/bin/csh    - C shell
/bin/ksh   - Korn shell
/bin/tcsh   - TC shell
/bin/zsh    - Z shell

Bourne   /etc/profile    $HOME/.profile   /bin/sh     /etc/skel/local.profile
Korn      /etc/profile    $HOME/.profile   /bin/ksh   /etc/skel/local.profile
                                  $HOME/.kshrc
C           /etc/.login      $HOME/.cshrc    /bin/csh   /etc/skel/local.cshrc
                                  $HOME/.login                   /etc/skell/local.login

21. How to Check the User's Crontabs ? How to allow the User to access the Cron?
# crontab -l <username>
or
# ls -ltr /var/spool/cron/crontabs/

/etc/cron.d/cron.allow  : If the file exists, the users can use crontab whoever listed in that file.

22. How to check User's Present Working Directory Path? How to check the Obsolete Path of running process ?
Find the Present Working Directory Path
# pwd

0 comentários:

Note: only a member of this blog may post a comment.

Q: Which Users tare not allowed to login via ftp ? Ans: Users mentioned in the file ‘/etc/vsftpd/ftpusers’ are not allowed to login via ft...

FTP interview Questions & Answers

Q: Which users are not allowed to login via ftp ?
Ans: Users mentioned in the file ‘/etc/vsftpd/ftpusers’ are not allowed to login via ftp.
Q: What is default directory for ftp / Anonymous user ?
Ans : ‘/var/ftp’ is the default directory for ftp or Anonymous user

Q: How to change the default directory for ftp / Anonymous user ?
Ans: Edit the file ‘/etc/vsftpd/vsftpd.conf’ and change the below directive :

anon_root=/<Path-of-New-Directory>
After making above change either restart or reload vsftpd service.

Q: How to disable Anonymous user in vsftpd ?
Ans: Edit the conf file ‘/etc/vsftpd/vsftpd.conf’ and change below directive and restart the ftp service.

anonymous_enable=NO

Q: What is chroot environment in ftp server ?
Ans: chroot environment prevents the user from leaving its home directory means jail like environment where users are limited to their home directory only. It is the addon security of ftp server.

Q: How to enable chroot environment in vsftpd server ?
Ans : To enable chroot environment edit the file ‘/etc/vsftpd/vsftpd.conf’ and enable the below directives :

chroot_list_enable=YES
chroot_list_file=/etc/vsftpd.chroot_list
The chroot_list_file variable specifies the file which contains users that are chroot.

Q: How to enable only limited/allowed users are able to login via ftp ?
Ans: This can be done by editing the file ‘/etc/vsftpd/vsftpd.conf’ and add the below directives :

userlist_enable=YES
userlist_file=/etc/vsftpd.user_list
userlist_deny=NO

Q: How to set ftp banner in linux ?
Ans: Open the file ‘/etc/vsftpd/vsftpd.conf’ and set the below directive :

ftpd_banner= “Enter New Banner Here”

Q: How To limit the data transfer rate, number of clients & connections per IP for local users ?
Ans: Edit the ftp server’s config file(/etc/vsftpd/vsftpd.conf) and set the below directives :

local_max_rate=1000000 # Maximum data transfer rate in bytes per second
max_clients=50 # Maximum number of clients that may connect

0 comentários:

Note: only a member of this blog may post a comment.

1. NFS is a protocol that allows a user to access files over a network; Samba is essentially a re-imaging of the Common Internet File Syste...

Difference between nfs and samba

1. NFS is a protocol that allows a user to access files over a network; Samba is essentially a re-imaging of the Common Internet File System.

2. NFS has four versions, the newest of which includes a stateful protocol; Samba has multiple versions, the latest of which allows file and print sharing between multiple computers.


SMB (Server Message Block)
0:16 – Server Message Block or SMB was originally designed by IBM back in the 80’s. After its inception, Microsoft took the protocol and added some features to it. The additions included features such as LAN Manager. Services such as this allowed Windows systems to map shared drives and have this new share act as a local drive on the computer.  For simplicity’s sake, I will skip the 3rd and 4th point but we will get back to those in a minute. With the release of Windows Vista, Microsoft also released SMB 2.0. This was a major revision of SMB which, besides adding additional features, reduced the “chattiness” of the protocol. In other words, it reduced the bandwidth used or data amount transmitted, over the network. SMB 3.0 was released with Windows 8 and Server 2008 R2. With more improvements and added functionality, SMB 3.0 was aimed towards increasing effectiveness in Datacenters. When mapping a share, Windows will automatically perform the negotiation and sort out what version of SMB to use.

Now what were the points that we skipped? In the late 90’s Microsoft had attempted to rename SMB to CIFS or Common Internet File System. Unfortunately for Microsoft, this attempt was unsuccessful so they were skipped in order to avoid confusion. CIFS had additional features, but the name simply did not catch on and Microsoft simply went back to SMB in future versions. Due to this, CIFS is referred to as a dialect of SMB though you may hear CIFS and SMB used synonymously, but they are basically referring to Windows file sharing. Moving forward however, the term CIFS should really not be used as it is a relic of the past and it should only be considered SMB.

NFS (Network File System)
2:04 – The next file system we will be examining is Network File System, or NFS. Originally developed by Sun in the late 80’s, NFS version 1 was used internally within Sun Microsystems and never released publically. It was Version 2 that was released to the public however. This provided basic file sharing capabilities and was used extensively within Unix based systems. As they released Version 3 in 1995, it was enhanced to add 64bit support and was able to handle files larger than 2 gigabytes. In 2000, Sun released version 4 of NFS with added performance along with security improvements. This allowed for security methods to be applied and utilized to authenticate users, e.g. Kerberos. These security measures made NFS version 4 much more secure when compared to previous revisions.

In The Real World
2:56 - Now I want to ask you a question. In the real world, which protocol would you use? Windows shares support both SMB and NFS, or even both at the same time. It is merely a matter of configuring which one you need or configuring both if you need to utilize both. Ideally, you’ll want to use a native protocol when possible. For example, if you are connecting two Unix systems, it will be best to utilize NFS. If you are connecting two Windows systems, SMB would be the obvious choice. Even though both achieve the goal of file sharing, there are differences in the way Windows and Unix based systems handle file systems and the users that will be using the systems. Mixing the two can lead to compatibility problems. As NFS is good for host authentication, it makes connecting two servers together rather easy. This can be placed in the boot up configuration and the data would be available to the operating system without the need for a user to be logged in. You could even connect to another server based on the IP address alone. Conversely, Windows requires user authentication in order to connect to an SMB share and generally the user is required to be logged in. It is possible to circumvent this and you would do this for some services running on the local system, but it is, unfortunately, not as simple as NFS makes it. Windows is an excellent choice for using authentication. Until NFS version 4, this was something that NFS did not handle well and was prone to more security problems. If you are using a domain, then Windows handles user authentication very well. Ultimately, the decision will be made on what operating systems you are using and your software requirements. In future videos, we will discuss how Windows file sharing (SMB) works and also how to incorporate NFS using Windows Server.  

0 comentários:

Note: only a member of this blog may post a comment.

FTP means File Transfer Protocol while SFTP means Secured File Transfer Protocol. FTP uses Port 21 whereas SFTP uses Port 22. SFTP shares...

FTP and SFTP contains a lot of difference between both of them .

FTP means File Transfer Protocol while SFTP means Secured File Transfer Protocol.
FTP uses Port 21 whereas SFTP uses Port 22.
SFTP shares its files with full security whereas FTP uploads or downloads its data without any security.
Filezilla is best for FTP whereas winSCP is regarded best for SFTP.
FTP sends data in plain text while SFTP provides a little bit of privacy.
FTP is used by anyone whereas SFTP is used by server owner because 22 port is not open in case of shared hosting.
SFTP is fully secured and satisfactory whereas FTP is less secured.

0 comentários:

Note: only a member of this blog may post a comment.

OS BOOT TIME RHEL6: 40 sec RHEL7: 20 sec MAXIMUM SIZE OF SINGLE PARTITION RHEL6: 50TB(EXT4) RHEL7: 500TB(XFS) BOOT LOADER RHEL6:...

What is Difference Between RHEL 6 & RHEL 7

OS BOOT TIME
RHEL6: 40 sec

RHEL7: 20 sec

MAXIMUM SIZE OF SINGLE PARTITION
RHEL6: 50TB(EXT4)

RHEL7: 500TB(XFS)

BOOT LOADER
RHEL6:  /boot/grub/grub.conf

RHEL7: /boot/grub2/grub.cfg

PROCESSOR ARCHITECTURE
RHEL6: It support 32bit & 64bit both

RHEL7: It only support 64bit

HOW TO FORMAT OR ASSIGN A FILE SYSTEM IN
RHEL6:      #mkfs.ext4   /dev/hda4

RHEL7:       #mkfs.xfs   /dev/hda3

HOW TO REPAIR A FILE SYSTEM IN
RHEL6:  #fsck -y /dev/hda3

RHEL7:  #xfs_repair /dev/hda3

COMMAND TO MANAGE NETWORK IN RHEL6 AND RHEL7
RHEL6:  #setup

RHEL7:  #nmtui

HOSTNAME CONFIGURATION FILE
RHEL6:    /etc/sysconfig/network

RHEL7:    /etc/hostname

DEFAULT ISO IMAGE MOUNT PATH
RHEL6: /media

RHEL7: /run/media/root

FILE SYSTEM CHECK
RHEL6:   e2fsck

RHEL7:   xfs_repair

RESIZE A FILE SYSTEM
RHEL6:   #resize2fs -p /dev/vg00/lv1

RHEL7:    #xfs_growfs  /dev/vg00/lv1

TUNE A FILE SYSTEM
RHEL6: tune2fs

RHEL7: xfs_admin

IPTABLES AND FIREWALL
RHEL6: iptables

RHEL7: firewalld

IPtables
To see firewall status in RHEL7

#firewall-cmd --state

To see Firewall status in RHEL6

#service iptables status

To stop firewall in RHEL7

#systemctl stop firewalld.service

To stop firewall in RHEL6

#service iptables stop

COMMUNICATION BETWEEN TCP AND UDP IN BACK END
RHEL6: netcat

RHEL7: ncat

INTERFACE NAME
RHEL6: eth0

RHEL7: ens198(N)

COMBINING NIC
RHEL6: Network Bonding

RHEL7: Team Driver

NFS Server Version
RHEL6:  NFSv2

RHEL7:  NFSV4

DATABASE USED
RHEL6: Mysql

RHEL7: mariaDB

RHEL7 also support Mysql

MANAGING SERVICES
RHEL6:

#service sshd restart

#chkconfig sshd on

RHEL7:

#systemctl restart sshd

#systemctl enable sshd

File System.
RHEL6 default file system is ext4

xfs is RHEL7 default file system.

Kernel Version
RHEL6 default kernel version is 2.6 while RHEL7 is 3.10

UID Allocation
In RHEL6 default UID assigned to users would start from 500 while in RHEL7 it’s starting from 1000.
But this can be changed if required by editing /etc/login.defs file.

Maximum supported File Size.
In RHEL6 maximum file size of an individual file can be up to 16TB while in RHEL7 it can be up to 500TB which is very large in comparison to RHEL6.

Maximum Supported File System Size.
In RHEL6 maximum file system size=16TB (for 64bit Machine) and 8TB (for 32 bit machine). While in RHEL7 maximum file system size is 500TB.

Also keep in mind that RHEL does not support XFS on 32-bit machines.

Change in file system structure.
In rhel6 /bin,/sbin,/lib and /lib64 are usually under /

In rhel7, now /bin,/sbin,/lib and /lib64 are nested under /usr.

The /tmp directory can now be used as a temporary file storage system (tmpfs)

Space Required to Installing RHEL7?

Now if you want to install RHEL7 in your machine, RedHat recommends minimum 5 GB of disk space to install this release of RHEL series for all supported architectures.

.Hostname lookup and setup
In rhel5 and rhel6 versions, we can edit file /etc/sysconfig/network to set hostname but in rhel7 we can directly change the hostname using below commands.

hostnamectl
nmtui
nmcli
Example:

in RHEL6              #hostname

in RHEL7              #hostnamectl  status   and #hostname

Few More notable changes in RHEL 7.
Netstat and ifconfig commands also disappeared from RHEL7 but it can be used by installing net-tools.
The move from sysvinit to systemd is one of most important change that has been made and which is a matter of concerned.
Command tail -n is replaced by journalctl -n
Command tail -f is replaced by journalctl -f
For displaying kernel messages instead of dmesg now in RHEL7 we use journalctl -k

0 comentários:

Note: only a member of this blog may post a comment.

[root@ipaserver ~]# vmstat -s [root@ipaserver ~]# free -g [root@ipaserver ~]# free -h [root@ipaserver ~]# cat /proc/meminfo 

Display memory info

[root@ipaserver ~]# vmstat -s
[root@ipaserver ~]# free -g
[root@ipaserver ~]# free -h
[root@ipaserver ~]# cat /proc/meminfo 

0 comentários:

Note: only a member of this blog may post a comment.

Q. WHAT IS A PORT? A port is piece of software which is used as docking point in your machine, where remote application can communicate. T...

IMPORTANT PORT NUMBERS FOR THE LINUX SYSTEM ADMINISTRATOR

Q. WHAT IS A PORT?
A port is a piece of software which is used as a docking point in your machine, where a remote application can communicate. This is analogous to the physical ports for entering into a country from different sea ports.

Q. WHAT IS HARDWARE PORT?
This is a physical peripheral connecting point to a machine from a physical device.

Q. WHAT IS A SOCKET?
Socket is combination of software Port and IP address.

Q. WHAT IS THE RANGE OF PORTS OR HOW MANY PORTS ARE THERE?
Port numbers can vary from 0 to 65535, so total we can get 65536 ports

Q. WHY PORT NUMBERS ARE JUST 65536?
This is because limitation in TCP/IP stack where the port number field is just 16bit size. So we get only 2^16(2 to the power of 16) ports which are equal to 65536 available ports

Q. WHAT ARE THE WELL-KNOWN PORTS OR ASSIGNED PORTS OR DEFAULT PORTS?
Well known ports are from 0 to 1023(total 2^10=1024 ports)

Q. WHAT DO YOU MEAN BY DEFAULT PORT?
Default port is a designated port for particular well-known service such as web server, mail server, ftp server etc. By default FTP uses 21 port, DNS uses 53 and Apache uses 80 port.

Q. CAN WE CHANGE DEFAULT PORT FOR A SERVICE(EXAMPLE APACHE, SQUID)?
Yes, we can change. In Apache and DNS we can change this using listen configuration entry in httpd.conf and named.conf. Squid have port entry in it’s squid.conf file to mention port number.

Q. WHAT ARE THE PROTOCOL NUMBERS FOR TCP AND UDP?
Do not confuse this one with port numbers. TCP and UDP have their own numbers in TCP/IP stack.

TCP protocol number: 6

UDP protocol number: 17

Q. IS THERE ANY WAY I CAN SEE ALL THE PORT INFORMATION IN LINUX?
Yes, you can get that from /etc/services files.

Q. HOW CAN I SEE OPEN PORTS IN LINUX?
Use nmap command.

WELL KNOWN PORTS
20 – FTP Data (For transferring FTP data)

21 – FTP Control (For starting FTP connection)

22 – SSH (For secure remote administration which uses SSL to encrypt the transmission)

23 – Telnet (For insecure remote administration)

25 – SMTP (Mail Transfer Agent for e-mail server such as SEND mail)

53 – DNS (Special service which uses both TCP and UDP)

67 – Bootp

68 – DHCP

69 – TFTP (Trivial file transfer protocol uses udp protocol for connection less transmission of data)

80 – HTTP/WWW(Apache)

88 – Kerberos

110 – POP3 (Mail delivery Agent)

123 – NTP (Network time protocol used for time syncing uses UDP protocol)

137 – NetBIOS (nmbd)

139 – SMB-Samba (smbd)

143 – IMAP

161 – SNMP (For network monitoring)

389 – LDAP (For centralized administration)

443 – HTTPS (HTTP+SSL for secure web access)

514 – Syslogd (udp port)

636 – ldaps (both tcp and udp)

873 – rsync

989 – FTPS-data

990 – FTPS

993 – IMAPS

1194 – openVPN

1812 – RADIUS

995 – POP3s

2049 – NFS (nfsd, rpc.nfsd, rpc, portmap)

2401 – CVS server

3306 – MySql

3690 – SVN

0 comentários:

Note: only a member of this blog may post a comment.

* If you want to make allow request which is originated form the private instance should be able to reach internet you make use of NAT * ...

ELB in AWS

* If you want to allow requests originated from a private instance to reach the internet, you make use of NAT

* The traffic which originates from the internet and is destined for those private instances comes via ELB

* ELB works as a Reverse Proxy
* NAT Gateway works as a Forward Proxy
* NAT Gateway should be placed in a public subnet
* ELB can be placed in a public or private subnet
* NAT and ELB are both managed services, but a NAT Gateway lives in one subnet, whereas an ELB, if configured correctly, can span multiple subnets
* In NAT we have two options: NAT Instance and NAT Gateway
* A NAT Gateway can live in only one Availability Zone, i.e. we can choose only one subnet for one gateway


--------------------------------------------------

Elastic Load Balancer

* ELB is a managed service by Amazon
* ELB distributes the traffic to the N number of instances which are registered to ELB
* ELB does a continuous health check of the instances which are registered to it; in case any instance becomes unhealthy, ELB will stop sending traffic to that particular instance.
* Internally ELB launches instances, so it consumes IPs from your subnet; hence keep some buffer in terms of private IP addresses
* You can move the web server to a private subnet and keep only ELB in the public subnet. Within a VPC all instances can talk to each other (irrespective of subnet)
* Traffic gets distributed between 2 AZs in round-robin fashion; within an AZ there are N number of instances and a request will go to the one which has the least number of connections
* ELB should be accessed by DNS name, not by IP address
* If we check the button "Create an internal load balancer" then it will be a private load balancer
* If we know the URL then we choose HTTP and a URL path, otherwise we should choose TCP
* You have to enable Cross-Zone Load Balancing and enable Connection Draining; it will distribute traffic to all instances of all AZs equally

* Connection draining is a time window which ELB gives to an unhealthy instance that has some open connections, so that within that particular time the instance can release its connections

Application Load Balancer

* If you configured a Classic Load Balancer and registered N number of EC2 instances, then every instance would have the same type of content or would have all the files similar
* Whereas in case of an Application Load Balancer we can go ahead and create rules based on which any incoming request can go to a specific target group

0 comentários:

Note: only a member of this blog may post a comment.

* On the physical host, ec2 is a VM, and EBS volume not their in physical host they are connected via a network to this particular host * ...

Feature in AWS

* On the physical host, EC2 is a VM, and EBS volumes are not on the physical host; they are connected via a network to this particular host
* Whereas the Instance Store is on the physical host itself
* When we stop any EC2 instance the resources of that particular instance become free, which means CPU and RAM will be freed, but the data which is available on the EBS volume that is connected via the network remains there
* When we stop and start the instance the underlying hardware changes, but when we reboot the EC2 instance only the OS reboots and nothing else changes
========================================================================================================================================
ec2 Auto Recovery:- Auto Recovery is a process which helps you to recover your instance in case something went wrong at your H/W level or on the host level
The main two parts in auto recovery:-
(1) System status check:- There is some issue with the software of the physical host. We cannot do anything about a system status check
(2) Instance status check:- These are related to our EC2 instance level

Events:- If any activity is planned by the Amazon side it will show in Events, so we can plan to stop or do any migration of our instance.
Limit:- AWS provides limits for launching any service. Suppose a developer starts an infinite loop to launch instances and this loop launches many instances; the limit feature stops this type of activity. If we want to increase the limit of any service we have to request it from AWS

Limits are of two types:-
(1) Soft limit:- we can request AWS to increase the limit of that particular service, i.e. EC2, S3, VPC etc.
(2) Hard limit:- We cannot request AWS to increase the limit of that service, for example security groups, rules in a security group etc.

Private IP:- A private IP address is associated with the ethernet interface of the EC2 instance; it is shown when we log in to the instance. This IP address is not accessible from the outside world and belongs to a particular VPC; when we stop/start/reboot the instance the IP address does not change.

Public IP:- A public IP address is accessible from the outside world, but these are limited, so for any application's internal communication we use only private IPs. When we stop and start the EC2 instance the public IP is automatically released and a new public IP is assigned; if we reboot the instance then the public IP does not change.

Elastic IP:- An Elastic IP works as a public IP; we generate an Elastic IP and associate it with an EC2 instance, and if we stop and start that EC2 instance then the public IP will not change because of the Elastic IP which is associated with that particular instance.
 Suppose we want to migrate an application and we create a more powerful server; then with an Elastic IP we simply detach the Elastic IP from the older server and attach it to the new server.

0 comentários:

Note: only a member of this blog may post a comment.

[root@docker ~]# aws ec2 describe-instances --instance-ids i-0734100aebc91b85c [root@docker ~]# aws ec2 stop-instances --instance-ids i-07...

AWS ec2 CLI

[root@docker ~]# aws ec2 describe-instances --instance-ids i-0734100aebc91b85c
[root@docker ~]# aws ec2 stop-instances --instance-ids i-0734100aebc91b85c
[root@docker ~]# aws ec2 start-instances --instance-ids i-0734100aebc91b85c
[root@docker ~]# aws ec2 terminate-instances --instance-ids i-0734100aebc91b85c

Change hostname in AWS Linux server

[root@ip-172-31-20-169 ~]# rpm -qf /etc/cloud/cloud.cfg
[root@ip-172-31-20-169 ~]# rpm -qf /etc/cloud/cloud.cfg
[root@ip-172-31-20-169 ~]# vi /etc/cloud/cloud.cfg:- Add below parameter in mentioned file
preserve_hostname: true
[root@ip-172-31-20-169 ~]# vi /etc/hostname:- Give the hostname in mentioned file
awslabserver
[root@ip-172-31-20-169 ~]# vi /etc/hosts
172.31.20.169 awslabserver
[root@ip-172-31-20-169 ~]# init 6
[root@awslabserver ~]# uname -a
Linux awslabserver 4.14.138-114.102.amzn2.x86_64 #1 SMP Thu Aug 15 15:29:58 UTC 2019 x86_64 x86_64 x86_64 GNU/Linux

0 comentários:

Note: only a member of this blog may post a comment.

[root@docker ~]#  aws ec2 create-vpc --cidr-block 10.0.0.0/16            vpc-040630f59ce9188ea---VPCid [root@docker ~]# aws ec2 describe-v...

AWS VPC CLI

[root@docker ~]#  aws ec2 create-vpc --cidr-block 10.0.0.0/16            vpc-040630f59ce9188ea---VPCid
[root@docker ~]# aws ec2 describe-vpcs
[root@docker ~]# aws ec2 create-tags --resources vpc-040630f59ce9188ea --tags Key=Name,Value=CLI-VPC
[root@docker ~]# aws ec2 create-subnet --vpc-id vpc-040630f59ce9188ea --cidr-block 10.0.1.0/24
[root@docker ~]# aws ec2 create-tags --resources subnet-03b167b7e4e7fde24 --tags Key=Name,Value=Public-Subnet
[root@docker ~]# aws ec2 create-subnet --vpc-id vpc-040630f59ce9188ea --cidr-block 10.0.2.0/24
[root@docker ~]# aws ec2 create-tags --resources subnet-01f5fbf80d0bd44aa --tags Key=Name,Value=Private-Subnet
[root@docker ~]# aws ec2 describe-subnets
[root@docker ~]# aws ec2 create-internet-gateway
[root@docker ~]# aws ec2 create-tags --resources igw-04d67210fe8d1888f --tags Key=Name,Value=CLI-IGY
[root@docker ~]# aws ec2 attach-internet-gateway --internet-gateway-id igw-04d67210fe8d1888f --vpc-id vpc-040630f59ce9188ea
[root@docker ~]# aws ec2 allocate-address --domain vpc
[root@docker ~]# aws ec2 create-nat-gateway --subnet-id subnet-03b167b7e4e7fde24 --allocation-id eipalloc-0a804bd0125aeff3e
[root@docker ~]# aws ec2 create-tags --resources nat-0af1dfe5aa12ad301 --tags Key=Name,Value=CLI-Natgateway
[root@docker ~]# aws ec2 create-route-table --vpc-id vpc-040630f59ce9188ea
[root@docker ~]# aws ec2 create-tags --resources rtb-0828ddd487dfbb6cd --tags Key=Name,Value=CLI-PublicRouteTable
[root@docker ~]# aws ec2 create-route-table --vpc-id vpc-040630f59ce9188ea
[root@docker ~]# aws ec2 create-tags --resources rtb-03ddfa883bae4206f --tags Key=Name,Value=CLI-PrivateRouteTable
[root@docker ~]# aws ec2 create-route --route-table-id  rtb-0828ddd487dfbb6cd --destination-cidr-block 0.0.0.0/0 --gateway-id igw-04d67210fe8d1888f
[root@docker ~]# aws ec2 describe-route-tables
[root@docker ~]# aws ec2 create-route --route-table-id rtb-03ddfa883bae4206f --destination-cidr-block 0.0.0.0/0 --nat-gateway-id nat-0af1dfe5aa12ad301
[root@docker ~]# aws ec2 associate-route-table --route-table-id rtb-0828ddd487dfbb6cd --subnet-id subnet-03b167b7e4e7fde24
[root@docker ~]# aws ec2 associate-route-table --route-table-id rtb-03ddfa883bae4206f --subnet-id subnet-01f5fbf80d0bd44aa
[root@docker ~]#  aws ec2 create-security-group --group-name CLI-WEB-Securitygroop --description "My Security Group" --vpc-id vpc-040630f59ce9188ea
[root@docker ~]# aws ec2 create-tags --resources sg-09b5d0442532e08d0 --tags Key=Name,Value=CLI-Security-SR
[root@docker ~]# aws ec2 authorize-security-group-ingress --group-id sg-09b5d0442532e08d0 --protocol tcp --port 22 --cidr 0.0.0.0/0
[root@docker ~]# aws ec2 authorize-security-group-ingress --group-id sg-09b5d0442532e08d0 --protocol tcp --port 80 --cidr 0.0.0.0/0
[root@docker ~]# aws ec2 create-key-pair --key-name Mykey123
[root@docker ~]# aws ec2 run-instances --image-id ami-04b762b4289fba92b  --count 1 --instance-type t2.micro --key-name Mykey123 --security-group-ids sg-09b5d0442532e08d0 --subnet-id subnet-03b167b7e4e7fde24

0 comentários:

Note: only a member of this blog may post a comment.

[root@docker ~]# aws s3 ls [root@docker ~]# aws s3 mb s3://pranchaldixit123 make_bucket: pranchaldixit123 [root@docker ~]# aws s3 rb s...

AWS S3 CLI

[root@docker ~]# aws s3 ls
[root@docker ~]# aws s3 mb s3://pranchaldixit123
make_bucket: pranchaldixit123
[root@docker ~]# aws s3 rb s3://pranchaldixit123
remove_bucket: pranchaldixit123
[root@docker ~]#
[root@docker ~]# aws s3 mb s3://pdixit123
make_bucket: pdixit123
[root@docker ~]#
[root@docker ~]# aws s3 ls
2019-10-14 02:02:52 pdixit123
[root@docker s3test]# aws s3 cp 1.txt s3://$BucketName/firstobject
[root@docker s3test]# aws s3 cp 1.txt s3://pdixit123/firstobject
[root@docker s3test]# aws s3 cp 2.txt s3://pdixit123/firstobject
[root@docker s3test]# aws s3 cp 3.txt s3://pdixit123/firstobject
root@docker s3test]# touch permTest.txt
[root@docker s3test]# aws s3 cp permTest.txt s3://pdixit123 --acl public-read
[root@docker s3test]# touch more1.txt
[root@docker s3test]# touch more2.txt
[root@docker s3test]# touch more3.txt
[root@docker s3test]# aws s3 sync . s3://pdixit123
[root@docker s3test]# aws s3 ls s3://pdixit123/
[root@docker s3test]# rm more2.txt
[root@docker s3test]# aws s3 sync . s3://pdixit123 --delete
[root@docker s3test]# aws s3 sync . s3://pdixit123 --delete --exclude more3.txt
[root@docker s3test]# aws s3api list-objects --bucket pdixit123
[root@docker s3test]# aws s3 rb s3://pdixit123
[root@docker s3test]# aws s3 rb s3://pdixit123 --force
[root@docker s3test]# aws s3api put-bucket-versioning --bucket simpletestbucket1 --versioning-configuration Status=Enabled
[root@docker s3test]# touch b.txt
[root@docker s3test]# aws s3 cp b.txt s3://simpletestbucket1
[root@docker s3test]# aws s3api list-object-versions --bucket simpletestbucket1
[root@docker s3test]# aws s3api list-object-versions --bucket simpletestbucket1
[root@docker s3test]# aws s3api list-object-versions --bucket simpletestbucket1
[root@docker s3test]# aws s3 cp s3://simpletestbucket1/a.txt .
[root@docker s3test]# aws s3 cp s3://simpletestbucket1/b.txt .
[root@docker s3test]# aws s3 ls s3://simpletestbucket1
[root@docker s3test]# aws s3 cp s3://simpletestbucket1/a.txt  s3://gdixit123
[root@docker s3test]# aws s3 cp s3://simpletestbucket1 .  --recursive
root@docker s3test]# aws s3 cp s3://simpletestbucket1 s3://gdixit123 --recursive
[root@docker s3test]# aws s3 mv s3://simpletestbucket1 . --recursive
[root@docker s3test]# aws s3 mv  ab2.txt s3://simpletestbucket1
[root@docker s3test]# aws s3 mv s3://simpletestbucket1/ab2.txt s3://gdixit123
[root@docker s3test]# aws s3 mv s3://simpletestbucket1 . --recursive
[root@docker s3test]# aws s3 sync . s3://simpletestbucket1
[root@docker s3test]# aws s3 sync s3://simpletestbucket1 s3://gdixit123
[root@docker ~]# aws s3 rm s3://simpletestbucket1/ab.txt
[root@docker ~]# aws s3 rm s3://simpletestbucket1/ab.txt
[root@docker ~]# aws s3api put-bucket-tagging --bucket gdixit123 --tagging 'TagSet=[{Key=client,Value=scaleway}]'
[root@docker ~]# aws s3api get-bucket-tagging --bucket gdixit123
[root@docker ~]# aws s3api delete-bucket-tagging --bucket gdixit123
[root@docker ~]# aws s3api put-object-tagging --bucket gdixit123 --key a.txt --tagging 'TagSet=[{Key=client,Value=scaleway},{Key=service,Value=objectstorage}]'
[root@docker ~]# aws s3api get-object-tagging --bucket mybucket --key a.txt
[root@docker ~]# aws s3api put-bucket-acl --bucket gdixit123 --grant-full-control id=891367088372

0 comentários:

Note: only a member of this blog may post a comment.

[root@docker ~]# aws iam list-users USERS arn:aws:iam::891367088372:user/admin 2019-10-09T14:26:04Z / AIDA47CM6MD2AN7VASBDF admin US...

AWS IAM CLI

[root@docker ~]# aws iam list-users
USERS arn:aws:iam::891367088372:user/admin 2019-10-09T14:26:04Z / AIDA47CM6MD2AN7VASBDF admin
USERS arn:aws:iam::891367088372:user/admin2 2019-10-13T12:20:54Z / AIDA47CM6MD2OLSC2IMCR admin2
[root@docker ~]# aws iam list-groups
GROUPS arn:aws:iam::891367088372:group/Admin 2019-10-09T14:27:31Z AGPA47CM6MD2LOQU7WHU2 Admin /
[root@docker ~]# aws iam list-roles
[root@docker ~]# aws iam create-user --user-name pranchal
USER arn:aws:iam::891367088372:user/pranchal 2019-10-13T12:53:19Z / AIDA47CM6MD2AC7Y4TXX7 pranchal
[root@docker ~]# aws iam create-access-key --user-name pranchal
ACCESSKEY AKIA47CM6MD2NKJCHCSJ 2019-10-13T12:55:00Z Oxa+39HjQbzzWUXMBHCOvtI8zBEZ6RMbcel0Gjcp Active pranchal
[root@docker ~]# aws iam create-group --group-name developer
GROUP arn:aws:iam::891367088372:group/developer 2019-10-13T12:57:26Z AGPA47CM6MD2BYIWA5OTR developer /
[root@docker ~]# aws iam add-user-to-group --user-name pranchal --group-name developer
[root@docker ~]# aws iam list-policies
[root@docker ~]# aws iam list-policies --scope AWS |more
[root@docker ~]# aws iam attach-group-policy --policy-arn arn:aws:iam::aws:policy/AmazonEC2FullAccess --group-name developer
[root@docker ~]# aws iam list-attached-group-policies --group-name developer
[root@docker ~]# aws iam create-role --role-name TestRole --assume-role-policy-document file://ec2-role-trust-policy.json
[root@docker ~]# aws iam list-roles
[root@docker ~]# aws iam delete-role --role-name TestRole
[root@docker ~]# aws iam create-user --user-name dummy
[root@docker ~]# aws iam create-group --group-name dummy
[root@docker ~]# aws iam delete-user --user-name dummy
[root@docker ~]# aws iam delete-group --group-name dummy

0 comentários:

Note: only a member of this blog may post a comment.

docker volume create myvlo1 docker volume ls docker volume inspect myvol1 docker run --name Myjenkins1 -v myvol1:/var/jenkins_home -p 80...

docker Volume

docker volume create myvol1
docker volume ls
docker volume inspect myvol1
docker run --name Myjenkins1 -v myvol1:/var/jenkins_home -p 8090:9090 -p 50000:50000
docker run --name MyJenkins1 -v myvol1:/var/jenkins_home -p 8090:8090 -p 50000:50000 jenkins

docker volume rm myvol1

0 comentários:

Note: only a member of this blog may post a comment.

[root@docker ~]# docker network --help [root@docker ~]# docker network ls NETWORK ID          NAME                DRIVER              SCO...

Docker Networking

[root@docker ~]# docker network --help
[root@docker ~]# docker network ls
NETWORK ID          NAME                DRIVER              SCOPE
4288b2609ccc        bridge              bridge              local
f21d9af6b09e        host                host                local
42281628272f        none                null                local

[root@docker ~]# docker network create mynetwork
[root@docker ~]# docker network ls
[root@docker ~]# docker network inspect mynetwork
[root@docker ~]# docker container ls
[root@docker ~]# docker container ls -a
[root@docker ~]# docker run  --name centoscontainer -d -it --net mynetwork centos /bin/bash
[root@docker ~]# docker container ls -a
[root@docker ~]# docker inspect mynetwork
[root@docker ~]# docker run -d -it --name centoscontainer2 centos /bin/bash
[root@docker ~]# docker container ls
[root@docker ~]# docker container ls -a
[root@docker ~]# docker network connect mynetwork centoscontainer2
[root@docker ~]# docker network inspect
[root@docker ~]# docker network inspect  mynetwork
[root@docker ~]# docker  exec -it centoscontainer2 /bin/bash
[root@docker ~]# docker  start centoscontainer
[root@docker ~]# docker attach centoscontainer2
[root@docker ~]# docker attach centoscontainer
[root@docker ~]# docker network disconnect mynetwork centoscontainer
[root@docker ~]# docker network inspect mynetwork
[root@docker ~]# docker network ls
[root@docker ~]# docker network disconnect mynetwork centoscontainer2
[root@docker ~]# docker network rm mynetwork
[root@docker ~]# docker network ls

0 comentários:

Note: only a member of this blog may post a comment.

[root@docker ~]# docker container run hello-world [root@docker ~]# docker image ls [root@docker ~]# docker ps -a [root@docker ~]# docker...

DOCKER BASIC-2

[root@docker ~]# docker container run hello-world
[root@docker ~]# docker image ls
[root@docker ~]# docker ps -a
[root@docker ~]# docker ps
[root@docker ~]# docker ps -a
[root@docker ~]# docker version
[root@docker ~]# systemctl status docker
[root@docker ~]# docker run -i -t --name "MyfirstContainer" centos:latest /bin/bash
root@02ac310f3cce:/# top
root@02ac310f3cce:/# df -h
root@02ac310f3cce:/# hostname -f
root@02ac310f3cce:/# ps -ef
root@02ac310f3cce:/# exit
[root@docker ~]# cd /var/lib/docker/overlay2/
[root@docker ~]# ls
[root@docker ~]# ll -lrth
[root@docker ~]# docker images ls
[root@docker ~]# docker image ls
[root@docker ~]# docker run -i -t --name container1 ubuntu /bin/bash
[root@docker ~]# docker image ls
[root@docker ~]# docker run -d -t --name "Mycentoscontainer" centos:7 /bin/bash
[root@docker ~]# docker container ls
[root@docker ~]# docker exec -i -t ba5b34de9f70 /bin/bash
[root@ba5b34de9f70 /]# hostname -f
[root@ba5b34de9f70 /]# rpm -qa |grep httpd
[root@ba5b34de9f70 /]# yum install -y httpd
[root@ba5b34de9f70 /]# systemctl status httpd
[root@ba5b34de9f70 /]# rpm -qa |grep httpd
[root@ba5b34de9f70 /]# exit
[root@docker ~]# docker container ls
[root@docker ~]# docker commit ba5b34de9f70 pranchaldixit/centos-httpd:7
[root@docker ~]# docker image ls

0 comentários:

Note: only a member of this blog may post a comment.

groupmod -g 501 nicgep usermod -u 501 nicgep usermod -u 502 gepreps useradd  -u 502 -g 501  gepreps passwd gepreps

user mod

groupmod -g 501 nicgep
usermod -u 501 nicgep
usermod -u 502 gepreps
useradd  -u 502 -g 501  gepreps
passwd gepreps

0 comentários:

Note: only a member of this blog may post a comment.

# wget https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm # rpm -ivh epel-release-latest-7.noarch.rpm # yum install p...

AWS CLI Configuration

# wget https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
# rpm -ivh epel-release-latest-7.noarch.rpm
# yum install python-pip
# pip list
# pip install awscli botocore
# pip list awscli
# aws
# aws configure
# ls -al /root/.aws/credentials
# cat  /root/.aws/credentials
# cat ~/.aws/config
# aws ec2 describe-regions
# aws configure
# aws ec2 describe-regions
# aws ec2 describe-instances
# aws ec2 describe-regions --output table
# aws iam list-access-key
# aws iam list-access-keys

0 comentários:

Note: only a member of this blog may post a comment.

# uname -r To enable the ELRepo repository on CentOS 7 # rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org # rpm -Uvh http://...

Upgrading Kernel in CentOS 7

# uname -r

To enable the ELRepo repository on CentOS 7

# rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
# rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm

To list the available kernel.related packages:

# yum --disablerepo="*" --enablerepo="elrepo-kernel" list available

Install the latest mainline stable kernel

# yum --enablerepo=elrepo-kernel install kernel-ml
# reboot
# uname -sr

0 comentários:

Note: only a member of this blog may post a comment.

VPC (1) Eech subnet associated with one private IP address (2) Every subnet has Main route table which is by default created, and we can ...

VPC in AWS

VPC
(1) Each subnet is associated with one private IP address range
(2) Every subnet has a Main route table which is created by default, and we can also create a custom route table.
(3) For a public subnet the route table should be public, and for a private subnet the route table should be private
(4) If there is no route table explicitly associated with a subnet, then the main route table of that VPC is associated with that subnet.
(5) The NAT gateway helps all the internet-bound traffic originating from your private instances go out to the internet, and its reply is sent back to our private instance
========================================================

Security Groups:-

(1) Operates at the instance level (first layer of defence)
(2) Supports allow rules only
(3) Is stateful: Return traffic is automatically allowed
(4) We evaluate all rules before deciding whether to allow traffic
(5) Applies to an instance only if someone specifies the security group when launching the instance, or associates the security group with the instance later on

Network ACL:-

(1) Operates at the subnet level (second layer of defence)
(2) Supports allow rules and deny rules
(3) Is stateless: Return traffic must be explicitly allowed by rules
(4) We process rules in number order when deciding whether to allow traffic
(5) Automatically applies to all instances in the subnets it's associated with
(6) In a NACL, if a rule's number is smaller then its priority is higher

Route Tables:-

* When we create any subnet, 5 IP addresses are reserved for internal use (the first four and the last IP address)
* When we create any VPC, 1 route table is also created by default, and that is called the main route table of the VPC
* We can also create a custom route table
* Here we have created one public route table and one private route table; the public route table is associated with the public subnet and the private route table with the private subnet
* If there is no route table explicitly associated with a subnet, then the main route table of that VPC will be used for the subnet
* One subnet has only one route table associated with it, but one route table can be associated with multiple subnets

========================================================================

VPC:-

Start = 10.0.0.0/26
start - 10.0.0.0

2 ^ (32-26)= 2 ^ 6 =64

End =10.0.0.63

Now we have 64 IP addresses; divided into 4 subnets, that means every subnet has 16 IP addresses

* A VPC exists in a particular region

First subnet - 10.0.0.0/28
Second Subnet - 10.0.0.16/28
Third Subnet -  10.0.0.32/28
Fourth Subnet - 10.0.0.48/28


* We have allocated 16 IP addresses but only 11 IPs are available
* 5 IP addresses are kept for internal use

* In a public subnet we can use the property "Enable auto-assign public ipv4 address"; it allocates a public IP automatically

* In VPC we can select edit DNS resolution as "yes"

* In VPC we can select edit DNS hostnames as "yes"; by this we can get a private hostname

* When any VPC created one route table created by default and that is called main route table of VPC

* We have created two route tables: one public route table and the second a private route table

* The public route table is associated with the public subnet and the private route table with the private subnet

* If a subnet has no route table explicitly associated with it, then the main route table of that VPC is associated with that subnet

* One subnet is associated with one route table, but one route table can be associated with multiple subnets

* For internet access from the public route table, an internet gateway should be attached to the VPC, and in the public route table rule the destination should be 0.0.0.0/0 and the target the internet gateway

* We keep Nat gateway in public subnet and also provide an IP to Nat gateway

* Now go to private route table choose destination 0.0.0.0/0 and target Nat gateway

* So if any internet-bound traffic is generated by the private subnet, it will be sent to the NAT gateway

* NAT Gateway is a scalable service; when traffic increases there is no need to worry about this, it is managed automatically

* NAT Gateway also takes one private IP; that's why one private IP address gets reduced from the public subnet

0 comentários:

Note: only a member of this blog may post a comment.

On Demand instance :- This is a default mode, if we use this instance 3.5 years have to pay for 4 years, if we have start and stop the inst...

EC2 Instance in AWS

On Demand instance :- This is the default mode; if we use this instance for 3.5 hours we have to pay for 4 hours. If we stop and start the instance a new billing cycle starts, but on rebooting the instance the same billing cycle continues
======================
(1) pay by hours
(2) Any partial hour converted to full hour
(3) A new billing cycle starts whenever an instance changes to the running state
(4) The billing cycle ends when the instance changes to the stopping state

Reserved instance :- This instance work in time duration, suppose we want run any application 3-4 year we can use this instance, possible duration 1 year or 3 year
====================
(1) 2 term available- 1 year or 3 years
(2) 3 payment options
     - Full Upfront
- Partial Upfront
- No Upfront (not for 3 years term)

(3)Lots of saving  in comparison to On-demand
(4)Gives you a capacity guarantee as well
(5)You commit the usage for chosen term
(6)You can re-sell on AWS if you choose not to use
(7)consider for full term

Scheduled Reservation:-
(1) Available for 3 frequencies- Daily, weekly or monthly
(2) Saving when compare to On-Demand
(3)Good for recurring workloads requiring a lesser number of hours
(4) 1 year available

Spot instance :- Unused capacity at AWS offered in the market for bidding
(2)Look at the pricing history and decide your bid price
(3)Instances are terminated with 2 minutes notice when the market price goes above the bid price
(4)If terminated by AWS, the last partial hour is free
(5)Optionally use the Spot Block option with a bid to block the instance (maximum 6 hours)

Dedicated Instances:-
(1)Comparatively higher rates than on-demand instances
(2)In addition to that, $2 per hour - An additional fee is charged once per hour in which at least one Dedicated instance of any type is running in a region

Dedicated Host:-
(1)Pay for full Physical host, irrespective of number of instance running
(2)Suitable when you want to use hardware-bound licenses
(3)Underlying Host does not change when you stop and start an instance

============================================================

Instance Family Current Generation Instance Types
====================================================
General purpose

a1.medium | a1.large | a1.xlarge | a1.2xlarge | a1.4xlarge | m4.large | m4.xlarge | m4.2xlarge | m4.4xlarge | m4.10xlarge | m4.16xlarge | m5.large | m5.xlarge | m5.2xlarge | m5.4xlarge | m5.12xlarge | m5.24xlarge | m5.metal | m5a.large | m5a.xlarge | m5a.2xlarge | m5a.4xlarge | m5a.12xlarge | m5a.24xlarge | m5ad.large | m5ad.xlarge | m5ad.2xlarge | m5ad.4xlarge | m5ad.12xlarge | m5ad.24xlarge | m5d.large | m5d.xlarge | m5d.2xlarge | m5d.4xlarge | m5d.12xlarge | m5d.24xlarge | m5d.metal | t2.nano | t2.micro | t2.small | t2.medium | t2.large | t2.xlarge | t2.2xlarge | t3.nano | t3.micro | t3.small | t3.medium | t3.large | t3.xlarge | t3.2xlarge

Compute optimized

c4.large | c4.xlarge | c4.2xlarge | c4.4xlarge | c4.8xlarge | c5.large | c5.xlarge | c5.2xlarge | c5.4xlarge | c5.9xlarge | c5.18xlarge | c5d.xlarge | c5d.2xlarge | c5d.4xlarge | c5d.9xlarge | c5d.18xlarge | c5n.large | c5n.xlarge | c5n.2xlarge | c5n.4xlarge | c5n.9xlarge | c5n.18xlarge

Memory optimized

r4.large | r4.xlarge | r4.2xlarge | r4.4xlarge | r4.8xlarge | r4.16xlarge | r5.large | r5.xlarge | r5.2xlarge | r5.4xlarge | r5.12xlarge | r5.24xlarge | r5.metal | r5a.large | r5a.xlarge | r5a.2xlarge | r5a.4xlarge | r5a.12xlarge | r5a.24xlarge | r5ad.large | r5ad.xlarge | r5ad.2xlarge | r5ad.4xlarge | r5ad.12xlarge | r5ad.24xlarge | r5d.large | r5d.xlarge | r5d.2xlarge | r5d.4xlarge | r5d.12xlarge | r5d.24xlarge | r5d.metal | u-6tb1.metal | u-9tb1.metal | u-12tb1.metal | x1.16xlarge | x1.32xlarge | x1e.xlarge | x1e.2xlarge | x1e.4xlarge | x1e.8xlarge | x1e.16xlarge | x1e.32xlarge | z1d.large | z1d.xlarge | z1d.2xlarge | z1d.3xlarge | z1d.6xlarge | z1d.12xlarge | z1d.metal

Storage optimized

d2.xlarge | d2.2xlarge | d2.4xlarge | d2.8xlarge | h1.2xlarge | h1.4xlarge | h1.8xlarge | h1.16xlarge | i3.large | i3.xlarge | i3.2xlarge | i3.4xlarge | i3.8xlarge | i3.16xlarge | i3.metal

Accelerated computing

f1.2xlarge | f1.4xlarge | f1.16xlarge | g3s.xlarge | g3.4xlarge | g3.8xlarge | g3.16xlarge | p2.xlarge | p2.8xlarge | p2.16xlarge | p3.2xlarge | p3.8xlarge | p3.16xlarge | p3dn.24xlarg

=======================================================================
Volume Type:-
(1)General Purpose SSD (gp2)*
Description:- General purpose SSD volume that balances price and performance for a wide variety of workloads
Volume Size:- 1 GiB - 16 TiB
Max. IOPS**/Volume:-16,000***

(2)Provisioned IOPS SSD (io1)
Description:- Highest-performance SSD volume for mission-critical low-latency or high-throughput workloads
Volume Size:- 4 GiB - 16 TiB
Max. IOPS**/Volume:-64,000****

(3)Throughput Optimized HDD (st1)
Description:- Low-cost HDD volume designed for frequently accessed, throughput-intensive workloads
Volume Size:- 500 GiB - 16 TiB
Max. IOPS**/Volume:-500

(4)Cold HDD (sc1)
Description:- Lowest cost HDD volume designed for less frequently accessed workloads
Volume Size:- 500 GiB - 16 TiB
Max. IOPS**/Volume:-500


========================================================================

EBS Volume:-
* The root volume, we can not change it's type, based on AMI the type of volume is fixed, and boot volume showing in form of gp2, IO1, Magnetic
* There are five types of volume
* In linux operating system the root volume should be mounted on /dev/xvda
* In windows operating system the root volume should be mounted on /dev/sda1
* When we take a snapshot of any EBS volume, it is stored in incremental form
* Snapshots are always stored in S3; before being stored in S3 they are also compressed
* If the EBS volume is in encrypted format then the snapshot will be encrypted, otherwise it will be stored normally
* We can increase the root EBS volume but cannot decrease it
* A volume restored from a snapshot should be in the same availability zone, so we can attach it to a server in that same availability zone
* We can increase EBS volume size online and offline also.
* We can also take a copy of a snapshot and attach that copy in any availability zone.

========================================================================

ELB:-

* ELB can be public or private (depends on the subnets you choose to launch it in)
* It is highly recommended that you choose 2 subnets in different AZs to launch the ELB
* You can move web servers to a private subnet and keep only the ELB in the public subnet. Within a VPC all instances can talk to each other
* Traffic gets distributed between 2 AZs in round-robin fashion
* Within an AZ, the instance having the least number of open connections gets the next request
* ELB should always be accessed by using DNS and not IP
* Define health checks with care and look at the status of instances
* If we don't know the exact URL path in the ELB configuration then we can select TCP instead of HTTP
* In case we do not have an equal number of instances, then we should check cross-zone load balancing; this algorithm helps to distribute traffic equally
* Connection Draining means if any of our instances becomes unhealthy, then in the mentioned option we provide a particular time and the ELB stops sending traffic to that instance

========================================================================

Launch configuration:-

* Collection of attribute values to be used for launching an EC2 instance
* When the system needs any new instance, the auto-scaling group will launch the instance according to the launch configuration

Auto-Scaling Group-

* Define:- Min, Max & Desired (optional)
* ELB (optional)
* Subnets (choose this accross 2 AZs)
* Cooldown period
* Scale-Out policy (with Warmup Period)
* Scale-in policy (Connection draining)

=============================================================

* ELB should be in a public subnet whereas instances should be in a private subnet
* In the auto-scaling group we need to give the group size, which means how many instances you want to launch
* In the auto-scaling group we must choose a VPC, and in the VPC we must choose subnets, and these subnets should be private
* In auto scaling we can set an alarm; after generation of the alarm we provide the time period to launch a new EC2 instance (this time may be 5 min or more) and also mention the average CPU utilization at which we want to generate the alarm.
* In the same way we can decrease the auto-scaling size when the load average goes down
* We can remove instances in three ways: (1) fixed number (2) in percentage (3) set to a specific value
* We pass the ELB id in the auto-scaling group; with the help of this process the auto-scaling group launches the instances
* Cooldown period means the time for which the auto-scaling group waits before it executes a scale-in or scale-out policy again (default time 300)

=====================================================================
* There are two types of disk
(1)EBS:- EBS is your traditional hard disk
(2)Instance storage:- This is not persistent; if you stop the EC2 instance and start it again then the data goes away

* Shutdown Behaviour:- when this feature is enabled, it asks whether to shut down or terminate
* Delete on Termination means when I delete the EC2 instance the hard disk is automatically deleted
* 1 ECU is roughly equal to 1 Gigahertz

Status checks:- when we launch any instance there are two types of checks
(1) System Status Checks
(2) Instance status checks

Description:- EC2 instance description as given below
(1)instance id :- this is a unique id
(2)instance status
(3)instance type
(4)Every instance has two IP addresses, one public and the other private; only the public IP is accessible from outside
(5)Availability zone
(6)Security group:- It shows the rules which we have added to the EC2 instance
(7)AMI ID:- it shows the AMI id
(8)platform:- linux/windows/any os
(9)VPC:- shows the VPC id
(10)subnet id :- shows the subnet id of the EC2 instance

0 comentários:

Note: only a member of this blog may post a comment.

16 Regions:-Any geographic location that has data center is called Regions 42 availability zones:- With in Regions there have minimum 2 da...

AWS

16 Regions:- Any geographic location that has a data center is called a Region
42 availability zones:- Within a Region there are a minimum of 2 data centers, isolated from each other; each is called an availability zone.
60 Edge Locations:- These are points of presence for the AWS CDN service; the name of the CDN service is CloudFront
AZ:- us-east-1a, us-east-1b

AZ
==
EC2 instance 
EBS Volume
RDS instance 
Redshift node
Subnet
Elastic File system 

Regional 
=========
S3 Bucket
AMI
Snapshot (EBS/RDS)
Dynamo DB
VPC
Elastic IP
SQS,SNS
CloudWatch Metric 

Global
======= 
IAM 
Route 53
Cloud Front

0 comentários:

Note: only a member of this blog may post a comment.

Step 1:- Install git # yum install git Step 2:- check the git version # git --version Step3:- sign up https://github.com/ or create a...

GIT Basics

Step 1:- Install git
# yum install git

Step 2:- check the git version
# git --version

Step3:- sign up https://github.com/ or create a free account

Step4:- Add your github email and user name to git

# git config --global user.email pranchaldixit321@gmail.com
# git config --global user.name pranchaldixit

Step5:- Add files and folders to git

Step6:- command
        on the terminal go to location of folder/project
# cd /root/Desktop/tools/git/myfolder
# git init
Initialized empty Git repository in /root/Desktop/tools/git/myfolder/.git/
# git status
# ls -al
# git status
# touch test.txt
# git status
# git add test.txt
# git status
# git commit -m "added test.txt"
# git status
# vim test.txt
# touch index.html
# git status
# git add .
# git status
# git commit -m "add index.html, modified test.txt"
# git status
# git remote add origin https://github.com/Pranchaldixit/Repo1.git
# git push -u origin master
# git log

Branching and Merging
===========================
# cd /root/Desktop/tools/git/myfolder
# git branch MyNewBranch
# git checkout MyNewBranch
# touch test2.txt
# git status
# git commit -m "added test2.txt"
# git status
# git commit  -m "added test2.txt"
# git add .
# git commit  -m "added test2.txt"
# git push -u origin MyNewBranch
# git checkout master
# ls -al
# git merge MyNewBranch
# git push -u origin master
# git branch -d MyNewBranch ===========Delete from local Machine
# git push origin --delete MyNewBranch==Delete from Main repo

========================================================================
# mkdir netauto
# cd netauto/
# ls
# vi s1
# ls
# git init
# la
# git config --global user.name "pranchal dixit"
# git config --global user.email "pranchaldixit123@gmail.com"
# git config --list
# git status
# git add s1
# git status
# git commit -m "add file s1"
# git status
# git log
# cp s1 s2
# vi s2
# vi s1
# git status
# git diff
# vi s1
# git diff
# git add .
# git status
# git diff --staged
# git commit -m "add s2 and edit s1"
# git log
# git log -p
# git rm s2
# git status
# git commit
# git log
# vi s1
# git diff
# git status
# git checkout -- s1
# git diff
# git status
# more s1
# vi s1
# git digg
# git diff
# git add s1
# git diff
# git diff --staged
# git status
# git reset HEAD s1
# git status
# git checkout -- s1
# git status
# git log --s2
# git log -- s2
# git checkout 6280 --s2
# git checkout 6280 -- s2
# ls
# git status
# git commit -m "restore s2"
# touch myapp.pyc
# touch logs/log1.log
# mkdir logs
# touch logs/log1.log
# touch logs/log2.log
# git status
# vi .gitignore
# git status
# git add .
# git commit -m "add .gitignore file"

========================================================================

# mkdir net-auto
# cd net-auto/
# ls
# vi s1
# git init
# git add s1
# git commit -m "create s1"
# cp s1 s2
# git add s2
# git commit -m "create s2"
# git log
# git log --all --decorate --oneline --graph
# alias graph="git log --all --decorate --oneline --graph"
# graph
# git branch SDN
# git branch auth
# git branch
# graph
# git checkout SDN
# graph
# vi s1
# git add s1 ; git commit -m "SDN for s1"
# graph
# cat s1
# git checkout auth
# git branch
# cat s1
# git checkout SDN
# cat s1
# git checkout auth
# vi s1
# git status
# commit -a -m "auth for s1"
# git commit -a -m "auth for s1"
# graph
# git status
# git checkout master
# git diff master..SDN
# git merge SND
# git merge SDN
# cat s1
# graph
# git branch --merged
# git branch -d SDN
# git branch -d auth
# git status
# git merge auth
# graph
# git branch --merged
# git branch -d auth
# git checlout -b dev
# git checkout -b dev
# vi s1
# git diff
# git commit -a -m "update S1 VLANS"
# git checkout master
# vi s1
# git commit a -m "update s1"
# git commit -a -m "update s1"
# graph
# git status
# git merge dev
# git status
# git merge --abort
# graph
# git status
# git merge dev
# git status
# vi s1
# git add s1
# git status
# git commit
# graph
# git branch -d dev
# git log
# git checkout 93ed315
# graph
# git checkout master
# graph
# git checkout 93ed315
# graph
# git branch stage
# graph
# git checkout stage
# graph
# git status
# git checkout master
# vi s1
# git status
# git checkout statge
# git checkout stage
# git stash
# git status
# vi s1
# git stash
# git stash list
# git stash list -p
# git stash apply
# git diff
# git commit -a -m "remove auth"
# git stash list
# git stash apply
# git diff
# git stash list
# git stash save "add yellow vlan"
# git stash list
# git stash save "add yellow vlan"
# git stash list


0 comentários:

Note: only a member of this blog may post a comment.