PeopleSoft: how to start and stop Elasticsearch and Kibana

First, change to the Elasticsearch bin directory:  cd $ES_HOME/bin

>ps -ef | grep elast

>ps -ef | grep cli

root        6316       1  0 May05 ?        01:22:11 /usr/sbin/adclient

smmsp    1173397       1  0 Jul10 ?        00:00:00 sendmail: Queue runner@01:00:00 for /var/spool/clientmqueue

psoft    1328581 1327640  0 Jul18 pts/0    00:04:12 ./../node/bin/node ./../src/cli/dist


nohup ./elasticsearch &    # to start Elasticsearch in the background




psoft-pseshrdevapp602:NONE:/usr/opt/app/es7/pt/Kibana7.10.0/bin>ls -lrt

total 16

-rwxr-xr-x. 1 psoft psoft  813 Dec  9  2022 kibana-plugin

-rwxr-xr-x. 1 psoft psoft  776 Dec  9  2022 kibana-keystore

-rwxr-xr-x. 1 psoft psoft  838 Dec  9  2022 kibana

-rw-------. 1 psoft psoft 3058 Jul 18 17:07 nohup.out

psoft-pseshrdevapp602:NONE:/usr/opt/app/es7/pt/Kibana7.10.0/bin>rm nohup.out

psoft-pseshrdevapp602:NONE:/usr/opt/app/es7/pt/Kibana7.10.0/bin>nohup ./kibana &    # to start Kibana in the background

[1]     1328581

psoft-pseshrdevapp602:NONE:/usr/opt/app/es7/pt/Kibana7.10.0/bin>nohup: ignoring input and appending output to 'nohup.out'


psoft-pseshrdevapp602:NONE:/usr/opt/app/es7/pt/Kibana7.10.0/bin>

psoft-pseshrdevapp602:NONE:/usr/opt/app/es7/pt/Kibana7.10.0/bin>

psoft-pseshrdevapp602:NONE:/usr/opt/app/es7/pt/Kibana7.10.0/bin>ps -ef | grep 1328581

psoft    1328581 1327640 38 17:10 pts/0    00:00:04 ./../node/bin/node ./../src/cli/dist

psoft    1328602 1327640  0 17:10 pts/0    00:00:00 grep --color=auto 1328581

psoft-pseshrdevapp602:NONE:/usr/opt/app/es7/pt/Kibana7.10.0/bin>




psoft-psesapp601.tmw.com:NONE:/usr/opt/app/psoft>cd ..

psoft-psesapp601.tmw.com:NONE:/usr/opt/app>ls

CorreLog  es23204  fs_share  oracle  psoft

psoft-psesapp601.tmw.com:NONE:/usr/opt/app>cd es23204/

psoft-psesapp601.tmw.com:NONE:/usr/opt/app/es23204>ls

pt

psoft-psesapp601.tmw.com:NONE:/usr/opt/app/es23204>cd pt

psoft-psesapp601.tmw.com:NONE:/usr/opt/app/es23204/pt>ls

es2.3.2  es_jre1.8.0_144

psoft-psesapp601.tmw.com:NONE:/usr/opt/app/es23204/pt>cd es2.3.2/

psoft-psesapp601.tmw.com:NONE:/usr/opt/app/es23204/pt/es2.3.2>ls

bin  config  data  lib  LICENSE.txt  logs  modules  NOTICE.txt  plugins  README.textile

psoft-psesapp601.tmw.com:NONE:/usr/opt/app/es23204/pt/es2.3.2>cd bin

psoft-psesapp601.tmw.com:NONE:/usr/opt/app/es23204/pt/es2.3.2/bin>ls

elasticsearch         elasticsearch.in.sh            elasticsearch-service-x86.exe  nohup.out   service.bat

elasticsearch.bat     elasticsearch-service-mgr.exe  elasticsearchuser              plugin

elasticsearch.in.bat  elasticsearch-service-x64.exe  elasticsearchuser.bat          plugin.bat

psoft-psesapp601.tmw.com:NONE:/usr/opt/app/es23204/pt/es2.3.2/bin>./elasticsearch

[2018-09-27 12:13:30,384][WARN ][bootstrap                ] Unable to lock JVM Memory: error=12,reason=Cannot allocate memory

[2018-09-27 12:13:30,419][WARN ][bootstrap                ] This can result in part of the JVM being swapped out.

[2018-09-27 12:13:30,419][WARN ][bootstrap                ] Increase RLIMIT_MEMLOCK, soft limit: 65536, hard limit: 65536

[2018-09-27 12:13:30,420][WARN ][bootstrap                ] These can be adjusted by modifying /etc/security/limits.conf, for example:

        # allow user 'psoft' mlockall

        psoft soft memlock unlimited

        psoft hard memlock unlimited

[2018-09-27 12:13:30,420][WARN ][bootstrap                ] If you are logged in interactively, you will have to re-login for the new limits to take effect.

[2018-09-27 12:13:30,610][INFO ][node                     ] [Phineas T. Horton] version[2.3.2], pid[13903], build[b9e4a6a/2016-04-21T16:03:47Z]

[2018-09-27 12:13:30,610][INFO ][node                     ] [Phineas T. Horton] initializing ...

[2018-09-27 12:13:32,551][INFO ][plugins                  ] [Phineas T. Horton] modules [reindex], plugins [analysis-phonetic, mapper-attachments, orcl-acl-plugin, orcl-crawl-plugin, delete-by-query, orcl-ssl-plugin, orcl-authentication-plugin], sites []

[2018-09-27 12:13:32,643][INFO ][env                      ] [Phineas T. Horton] using [1] data paths, mounts [[/usr/opt/app (/dev/mapper/uservg-usr_opt_app_lv)]], net usable_space [187gb], net total_space [199.9gb], spins? [possibly], types [xfs]

[2018-09-27 12:13:32,643][INFO ][env                      ] [Phineas T. Horton] heap size [6.9gb], compressed ordinary object pointers [true]

[2018-09-27 12:13:32,643][WARN ][env                      ] [Phineas T. Horton] max file descriptors [6000] for elasticsearch process likely too low, consider increasing to at least [65536]

[2018-09-27 12:13:33,132][INFO ][http                     ] [Phineas T. Horton] Using [org.elasticsearch.http.netty.NettyHttpServerTransport] as http transport, overridden by [orclssl]

[2018-09-27 12:13:33,395][INFO ][transport                ] [Phineas T. Horton] Using [com.peoplesoft.pt.elasticsearch.orclssl.transport.OrclSSLNettyTransport] as transport, overridden by [orclssl]

[2018-09-27 12:13:35,666][INFO ][node                     ] [Phineas T. Horton] initialized

[2018-09-27 12:13:35,666][INFO ][node                     ] [Phineas T. Horton] starting ...

[2018-09-27 12:13:35,813][INFO ][transport                ] [Phineas T. Horton] publish_address {psesapp601.tmw.com/172.31.102.99:9300}, bound_addresses {172.31.102.99:9300}

[2018-09-27 12:13:35,819][INFO ][discovery                ] [Phineas T. Horton] ESCLUSTER/iJCpck0nQzibqCyV0fQF5Q

[2018-09-27 12:13:35,860][WARN ][com.peoplesoft.pt.elasticsearch.orclssl.transport.OrclSSLNettyTransport] [Phineas T. Horton] exception caught on transport layer [[id: 0x3ed5ae0f]], closing connection

java.net.SocketException: Protocol family unavailable

        at sun.nio.ch.Net.connect0(Native Method)

        at sun.nio.ch.Net.connect(Net.java:454)

        at sun.nio.ch.Net.connect(Net.java:446)

        at sun.nio.ch.SocketChannelImpl.connect(SocketChannelImpl.java:648)

        at org.jboss.netty.channel.socket.nio.NioClientSocketPipelineSink.connect(NioClientSocketPipelineSink.java:108)

        at org.jboss.netty.channel.socket.nio.NioClientSocketPipelineSink.eventSunk(NioClientSocketPipelineSink.java:70)

        at org.jboss.netty.channel.DefaultChannelPipeline.sendDownstream(DefaultChannelPipeline.java:574)

        at org.jboss.netty.channel.Channels.connect(Channels.java:634)

        at org.jboss.netty.channel.AbstractChannel.connect(AbstractChannel.java:216)

        at org.jboss.netty.bootstrap.ClientBootstrap.connect(ClientBootstrap.java:229)

        at org.jboss.netty.bootstrap.ClientBootstrap.connect(ClientBootstrap.java:182)

        at org.elasticsearch.transport.netty.NettyTransport.connectToChannelsLight(NettyTransport.java:949)

        at org.elasticsearch.transport.netty.NettyTransport.connectToNode(NettyTransport.java:916)

        at org.elasticsearch.transport.netty.NettyTransport.connectToNodeLight(NettyTransport.java:888)

        at org.elasticsearch.transport.TransportService.connectToNodeLight(TransportService.java:267)

        at org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing$3.run(UnicastZenPing.java:395)

        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)

        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)

        at java.lang.Thread.run(Thread.java:748)

[2018-09-27 12:13:37,347][WARN ][com.peoplesoft.pt.elasticsearch.orclssl.transport.OrclSSLNettyTransport] [Phineas T. Horton] exception caught on transport layer [[id: 0x38855ffe]], closing connection

java.net.SocketException: Protocol family unavailable

        at sun.nio.ch.Net.connect0(Native Method)

        at sun.nio.ch.Net.connect(Net.java:454)

        at sun.nio.ch.Net.connect(Net.java:446)

        at sun.nio.ch.SocketChannelImpl.connect(SocketChannelImpl.java:648)

        at org.jboss.netty.channel.socket.nio.NioClientSocketPipelineSink.connect(NioClientSocketPipelineSink.java:108)

        at org.jboss.netty.channel.socket.nio.NioClientSocketPipelineSink.eventSunk(NioClientSocketPipelineSink.java:70)

        at org.jboss.netty.channel.DefaultChannelPipeline.sendDownstream(DefaultChannelPipeline.java:574)

        at org.jboss.netty.channel.Channels.connect(Channels.java:634)

        at org.jboss.netty.channel.AbstractChannel.connect(AbstractChannel.java:216)

        at org.jboss.netty.bootstrap.ClientBootstrap.connect(ClientBootstrap.java:229)

        at org.jboss.netty.bootstrap.ClientBootstrap.connect(ClientBootstrap.java:182)

        at org.elasticsearch.transport.netty.NettyTransport.connectToChannelsLight(NettyTransport.java:949)

        at org.elasticsearch.transport.netty.NettyTransport.connectToNode(NettyTransport.java:916)

        at org.elasticsearch.transport.netty.NettyTransport.connectToNodeLight(NettyTransport.java:888)

        at org.elasticsearch.transport.TransportService.connectToNodeLight(TransportService.java:267)

        at org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing$3.run(UnicastZenPing.java:395)

        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)

        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)

        at java.lang.Thread.run(Thread.java:748)

[2018-09-27 12:13:38,849][WARN ][com.peoplesoft.pt.elasticsearch.orclssl.transport.OrclSSLNettyTransport] [Phineas T. Horton] exception caught on transport layer [[id: 0x9a46dba2]], closing connection

java.net.SocketException: Protocol family unavailable

        at sun.nio.ch.Net.connect0(Native Method)

        at sun.nio.ch.Net.connect(Net.java:454)

        at sun.nio.ch.Net.connect(Net.java:446)

        at sun.nio.ch.SocketChannelImpl.connect(SocketChannelImpl.java:648)

        at org.jboss.netty.channel.socket.nio.NioClientSocketPipelineSink.connect(NioClientSocketPipelineSink.java:108)

        at org.jboss.netty.channel.socket.nio.NioClientSocketPipelineSink.eventSunk(NioClientSocketPipelineSink.java:70)

        at org.jboss.netty.channel.DefaultChannelPipeline.sendDownstream(DefaultChannelPipeline.java:574)

        at org.jboss.netty.channel.Channels.connect(Channels.java:634)

        at org.jboss.netty.channel.AbstractChannel.connect(AbstractChannel.java:216)

        at org.jboss.netty.bootstrap.ClientBootstrap.connect(ClientBootstrap.java:229)

        at org.jboss.netty.bootstrap.ClientBootstrap.connect(ClientBootstrap.java:182)

        at org.elasticsearch.transport.netty.NettyTransport.connectToChannelsLight(NettyTransport.java:949)

        at org.elasticsearch.transport.netty.NettyTransport.connectToNode(NettyTransport.java:916)

        at org.elasticsearch.transport.netty.NettyTransport.connectToNodeLight(NettyTransport.java:888)

        at org.elasticsearch.transport.TransportService.connectToNodeLight(TransportService.java:267)

        at org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing$3.run(UnicastZenPing.java:395)

        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)

        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)

        at java.lang.Thread.run(Thread.java:748)

[2018-09-27 12:13:38,878][INFO ][cluster.service          ] [Phineas T. Horton] new_master {Phineas T. Horton}{iJCpck0nQzibqCyV0fQF5Q}{172.31.102.99}{psesapp601.tmw.com/172.31.102.99:9300}, reason: zen-disco-join(elected_as_master, [0] joins received)

[2018-09-27 12:13:38,908][INFO ][http                     ] [Phineas T. Horton] publish_address {psesapp601.tmw.com/172.31.102.99:9200}, bound_addresses {172.31.102.99:9200}

[2018-09-27 12:13:38,908][INFO ][node                     ] [Phineas T. Horton] started

[2018-09-27 12:13:39,387][INFO ][gateway                  ] [Phineas T. Horton] recovered [4] indices into cluster_state

[2018-09-27 12:13:55,163][INFO ][cluster.routing.allocation] [Phineas T. Horton] Cluster health status changed from [RED] to [YELLOW] (reason: [shards started [[orcl_es_defaultindex_fsprod][4]]

Comments

Popular posts from this blog

Oracle: To clean up WRI$_ADV_OBJECTS