shjung 1 year ago
parent
commit
9eab787051

+ 5 - 0
conf/ggits-comm-server.yml

@@ -3,5 +3,10 @@ spring:
     active: test
 
 application:
+  kafka:
+    producer:
+      dd:
+    consumer:
+
   ggits-server-ip: 192.168.24.22
 #  ggits-server-ip: 127.0.0.1

+ 60 - 0
src/main/java/com/ggits/comm/server/config/KafkaConfig.java

@@ -0,0 +1,60 @@
+package com.ggits.comm.server.config;
+
+import com.ggits.app.common.xnet.NetUtils;
+import lombok.Data;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.springframework.boot.context.properties.ConfigurationProperties;
+import org.springframework.stereotype.Component;
+
+import javax.annotation.PostConstruct;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+@Slf4j
+@Data
+@Component
+@ConfigurationProperties(prefix = "application.kafka")
+public class KafkaConfig {
+
+    private String bootstrapServers;
+    private String groupId = "ggits-comm-server";
+    private String pingTopic = "ping-topic";
+
+    private String consumerGroupId = "tsi-comm-server";
+    private String consumerAckConfig = "1";
+
+    private boolean multiConnect = false;
+    private boolean enableNode = false;
+    private String nodeServers = "";
+    public List<Map<String, String>> props = new ArrayList<>();
+
+    @PostConstruct
+    private void init() {
+        log.info("{}", this);
+    }
+
+    public String getGroupId() {
+        return this.consumerGroupId + "-" + NetUtils.getHostName();
+    }
+
+    public Map<String, Object> getConsumerPropertiesMap() {
+        Map<String, Object> properties = new HashMap<>();
+        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, this.bootstrapServers);
+        properties.put(ConsumerConfig.GROUP_ID_CONFIG, getGroupId());
+        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
+        properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 1);
+        properties.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "10000");
+        properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
+        properties.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, "100");
+        properties.put(ConsumerConfig.CHECK_CRCS_CONFIG, false);
+        properties.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 1);
+        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, org.apache.kafka.common.serialization.StringDeserializer.class);
+        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, org.apache.kafka.common.serialization.LongDeserializer.class);
+
+        return properties;
+    }
+}
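
Two things in KafkaConfig are worth flagging. First, getGroupId() shadows the Lombok-generated getter, so the effective consumer group id is host-unique and every server instance consumes the ping topic independently of its peers. Second, max.poll.interval.ms is set to 100 ms, far below Kafka's default of 300000 ms; if successive poll() calls are ever further apart than that, the consumer is expelled from the group and a rebalance follows. As a minimal sketch of consuming the properties map directly (assumed usage, not part of this commit):

import java.time.Duration;
import java.util.Collections;
import org.apache.kafka.clients.consumer.KafkaConsumer;

// One-off connectivity probe built from the same properties map; the record
// value is the sender's System.nanoTime() (see KafkaProducerService.sendPing()).
static void probePingTopic(KafkaConfig kafkaConfig) {
    try (KafkaConsumer<String, Long> probe =
                 new KafkaConsumer<>(kafkaConfig.getConsumerPropertiesMap())) {
        probe.subscribe(Collections.singletonList(kafkaConfig.getPingTopic()));
        probe.poll(Duration.ofSeconds(1))
             .forEach(r -> System.out.printf("ping %s -> %d%n", r.key(), r.value()));
    }
}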

+ 86 - 0
src/main/java/com/ggits/comm/server/kafka/KafkaConsumerService.java

@@ -0,0 +1,86 @@
+package com.ggits.comm.server.kafka;
+
+import com.ggits.comm.server.config.KafkaConfig;
+import com.ggits.comm.server.process.dbms.DbmsDataProcess;
+import lombok.RequiredArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.kafka.clients.consumer.Consumer;
+import org.apache.kafka.common.TopicPartition;
+import org.springframework.kafka.core.ConsumerFactory;
+import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
+import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
+import org.springframework.kafka.listener.ConsumerAwareRebalanceListener;
+import org.springframework.kafka.listener.ContainerProperties;
+
+import javax.annotation.PostConstruct;
+import java.util.Collection;
+
+@Slf4j
+@RequiredArgsConstructor
+//@Service
+public class KafkaConsumerService {
+
+    private final KafkaConfig config;
+    private final DbmsDataProcess dbmsDataProcess;
+
+    private ConcurrentMessageListenerContainer<String, Long> kafkaListenerContainer;
+
+    @PostConstruct
+    void init() {
+        log.info("[{}] ------------------", this.getClass().getSimpleName());
+        start();
+    }
+
+    public void start() {
+
+        if (this.kafkaListenerContainer != null) {
+            if (!this.kafkaListenerContainer.isRunning()) {
+                log.warn("kafkaListenerContainer restart");
+                this.kafkaListenerContainer.start();
+            }
+            return;
+        }
+
+        ContainerProperties containerProperties = new ContainerProperties(this.config.getPingTopic());
+        containerProperties.setGroupId(this.config.getGroupId());
+        containerProperties.setPollTimeout(5000);
+        //containerProperties.setAckMode(ContainerProperties.AckMode.MANUAL);
+        containerProperties.setMessageListener(new TsiKafkaConsumerWorker(this.dbmsDataProcess));
+        containerProperties.setConsumerRebalanceListener(new ConsumerAwareRebalanceListener() {
+            @Override
+            public void onPartitionsRevokedBeforeCommit(Consumer<?, ?> consumer, Collection<TopicPartition> partitions) {
+            }
+            @Override
+            public void onPartitionsRevokedAfterCommit(Consumer<?, ?> consumer, Collection<TopicPartition> partitions) {
+            }
+            @Override
+            public void onPartitionsAssigned(Consumer<?, ?> consumer, Collection<TopicPartition> partitions) {
+                consumer.seekToEnd(partitions);
+            }
+        });
+
+        ConsumerFactory<String, Long> consumerFactory = new DefaultKafkaConsumerFactory<>(this.config.getConsumerPropertiesMap());
+        this.kafkaListenerContainer = new ConcurrentMessageListenerContainer<>(consumerFactory, containerProperties);
+        this.kafkaListenerContainer.setBeanName("consumer");
+        this.kafkaListenerContainer.setConcurrency(1);
+        this.kafkaListenerContainer.setErrorHandler((thrownException, data) -> {
+            log.error("kafkaListenerContainer error: {}", thrownException.getMessage());
+            this.kafkaListenerContainer.stop();
+        });
+
+        this.kafkaListenerContainer.start();
+    }
+
+    public void shutdown() {
+        try {
+            //if (this.consumer != null) {
+            //    this.consumer.close();
+            //}
+            if (this.kafkaListenerContainer != null) {
+                this.kafkaListenerContainer.stop();
+            }
+        }
+        catch(Exception ignored) {
+        }
+    }
+}
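
Note that the @Service annotation above is commented out, so this class is never registered as a bean and the consumer side of the ping loop stays inert in this commit. If it were needed, one hypothetical way to activate it without uncommenting the annotation (wiring class name assumed, not part of this diff):

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
class KafkaConsumerWiring {

    // @RequiredArgsConstructor on KafkaConsumerService supplies this constructor;
    // @PostConstruct init() then starts the listener container.
    @Bean
    KafkaConsumerService kafkaConsumerService(KafkaConfig config, DbmsDataProcess dbmsDataProcess) {
        return new KafkaConsumerService(config, dbmsDataProcess);
    }
}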

+ 170 - 0
src/main/java/com/ggits/comm/server/kafka/KafkaProducerService.java

@@ -0,0 +1,170 @@
+package com.ggits.comm.server.kafka;
+
+import com.ggits.app.common.kafka.KafkaProducerFactory;
+import com.ggits.app.common.utils.TimeUtils;
+import com.ggits.comm.server.config.KafkaConfig;
+import com.ggits.comm.server.process.dbms.DbmsDataProcess;
+import lombok.AllArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.kafka.clients.producer.Callback;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import org.apache.kafka.clients.producer.RecordMetadata;
+import org.springframework.kafka.core.KafkaTemplate;
+import org.springframework.kafka.support.SendResult;
+import org.springframework.stereotype.Service;
+import org.springframework.util.concurrent.ListenableFuture;
+import org.springframework.util.concurrent.ListenableFutureCallback;
+
+import javax.annotation.PostConstruct;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+@Slf4j
+@AllArgsConstructor
+@Service
+public class KafkaProducerService {
+
+    private final KafkaConfig config;
+    private final DbmsDataProcess dbmsDataProcess;
+
+    private KafkaTemplate<String, byte[]> nodeProducer;
+    private KafkaTemplate<String, Long> pingProducer;
+
+    @PostConstruct
+    void init() {
+        //this.callback = new ProducerResultCallback();
+
+        if (this.config.isMultiConnect()) {
+            // Create and use a separate KafkaTemplate for each producer.
+            if (this.config.isEnableNode()) {
+                this.nodeProducer = KafkaProducerFactory.createByteArrayTemplate(this.config.getNodeServers(), this.config.props);
+            }
+        }
+        else {
+            // When data is sent through a single producer KafkaTemplate,
+            // the same KafkaTemplate instance is used.
+            KafkaTemplate<String, byte[]> producer = KafkaProducerFactory.createByteArrayTemplate(this.config.getBootstrapServers(), this.config.props);
+            if (this.config.isEnableNode()) {
+                this.nodeProducer = producer;
+            }
+        }
+
+        createPingProducer();
+
+        log.info("[{}] ------------------", this.getClass().getSimpleName());
+        log.info("[{}]   nodeProducer: {}", this.getClass().getSimpleName(), this.nodeProducer);
+        log.info("[{}]   pingProducer: {}", this.getClass().getSimpleName(), this.pingProducer);
+
+        //this.producer = new KafkaProducer<String, byte[]>(KafkaProducerFactory.getProperties(this.config.getBootstrapServers(), this.config.props));
+    }
+
+    public void createPingProducer() {
+        Map<String, Object> props = new HashMap<>();
+        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, this.config.getBootstrapServers());
+        props.put(ProducerConfig.ACKS_CONFIG, this.config.getConsumerAckConfig());
+        props.put(ProducerConfig.RETRIES_CONFIG, 0);
+        props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
+        props.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, 3000);
+        props.put(ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG, 4000);
+        props.put(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, 3000);
+        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, org.apache.kafka.common.serialization.StringSerializer.class);
+        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, org.apache.kafka.common.serialization.LongSerializer.class);
+
+        this.pingProducer = KafkaProducerFactory.createProducerTemplate(props);
+        this.pingProducer.setDefaultTopic(this.config.getPingTopic());
+    }
+
+    public void shutdown() {
+        try {
+            if (this.nodeProducer != null) {
+                this.nodeProducer.destroy();
+            }
+            if (this.pingProducer != null) {
+                this.pingProducer.destroy();
+            }
+        }
+        catch(Exception e) {
+            log.error("Failed to shutdown: {}", e.getMessage());
+        }
+    }
+
+    public void sendPing() {
+        if (this.pingProducer == null) {
+            log.info("sendPing: pingProducer == null");
+            return;
+        }
+
+        long sendNanoTime = System.nanoTime();
+//        TsiTpmsManager.getInstance().getKafkaTransVo().setSendNanoTime(sendNanoTime);   // nano seconds
+//        TsiTpmsManager.getInstance().getKafkaTransVo().setSendTm(0);                    // micro seconds
+//        TsiTpmsManager.getInstance().getKafkaTransVo().setRecvTm(0);                    // micro seconds
+
+        ListenableFuture<SendResult<String, Long>> future = this.pingProducer.sendDefault("key", sendNanoTime);
+        future.addCallback(new ListenableFutureCallback<SendResult<String, Long>>() {
+
+            @Override
+            public void onSuccess(SendResult<String, Long> result) {
+                long recvNanoTime = System.nanoTime();
+                long sendTime = TimeUnit.MICROSECONDS.convert(Math.abs(recvNanoTime - sendNanoTime), TimeUnit.NANOSECONDS);
+//                TsiTpmsManager.getInstance().getKafkaTransVo().setSendTm(sendTime);
+                log.info("send ping success: {}, {}", sendNanoTime, TimeUtils.elapsedTimeStr(recvNanoTime - sendNanoTime));
+            }
+            @Override
+            public void onFailure(Throwable ex) {
+                long recvNanoTime = System.nanoTime();
+//                TsiTpmsManager.getInstance().getKafkaTransVo().setSendNanoTime(0);
+//                KafkaTransVo stat = new KafkaTransVo(AbstractDbmsVo.DBMS_KAFKA_TRANS_HS);
+//                stat.setHostName(TsiTpmsManager.getInstance().getKafkaTransVo().getHostName());
+//                stat.setStatus(0);
+//                stat.setSendTm(TimeUnit.MICROSECONDS.convert(Math.abs(recvNanoTime - sendNanoTime), TimeUnit.NANOSECONDS));
+//                stat.setRecvTm(0);
+//                tsiCvimDbmsService.add(stat, (int)Thread.currentThread().getId());
+//                log.error("send ping failed: {}, {}, {}", sendNanoTime, TimeUtils.elapsedTimeStr(recvNanoTime - sendNanoTime), ex.getMessage());
+//
+//                // Save a Kafka send-failure alarm
+//                String value = "Send Failed";
+//                if (ex != null) {
+//                    value = ex.getMessage().substring(0, 99);
+//                }
+//                AlarmOccrVo alarm = new AlarmOccrVo(AbstractDbmsVo.DBMS_ALARM_OCCR_HS);
+//                alarm.setAlarmCode(TsiAlarmConfigVo.KAFKA_01);
+//                alarm.setAlarmTarget(producerConfig.getBootstrapServers());
+//                alarm.setAlarmValue(value);
+//                tsiCvimDbmsService.add(alarm, (int)Thread.currentThread().getId());
+            }
+        });
+    }
+
+    public void sendNode(String key, byte[] data) {
+        if (this.nodeProducer != null) {
+            try {
+                this.nodeProducer.send(key, key, data);
+            }
+            catch (Exception e) {
+                log.error("sendNode: {}, {}: {}", key, key, e.toString());
+            }
+        }
+    }
+
+    protected void send(KafkaTemplate<String, byte[]> kafka, String topic, String key, byte[] data) {
+        try {
+            kafka.send(topic, key, data);
+        }
+        catch(Exception e) {
+            log.error("kafka.send: {}, Exception: {}", topic, e.getMessage());
+        }
+    }
+
+    private static class ProducerResultCallback implements Callback {
+        @Override
+        public void onCompletion(RecordMetadata recordMetadata, Exception e) {
+            if (e != null) {
+                log.error("Error while producing message to topic: {}, {}", recordMetadata, e.toString());
+            }
+            else {
+                log.info("sent message to topic:{} partition:{} offset:{}", recordMetadata.topic(), recordMetadata.partition(), recordMetadata.offset());
+            }
+        }
+    }
+}
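
KafkaProducerFactory lives in com.ggits.app.common.kafka and is not part of this diff. A hypothetical sketch of what createByteArrayTemplate presumably does (property merging and names assumed):

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;

static KafkaTemplate<String, byte[]> createByteArrayTemplate(String servers,
                                                             List<Map<String, String>> extraProps) {
    Map<String, Object> props = new HashMap<>();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
    if (extraProps != null) {
        extraProps.forEach(props::putAll);   // overlay entries from application.kafka.props
    }
    return new KafkaTemplate<>(new DefaultKafkaProducerFactory<>(props));
}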

+ 50 - 0
src/main/java/com/ggits/comm/server/kafka/TsiKafkaConsumerWorker.java

@@ -0,0 +1,50 @@
+package com.ggits.comm.server.kafka;
+
+import com.ggits.app.common.utils.TimeUtils;
+import com.ggits.comm.server.process.dbms.DbmsDataProcess;
+import lombok.AllArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.springframework.kafka.listener.MessageListener;
+import org.springframework.kafka.support.Acknowledgment;
+
+@Slf4j
+@AllArgsConstructor
+public class TsiKafkaConsumerWorker implements MessageListener<String, Long> {
+
+    private DbmsDataProcess dbmsDataProcess;
+
+    @Override
+    public void onMessage(ConsumerRecord<String, Long> record) {
+        Long sendNanoTime = record.value();
+        Long recvNanoTime = System.nanoTime();
+
+//        KafkaTransVo stat = new KafkaTransVo(AbstractDbmsVo.DBMS_KAFKA_TRANS_HS);
+//        stat.setHostName(TsiTpmsManager.getInstance().getKafkaTransVo().getHostName());
+//        stat.setStatus(1);
+//        if (TsiTpmsManager.getInstance().getKafkaTransVo().getSendNanoTime() == sendNanoTime) {
+//            stat.setSendTm(TsiTpmsManager.getInstance().getKafkaTransVo().getSendTm());
+//            stat.setRecvTm(TimeUnit.MICROSECONDS.convert(Math.abs(recvNanoTime - sendNanoTime), TimeUnit.NANOSECONDS));
+//        }
+//        else {
+//            stat.setRecvTm(TimeUnit.MICROSECONDS.convert(Math.abs(recvNanoTime - sendNanoTime), TimeUnit.NANOSECONDS));
+//            stat.setSendTm(stat.getRecvTm());
+//            log.info("recv ping success, sendNanoTime miss match: {}, {}", sendNanoTime, TimeUtils.elapsedTimeStr(recvNanoTime - sendNanoTime));
+//        }
+//        dbmsDataProcess.add(stat, (int)Thread.currentThread().getId());
+//        log.info("recv ping success: {}, {}", sendNanoTime, TimeUtils.elapsedTimeStr(recvNanoTime - sendNanoTime));
+    }
+
+    @Override
+    public void onMessage(ConsumerRecord<String, Long> record, Acknowledgment acknowledgment) {
+        try {
+            Long sendNanoTime = record.value();
+            Long recvNanoTime = System.nanoTime();
+            log.info("recv ping success, ack: {}, {}", sendNanoTime, TimeUtils.elapsedTimeStr(recvNanoTime - sendNanoTime));
+            //acknowledgment.acknowledge();
+        } catch (Exception e) {
+            log.error("onMessage:" + e.getMessage());
+        }
+    }
+
+}
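
For reference, the round-trip arithmetic shared by sendPing() and this listener, with illustrative numbers. It is only meaningful when the consuming JVM is the one that sent the ping (the commented-out sendNanoTime match check guards exactly that); System.nanoTime() values are not comparable across processes.

import java.util.concurrent.TimeUnit;

long sendNanoTime = 1_000_000_000L;   // the value carried on the topic
long recvNanoTime = 1_004_500_000L;   // observed on receipt
long micros = TimeUnit.MICROSECONDS.convert(
        Math.abs(recvNanoTime - sendNanoTime), TimeUnit.NANOSECONDS);
// micros == 4500, i.e. a 4.5 ms round trip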

+ 7 - 6
src/main/java/com/ggits/comm/server/process/dbms/DbmsDataProcess.java

@@ -95,13 +95,14 @@ public class DbmsDataProcess {
                     }
                     List<IntStatusDto> intStatusLists = (List<IntStatusDto>)data.getData();
                     cnt = this.sigIntDao.updateStatus(intStatusLists);
-                    log.info("DBMS_DATA_INT_STATUS_UPDATE: [{}, {}] ({}/{}) {}/{} EA. Pop({})/Parsing({})/Dbms({})/Total({}) ms.",
+                    log.info("INT_STATUS_UPDATE: [{}, {}], {}",
                             data.getRegionCd(), data.getRegionId(),
-                            center.getQSeq(), data.getQIdx(),
-                            intStatusLists.size(), cnt,
-                            data.getPopTimestamp(), data.getParsingTimestamp(),
-                            System.currentTimeMillis() - start,
-                            System.currentTimeMillis() - data.getTimestamp());
+                            String.format("%4d/%4d EA. Pop(%5d)/Parsing(%5d)/Dbms(%5d)/Total(%5d) ms.",
+                                    intStatusLists.size(), cnt,
+                                    data.getPopTimestamp(), data.getParsingTimestamp(),
+                                    System.currentTimeMillis() - start,
+                                    System.currentTimeMillis() - data.getTimestamp()));
+
                     intStatusLists.clear();
                     break;
                 case DBMS_DATA_CENTER_STTS:
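
Illustrative output of the reworked fixed-width log payload:

String.format("%4d/%4d EA. Pop(%5d)/Parsing(%5d)/Dbms(%5d)/Total(%5d) ms.",
        1200, 1198, 12L, 3L, 45L, 60L)
// -> "1200/1198 EA. Pop(   12)/Parsing(    3)/Dbms(   45)/Total(   60) ms."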

+ 6 - 3
src/main/java/com/ggits/comm/server/process/work/GgitsPacketProcess.java

@@ -1,6 +1,7 @@
 package com.ggits.comm.server.process.work;
 
 import com.ggits.comm.server.config.ApplicationConfig;
+import com.ggits.comm.server.kafka.KafkaProducerService;
 import com.ggits.comm.server.process.AbstractAppProcess;
 import com.ggits.comm.server.process.AbstractAppWorker;
 import com.ggits.comm.server.process.dbms.DbmsDataProcess;
@@ -23,9 +24,11 @@ public class GgitsPacketProcess extends AbstractAppProcess {
     private final ApplicationConfig config;
     private final DbmsDataProcess dbmsDataProcess;
     private final ApplicationRepository repo;
+    private final KafkaProducerService kafkaProducer;
+
     @PostConstruct
     void init() {
-        this.workerGroup = new ThreadGroup("packetProcess");
+        this.workerGroup = new ThreadGroup("PacketProcess");
     }
 
     public boolean add(Object object, int idx) {
@@ -56,10 +59,10 @@ public class GgitsPacketProcess extends AbstractAppProcess {
         qSize *= 4;
         qSize /= this.workers;
         for (int ii = 0; ii < this.workers; ii++) {
-            GgitsPacketWorker packetWorker = new GgitsPacketWorker(ii, qSize, this.repo, this.dbmsDataProcess);
+            GgitsPacketWorker packetWorker = new GgitsPacketWorker(ii, qSize, this.repo, this.dbmsDataProcess, this.kafkaProducer);
             this.workerList.add(packetWorker);
             Thread worker = new Thread(workerGroup, packetWorker);
-            worker.setName(String.format("packetWorker-%03d.%03d", this.workers, ii+1));
+            worker.setName(String.format("PacketWorker-%03d.%03d", this.workers, ii+1));
             worker.setDaemon(true);
             this.threadList.add(worker);
         }

+ 5 - 1
src/main/java/com/ggits/comm/server/process/work/GgitsPacketWorker.java

@@ -5,6 +5,7 @@ import com.ggits.comm.server.dto.IntDto;
 import com.ggits.comm.server.dto.IntStatusDto;
 import com.ggits.comm.server.dto.RegionCenter;
 import com.ggits.comm.server.dto.UnknownIntDto;
+import com.ggits.comm.server.kafka.KafkaProducerService;
 import com.ggits.comm.server.process.AbstractAppWorker;
 import com.ggits.comm.server.process.dbms.DbmsData;
 import com.ggits.comm.server.process.dbms.DbmsDataProcess;
@@ -24,13 +25,16 @@ public class GgitsPacketWorker extends AbstractAppWorker implements Runnable {
 
     private final ApplicationRepository repo;
     private final DbmsDataProcess dbmsDataProcess;
+    private final KafkaProducerService kafkaProducer;
+
     private final LinkedBlockingQueue<SigGgitsTsinfoDto> DATA_QUEUE;
 
-    public GgitsPacketWorker(int idx, int qSize, ApplicationRepository repo, DbmsDataProcess dbmsDataProcess) {
+    public GgitsPacketWorker(int idx, int qSize, ApplicationRepository repo, DbmsDataProcess dbmsDataProcess, KafkaProducerService kafkaProducer) {
         this.idx = idx;
         this.qSize = qSize;
         this.repo = repo;
         this.dbmsDataProcess = dbmsDataProcess;
+        this.kafkaProducer = kafkaProducer;
         this.DATA_QUEUE = new LinkedBlockingQueue<>(qSize);
     }
 

+ 16 - 0
src/main/resources/application.yml

@@ -35,6 +35,7 @@ management:
         include: health, metrics
 
 application:
+  ggits-server-ip: 192.168.24.22
   process-id: 81010
   packet-workers: 0
   packet-queue-size: 0
@@ -44,6 +45,21 @@ application:
     dbms: 0
     work: 60
 
+  kafka:
+    bootstrap-servers: 192.168.11.23:9092
+    group-id: ggits-comm-server
+    consumer-ack-config: 1
+    ping-topic: ping-topic
+    multi-connect: false
+    node-servers:
+    enable-node: false
+    props:
+    #  - request.timeout.ms: 100
+    #  - max.block.ms: 100
+    #  - transactional.id: tsi-comm-server-01
+    #  - acks: 0
+    #  - retries: 0
+    #  - linger.ms: 1
 ---
 spring:
   config:
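
The kebab-case keys above bind to the camelCase fields of KafkaConfig through Spring's relaxed binding (bootstrap-servers -> bootstrapServers, ping-topic -> pingTopic, and so on). A minimal sketch of verifying the binding, assuming the usual Spring Boot test dependencies (test class not part of this commit):

import static org.junit.jupiter.api.Assertions.assertEquals;

import com.ggits.comm.server.config.KafkaConfig;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;

@SpringBootTest
class KafkaConfigBindingTest {

    @Autowired
    private KafkaConfig kafkaConfig;

    @Test
    void bindsApplicationKafkaPrefix() {
        assertEquals("192.168.11.23:9092", kafkaConfig.getBootstrapServers());
        assertEquals("ping-topic", kafkaConfig.getPingTopic());
        assertEquals("1", kafkaConfig.getConsumerAckConfig());
    }
}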

+ 36 - 16
start.sh

@@ -1,18 +1,37 @@
-#!/bin/sh
-
-export SERVICE_HOME=/home/cvim/test
-export SERVICE_NAME=sig-comm-server
-export SERVICE_VERSION=0.0.1-SNAPSHOT
-export EXE_NAME=$SERVICE_NAME-$SERVICE_VERSION.jar
-export PID_NAME=$SERVICE_NAME.pid
-export SERVICE_PID=$SERVICE_HOME/conf/$PID_NAME
-
-cd $SERVICE_HOME
+#!/bin/sh
+
+#SERVICE_HOME=$(dirname $0)
+USER_HOME=/tcs/itcs
+SERVICE_HOME=/tcs/itcs/bin
+
+usage() {
+	echo "Usage: $0 [ggits|sig]"
+	echo "RUN ggits-comm-server: $0 ggits"
+	echo "RUN sig-comm-server:   $0 sig"
+	exit 1
+}
+
+case $1 in
+  ggits)
+    SERVICE_NAME=ggits-comm-server
+    SERVICE_VERSION=0.0.1
+    ;;
+  sig)
+    SERVICE_NAME=sig-comm-server
+    SERVICE_VERSION=0.0.1
+    ;;
+  *)
+    usage
+    ;;
+esac
+
+
+EXE_NAME=${SERVICE_HOME}/$SERVICE_NAME-$SERVICE_VERSION.jar
+PID_NAME=$SERVICE_NAME.pid
+SERVICE_PID=$SERVICE_HOME/conf/$PID_NAME
 
 export JAVA_OPT="-server"
-export JAVA_OPT="$JAVA_OPT -Xms4096m -Xmx4096m"
-export JAVA_OPT="$JAVA_OPT -Xlog:gc*:file=logs/gc.log"
-export CONFIG_OPT="--spring.config.location=conf//application.yml"
+export JAVA_OPT="$JAVA_OPT -Xlog:gc*:file=${USER_HOME}/logs/${SERVICE_NAME}/${SERVICE_NAME}.gc.log"
 
 if [ ! -z "$SERVICE_PID" ]; then
   if [ -f "$SERVICE_PID" ]; then
@@ -21,14 +40,15 @@ if [ ! -z "$SERVICE_PID" ]; then
   fi
 fi
 
-nohup java $JAVA_OPT -jar ./$EXE_NAME $CONFIG_OPT 1> /dev/null 2>&1 &
+nohup /opt/java8/bin/java $JAVA_OPT -jar $EXE_NAME > /dev/null &
 
 echo "$SERVICE_NAME is started...."
 
 sleep 1
 
-ps -eaf | grep $SERVICE_NAME | grep -v grep |wc -l
+ps -eaf | grep $SERVICE_NAME | grep -v grep |grep -v tail |wc -l
 
 sleep 1
 
-ps -eaf | grep $SERVICE_NAME | grep -v grep
+ps -eaf | grep $SERVICE_NAME | grep -v grep |grep -v tail
+

+ 4 - 11
stat.sh

@@ -1,16 +1,9 @@
-#!/bin/sh
+#!/bin/sh
 
-export SERVICE_HOME=/home/cvim/test
-export SERVICE_NAME=sig-comm-server
-export SERVICE_VERSION=0.0.1-SNAPSHOT
-export EXE_NAME=$SERVICE_NAME-$SERVICE_VERSION.jar
-export PID_NAME=$SERVICE_NAME.pid
-export SERVICE_PID=$SERVICE_HOME/conf/$PID_NAME
+export SERVICE_NAME=-server
 
-cd $SERVICE_HOME
-
-ps -eaf | grep $SERVICE_NAME | grep -v grep |wc -l
+ps -eaf | grep $SERVICE_NAME | grep -v grep | grep -v tail | grep -v kafka | grep java | wc -l
 
 sleep 1
 
-ps -eaf | grep $SERVICE_NAME | grep -v grep
+ps -eaf | grep $SERVICE_NAME | grep -v grep | grep -v tail | grep -v kafka | grep java

+ 32 - 10
stop.sh

@@ -1,13 +1,34 @@
-#!/bin/sh
+#!/bin/sh
 
-export SERVICE_HOME=/home/cvim/test
-export SERVICE_NAME=sig-comm-server
-export SERVICE_VERSION=0.0.1-SNAPSHOT
-export EXE_NAME=$SERVICE_NAME-$SERVICE_VERSION.jar
-export PID_NAME=$SERVICE_NAME.pid
-export SERVICE_PID=$SERVICE_HOME/conf/$PID_NAME
+#SERVICE_HOME=$(dirname $0)
+USER_HOME=/tcs/itcs
+SERVICE_HOME=/tcs/itcs/bin
 
-cd $SERVICE_HOME
+usage() {
+	echo "Usage: $0 [ggits|sig]"
+	echo "RUN ggits-comm-server: $0 ggits"
+	echo "RUN sig-comm-server:   $0 sig"
+	exit 1
+}
+
+case $1 in
+  ggits)
+    SERVICE_NAME=ggits-comm-server
+    SERVICE_VERSION=0.0.1
+    ;;
+  sig)
+    SERVICE_NAME=sig-comm-server
+    SERVICE_VERSION=0.0.1
+    ;;
+  *)
+    usage
+    ;;
+esac
+
+
+EXE_NAME=$SERVICE_NAME-$SERVICE_VERSION.jar
+PID_NAME=$SERVICE_NAME.pid
+SERVICE_PID=$SERVICE_HOME/conf/$PID_NAME
 
 if [ ! -z "$SERVICE_PID" ]; then
   if [ -f "$SERVICE_PID" ]; then
@@ -19,8 +40,8 @@ if [ ! -z "$SERVICE_PID" ]; then
   fi
 fi
 
-LOOP=$(seq 0 9)
-for i in $LOOP
+i=0
+while [ $i -ne 9 ]
 do
   sleep 1
   if [ -f "$SERVICE_PID" ]; then
@@ -29,6 +50,7 @@ do
     echo "$SERVICE_NAME stopped.........."
     exit
   fi
+  i=$(($i+1))
 done
 
 echo "$SERVICE_NAME cannot be terminated......."