shjung committed 10 months ago
commit 5727e9c58e

+ 3 - 30
src/main/java/com/ggits/comm/server/config/KafkaConfig.java

@@ -1,15 +1,12 @@
 package com.ggits.comm.server.config;
 
-import com.ggits.app.common.xnet.NetUtils;
 import lombok.Data;
 import lombok.extern.slf4j.Slf4j;
-import org.apache.kafka.clients.consumer.ConsumerConfig;
 import org.springframework.boot.context.properties.ConfigurationProperties;
 import org.springframework.stereotype.Component;
 
 import javax.annotation.PostConstruct;
 import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
@@ -19,42 +16,18 @@ import java.util.Map;
 @ConfigurationProperties(prefix = "application.kafka")
 public class KafkaConfig {
 
+    public static final String SIG_ALL_TOPIC = "sig-all";
+
     private String bootstrapServers;
     private String groupId = "ggits-comm-server";
-    private String pingTopic = "ping-topic";
-
-    private String consumerGroupId = "tsi-comm-server";
-    private String consumerAckConfig = "1";
 
-    private boolean multiConnect = false;
     private boolean enableNode = false;
-    private String nodeServers = "";
+    private boolean enableSig = false;
     public List<Map<String, String>> props = new ArrayList<Map<String, String>>();
 
     @PostConstruct
     private void init() {
-
         log.info("{}", this);
     }
 
-    public String getGroupId() {
-        return this.consumerGroupId + "-" + NetUtils.getHostName();
-    }
-
-    public Map<String, Object> getConsumerPropertiesMap() {
-        Map<String, Object> properties = new HashMap();
-        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, this.bootstrapServers);
-        properties.put(ConsumerConfig.GROUP_ID_CONFIG, getGroupId());
-        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
-        properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 1);
-        properties.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "10000");
-        properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
-        properties.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, "100");
-        properties.put(ConsumerConfig.CHECK_CRCS_CONFIG, false);
-        properties.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 1);
-        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, org.apache.kafka.common.serialization.StringDeserializer.class);
-        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, org.apache.kafka.common.serialization.LongDeserializer.class);
-
-        return properties;
-    }
 }
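
For reference, with @Data and Spring Boot's relaxed binding, the fields above bind to the application.kafka keys in application.yml (end of this commit) as follows:

    application.kafka.bootstrap-servers -> bootstrapServers  (String)
    application.kafka.group-id          -> groupId           (String)
    application.kafka.enable-node       -> enableNode        (read via isEnableNode())
    application.kafka.enable-sig        -> enableSig         (read via isEnableSig())
    application.kafka.props             -> props             (List<Map<String, String>>)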

+ 94 - 7
src/main/java/com/ggits/comm/server/dto/IntStatusDto.java

@@ -1,6 +1,7 @@
 package com.ggits.comm.server.dto;
 
 import lombok.AllArgsConstructor;
+import lombok.Getter;
 import lombok.NoArgsConstructor;
 import lombok.ToString;
 
@@ -14,8 +15,10 @@ import java.io.Serializable;
 public class IntStatusDto implements Serializable {
     public static final long serialVersionUID = 1L;
 
+    public static final int MAX_KAFKA_DATA_SIZE = 51;
+
     public String regionCd;                   	//	N	VARCHAR2(3)	N
-    public int intNo;                       //	N	NUMBER(6)	N
+    public int    intNo;                        //	N	NUMBER(6)	N
     public String COLLCT_DTIME;                	//	N	DATE	Y
     public String SYS_COLLCT_DTIME;            	//	N	DATE Y
     public String COMM_ON_OFF_FLAG;            	//	N	CHAR(1)	Y
@@ -49,22 +52,77 @@ public class IntStatusDto implements Serializable {
     public int    GROUP_NO;                     //	N	NUMBER(6)	Y
     public String PPC_CONTRL_FLAG;              //	N	CHAR(1)	Y
 
-    public IntStatusDto(String regionCd, int intNo) {
+    private long nodeId;
+    @Getter
+    private byte[] kafkaData;
+
+    // nodeid       4
+    // status       1   //intersection status (0: comm error, 1: normal, 2: flashing, 3: lights out, 4: manual operation, 5: phase hold)
+    // time         4
+    // oper Mode    1   //controller operating mode code (0: SCU fixed-cycle mode, 1: non-actuated OFFLINE control mode, 2: actuated OFFLINE control mode, 4: actuated online control mode, 5: non-actuated online control mode)
+    // a ring phase 1   //RING A PHASE (0 ~ 7)
+    // b ring phase 1   //RING B PHASE (0 ~ 7)
+    // mapNodeId    1   //map number (0: normal plan, 1~5: time-of-day plans, 6: dedicated map)
+    // reserved     7   //reserved
+
+//    nodeid                        4
+//    time                          4
+//    version                       1   // 1, 2
+//    COMM_ON_OFF_FLAG	            1   N	CHAR(1)	    Y			communication ON/OFF flag (0: normal, 1: comm FAIL)
+//    CONTRLR_OPER_MODE_CD	        1   N	CHAR(1)	    Y			controller operating mode code (0: SCU fixed-cycle mode, 1: non-actuated OFFLINE control mode, 2: actuated OFFLINE control mode, 4: actuated online control mode, 5: non-actuated online control mode)
+//    A_RING_PHASE_VAL	            1   N	NUMBER(3)	Y			A-ring phase value (0~7)
+//    B_RING_PHASE_VAL	            1   N	NUMBER(3)	Y			B-ring phase value (0~7)
+//    SIGLIGHT_TURNOFF_FLAG	        1   N	CHAR(1)	    Y			signal lights-out flag (1: lights out, 0: normal)
+//    SIGLIGHT_BLINK_FLAG	        1   N	CHAR(1)	    Y			signal flashing flag (1: flashing, 0: normal)
+//    CONTRLR_MANUAL_FLAG	        1   N	CHAR(1)	    Y			controller manual flag (1: ON (manual), 0: OFF (automatic))
+//    CONTRLR_TMDIFF_CONTRL_FLAG    1	N	CHAR(1)	    Y			controller time-of-day control flag (time-of-day left turn, 1: running, 0: not running)
+//    MAP_NO	                    1   N	NUMBER(1)	Y			map number (0: normal plan, 1~5: time-of-day plans, 6: dedicated map)
+//    INT_SIG_CYCLE_CNT	            1   N	NUMBER(3)	Y			intersection signal cycle count
+//    INT_SIG_CYCLE_LEN	            1   N	NUMBER(3)	Y			intersection signal cycle length
+//    A_RING_1_PHASE_VAL	        1   N	NUMBER(3)	Y			A-ring phase 1 value
+//    A_RING_2_PHASE_VAL	        1   N	NUMBER(3)	Y			A-ring phase 2 value
+//    A_RING_3_PHASE_VAL	        1   N	NUMBER(3)	Y			A-ring phase 3 value
+//    A_RING_4_PHASE_VAL	        1   N	NUMBER(3)	Y			A-ring phase 4 value
+//    A_RING_5_PHASE_VAL	        1   N	NUMBER(3)	Y			A-ring phase 5 value
+//    A_RING_6_PHASE_VAL	        1   N	NUMBER(3)	Y			A-ring phase 6 value
+//    A_RING_7_PHASE_VAL	        1   N	NUMBER(3)	Y			A-ring phase 7 value
+//    A_RING_8_PHASE_VAL	        1   N	NUMBER(3)	Y			A-ring phase 8 value
+//    B_RING_1_PHASE_VAL	        1   N	NUMBER(3)	Y			B-ring phase 1 value
+//    B_RING_2_PHASE_VAL	        1   N	NUMBER(3)	Y			B-ring phase 2 value
+//    B_RING_3_PHASE_VAL	        1   N	NUMBER(3)	Y			B-ring phase 3 value
+//    B_RING_4_PHASE_VAL	        1   N	NUMBER(3)	Y			B-ring phase 4 value
+//    B_RING_5_PHASE_VAL	        1   N	NUMBER(3)	Y			B-ring phase 5 value
+//    B_RING_6_PHASE_VAL	        1   N	NUMBER(3)	Y			B-ring phase 6 value
+//    B_RING_7_PHASE_VAL	        1   N	NUMBER(3)	Y			B-ring phase 7 value
+//    B_RING_8_PHASE_VAL	        1   N	NUMBER(3)	Y			B-ring phase 8 value
+//    PPC_CONTRL_FLAG	            1   N	CHAR(1)	    Y			PPC control flag (0: PPC disabled, 1: PPC enabled)
+
+    public IntStatusDto(String regionCd, int intNo, long nodeId) {
         this.regionCd = regionCd;
         this.intNo = intNo;
-        initStatus();
+        this.nodeId = nodeId;
+        this.kafkaData = new byte[MAX_KAFKA_DATA_SIZE];
+
+        this.kafkaData[0] = (byte)((nodeId      ) & 0x000000FF);
+        this.kafkaData[1] = (byte)((nodeId >> 8 ) & 0x000000FF);
+        this.kafkaData[2] = (byte)((nodeId >> 16) & 0x000000FF);
+        this.kafkaData[3] = (byte)((nodeId >> 24) & 0x000000FF);
+        this.kafkaData[4] = (byte)0x02;
+
+        initStatus(0);
     }
-    public void initStatus() {
+
+    public void initStatus(long unixTimestamp) {
         this.COMM_ON_OFF_FLAG = "1";            	//	N	CHAR(1)	Y
         this.CONTRLR_OPER_MODE_CD = "0";        	//	N	CHAR(1)	Y
 
-        this.A_RING_PHASE_VAL = 0;				//	N	NUMBER(3)	Y
-        this.B_RING_PHASE_VAL = 0;             //	N	NUMBER(3)	Y
+        this.A_RING_PHASE_VAL = 0;				    //	N	NUMBER(3)	Y
+        this.B_RING_PHASE_VAL = 0;                  //	N	NUMBER(3)	Y
         this.SIGLIGHT_TURNOFF_FLAG = "0";       	//	N	CHAR(1)	Y
         this.SIGLIGHT_BLINK_FLAG = "0";         	//	N	CHAR(1)	Y
         this.CONTRLR_MANUAL_FLAG = "0";         	//	N	CHAR(1)	Y
         this.CONTRLR_TMDIFF_CONTRL_FLAG = "0";  	//	N	CHAR(1)	Y
-        this.MAP_NO             = 0;                       //	N	NUMBER(1)	Y
+        this.MAP_NO             = 0;                //	N	NUMBER(1)	Y
 
         this.INT_SIG_CYCLE_CNT  = 0;           //	N	NUMBER(3)	Y
         this.INT_SIG_CYCLE_LEN  = 0;           //	N	NUMBER(3)	Y
@@ -87,5 +145,34 @@ public class IntStatusDto implements Serializable {
         this.SIMULFLAG          = "N";         //	N	CHAR(1)	Y
         this.GROUP_NO           = 0;           //	N	NUMBER(6)	Y
         this.PPC_CONTRL_FLAG    = "0";         //	N	CHAR(1)	Y
+
+        int idx = 5;
+        this.kafkaData[idx++] = (byte)((unixTimestamp      ) & 0x000000FF);
+        this.kafkaData[idx++] = (byte)((unixTimestamp >> 8 ) & 0x000000FF);
+        this.kafkaData[idx++] = (byte)((unixTimestamp >> 16) & 0x000000FF);
+        this.kafkaData[idx++] = (byte)((unixTimestamp >> 24) & 0x000000FF);
+        for (int ii = idx; ii < MAX_KAFKA_DATA_SIZE; ii++) {
+            this.kafkaData[ii] = (byte)0x00;
+        }
+    }
+
+    public void setOperStatus(int operStts) {
+        this.kafkaData[9] = (byte)(operStts & 0xFF);
+    }
+
+    public void setPhase(int aRingPhase, int aRingStep, int bRingPhase, int bRingStep, int holdPhase) {
+        if (holdPhase > 0) {
+            aRingPhase = holdPhase-1;
+            bRingPhase = holdPhase-1;
+        }
+        aRingPhase = (aRingPhase << 5);
+        bRingPhase = (bRingPhase << 5);
+
+        this.kafkaData[10] = (byte)(aRingPhase | aRingStep);
+        this.kafkaData[11] = (byte)(bRingPhase | bRingStep);
+    }
+    public void setStatus(int status, int holdPhase) {
+        int ppc = holdPhase > 0 ? 1 : 0;
+        this.kafkaData[12] = (byte)(status | ppc);
     }
 }
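
The kafkaData layout is spread across the constructor, initStatus(), and the three setters above. The following consumer-side sketch, reconstructed from those writes, decodes the fields this commit actually populates; the class name and printout are illustrative, not part of the commit:

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

// Hypothetical decoder for the 51-byte record built by IntStatusDto.
public final class SigRecordDecoder {
    public static void dump(byte[] kafkaData) {
        ByteBuffer buf = ByteBuffer.wrap(kafkaData).order(ByteOrder.LITTLE_ENDIAN);
        long nodeId   = buf.getInt(0) & 0xFFFFFFFFL;  // bytes 0-3, little-endian
        int  version  = kafkaData[4] & 0xFF;          // fixed to 0x02 in the constructor
        long unixSecs = buf.getInt(5) & 0xFFFFFFFFL;  // bytes 5-8, written by initStatus()
        int  operStts = kafkaData[9] & 0xFF;          // setOperStatus()
        int  aPhase   = (kafkaData[10] >> 5) & 0x07;  // setPhase(): phase in bits 5-7
        int  aStep    =  kafkaData[10] & 0x1F;        //             step  in bits 0-4
        int  bPhase   = (kafkaData[11] >> 5) & 0x07;
        int  bStep    =  kafkaData[11] & 0x1F;
        int  status   =  kafkaData[12] & 0xFF;        // setStatus(): lcStts | PPC bit
        System.out.printf("node=%d v=%d t=%d oper=%d A=%d/%d B=%d/%d status=0x%02X%n",
                nodeId, version, unixSecs, operStts, aPhase, aStep, bPhase, bStep, status);
    }
}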

+ 1 - 1
src/main/java/com/ggits/comm/server/entity/TbInt.java

@@ -39,7 +39,7 @@ public class TbInt implements Serializable {
                 .mainIntNo(this.mainIntNo)
                 .groupNo(this.groupNo)
                 .nodeId(this.nodeId)
-                .status(new IntStatusDto(this.regionCd, this.intNo))
+                .status(new IntStatusDto(this.regionCd, this.intNo, this.nodeId))
                 .build();
     }
 }
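
The new constructor argument threads TbInt.nodeId into the sig record. A quick illustration with a hypothetical row (values assumed, not from the commit):

// nodeId = 0x01020304 lands in kafkaData bytes 0-3 in little-endian order.
IntStatusDto dto = new IntStatusDto("110", 123, 0x01020304L);
byte[] k = dto.getKafkaData();
// k[0..3] == {0x04, 0x03, 0x02, 0x01}   little-endian nodeId
// k[4]    == 0x02                       record version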

+ 0 - 86
src/main/java/com/ggits/comm/server/kafka/KafkaConsumerService.java

@@ -1,86 +0,0 @@
-package com.ggits.comm.server.kafka;
-
-import com.ggits.comm.server.config.KafkaConfig;
-import com.ggits.comm.server.process.dbms.DbmsDataProcess;
-import lombok.RequiredArgsConstructor;
-import lombok.extern.slf4j.Slf4j;
-import org.apache.kafka.clients.consumer.Consumer;
-import org.apache.kafka.common.TopicPartition;
-import org.springframework.kafka.core.ConsumerFactory;
-import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
-import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
-import org.springframework.kafka.listener.ConsumerAwareRebalanceListener;
-import org.springframework.kafka.listener.ContainerProperties;
-
-import javax.annotation.PostConstruct;
-import java.util.Collection;
-
-@Slf4j
-@RequiredArgsConstructor
-//@Service
-public class KafkaConsumerService {
-
-    private final KafkaConfig config;
-    private final DbmsDataProcess dbmsDataProcess;
-
-    private ConcurrentMessageListenerContainer<String, Long> kafkaListenerContainer;
-
-    @PostConstruct
-    void init() {
-        log.info("[{}] ------------------", this.getClass().getSimpleName());
-        start();
-    }
-
-    public void start() {
-
-        if (this.kafkaListenerContainer != null) {
-            if (!this.kafkaListenerContainer.isRunning()) {
-                log.warn("kafkaListenerContainer restart");
-                this.kafkaListenerContainer.start();
-            }
-            return;
-        }
-
-        ContainerProperties containerProperties = new ContainerProperties(this.config.getPingTopic());
-        containerProperties.setGroupId(this.config.getGroupId());
-        containerProperties.setPollTimeout(5000);
-        //containerProperties.setAckMode(ContainerProperties.AckMode.MANUAL);
-        containerProperties.setMessageListener(new TsiKafkaConsumerWorker(this.dbmsDataProcess));
-        containerProperties.setConsumerRebalanceListener(new ConsumerAwareRebalanceListener() {
-            @Override
-            public void onPartitionsRevokedBeforeCommit(Consumer<?, ?> consumer, Collection<TopicPartition> partitions) {
-            }
-            @Override
-            public void onPartitionsRevokedAfterCommit(Consumer<?, ?> consumer, Collection<TopicPartition> partitions) {
-            }
-            @Override
-            public void onPartitionsAssigned(Consumer<?, ?> consumer, Collection<TopicPartition> partitions) {
-                consumer.seekToEnd(partitions);
-            }
-        });
-
-        ConsumerFactory<String, Long> consumerFactory = new DefaultKafkaConsumerFactory<>(this.config.getConsumerPropertiesMap());
-        this.kafkaListenerContainer = new ConcurrentMessageListenerContainer<>(consumerFactory, containerProperties);
-        this.kafkaListenerContainer.setBeanName("consumer");
-        this.kafkaListenerContainer.setConcurrency(1);
-        this.kafkaListenerContainer.setErrorHandler((thrownException, data) -> {
-            log.error("kafkaListenerContainer error: {}", thrownException.getMessage());
-            this.kafkaListenerContainer.stop();
-        });
-
-        this.kafkaListenerContainer.start();
-    }
-
-    public void shutdown() {
-        try {
-            //if (this.consumer != null) {
-            //    this.consumer.close();
-            //}
-            if (this.kafkaListenerContainer != null) {
-                this.kafkaListenerContainer.stop();
-            }
-        }
-        catch(Exception ignored) {
-        }
-    }
-}

+ 18 - 111
src/main/java/com/ggits/comm/server/kafka/KafkaProducerService.java

@@ -1,24 +1,13 @@
 package com.ggits.comm.server.kafka;
 
 import com.ggits.app.common.kafka.KafkaProducerFactory;
-import com.ggits.app.common.utils.TimeUtils;
 import com.ggits.comm.server.config.KafkaConfig;
-import com.ggits.comm.server.process.dbms.DbmsDataProcess;
 import lombok.AllArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
-import org.apache.kafka.clients.producer.Callback;
-import org.apache.kafka.clients.producer.ProducerConfig;
-import org.apache.kafka.clients.producer.RecordMetadata;
 import org.springframework.kafka.core.KafkaTemplate;
-import org.springframework.kafka.support.SendResult;
 import org.springframework.stereotype.Service;
-import org.springframework.util.concurrent.ListenableFuture;
-import org.springframework.util.concurrent.ListenableFutureCallback;
 
 import javax.annotation.PostConstruct;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
 
 @Slf4j
 @AllArgsConstructor
@@ -26,113 +15,53 @@ import java.util.concurrent.TimeUnit;
 public class KafkaProducerService {
 
     private final KafkaConfig config;
-    private final DbmsDataProcess dbmsDataProcess;
 
+    private KafkaTemplate<String, byte[]> sigProducer;
     private KafkaTemplate<String, byte[]> nodeProducer;
-    private KafkaTemplate<String, Long> pingProducer;
 
 
     @PostConstruct
     void init() {
         //this.callback = new ProducerResultCallback();
 
-        if (this.config.isMultiConnect()) {
-            // Create and use a separate KafkaTemplate for each producer.
-            if (this.config.isEnableNode()) {
-                this.nodeProducer = KafkaProducerFactory.createByteArrayTemplate(this.config.getNodeServers(), this.config.props);
-            }
+        // Use the same KafkaTemplate for every producer.
+        KafkaTemplate<String, byte[]> producer = KafkaProducerFactory.createByteArrayTemplate(this.config.getBootstrapServers(), this.config.props);
+        if (this.config.isEnableNode()) {
+            this.nodeProducer = producer;
         }
-        else {
-            // When sending data through a single producer KafkaTemplate,
-            // use the same KafkaTemplate for all of them.
-            KafkaTemplate<String, byte[]> producer = KafkaProducerFactory.createByteArrayTemplate(this.config.getBootstrapServers(), this.config.props);
-            if (this.config.isEnableNode()) {
-                this.nodeProducer = producer;
-            }
+        if (this.config.isEnableSig()) {
+            this.sigProducer = producer;
         }
 
-        createPingProducer();
-
         log.info("[{}] ------------------", this.getClass().getSimpleName());
-        log.info("[{}]   nodeProducer: {}", this.getClass().getSimpleName(), this.nodeProducer);
-        log.info("[{}]   pingProducer: {}", this.getClass().getSimpleName(), this.pingProducer);
+        log.info("[{}]   nodeProducer: {}, {}", this.getClass().getSimpleName(), this.config.isEnableNode(), this.nodeProducer);
+        log.info("[{}]    sigProducer: {}, {}", this.getClass().getSimpleName(), this.config.isEnableSig(), this.sigProducer);
 
         //this.producer = new KafkaProducer<String, byte[]>(KafkaProducerFactory.getProperties(this.config.getBootstrapServers(), this.config.props));
     }
 
-    public void createPingProducer() {
-        Map<String, Object> props = new HashMap<>();
-        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, this.config.getBootstrapServers());
-        props.put(ProducerConfig.ACKS_CONFIG, this.config.getConsumerAckConfig());
-        props.put(ProducerConfig.RETRIES_CONFIG, 0);
-        props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
-        props.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, 3000);
-        props.put(ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG, 4000);
-        props.put(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, 3000);
-        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, org.apache.kafka.common.serialization.StringSerializer.class);
-        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, org.apache.kafka.common.serialization.LongSerializer.class);
-
-        this.pingProducer = KafkaProducerFactory.createProducerTemplate(props);
-        this.pingProducer.setDefaultTopic(this.config.getPingTopic());
-    }
     public void shutdown() {
         try {
             if (this.nodeProducer != null) {
                 this.nodeProducer.destroy();
             }
-            if (this.pingProducer != null) {
-                this.pingProducer.destroy();
+            if (this.sigProducer != null) {
+                this.sigProducer.destroy();
             }
         }
         catch(Exception e) {
             log.error("Failed to shutdown: {}", e.getMessage());
         }
     }
-    public void sendPing() {
-        if (this.pingProducer == null ) {
-            log.info("sendPing: pingProducer == null");
-            return;
-        }
-
-        long sendNanoTime = System.nanoTime();
-//        TsiTpmsManager.getInstance().getKafkaTransVo().setSendNanoTime(sendNanoTime);   // nano seconds
-//        TsiTpmsManager.getInstance().getKafkaTransVo().setSendTm(0);                    // micro seconds
-//        TsiTpmsManager.getInstance().getKafkaTransVo().setRecvTm(0);                    // micro seconds
-
-        ListenableFuture<SendResult<String, Long>> future =  this.pingProducer.sendDefault("key", sendNanoTime);
-        future.addCallback(new ListenableFutureCallback<SendResult<String, Long>>() {
-
-            @Override
-            public void onSuccess(SendResult<String, Long> result) {
-                long recvNanoTime = System.nanoTime();
-                long sendTime = TimeUnit.MICROSECONDS.convert(Math.abs(recvNanoTime - sendNanoTime), TimeUnit.NANOSECONDS);
-//                TsiTpmsManager.getInstance().getKafkaTransVo().setSendTm(sendTime);
-                log.info("send ping success: {}, {}", sendNanoTime, TimeUtils.elapsedTimeStr(recvNanoTime - sendNanoTime));
+    public void sendSig(String key, byte[] data) {
+        if (this.sigProducer != null) {
+            try {
+                this.sigProducer.send(KafkaConfig.SIG_ALL_TOPIC, key, data);
             }
-            @Override
-            public void onFailure(Throwable ex) {
-                long recvNanoTime = System.nanoTime();
-//                TsiTpmsManager.getInstance().getKafkaTransVo().setSendNanoTime(0);
-//                KafkaTransVo stat = new KafkaTransVo(AbstractDbmsVo.DBMS_KAFKA_TRANS_HS);
-//                stat.setHostName(TsiTpmsManager.getInstance().getKafkaTransVo().getHostName());
-//                stat.setStatus(0);
-//                stat.setSendTm(TimeUnit.MICROSECONDS.convert(Math.abs(recvNanoTime - sendNanoTime), TimeUnit.NANOSECONDS));
-//                stat.setRecvTm(0);
-//                tsiCvimDbmsService.add(stat, (int)Thread.currentThread().getId());
-//                log.error("send ping failed: {}, {}, {}", sendNanoTime, TimeUtils.elapsedTimeStr(recvNanoTime - sendNanoTime), ex.getMessage());
-//
-//                // Save a Kafka send-failure alarm
-//                String value = "Send Failed";
-//                if (ex != null) {
-//                    value = ex.getMessage().substring(0, 99);
-//                }
-//                AlarmOccrVo alarm = new AlarmOccrVo(AbstractDbmsVo.DBMS_ALARM_OCCR_HS);
-//                alarm.setAlarmCode(TsiAlarmConfigVo.KAFKA_01);
-//                alarm.setAlarmTarget(producerConfig.getBootstrapServers());
-//                alarm.setAlarmValue(value);
-//                tsiCvimDbmsService.add(alarm, (int)Thread.currentThread().getId());
+            catch (Exception e) {
+                log.error("sendSig: {}, {}: {}", KafkaConfig.SIG_ALL_TOPIC, key, e.toString());
             }
-        });
+        }
     }
 
     public void sendNode(String key, byte[] data) {
@@ -145,26 +74,4 @@ public class KafkaProducerService {
             }
         }
     }
-
-    protected void send(KafkaTemplate<String, byte[]> kafka, String topic, String key, byte[] data) {
-        try {
-            kafka.send(topic, key, data);
-        }
-        catch(Exception e) {
-            log.error("kafka.send: {}, Exception: {}", topic, e.getMessage());
-        }
-    }
-
-    private static class ProducerResultCallback implements Callback {
-        @Override
-        public void onCompletion(RecordMetadata recordMetadata, Exception e) {
-            if (e != null) {
-                log.error("Error while producing message to topic: {}, {}", recordMetadata, e.toString());
-            }
-            else {
-                String message = String.format("sent message to topic:%s partition:%s  offset:%s", recordMetadata.topic(), recordMetadata.partition(), recordMetadata.offset());
-                System.out.println(message);
-            }
-        }
-    }
 }
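
KafkaProducerFactory.createByteArrayTemplate() is an in-house helper that lives outside this diff. A minimal sketch of what it plausibly does with stock spring-kafka, assuming the props list carries raw producer overrides as single-entry maps (as in the commented application.yml entries below); the real factory may also set acks, timeouts, etc.:

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;

// Sketch only; not the implementation shipped with this commit.
public final class KafkaProducerFactorySketch {
    public static KafkaTemplate<String, byte[]> createByteArrayTemplate(
            String bootstrapServers, List<Map<String, String>> props) {
        Map<String, Object> cfg = new HashMap<>();
        cfg.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        cfg.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        cfg.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
        if (props != null) {
            props.forEach(cfg::putAll);  // pass-through overrides from application.yml
        }
        return new KafkaTemplate<>(new DefaultKafkaProducerFactory<>(cfg));
    }
}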

+ 0 - 50
src/main/java/com/ggits/comm/server/kafka/TsiKafkaConsumerWorker.java

@@ -1,50 +0,0 @@
-package com.ggits.comm.server.kafka;
-
-import com.ggits.app.common.utils.TimeUtils;
-import com.ggits.comm.server.process.dbms.DbmsDataProcess;
-import lombok.AllArgsConstructor;
-import lombok.extern.slf4j.Slf4j;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.springframework.kafka.listener.MessageListener;
-import org.springframework.kafka.support.Acknowledgment;
-
-@Slf4j
-@AllArgsConstructor
-public class TsiKafkaConsumerWorker implements MessageListener<String, Long> {
-
-    private DbmsDataProcess dbmsDataProcess;
-
-    @Override
-    public void onMessage(ConsumerRecord<String, Long> record) {
-        Long sendNanoTime = record.value();
-        Long recvNanoTime = System.nanoTime();
-
-//        KafkaTransVo stat = new KafkaTransVo(AbstractDbmsVo.DBMS_KAFKA_TRANS_HS);
-//        stat.setHostName(TsiTpmsManager.getInstance().getKafkaTransVo().getHostName());
-//        stat.setStatus(1);
-//        if (TsiTpmsManager.getInstance().getKafkaTransVo().getSendNanoTime() == sendNanoTime) {
-//            stat.setSendTm(TsiTpmsManager.getInstance().getKafkaTransVo().getSendTm());
-//            stat.setRecvTm(TimeUnit.MICROSECONDS.convert(Math.abs(recvNanoTime - sendNanoTime), TimeUnit.NANOSECONDS));
-//        }
-//        else {
-//            stat.setRecvTm(TimeUnit.MICROSECONDS.convert(Math.abs(recvNanoTime - sendNanoTime), TimeUnit.NANOSECONDS));
-//            stat.setSendTm(stat.getRecvTm());
-//            log.info("recv ping success, sendNanoTime miss match: {}, {}", sendNanoTime, TimeUtils.elapsedTimeStr(recvNanoTime - sendNanoTime));
-//        }
-//        dbmsDataProcess.add(stat, (int)Thread.currentThread().getId());
-//        log.info("recv ping success: {}, {}", sendNanoTime, TimeUtils.elapsedTimeStr(recvNanoTime - sendNanoTime));
-    }
-
-    @Override
-    public void onMessage(ConsumerRecord<String, Long> record, Acknowledgment acknowledgment) {
-        try {
-            Long sendNanoTime = record.value();
-            Long recvNanoTime = System.nanoTime();
-            log.info("recv ping success, ack: {}, {}", sendNanoTime, TimeUtils.elapsedTimeStr(recvNanoTime - sendNanoTime));
-            //acknowledgment.acknowledge();
-        } catch (Exception e) {
-            log.error("onMessage:" + e.getMessage());
-        }
-    }
-
-}

+ 18 - 18
src/main/java/com/ggits/comm/server/process/work/GgitsPacketWorker.java

@@ -95,7 +95,7 @@ public class GgitsPacketWorker extends AbstractAppWorker implements Runnable {
             short hour  = (short)(data.buffer[idx++] & 0xFF);
             short min   = (short)(data.buffer[idx++] & 0xFF);
             short sec   = (short)(data.buffer[idx++] & 0xFF);
-            String COLLCT_DTIME = String.format("%4d%02d%02d%02d%02d%02d", year+2000, month, day, hour, min, sec);
+            String collctDtime = String.format("%4d%02d%02d%02d%02d%02d", year+2000, month, day, hour, min, sec);
 
             int sequence   = ((data.buffer[idx++] & 0xFF) << 8) | (data.buffer[idx++] & 0xFF);
             int regionId   = ((data.buffer[idx++] & 0xFF) << 8) | (data.buffer[idx++] & 0xFF);
@@ -152,6 +152,8 @@ public class GgitsPacketWorker extends AbstractAppWorker implements Runnable {
             int holdPhase;      // phase number (1-8) held by a PHASE HOLD command or fixed by PPC
             int omitPhase;      // phase number (1-8) given a PHASE OMIT command or being skipped by actuated control
 
+            long unixTimestamp = System.currentTimeMillis() / 1000L; // convert to seconds
+
             for (int ii = 0; ii < count; ii++) {
                 intLcNo = ((data.buffer[idx++] & 0xFF) << 24) |
                           ((data.buffer[idx++] & 0xFF) << 16) |
@@ -196,17 +198,6 @@ public class GgitsPacketWorker extends AbstractAppWorker implements Runnable {
                 blink         = (lcStts >> 1) & 0x01;
                 dbError       = (lcStts     ) & 0x01;
 
-//import java.util.Date;
-//// Compute the Unix time
-//Date date = new Date(2023 - 1900, 1, 15, 0, 0, 0);
-//long unixTime = date.getTime() / 1000L;
-//// Pack the value into a 4-byte array
-//byte[] currTime = new byte[4];
-//currTime[0] = (byte) (unixTime & 0xFF);
-//currTime[1] = (byte) ((unixTime >> 8) & 0xFF);
-//currTime[2] = (byte) ((unixTime >> 16) & 0xFF);
-//currTime[3] = (byte) ((unixTime >> 24) & 0xFF);
-
                 String ppcControl = "0";
                 String manualFlag = "0";
                 String turnOffFlag = "0";
@@ -214,9 +205,12 @@ public class GgitsPacketWorker extends AbstractAppWorker implements Runnable {
 
                 // Gyeonggi-do intersection status (0: comm error, 1: normal, 2: flashing, 3: lights out, 4: manual operation, 5: phase hold)
                 IntStatusDto status = intDto.getStatus();
-                status.initStatus();
+                status.initStatus(unixTimestamp);
+
+                status.COLLCT_DTIME = collctDtime;
+                status.SYS_COLLCT_DTIME = collctDtime;
 
-                if (lcStts != 0) {
+                if (operStts != 0) {
                     if (holdPhase > 0) {
                         aRingPhase = holdPhase-1;
                         bRingPhase = holdPhase-1;
@@ -232,7 +226,6 @@ public class GgitsPacketWorker extends AbstractAppWorker implements Runnable {
                         blinkFlag = "1";
                     }
 
-                    status.COLLCT_DTIME = COLLCT_DTIME;
                     status.COMM_ON_OFF_FLAG = String.valueOf(centerComm);       // communication ON/OFF flag (0: normal, 1: comm FAIL)
                     status.CONTRLR_OPER_MODE_CD = String.valueOf(operMode);     // controller operating mode code (0 : SCU fixed-cycle mode
                                                                                 //  1 : non-actuated OFFLINE control mode
@@ -246,17 +239,24 @@ public class GgitsPacketWorker extends AbstractAppWorker implements Runnable {
                     status.SIGLIGHT_BLINK_FLAG = blinkFlag;                     // signal flashing flag (1: flashing, 0: normal)
                     status.CONTRLR_MANUAL_FLAG = manualFlag;                    // controller manual flag (1: ON (manual), 0: OFF (automatic))
                     status.PPC_CONTRL_FLAG = ppcControl;                        // PPC control flag (0: PPC disabled, 1: PPC enabled)
+
+                    if (intDto.getNodeId() > 0) {
+                        status.setOperStatus(operStts);
+                        status.setPhase(aRingCode, aRingStep, bRingCode, bRingStep, holdPhase);
+                        status.setStatus(lcStts, holdPhase);
+                        this.kafkaProducer.sendNode(Long.toString(intDto.getNodeId()), status.getKafkaData());
+                        this.kafkaProducer.sendSig(Long.toString(intDto.getNodeId()), status.getKafkaData());
+                    }
                 }
+
                 statusLists.add(status);
             }
             if (statusLists.isEmpty()) {
-                log.warn("WorkDataProcess.process: [{}], Int Status Data Empty: {}, {} ms.", data.getLocalPort(), COLLCT_DTIME, System.currentTimeMillis() - popTimestamp);
+                log.warn("WorkDataProcess.process: [{}], Int Status Data Empty: {}, {} ms.", data.getLocalPort(), collctDtime, System.currentTimeMillis() - popTimestamp);
                 return;
             }
-//            log.info("WorkDataProcess.process: [{}], {} EA. {} Bytes. Parse {} ms.", data.getLocalPort(), count, data.getBuffer().length, System.currentTimeMillis() - parsetime);
             this.dbmsDataProcess.add(new DbmsData(data.getCenter(), this.idx, data.getTimestamp(), popTimestamp, System.currentTimeMillis(),
                     center.getRegionCd(), center.getRegionId(), DbmsData.DBMS_DATA_INT_STATUS_UPDATE, false, statusLists));
-//            log.info("WorkDataProcess.process: [{}], {} EA. {} Bytes. {} ms.[Q Added]", data.getLocalPort(), count, data.getBuffer().length, System.currentTimeMillis() - timestamp);
         }
         catch (Exception e) {
             log.error("WorkDataProcess.process: Exception: [{}] {}", data.getLocalPort(), e.toString());

+ 2 - 4
src/main/resources/application.yml

@@ -49,10 +49,8 @@ application:
     bootstrap-servers: 192.168.11.23:9092
     group-id: ggits-comm-server
     consumer-ack-config: 1
-    ping-topic: ping-topic
-    multi-connect: false
-    node-servers:
-    enable-node: false
+    enable-node: true
+    enable-sig: true
     props:
     #  - request.timeout.ms: 100
     #  - max.block.ms: 100