
apply cluster message data to interface class

HANTE 1 month ago
parent
commit
630ed94c71

+ 43 - 0
its-cluster/src/main/java/com/its/common/cluster/codec/HaClusterMessageDecoder.java

@@ -0,0 +1,43 @@
+package com.its.common.cluster.codec;
+
+import com.its.common.cluster.vo.HaClusterMessage;
+import io.netty.buffer.ByteBuf;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.handler.codec.MessageToMessageDecoder;
+import lombok.extern.slf4j.Slf4j;
+
+import java.io.ByteArrayInputStream;
+import java.io.ObjectInputStream;
+import java.util.List;
+
+@Slf4j
+public class HaClusterMessageDecoder extends MessageToMessageDecoder<ByteBuf> {
+
+    private final boolean isLogging;
+
+    public HaClusterMessageDecoder(boolean isLogging) {
+        this.isLogging = isLogging;
+    }
+
+    @Override
+    protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception {
+        // Read the packet length (4 bytes) separately
+        int packetLength = in.readInt();
+        if (this.isLogging) {
+            log.info("HaClusterMessageDecoder.decode: packetLength: {}", packetLength);
+        }
+
+        // Read the remaining data
+        byte[] bytes = new byte[in.readableBytes()];
+        in.readBytes(bytes);
+
+        ByteArrayInputStream bis = new ByteArrayInputStream(bytes);
+        ObjectInputStream ois = new ObjectInputStream(bis);
+        HaClusterMessage msg = (HaClusterMessage) ois.readObject();
+
+        out.add(msg);
+
+        ois.close();
+        bis.close();
+    }
+}
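A minimal round-trip sketch of the new codec pair (not part of this commit; assumes the pipeline order used in the initializers below). LengthFieldBasedFrameDecoder(8192, 0, 4) does not strip the length field by default, so the object decoder's readInt() consumes it before deserializing.

import com.its.common.cluster.codec.HaClusterMessageDecoder;
import com.its.common.cluster.codec.HaClusterMessageEncoder;
import com.its.common.cluster.vo.HaClusterMessage;
import io.netty.buffer.ByteBuf;
import io.netty.channel.embedded.EmbeddedChannel;
import io.netty.handler.codec.LengthFieldBasedFrameDecoder;
import java.util.Collections;

public class HaClusterCodecRoundTrip {
    public static void main(String[] args) {
        // Same inbound order as the initializers: frame decoder first, then object decoder.
        EmbeddedChannel ch = new EmbeddedChannel(
                new LengthFieldBasedFrameDecoder(8192, 0, 4),
                new HaClusterMessageDecoder(true),
                new HaClusterMessageEncoder(true));

        HaClusterMessage msg = HaClusterMessage.builder()
                .serverId(1)
                .master(true)
                .serverTime("2024-01-01 12:00:00") // arbitrary value; the real format comes from getSysTime()
                .infos(Collections.emptyList())
                .build();

        ch.writeOutbound(msg);            // encoder: 4-byte length prefix + serialized object
        ByteBuf encoded = ch.readOutbound();

        ch.writeInbound(encoded);         // frame decoder keeps the prefix, object decoder re-reads it
        HaClusterMessage decoded = ch.readInbound();
        System.out.println(decoded.getServerId() == msg.getServerId()); // true
    }
}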

+ 11 - 2
its-cluster/src/main/java/com/its/common/cluster/utils/HaClusterMessageEncoder.java → its-cluster/src/main/java/com/its/common/cluster/codec/HaClusterMessageEncoder.java

@@ -1,5 +1,6 @@
-package com.its.common.cluster.utils;
+package com.its.common.cluster.codec;
 
+import com.its.common.cluster.vo.HaClusterMessage;
 import io.netty.buffer.ByteBuf;
 import io.netty.channel.ChannelHandlerContext;
 import io.netty.handler.codec.MessageToByteEncoder;
@@ -9,6 +10,12 @@ import java.io.ObjectOutputStream;
 
 public class HaClusterMessageEncoder extends MessageToByteEncoder<HaClusterMessage> {
 
+    private final boolean isLogging;
+
+    public HaClusterMessageEncoder(boolean isLogging) {
+        this.isLogging = isLogging;
+    }
+
     @Override
     protected void encode(ChannelHandlerContext ctx, HaClusterMessage msg, ByteBuf out) throws Exception {
         ByteArrayOutputStream bos = new ByteArrayOutputStream();
@@ -16,8 +23,10 @@ public class HaClusterMessageEncoder extends MessageToByteEncoder<HaClusterMessa
         oos.writeObject(msg);
         oos.flush();
         byte[] bytes = bos.toByteArray();
-        out.writeInt(bytes.length); // send including the data length
+
+        out.writeInt(bytes.length); // add the length field
         out.writeBytes(bytes);
+
         oos.close();
         bos.close();
     }

+ 39 - 25
its-cluster/src/main/java/com/its/common/cluster/master/HaClusterMasterHandler.java → its-cluster/src/main/java/com/its/common/cluster/handler/HaClusterMasterHandler.java

@@ -1,6 +1,7 @@
-package com.its.common.cluster.master;
+package com.its.common.cluster.handler;
 
-import com.its.common.cluster.utils.HaClusterMessage;
+import com.its.common.cluster.service.AbstractHaClusterMasterService;
+import com.its.common.cluster.vo.HaClusterMessage;
 import com.its.common.cluster.utils.HaUtils;
 import com.its.common.cluster.vo.AbstractHaClusterConfig;
 import com.its.common.cluster.vo.HaInfo;
@@ -24,20 +25,29 @@ public class HaClusterMasterHandler extends ChannelInboundHandlerAdapter {
     public void channelRead(ChannelHandlerContext ctx, Object msg) {
         if (msg instanceof HaClusterMessage) {
             HaClusterMessage clusterMsg = (HaClusterMessage) msg;
-            log.info("ClusterMasterHandler.channelRead: [{}], {}, [FROM: serverId: {}, serverTime: {}, infos: {}]",
-                    this.clusterConfig.getServerId(), HaUtils.getTcpAddress(ctx.channel()),
-                    clusterMsg.getServerId(), clusterMsg.getServerTime(), clusterMsg.getInfos().size());
-
             HaInfo cluster = ctx.channel().attr(AbstractHaClusterConfig.CLUSTER_ATTRIBUTE_KEY).get();
             if (cluster == null) {
-                log.error("RECV: [{}]. Not Found Channel Cluster Object... Oops Will be closed.", HaUtils.getAddress(ctx.channel()));
+                log.error("HaClusterMasterHandler.channelRead: [{}], {}, [FROM: serverId: {}, master: {}, serverTime: {}], Not Found Channel Cluster Object. Will be closed.",
+                        this.clusterConfig.getServerId(), HaUtils.getTcpAddress(ctx.channel()),
+                        clusterMsg.getServerId(), clusterMsg.isMaster(), clusterMsg.getServerTime());
+
                 closeChannel(ctx.channel());
                 return;
             }
 
+            if (this.clusterConfig.isLogging()) {
+                MDC.put("id", cluster.getLogKey());
+
+                log.info("HaClusterMasterHandler.channelRead: [{}], {}, [FROM: serverId: {}, master: {}, serverTime: {}]",
+                        this.clusterConfig.getServerId(), HaUtils.getTcpAddress(ctx.channel()),
+                        clusterMsg.getServerId(), clusterMsg.isMaster(), clusterMsg.getServerTime());
+
+                MDC.remove(cluster.getLogKey());
+                MDC.clear();
+            }
+
             cluster.getElectionState().setLastRecvTime();
             this.masterService.onClusterMessage(clusterMsg);
-//            ctx.writeAndFlush(clusterMsg);
         }
     }
 
@@ -45,21 +55,23 @@ public class HaClusterMasterHandler extends ChannelInboundHandlerAdapter {
     public void channelInactive(ChannelHandlerContext ctx) throws Exception {
         HaInfo cluster = ctx.channel().attr(AbstractHaClusterConfig.CLUSTER_ATTRIBUTE_KEY).get();
         if (cluster == null) {
-            log.error("{}.++channelInactive: Unknown Cluster: {}.", this.getClass().getSimpleName(), HaUtils.getAddress(ctx.channel()));
+            log.error("HaClusterMasterHandler.channelInactive: Unknown Cluster: {}.", HaUtils.getAddress(ctx.channel()));
             return;
         }
-        try {
+
+        if (this.clusterConfig.isLogging()) {
             MDC.put("id", cluster.getLogKey());
-            log.info("{}.++channelInactive: [{}, {}].", this.getClass().getSimpleName(), cluster.getServerId(), cluster.getIpAddress());
-            cluster.getElectionState().disConnect();
 
-            ctx.channel().attr(AbstractHaClusterConfig.CLUSTER_ATTRIBUTE_KEY).set(null);
-            ctx.fireChannelInactive();
-        }
-        finally {
+            log.info("HaClusterMasterHandler.channelInactive: [{}, {}].", cluster.getServerId(), cluster.getIpAddress());
+
             MDC.remove(cluster.getLogKey());
             MDC.clear();
         }
+
+        cluster.getElectionState().disConnect();
+
+        ctx.channel().attr(AbstractHaClusterConfig.CLUSTER_ATTRIBUTE_KEY).set(null);
+        ctx.fireChannelInactive();
     }
 
     @Override
@@ -67,27 +79,29 @@ public class HaClusterMasterHandler extends ChannelInboundHandlerAdapter {
         if (e instanceof IdleStateEvent) {
             HaInfo cluster = ctx.channel().attr(AbstractHaClusterConfig.CLUSTER_ATTRIBUTE_KEY).get();
             if (cluster == null) {
-                log.error("{}.userEventTriggered: Unknown Cluster: {}.", this.getClass().getSimpleName(), HaUtils.getAddress(ctx.channel()));
+                log.error("HaClusterMasterHandler.userEventTriggered: Unknown Cluster: {}.", HaUtils.getAddress(ctx.channel()));
                 return;
             }
 
             IdleStateEvent evt = (IdleStateEvent) e;
 
-            MDC.put("id", cluster.getLogKey());
-            log.info("{}.++userEventTriggered: {}. {}", this.getClass().getSimpleName(), HaUtils.getAddress(ctx.channel()), evt);
+            if (this.clusterConfig.isLogging()) {
+                MDC.put("id", cluster.getLogKey());
+                log.info("HaClusterMasterHandler.userEventTriggered: {}. {}", HaUtils.getAddress(ctx.channel()), evt);
+                MDC.remove(cluster.getLogKey());
+                MDC.clear();
+            }
 
             if (evt.state() == IdleState.READER_IDLE) {
                 long recvTimeout = System.currentTimeMillis() - cluster.getElectionState().getLastRecvTime();
                 long heartbeatTimeout = this.clusterConfig.getSyncSeconds() * 1000L * 3;
                 if (recvTimeout > heartbeatTimeout) {
-                    log.info("{}.++userEventTriggered: {}. [{}, {}]. Heartbeat timeout, {}, {} ms. Will be closed.",
-                            this.getClass().getSimpleName(), HaUtils.getAddress(ctx.channel()),
-                            cluster.getLogKey(), cluster.getIpAddress(), recvTimeout, heartbeatTimeout);
+                    log.warn("HaClusterMasterHandler.userEventTriggered: {}. [{}, {}]. Heartbeat timeout, {}, {} ms. Will be closed.",
+                            HaUtils.getAddress(ctx.channel()), cluster.getLogKey(), cluster.getIpAddress(), recvTimeout, heartbeatTimeout);
+
                     closeChannel(ctx.channel());
                 }
             }
-            MDC.remove(cluster.getLogKey());
-            MDC.clear();
         }
         ctx.fireUserEventTriggered(e);
     }
@@ -101,7 +115,7 @@ public class HaClusterMasterHandler extends ChannelInboundHandlerAdapter {
             }
         }
         catch (Exception e) {
-            log.error("ApplicationRepository.closeChannel Exception: {}", e.getMessage());
+            log.error("HaClusterMasterHandler.closeChannel: Exception: {}", e.getMessage());
         }
     }
 

+ 3 - 2
its-cluster/src/main/java/com/its/common/cluster/slave/HaClusterSlaveHandler.java → its-cluster/src/main/java/com/its/common/cluster/handler/HaClusterSlaveHandler.java

@@ -1,5 +1,6 @@
-package com.its.common.cluster.slave;
+package com.its.common.cluster.handler;
 
+import com.its.common.cluster.service.AbstractHaClusterSlaveService;
 import com.its.common.cluster.vo.HaInfo;
 import io.netty.channel.ChannelHandlerContext;
 import io.netty.channel.ChannelInboundHandlerAdapter;
@@ -19,7 +20,7 @@ public class HaClusterSlaveHandler extends ChannelInboundHandlerAdapter {
 
     @Override
     public void channelActive(final ChannelHandlerContext ctx) {
-        this.slaveService.sendSyncData(this.cluster, ctx.channel(), null);
+        this.slaveService.sendSyncData(this.cluster, ctx.channel());
     }
 
 //    @Override

+ 4 - 4
its-cluster/src/main/java/com/its/common/cluster/master/AbstractHaClusterMasterService.java → its-cluster/src/main/java/com/its/common/cluster/service/AbstractHaClusterMasterService.java

@@ -1,6 +1,6 @@
-package com.its.common.cluster.master;
+package com.its.common.cluster.service;
 
-import com.its.common.cluster.utils.HaClusterMessage;
+import com.its.common.cluster.vo.HaClusterMessage;
 import com.its.common.cluster.utils.HaPlatform;
 import com.its.common.cluster.utils.HaUtils;
 import com.its.common.cluster.vo.AbstractHaClusterConfig;
@@ -103,7 +103,7 @@ public abstract class AbstractHaClusterMasterService {
         return serverBootstrap;
     }
 
-    public abstract void election(boolean isMaster);
+    public abstract void election(int serverId, boolean isMaster);
     public abstract void onClusterMessage(HaClusterMessage message);
 
     private void electionMasterSchedule() {
@@ -127,7 +127,7 @@ public abstract class AbstractHaClusterMasterService {
             log.info("ClusterMasterService:electionMasterSchedule: serverId: {}, Master: {}.",
                     this.clusterConfig.getServerId(), this.clusterConfig.isMaster());
 
-            election(this.clusterConfig.isMaster());
+            election(this.clusterConfig.getServerId(), this.clusterConfig.isMaster());
 
         }, 2 * 1000L);
     }

+ 12 - 19
its-cluster/src/main/java/com/its/common/cluster/slave/AbstractHaClusterSlaveService.java → its-cluster/src/main/java/com/its/common/cluster/service/AbstractHaClusterSlaveService.java

@@ -1,12 +1,7 @@
-package com.its.common.cluster.slave;
+package com.its.common.cluster.service;
 
-import com.its.common.cluster.utils.HaClusterMessage;
-import com.its.common.cluster.utils.AbstractHaClusterMessageData;
-import com.its.common.cluster.utils.HaClusterSlaveBootstrapFactory;
+import com.its.common.cluster.vo.*;
 import com.its.common.cluster.utils.HaUtils;
-import com.its.common.cluster.vo.AbstractHaClusterConfig;
-import com.its.common.cluster.vo.HaInfo;
-import com.its.common.cluster.vo.HaNET;
 import io.netty.channel.Channel;
 import io.netty.channel.ChannelFuture;
 import lombok.RequiredArgsConstructor;
@@ -109,11 +104,11 @@ public abstract class AbstractHaClusterSlaveService {
         return sdfDate.format(dtLog);
     }
 
-    public abstract List<AbstractHaClusterMessageData> getClusterMessageData();
+    public abstract List<HaClusterMessageData> getClusterMessageData();
 
     private HaClusterMessage getClusterMessage() {
 //        List<HaClusterMessageData> details = new ArrayList<>();
-        List<AbstractHaClusterMessageData> details = getClusterMessageData();
+        List<HaClusterMessageData> details = getClusterMessageData();
 //        List<String> keySet = new ArrayList<>(ApplicationRepository.CENTER_MAP.keySet());
 //        Collections.sort(keySet);
 //        for (String key : keySet) {
@@ -125,6 +120,7 @@ public abstract class AbstractHaClusterSlaveService {
 //        }
         return HaClusterMessage.builder()
                 .serverId(this.clusterConfig.getServerId())
+                .master(this.clusterConfig.isMaster())
                 .serverTime(getSysTime())
                 .infos(details)
                 .build();
@@ -133,14 +129,13 @@ public abstract class AbstractHaClusterSlaveService {
     private void dataSyncSchedule() {
         log.info("ClusterSlaveService:dataSyncSchedule: {} seconds.", this.clusterConfig.getSyncSeconds());
         this.taskFuture = this.taskScheduler.scheduleAtFixedRate(() -> {
-            HaClusterMessage clusterMsg = getClusterMessage();
             for (Map.Entry<Integer, HaInfo> entry : this.clusterConfig.getClusterMap().entrySet()) {
                 HaInfo cluster = entry.getValue();
                 if (cluster.getServerId() == this.clusterConfig.getServerId()) {
                     continue;
                 }
                 if (cluster.getSyncState().getState() != HaNET.CLOSED) {
-                    sendSyncData(cluster, cluster.getSyncState().getChannel(), clusterMsg);
+                    sendSyncData(cluster, cluster.getSyncState().getChannel());
                 }
             }
 
@@ -148,24 +143,22 @@ public abstract class AbstractHaClusterSlaveService {
 
     }
 
-    public void sendSyncData(final HaInfo cluster, final Channel channel, HaClusterMessage clusterMsg) {
-        if (null == clusterMsg) {
-            clusterMsg = getClusterMessage();
-        }
+    public void sendSyncData(final HaInfo cluster, final Channel channel) {
+        HaClusterMessage clusterMsg = getClusterMessage();
         try {
             MDC.put("id", cluster.getLogKey());
             ChannelFuture f = channel.writeAndFlush(clusterMsg);
             f.awaitUninterruptibly();
             if (f.isDone() || f.isSuccess()) {
-                log.info("ClusterSlaveService.sendSyncData: [{}], {}, [--TO: serverId: {}, serverTime: {}, infos: {}]",
+                log.info("ClusterSlaveService.sendSyncData: [{}], {}, [--TO: serverId: {}, (serverId: {}, serverTime: {})]",
                         this.clusterConfig.getServerId(), HaUtils.getTcpAddress(channel),
-                        clusterMsg.getServerId(), clusterMsg.getServerTime(), clusterMsg.getInfos().size());
+                        cluster.getServerId(), clusterMsg.getServerId(), clusterMsg.getServerTime());
             }
         }
         catch (Exception e) {
-            log.info("ClusterSlaveService.sendSyncData: [{}], {}, Failed: [--TO: serverId: {}, serverTime: {}, infos: {}]",
+            log.info("ClusterSlaveService.sendSyncData: [{}], {}, Failed: [--TO: serverId: {}, (serverId: {}, serverTime: {})]",
                     this.clusterConfig.getServerId(), HaUtils.getTcpAddress(channel),
-                    clusterMsg.getServerId(), clusterMsg.getServerTime(), clusterMsg.getInfos().size());
+                    cluster.getServerId(), clusterMsg.getServerId(), clusterMsg.getServerTime());
             log.info("ClusterSlaveService.sendSyncData: [{}], {}, Failed: {}",
                     this.clusterConfig.getServerId(), HaUtils.getTcpAddress(channel), e.getMessage());
         }

+ 11 - 8
its-cluster/src/main/java/com/its/common/cluster/master/HaClusterMasterInitializer.java → its-cluster/src/main/java/com/its/common/cluster/service/HaClusterMasterInitializer.java

@@ -1,13 +1,15 @@
-package com.its.common.cluster.master;
+package com.its.common.cluster.service;
 
-import com.its.common.cluster.utils.HaClusterMessageDecoder;
-import com.its.common.cluster.utils.HaClusterMessageEncoder;
+import com.its.common.cluster.codec.HaClusterMessageDecoder;
+import com.its.common.cluster.codec.HaClusterMessageEncoder;
+import com.its.common.cluster.handler.HaClusterMasterHandler;
 import com.its.common.cluster.utils.HaUtils;
 import com.its.common.cluster.vo.AbstractHaClusterConfig;
 import com.its.common.cluster.vo.HaInfo;
 import io.netty.channel.Channel;
 import io.netty.channel.ChannelInitializer;
 import io.netty.channel.ChannelPipeline;
+import io.netty.handler.codec.LengthFieldBasedFrameDecoder;
 import io.netty.handler.logging.LogLevel;
 import io.netty.handler.logging.LoggingHandler;
 import io.netty.handler.timeout.IdleStateHandler;
@@ -69,13 +71,14 @@ public class HaClusterMasterInitializer extends ChannelInitializer<Channel> {
 
             IdleStateHandler idleStateHandler = new IdleStateHandler(this.clusterConfig.getSyncSeconds(), 0, 0, TimeUnit.SECONDS);
             ChannelPipeline pipeline = channel.pipeline();
-            if (this.clusterConfig.isLogging()) {
-                pipeline.addLast(new LoggingHandler(LogLevel.INFO));
-            }
+//            if (this.clusterConfig.isLogging()) {
+//                pipeline.addLast(new LoggingHandler(LogLevel.INFO));
+//            }
             pipeline.addLast(idleStateHandler);
 
-            pipeline.addLast(new HaClusterMessageDecoder());
-            pipeline.addLast(new HaClusterMessageEncoder());
+            pipeline.addLast(new LengthFieldBasedFrameDecoder(8192, 0, 4));
+            pipeline.addLast(new HaClusterMessageDecoder(this.clusterConfig.isLogging()));
+            pipeline.addLast(new HaClusterMessageEncoder(this.clusterConfig.isLogging()));
             pipeline.addLast(new HaClusterMasterHandler(this.masterService, this.clusterConfig));
         }
         finally {
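Note (not part of the commit): LengthFieldBasedFrameDecoder(8192, 0, 4) uses the defaults lengthAdjustment=0 and initialBytesToStrip=0, so each frame still carries its 4-byte length prefix and the serialized payload must stay under 8188 bytes; HaClusterMessageDecoder therefore reads the prefix itself. A hedged alternative, if the frame decoder were to strip the prefix instead:

// Alternative sketch only - the commit keeps the prefix and reads it in HaClusterMessageDecoder.
pipeline.addLast(new LengthFieldBasedFrameDecoder(8192, 0, 4, 0, 4)); // initialBytesToStrip = 4
// With the prefix stripped, the object decoder could deserialize in.readableBytes()
// directly and skip the in.readInt() call.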

+ 11 - 9
its-cluster/src/main/java/com/its/common/cluster/slave/HaClusterSlave.java → its-cluster/src/main/java/com/its/common/cluster/service/HaClusterSlave.java

@@ -1,13 +1,14 @@
-package com.its.common.cluster.slave;
+package com.its.common.cluster.service;
 
-import com.its.common.cluster.utils.HaClusterMessageDecoder;
-import com.its.common.cluster.utils.HaClusterMessageEncoder;
-import com.its.common.cluster.utils.HaClusterSlaveBootstrapFactory;
+import com.its.common.cluster.codec.HaClusterMessageDecoder;
+import com.its.common.cluster.codec.HaClusterMessageEncoder;
+import com.its.common.cluster.handler.HaClusterSlaveHandler;
 import com.its.common.cluster.vo.AbstractHaClusterConfig;
 import com.its.common.cluster.vo.HaInfo;
 import io.netty.bootstrap.Bootstrap;
 import io.netty.channel.*;
 import io.netty.channel.socket.SocketChannel;
+import io.netty.handler.codec.LengthFieldBasedFrameDecoder;
 import io.netty.handler.logging.LogLevel;
 import io.netty.handler.logging.LoggingHandler;
 import io.netty.handler.timeout.IdleStateHandler;
@@ -56,14 +57,15 @@ public class HaClusterSlave implements Callable<Object> {
                     // Handler execution order follows the order in which they are added. (Inbound: head=>tail, Outbound: tail=>head, name2ctx)
                     @Override
                     public void initChannel(SocketChannel ch) {
-                        if (cluster.isLogging()) {
-                            ch.pipeline().addLast(new LoggingHandler(LogLevel.INFO));
-                        }
+//                        if (cluster.isLogging()) {
+//                            ch.pipeline().addLast(new LoggingHandler(LogLevel.INFO));
+//                        }
                         IdleStateHandler idleStateHandler = new IdleStateHandler(10, 0, 0, TimeUnit.SECONDS);
 
                         ch.pipeline().addLast(idleStateHandler);
-                        ch.pipeline().addLast(new HaClusterMessageDecoder());
-                        ch.pipeline().addLast(new HaClusterMessageEncoder());
+                        ch.pipeline().addLast(new LengthFieldBasedFrameDecoder(8192, 0, 4));
+                        ch.pipeline().addLast(new HaClusterMessageDecoder(clusterConfig.isLogging()));
+                        ch.pipeline().addLast(new HaClusterMessageEncoder(clusterConfig.isLogging()));
                         ch.pipeline().addLast(new HaClusterSlaveHandler(slaveService, cluster));
                     }
                 });

+ 2 - 1
its-cluster/src/main/java/com/its/common/cluster/utils/HaClusterSlaveBootstrapFactory.java → its-cluster/src/main/java/com/its/common/cluster/service/HaClusterSlaveBootstrapFactory.java

@@ -1,5 +1,6 @@
-package com.its.common.cluster.utils;
+package com.its.common.cluster.service;
 
+import com.its.common.cluster.utils.HaUtils;
 import io.netty.bootstrap.Bootstrap;
 import io.netty.channel.ChannelOption;
 import io.netty.channel.EventLoopGroup;

+ 0 - 24
its-cluster/src/main/java/com/its/common/cluster/utils/AbstractHaClusterMessageData.java

@@ -1,24 +0,0 @@
-package com.its.common.cluster.utils;
-
-import lombok.Data;
-
-import java.io.Serializable;
-
-@Data
-public abstract class AbstractHaClusterMessageData implements Serializable {
-    private static final long serialVersionUID = 1L;
-
-    private String centerId;
-    private int state;
-    private String connTm;
-    private String disConnTm;
-
-    private String lastSendTm;
-    private int totalSends;
-    private long baseTm;
-    private long sendTm;
-    private int sendSeconds;
-
-    private Object object;
-
-}

+ 0 - 35
its-cluster/src/main/java/com/its/common/cluster/utils/HaClusterMessageDecoder.java

@@ -1,35 +0,0 @@
-package com.its.common.cluster.utils;
-
-import io.netty.buffer.ByteBuf;
-import io.netty.channel.ChannelHandlerContext;
-import io.netty.handler.codec.ByteToMessageDecoder;
-
-import java.io.ByteArrayInputStream;
-import java.io.ObjectInputStream;
-import java.util.List;
-
-public class HaClusterMessageDecoder extends ByteToMessageDecoder {
-
-    @Override
-    protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception {
-        if (in.readableBytes() < 4) {
-            return;
-        }
-
-        in.markReaderIndex();
-        int dataLength = in.readInt();
-
-        if (in.readableBytes() < dataLength) {
-            in.resetReaderIndex();
-            return;
-        }
-
-        byte[] bytes = new byte[dataLength];
-        in.readBytes(bytes);
-        ByteArrayInputStream bis = new ByteArrayInputStream(bytes);
-        ObjectInputStream ois = new ObjectInputStream(bis);
-        out.add(ois.readObject());
-        ois.close();
-        bis.close();
-    }
-}

+ 10 - 13
its-cluster/src/main/java/com/its/common/cluster/vo/AbstractHaClusterConfig.java

@@ -21,23 +21,22 @@ public abstract class AbstractHaClusterConfig {
 
     private boolean master = false;
 
-    private int syncSeconds = -1;
-    private int serverId = -1;
-    private String ipAddress;
-    private int syncPort = -1;  // Port 1: port for data synchronization
-//    private int electionPort;   // Port 2: port for leader election
+    // Values read from the cluster config file (xxx.cfg)
+    private int serverId = -1;      // Server ID (starts at 1; 0 is not used)
+    private int syncSeconds = -1;   // Data sync interval in seconds (min 5, max 60)
+    private String ipAddress;       // IP address of the cluster server
+    private int syncPort = -1;      // Port for data synchronization
 
-    private String configFile;
-    private boolean logging = false;
+    // Values configured in application.yml (application.ha-cluster)
+    private boolean enabled = false;    // whether the cluster feature is enabled
+    private boolean logging = false;    // whether the library writes its own logs
+    private String configFile;          // path to the cluster config file
 
     private final HashMap<Integer, HaInfo> clusterMap = new HashMap<>();
 
     @PostConstruct
     private void init() throws IOException {
-
-        log.info("[{}] -------------------------", this.getClass().getSimpleName());
         loadClusterConfig();
-        log.info("{}", this);
     }
 
     public HaInfo get(String ipAddress) {
@@ -102,8 +101,6 @@ public abstract class AbstractHaClusterConfig {
 //    }
 
     private void loadClusterConfig() throws IOException {
-        log.info("loadClusterConfig.configFile: {}", this.configFile);
-
         this.serverId = getIntValue("server.id", 1);
         this.syncSeconds = getIntValue("syncSeconds", 5);
         if (this.syncSeconds < 5) {
@@ -138,7 +135,7 @@ public abstract class AbstractHaClusterConfig {
                                 .serverId(serverId)
                                 .ipAddress(ipAddress)
                                 .syncPort(syncPort)
-                                .logging(false)
+                                .logging(this.logging)
                                 .electionState(new HaNetState())
                                 .syncState(new HaNetState())
                                 .build();

+ 3 - 2
its-cluster/src/main/java/com/its/common/cluster/utils/HaClusterMessage.java → its-cluster/src/main/java/com/its/common/cluster/vo/HaClusterMessage.java

@@ -1,4 +1,4 @@
-package com.its.common.cluster.utils;
+package com.its.common.cluster.vo;
 
 import lombok.Builder;
 import lombok.Data;
@@ -12,7 +12,8 @@ public class HaClusterMessage implements Serializable {
     private static final long serialVersionUID = 1L;
 
     private int serverId;
+    private boolean master;
     private String serverTime;
 
-    private List<AbstractHaClusterMessageData> infos;
+    private List<HaClusterMessageData> infos;
 }

+ 10 - 0
its-cluster/src/main/java/com/its/common/cluster/vo/HaClusterMessageData.java

@@ -0,0 +1,10 @@
+package com.its.common.cluster.vo;
+
+import lombok.Builder;
+import lombok.Data;
+
+import java.io.Serializable;
+
+public interface HaClusterMessageData extends Serializable {
+    String getType(); // method added to distinguish the data type
+}
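A hypothetical implementation of the new interface (not included in this commit), reusing a few fields from the removed AbstractHaClusterMessageData to show how an implementer of getClusterMessageData() might now supply concrete, serializable types:

package com.its.common.cluster.vo;

import lombok.Builder;
import lombok.Data;

@Data
@Builder
public class CenterClusterMessageData implements HaClusterMessageData {
    private static final long serialVersionUID = 1L;

    // Fields borrowed from the removed AbstractHaClusterMessageData; names are illustrative.
    private String centerId;
    private int state;
    private String connTm;
    private String lastSendTm;
    private int totalSends;

    @Override
    public String getType() {
        return "CENTER"; // arbitrary tag; the commit does not define concrete type values
    }
}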

+ 5 - 5
its-cluster/src/main/java/com/its/common/cluster/vo/HaNET.java

@@ -1,14 +1,14 @@
 package com.its.common.cluster.vo;
 
-public class HaNET {
+public final class HaNET {
 
     private HaNET() {
         throw new IllegalStateException("HaNET class");
     }
 
-    public final static int CLOSED = 0; /* closed state */
-    public final static int LOGIN_WAIT = 1;      /* waiting for login after the initial connection */
-    public final static int DATA_TRANS = 2;      /* data trans state */
-    public final static int TERMINATE = 2;
+    public static final int CLOSED = 0;         /* closed state */
+    public static final int LOGIN_WAIT = 1;      /* waiting for login after the initial connection */
+    public static final int DATA_TRANS = 2;      /* data trans state */
+    public static final int TERMINATE = 3;
 
 }