Skip to content

Commit a038d23

Browse files
fix(hstore): JRaft maxEntriesSize configuration parameters do not take effect (#2630)
Co-authored-by: imbajin <[email protected]>
1 parent df921e9 commit a038d23

File tree

4 files changed

+12
-8
lines changed

4 files changed

+12
-8
lines changed

.asf.yaml

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,8 @@ github:
2828
del_branch_on_merge: true
2929
#labels:
3030
enabled_merge_buttons:
31-
merge: false
31+
# TODO: disable it after common merged
32+
merge: true
3233
rebase: true
3334
squash: true
3435
protected_branches:

hugegraph-store/hg-store-core/src/main/java/org/apache/hugegraph/store/options/HgStoreEngineOptions.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -98,7 +98,7 @@ public static class RaftOptions {
9898
/**
9999
* The maximum number of entries in AppendEntriesRequest
100100
*/
101-
private final int maxEntriesSize = 256;
101+
private int maxEntriesSize = 256;
102102
/**
103103
* Raft cluster data backlog occurs, rate limiting wait time in milliseconds.
104104
**/

hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/AppConfig.java

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -182,6 +182,8 @@ public class Raft {
182182
private int maxSegmentFileSize;
183183
@Value("${raft.maxReplicatorInflightMsgs:256}")
184184
private int maxReplicatorInflightMsgs;
185+
@Value("${raft.maxEntriesSize:256}")
186+
private int maxEntriesSize;
185187

186188
}
187189

hugegraph-store/hg-store-node/src/main/java/org/apache/hugegraph/store/node/grpc/HgStoreNodeService.java

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -100,6 +100,7 @@ public void init() {
100100
.isUseRocksDBSegmentLogStorage());
101101
setMaxSegmentFileSize(appConfig.getRaft().getMaxSegmentFileSize());
102102
setMaxReplicatorInflightMsgs(appConfig.getRaft().getMaxReplicatorInflightMsgs());
103+
setMaxEntriesSize(appConfig.getRaft().getMaxEntriesSize());
103104
}});
104105
setFakePdOptions(new FakePdOptions() {{
105106
setStoreList(appConfig.getFakePdConfig().getStoreList());
@@ -125,9 +126,9 @@ public List<Integer> getGraphLeaderPartitionIds(String graphName) {
125126
}
126127

127128
/**
128-
* 添加raft 任务,转发数据给raft
129+
* Add raft task, forward data to raft
129130
*
130-
* @return true 表示数据已被提交,false表示未提交,用于单副本入库减少批次拆分
131+
* @return true means the data has been submitted, false means not submitted, used to reduce batch splitting for single-replica storage
131132
*/
132133
public <Req extends com.google.protobuf.GeneratedMessageV3>
133134
void addRaftTask(byte methodId, String graphName, Integer partitionId, Req req,
@@ -140,14 +141,14 @@ void addRaftTask(byte methodId, String graphName, Integer partitionId, Req req,
140141
}
141142
//
142143
try {
143-
// 序列化,
144+
// Serialization
144145
final byte[] buffer = new byte[req.getSerializedSize() + 1];
145146
final CodedOutputStream output = CodedOutputStream.newInstance(buffer);
146147
output.write(methodId);
147148
req.writeTo(output);
148149
output.checkNoSpaceLeft();
149150
output.flush();
150-
// 传送给raft
151+
// Add raft task
151152
storeEngine.addRaftTask(graphName, partitionId,
152153
RaftOperation.create(methodId, buffer, req), closure);
153154

@@ -159,7 +160,7 @@ void addRaftTask(byte methodId, String graphName, Integer partitionId, Req req,
159160
}
160161

161162
/**
162-
* 来自日志的任务,一般是follower 或者 日志回滚的任务
163+
* Tasks from logs, generally tasks from followers or log rollbacks
163164
*/
164165
@Override
165166
public boolean invoke(int partId, byte[] request, RaftClosure response) throws
@@ -190,7 +191,7 @@ public boolean invoke(int partId, byte[] request, RaftClosure response) throws
190191
}
191192

192193
/**
193-
* 处理raft传送过来的数据
194+
* Process the data sent by raft
194195
*/
195196
@Override
196197
public boolean invoke(int partId, byte methodId, Object req, RaftClosure response) throws

0 commit comments

Comments (0)