diff --git a/common/src/main/java/org/tron/common/parameter/CommonParameter.java b/common/src/main/java/org/tron/common/parameter/CommonParameter.java
index 8f19e607497..3f57d64981a 100644
--- a/common/src/main/java/org/tron/common/parameter/CommonParameter.java
+++ b/common/src/main/java/org/tron/common/parameter/CommonParameter.java
@@ -14,7 +14,6 @@
import org.tron.common.logsfilter.FilterQuery;
import org.tron.common.setting.RocksDbSettings;
import org.tron.core.Constant;
-import org.tron.core.config.args.Overlay;
import org.tron.core.config.args.SeedNode;
import org.tron.core.config.args.Storage;
import org.tron.p2p.P2pConfig;
@@ -445,8 +444,6 @@ public class CommonParameter {
@Getter
public Storage storage;
@Getter
- public Overlay overlay;
- @Getter
public SeedNode seedNode;
@Getter
public EventPluginConfig eventPluginConfig;
@@ -494,8 +491,16 @@ public class CommonParameter {
public int jsonRpcMaxBlockFilterNum = 50000;
@Getter
@Setter
+ public int jsonRpcMaxBatchSize = 100;
+ @Getter
+ @Setter
+ public int jsonRpcMaxResponseSize = 25 * 1024 * 1024;
+ @Getter
+ @Setter
+ public int jsonRpcMaxAddressSize = 1000;
+ @Getter
+ @Setter
public int jsonRpcMaxLogFilterNum = 20000;
-
@Getter
@Setter
public int maxTransactionPendingSize;
diff --git a/common/src/main/java/org/tron/core/config/BeanDefaults.java b/common/src/main/java/org/tron/core/config/BeanDefaults.java
new file mode 100644
index 00000000000..1b9d8836759
--- /dev/null
+++ b/common/src/main/java/org/tron/core/config/BeanDefaults.java
@@ -0,0 +1,145 @@
+package org.tron.core.config;
+
+import com.typesafe.config.Config;
+import com.typesafe.config.ConfigFactory;
+import com.typesafe.config.ConfigObject;
+import com.typesafe.config.ConfigValue;
+import com.typesafe.config.ConfigValueType;
+import java.beans.BeanInfo;
+import java.beans.Introspector;
+import java.beans.PropertyDescriptor;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Generates a Typesafe {@link Config} from a bean instance's current field values.
+ *
+ * <p>Used by each {@code XxxConfig.fromConfig()} to replace the role that
+ * {@code reference.conf} played: ensures every key ConfigBeanFactory needs is
+ * present, so a partial user config works without throwing
+ * {@code ConfigException.Missing}.
+ *
+ * <p>Only public getter+setter pairs (standard JavaBean properties) are included —
+ * the same set that {@code ConfigBeanFactory.create()} auto-binds. Keys are
+ * decapitalized exactly as ConfigBeanFactory does:
+ * {@code Character.toLowerCase(name.charAt(0)) + name.substring(1)}.
+ *
+ * <p>Nested bean fields are recursed into nested HOCON objects.
+ * {@code List} fields are serialized as HOCON lists (empty by default).
+ * Fields with no public setter (e.g. {@code @Getter(AccessLevel.NONE)} overrides)
+ * are automatically skipped — these are handled manually in each
+ * {@code fromConfig()} via {@code hasPath} guards.
+ */
+public final class BeanDefaults {
+
+ private BeanDefaults() {}
+
+ /**
+ * Convert {@code bean}'s public JavaBean properties to a Typesafe Config.
+ * The resulting Config can be used as a {@code withFallback()} for a user's
+ * config section to guarantee all keys are present for ConfigBeanFactory.
+ */
+ public static Config toConfig(Object bean) {
+ return ConfigFactory.parseMap(toMap(bean));
+ }
+
+ /**
+ * Returns a copy of {@code config} with all null-valued leaf paths removed.
+ * Call this on a user-supplied config section before {@link Config#withFallback}
+ * so that HOCON {@code null} entries in legacy configs do not shadow bean defaults.
+ *
+ * <p>Uses {@link ConfigObject#entrySet()} (not {@link Config#entrySet()}) because
+ * the latter silently excludes null values, making them impossible to detect.
+ */
+ public static Config stripNullLeaves(Config config) {
+ return stripNullObject(config.root()).toConfig();
+ }
+
+ /**
+ * Returns a copy of {@code config} where the value at {@code fromKey} is moved to
+ * {@code toKey}, leaving the original key absent. If {@code fromKey} is absent, the
+ * config is returned unchanged. Use this in {@code fromConfig()} to bridge config keys
+ * that violate JavaBean naming (e.g. {@code pBFTExpireNum} → {@code PBFTExpireNum}) so
+ * that {@code ConfigBeanFactory} finds the value under the key it derives from the setter.
+ */
+ public static Config remapKey(Config config, String fromKey, String toKey) {
+ if (!config.hasPath(fromKey)) {
+ return config;
+ }
+ return config.withValue(toKey, config.getValue(fromKey)).withoutPath(fromKey);
+ }
+
+ private static ConfigObject stripNullObject(ConfigObject obj) {
+ ConfigObject result = obj;
+ for (Map.Entry<String, ConfigValue> entry : obj.entrySet()) {
+ ConfigValue v = entry.getValue();
+ if (v.valueType() == ConfigValueType.NULL) {
+ result = result.withoutKey(entry.getKey());
+ } else if (v.valueType() == ConfigValueType.OBJECT) {
+ result = result.withValue(entry.getKey(), stripNullObject((ConfigObject) v));
+ }
+ }
+ return result;
+ }
+
+ private static Map<String, Object> toMap(Object bean) {
+ Map<String, Object> map = new LinkedHashMap<>();
+ BeanInfo info;
+ try {
+ info = Introspector.getBeanInfo(bean.getClass());
+ } catch (java.beans.IntrospectionException e) {
+ // Programming error: bean class does not conform to JavaBean spec.
+ // Propagate immediately so the misconfigured class is identified at startup,
+ // rather than returning a silent empty map that produces a confusing
+ // ConfigException.Missing pointing at the user config.
+ throw new IllegalStateException("Cannot introspect bean: " + bean.getClass().getName(), e);
+ }
+ for (PropertyDescriptor pd : info.getPropertyDescriptors()) {
+ Method getter = pd.getReadMethod();
+ Method setter = pd.getWriteMethod();
+ // Skip read-only properties (no setter) — matches ConfigBeanFactory's contract
+ if (getter == null || setter == null) {
+ continue;
+ }
+ // Use the property name exactly as Introspector produced it.
+ // ConfigBeanFactory does configProps.get(beanProp.getName()) — the lookup key
+ // is the property name verbatim, not decapitalized. For ordinary camelCase
+ // setters (setMaxConnections → "MaxConnections" → decapitalize → "maxConnections")
+ // Introspector already returns the lowercase form. For setters that start with
+ // two consecutive uppercase letters (setPBFTEnable → "PBFTEnable") the JavaBean
+ // spec forbids decapitalization, so pd.getName() == "PBFTEnable" — matching the
+ // capital-P key that config.conf uses for those fields.
+ try {
+ String key = pd.getName();
+ Object value = getter.invoke(bean);
+ map.put(key, toValue(value));
+ } catch (Exception ignored) {
+ // Best-effort: skip individual unresolvable property so that the rest of
+ // the defaults are still emitted. getter.invoke() is the only realistic
+ // throw site (InvocationTargetException / IllegalAccessException).
+ }
+ }
+ return map;
+ }
+
+ private static Object toValue(Object value) {
+ if (value == null) {
+ return "";
+ }
+ if (value instanceof Boolean || value instanceof Number || value instanceof String) {
+ return value;
+ }
+ if (value instanceof List) {
+ List<Object> list = new ArrayList<>();
+ for (Object item : (List<?>) value) {
+ list.add(toValue(item));
+ }
+ return list;
+ }
+ // Assume nested bean — recurse so it becomes a nested HOCON object.
+ return toMap(value);
+ }
+}
diff --git a/common/src/main/java/org/tron/core/config/Configuration.java b/common/src/main/java/org/tron/core/config/Configuration.java
index 80735290b8c..9870f56a194 100644
--- a/common/src/main/java/org/tron/core/config/Configuration.java
+++ b/common/src/main/java/org/tron/core/config/Configuration.java
@@ -48,10 +48,11 @@ public static com.typesafe.config.Config getByFileName(
private static void resolveConfigFile(String fileName, File confFile) {
if (confFile.exists()) {
- config = ConfigFactory.parseFile(confFile)
- .withFallback(ConfigFactory.defaultReference());
+ config = ConfigFactory.parseFile(confFile);
} else if (Thread.currentThread().getContextClassLoader().getResourceAsStream(fileName)
!= null) {
+ // ConfigFactory.load merges system properties (higher priority than the file),
+ // which tests rely on to override storage.db.engine via -D flags.
config = ConfigFactory.load(fileName);
} else {
throw new IllegalArgumentException(
diff --git a/common/src/main/java/org/tron/core/config/args/BlockConfig.java b/common/src/main/java/org/tron/core/config/args/BlockConfig.java
index 4746f390e0c..a0e187f1b5e 100644
--- a/common/src/main/java/org/tron/core/config/args/BlockConfig.java
+++ b/common/src/main/java/org/tron/core/config/args/BlockConfig.java
@@ -7,9 +7,11 @@
import com.typesafe.config.Config;
import com.typesafe.config.ConfigBeanFactory;
+import com.typesafe.config.ConfigFactory;
import lombok.Getter;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
+import org.tron.core.config.BeanDefaults;
import org.tron.core.exception.TronError;
/**
@@ -21,12 +23,10 @@
public class BlockConfig {
private boolean needSyncCheck = false;
- private long maintenanceTimeInterval = 21600000L;
+ private long maintenanceTimeInterval = 6 * 3600 * 1000L; // 6 hours
private long proposalExpireTime = DEFAULT_PROPOSAL_EXPIRE_TIME;
private int checkFrozenTime = 1;
- // Defaults come from reference.conf (loaded globally via Configuration.java)
-
/**
* Create BlockConfig from the "block" section of the application config.
* Also checks that committee.proposalExpireTime is not used (must use block.proposalExpireTime).
@@ -38,8 +38,12 @@ public static BlockConfig fromConfig(Config config) {
+ "config.conf, please set the value in block.proposalExpireTime.", PARAMETER_INIT);
}
- Config blockSection = config.getConfig("block");
- BlockConfig blockConfig = ConfigBeanFactory.create(blockSection, BlockConfig.class);
+ Config defaults = BeanDefaults.toConfig(new BlockConfig());
+ Config userSection = config.hasPath("block")
+ ? BeanDefaults.stripNullLeaves(config.getConfig("block"))
+ : ConfigFactory.empty();
+ Config section = userSection.withFallback(defaults);
+ BlockConfig blockConfig = ConfigBeanFactory.create(section, BlockConfig.class);
blockConfig.postProcess();
return blockConfig;
}
diff --git a/common/src/main/java/org/tron/core/config/args/CommitteeConfig.java b/common/src/main/java/org/tron/core/config/args/CommitteeConfig.java
index 5cd9de842a0..5c698e82c82 100644
--- a/common/src/main/java/org/tron/core/config/args/CommitteeConfig.java
+++ b/common/src/main/java/org/tron/core/config/args/CommitteeConfig.java
@@ -2,6 +2,8 @@
import com.typesafe.config.Config;
import com.typesafe.config.ConfigBeanFactory;
+import com.typesafe.config.ConfigFactory;
+import org.tron.core.config.BeanDefaults;
import lombok.Getter;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
@@ -35,29 +37,8 @@ public class CommitteeConfig {
private long allowProtoFilterNum = 0;
private long allowAccountStateRoot = 0;
private long changedDelegation = 0;
- // NON-STANDARD NAMING: "allowPBFT" and "pBFTExpireNum" in config.conf contain
- // consecutive uppercase letters ("PBFT"), which violates JavaBean naming convention.
- // ConfigBeanFactory derives config keys from setter names using JavaBean rules:
- // setPBFTExpireNum -> property "PBFTExpireNum" (capital P, per JavaBean spec)
- // but config.conf uses "pBFTExpireNum" (lowercase p) -> mismatch -> binding fails.
- //
- // These two fields are excluded from auto-binding and handled manually in fromConfig().
- // TODO: Rename config keys to standard camelCase (allowPbft, pbftExpireNum) when
- // PBFT feature is enabled and a breaking config change is acceptable.
- @Getter(lombok.AccessLevel.NONE)
- @Setter(lombok.AccessLevel.NONE)
private long allowPBFT = 0;
- @Getter(lombok.AccessLevel.NONE)
- @Setter(lombok.AccessLevel.NONE)
private long pBFTExpireNum = 20;
-
- // Only getters are exposed. No public setters — ConfigBeanFactory scans public
- // setters via reflection and would derive key "PBFTExpireNum" / "AllowPBFT"
- // (JavaBean uppercase rule), which does not match config keys "pBFTExpireNum"
- // / "allowPBFT" and would throw. Values are assigned to fields directly in
- // fromConfig() below.
- public long getAllowPBFT() { return allowPBFT; }
- public long getPBFTExpireNum() { return pBFTExpireNum; }
private long allowTvmFreeze = 0;
private long allowTvmVote = 0;
private long allowTvmLondon = 0;
@@ -86,27 +67,17 @@ public class CommitteeConfig {
// proposalExpireTime is NOT a committee field — it's in block.* and handled by BlockConfig
- // Defaults come from reference.conf (loaded globally via Configuration.java)
-
- /**
- * Create CommitteeConfig from the "committee" section of the application config.
- *
- * Note: allowPBFT and pBFTExpireNum have non-standard JavaBean naming (consecutive
- * uppercase letters) which causes ConfigBeanFactory key mismatch. These two fields
- * are excluded from automatic binding and handled manually after.
- */
- private static final String PBFT_EXPIRE_NUM_KEY = "pBFTExpireNum";
- private static final String ALLOW_PBFT_KEY = "allowPBFT";
-
public static CommitteeConfig fromConfig(Config config) {
- Config section = config.getConfig("committee");
-
+ Config defaults = BeanDefaults.toConfig(new CommitteeConfig());
+ Config userSection = config.hasPath("committee")
+ ? BeanDefaults.stripNullLeaves(config.getConfig("committee"))
+ : ConfigFactory.empty();
+ // pBFTExpireNum: config key uses lowercase-p prefix, but setPBFTExpireNum causes
+ // Introspector to derive "PBFTExpireNum" (consecutive uppercase prevents decapitalization).
+ // Remap so ConfigBeanFactory finds it under the expected key.
+ userSection = BeanDefaults.remapKey(userSection, "pBFTExpireNum", "PBFTExpireNum");
+ Config section = userSection.withFallback(defaults);
CommitteeConfig cc = ConfigBeanFactory.create(section, CommitteeConfig.class);
- // Ensure the manually-named fields get the right values from the original keys
- cc.allowPBFT = section.hasPath(ALLOW_PBFT_KEY) ? section.getLong(ALLOW_PBFT_KEY) : 0;
- cc.pBFTExpireNum = section.hasPath(PBFT_EXPIRE_NUM_KEY)
- ? section.getLong(PBFT_EXPIRE_NUM_KEY) : 20;
-
cc.postProcess();
return cc;
}
diff --git a/common/src/main/java/org/tron/core/config/args/EventConfig.java b/common/src/main/java/org/tron/core/config/args/EventConfig.java
index ac1731de2dc..0dd8750e148 100644
--- a/common/src/main/java/org/tron/core/config/args/EventConfig.java
+++ b/common/src/main/java/org/tron/core/config/args/EventConfig.java
@@ -8,6 +8,7 @@
import lombok.Getter;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
+import org.tron.core.config.BeanDefaults;
/**
* Event subscribe configuration bean.
@@ -25,11 +26,9 @@ public class EventConfig {
private String server = "";
private String dbconfig = "";
private boolean contractParse = true;
- @Getter(lombok.AccessLevel.NONE)
- @Setter(lombok.AccessLevel.NONE)
+ // Config key is "native" (Java reserved word); remapped to this field in fromConfig().
private NativeConfig nativeQueue = new NativeConfig();
- public NativeConfig getNativeQueue() { return nativeQueue; }
// Topics list has optional fields (ethCompatible, redundancy, solidified) that
// not all items have. ConfigBeanFactory requires all bean fields to exist in config.
// Excluded from auto-binding, read manually in fromConfig().
@@ -37,12 +36,16 @@ public class EventConfig {
@Setter(lombok.AccessLevel.NONE)
private List<TopicConfig> topics = new ArrayList<>();
- public List<TopicConfig> getTopics() { return topics; }
+ public List<TopicConfig> getTopics() {
+ return topics;
+ }
+
private FilterConfig filter = new FilterConfig();
@Getter
@Setter
public static class NativeConfig {
+
private boolean useNativeQueue = true;
private int bindport = 5555;
private int sendqueuelength = 1000;
@@ -51,6 +54,7 @@ public static class NativeConfig {
@Getter
@Setter
public static class TopicConfig {
+
private String triggerName = "";
private boolean enable = false;
private String topic = "";
@@ -62,14 +66,13 @@ public static class TopicConfig {
@Getter
@Setter
public static class FilterConfig {
+
private String fromblock = "";
private String toblock = "";
private List<String> contractAddress = new ArrayList<>();
private List<String> contractTopic = new ArrayList<>();
}
- // Defaults come from reference.conf (loaded globally via Configuration.java)
-
/**
* Create EventConfig from the "event.subscribe" section of the application config.
*
@@ -77,55 +80,29 @@ public static class FilterConfig {
* "nativeQueue" but config key is "native". We handle this manually after binding.
*/
public static EventConfig fromConfig(Config config) {
- Config section = config.getConfig("event.subscribe");
-
- // "native" is a Java reserved word, "topics" has optional fields per item —
- // strip both before binding, read manually
- String nativeKey = "native";
- String topicsKey = "topics";
- Config bindable = section.withoutPath(nativeKey).withoutPath(topicsKey)
- .withoutPath("topicDefaults");
- EventConfig ec = ConfigBeanFactory.create(bindable, EventConfig.class);
+ Config defaults = BeanDefaults.toConfig(new EventConfig());
+ Config userSection = config.hasPath("event.subscribe")
+ ? BeanDefaults.stripNullLeaves(config.getConfig("event.subscribe"))
+ : ConfigFactory.empty();
- // manually bind "native" sub-section
- Config nativeSection = section.hasPath(nativeKey)
- ? section.getConfig(nativeKey) : ConfigFactory.empty();
- ec.nativeQueue = new NativeConfig();
- if (nativeSection.hasPath("useNativeQueue")) {
- ec.nativeQueue.useNativeQueue = nativeSection.getBoolean("useNativeQueue");
- }
- if (nativeSection.hasPath("bindport")) {
- ec.nativeQueue.bindport = nativeSection.getInt("bindport");
- }
- if (nativeSection.hasPath("sendqueuelength")) {
- ec.nativeQueue.sendqueuelength = nativeSection.getInt("sendqueuelength");
- }
+ // "native" is a Java reserved word — remap to the field name so ConfigBeanFactory
+ // auto-binds it as NativeConfig nativeQueue. topics has optional fields per item
+ // so it is excluded from auto-binding and populated manually below.
+ Config bindable = BeanDefaults.remapKey(userSection, "native", "nativeQueue")
+ .withoutPath("topics")
+ .withoutPath("topicDefaults")
+ .withFallback(defaults);
+ EventConfig ec = ConfigBeanFactory.create(bindable, EventConfig.class);
- // manually bind topics — each item may have optional fields
- if (section.hasPath(topicsKey)) {
+ // topics: apply per-item BeanDefaults so optional fields (solidified, ethCompatible,
+ // redundancy) don't require every item to declare them explicitly.
+ if (userSection.hasPath("topics")) {
+ Config topicDefaults = BeanDefaults.toConfig(new TopicConfig());
ec.topics = new ArrayList<>();
- for (com.typesafe.config.ConfigObject obj : section.getObjectList(topicsKey)) {
- Config tc = obj.toConfig();
- TopicConfig topic = new TopicConfig();
- if (tc.hasPath("triggerName")) {
- topic.triggerName = tc.getString("triggerName");
- }
- if (tc.hasPath("enable")) {
- topic.enable = tc.getBoolean("enable");
- }
- if (tc.hasPath("topic")) {
- topic.topic = tc.getString("topic");
- }
- if (tc.hasPath("solidified")) {
- topic.solidified = tc.getBoolean("solidified");
- }
- if (tc.hasPath("ethCompatible")) {
- topic.ethCompatible = tc.getBoolean("ethCompatible");
- }
- if (tc.hasPath("redundancy")) {
- topic.redundancy = tc.getBoolean("redundancy");
- }
- ec.topics.add(topic);
+ for (com.typesafe.config.ConfigObject obj : userSection.getObjectList("topics")) {
+ ec.topics.add(ConfigBeanFactory.create(
+ BeanDefaults.stripNullLeaves(obj.toConfig()).withFallback(topicDefaults),
+ TopicConfig.class));
}
}
diff --git a/common/src/main/java/org/tron/core/config/args/GenesisConfig.java b/common/src/main/java/org/tron/core/config/args/GenesisConfig.java
index a17e06d5c0f..6d87ede74f7 100644
--- a/common/src/main/java/org/tron/core/config/args/GenesisConfig.java
+++ b/common/src/main/java/org/tron/core/config/args/GenesisConfig.java
@@ -2,6 +2,8 @@
import com.typesafe.config.Config;
import com.typesafe.config.ConfigBeanFactory;
+import com.typesafe.config.ConfigFactory;
+import org.tron.core.config.BeanDefaults;
import java.util.ArrayList;
import java.util.List;
import lombok.Getter;
@@ -41,10 +43,12 @@ public static class WitnessConfig {
private long voteCount = 0;
}
- // Defaults come from reference.conf (loaded globally via Configuration.java)
-
public static GenesisConfig fromConfig(Config config) {
- Config section = config.getConfig("genesis.block");
+ Config defaults = BeanDefaults.toConfig(new GenesisConfig());
+ Config userSection = config.hasPath("genesis.block")
+ ? BeanDefaults.stripNullLeaves(config.getConfig("genesis.block"))
+ : ConfigFactory.empty();
+ Config section = userSection.withFallback(defaults);
return ConfigBeanFactory.create(section, GenesisConfig.class);
}
}
diff --git a/common/src/main/java/org/tron/core/config/args/MetricsConfig.java b/common/src/main/java/org/tron/core/config/args/MetricsConfig.java
index 5547dfa6d3a..d5034428009 100644
--- a/common/src/main/java/org/tron/core/config/args/MetricsConfig.java
+++ b/common/src/main/java/org/tron/core/config/args/MetricsConfig.java
@@ -2,6 +2,8 @@
import com.typesafe.config.Config;
import com.typesafe.config.ConfigBeanFactory;
+import com.typesafe.config.ConfigFactory;
+import org.tron.core.config.BeanDefaults;
import lombok.Getter;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
@@ -24,13 +26,21 @@ public static class PrometheusConfig {
private int port = 9527;
}
- // Defaults come from reference.conf (loaded globally via Configuration.java)
+ @Getter
+ @Setter
+ public static class InfluxDbConfig {
+ private String ip = "";
+ private int port = 8086;
+ private String database = "metrics";
+ private int metricsReportInterval = 10;
+ }
- /**
- * Create MetricsConfig from the "node.metrics" section of the application config.
- */
public static MetricsConfig fromConfig(Config config) {
- Config section = config.getConfig("node.metrics");
+ Config defaults = BeanDefaults.toConfig(new MetricsConfig());
+ Config userSection = config.hasPath("node.metrics")
+ ? BeanDefaults.stripNullLeaves(config.getConfig("node.metrics"))
+ : ConfigFactory.empty();
+ Config section = userSection.withFallback(defaults);
return ConfigBeanFactory.create(section, MetricsConfig.class);
}
}
diff --git a/common/src/main/java/org/tron/core/config/args/NodeConfig.java b/common/src/main/java/org/tron/core/config/args/NodeConfig.java
index d280336182d..2b931bb5557 100644
--- a/common/src/main/java/org/tron/core/config/args/NodeConfig.java
+++ b/common/src/main/java/org/tron/core/config/args/NodeConfig.java
@@ -12,6 +12,7 @@
import lombok.Getter;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
+import org.tron.core.config.BeanDefaults;
import org.tron.core.exception.TronError;
// Node configuration bean for the "node" section of config.conf.
@@ -45,7 +46,9 @@ public class NodeConfig {
@Setter(lombok.AccessLevel.NONE)
private boolean isOpenFullTcpDisconnect = false;
- public boolean isOpenFullTcpDisconnect() { return isOpenFullTcpDisconnect; }
+ public boolean isOpenFullTcpDisconnect() {
+ return isOpenFullTcpDisconnect;
+ }
// node.discovery.* — HOCON merges into node { discovery { ... } }, auto-bound
private DiscoveryConfig discovery = new DiscoveryConfig();
@@ -62,12 +65,30 @@ public class NodeConfig {
@Setter(lombok.AccessLevel.NONE)
private long shutdownBlockCount = -1;
- public boolean isDiscoveryEnable() { return discovery.isEnable(); }
- public boolean isDiscoveryPersist() { return discovery.isPersist(); }
- public String getDiscoveryExternalIp() { return discovery.getExternal().getIp(); }
- public String getShutdownBlockTime() { return shutdownBlockTime; }
- public long getShutdownBlockHeight() { return shutdownBlockHeight; }
- public long getShutdownBlockCount() { return shutdownBlockCount; }
+ public boolean isDiscoveryEnable() {
+ return discovery.isEnable();
+ }
+
+ public boolean isDiscoveryPersist() {
+ return discovery.isPersist();
+ }
+
+ public String getDiscoveryExternalIp() {
+ return discovery.getExternal().getIp();
+ }
+
+ public String getShutdownBlockTime() {
+ return shutdownBlockTime;
+ }
+
+ public long getShutdownBlockHeight() {
+ return shutdownBlockHeight;
+ }
+
+ public long getShutdownBlockCount() {
+ return shutdownBlockCount;
+ }
+
private int inactiveThreshold = 600;
private boolean metricsEnable = false;
private int blockProducedTimeOut = 50;
@@ -95,8 +116,8 @@ public class NodeConfig {
private double activeConnectFactor = 0.1;
private double connectFactor = 0.6;
// Legacy alias `maxActiveNodesWithSameIp` has no bean field: we only peek at it via
- // section.hasPath() below. Keeping it field-less means reference.conf doesn't have to
- // ship a default that would otherwise mask the modern `maxConnectionsWithSameIp` key.
+ // section.hasPath() below. Keeping it field-less means BeanDefaults does not emit a
+ // default that would mask the modern `maxConnectionsWithSameIp` key.
// ---- Sub-beans matching config's dot-notation nested structure ----
private ListenConfig listen = new ListenConfig();
@@ -105,12 +126,29 @@ public class NodeConfig {
private SolidityConfig solidity = new SolidityConfig();
// Convenience getters for backward compatibility with applyNodeConfig
- public int getListenPort() { return listen.getPort(); }
- public int getConnectionTimeout() { return connection.getTimeout(); }
- public int getFetchBlockTimeout() { return fetchBlock.getTimeout(); }
- public int getSolidityThreads() { return solidity.getThreads(); }
- public int getChannelReadTimeout() { return channel.getRead().getTimeout(); }
- public int getValidContractProtoThreads() { return validContractProto.getThreads(); }
+ public int getListenPort() {
+ return listen.getPort();
+ }
+
+ public int getConnectionTimeout() {
+ return connection.getTimeout();
+ }
+
+ public int getFetchBlockTimeout() {
+ return fetchBlock.getTimeout();
+ }
+
+ public int getSolidityThreads() {
+ return solidity.getThreads();
+ }
+
+ public int getChannelReadTimeout() {
+ return channel.getRead().getTimeout();
+ }
+
+ public int getValidContractProtoThreads() {
+ return validContractProto.getThreads();
+ }
// ---- List fields (manually read) ----
private List<String> active = new ArrayList<>();
@@ -206,69 +244,30 @@ public static class HttpConfig {
private long maxMessageSize = 4194304;
private int maxNestingDepth = 100;
private int maxTokenCount = 100_000;
- // PBFT fields — handled manually (same naming issue as CommitteeConfig)
- // Default must match CommonParameter.pBFTHttpEnable = true
- @Getter(lombok.AccessLevel.NONE)
- @Setter(lombok.AccessLevel.NONE)
+ // pBFTEnable/pBFTPort: fromConfig() remaps "pBFTEnable"→"PBFTEnable" so
+ // ConfigBeanFactory finds these under the JavaBean-derived key name.
private boolean pBFTEnable = true;
- @Getter(lombok.AccessLevel.NONE)
- @Setter(lombok.AccessLevel.NONE)
private int pBFTPort = 8092;
-
- public boolean isPBFTEnable() {
- return pBFTEnable;
- }
-
- public void setPBFTEnable(boolean v) {
- this.pBFTEnable = v;
- }
-
- public int getPBFTPort() {
- return pBFTPort;
- }
-
- public void setPBFTPort(int v) {
- this.pBFTPort = v;
- }
}
@Getter
@Setter
public static class RpcConfig {
+
private boolean enable = true;
private int port = 50051;
private boolean solidityEnable = true;
private int solidityPort = 50061;
- // PBFT fields — handled manually
- @Getter(lombok.AccessLevel.NONE)
- @Setter(lombok.AccessLevel.NONE)
+ // pBFTEnable/pBFTPort: remapped in NodeConfig.fromConfig() (same reason as HttpConfig).
private boolean pBFTEnable = true;
- @Getter(lombok.AccessLevel.NONE)
- @Setter(lombok.AccessLevel.NONE)
private int pBFTPort = 50071;
- public boolean isPBFTEnable() {
- return pBFTEnable;
- }
-
- public void setPBFTEnable(boolean v) {
- this.pBFTEnable = v;
- }
-
- public int getPBFTPort() {
- return pBFTPort;
- }
-
- public void setPBFTPort(int v) {
- this.pBFTPort = v;
- }
-
private int thread = 0;
- private int maxConcurrentCallsPerConnection = 2147483647;
- private int flowControlWindow = 1048576;
+ private int maxConcurrentCallsPerConnection = Integer.MAX_VALUE;
+ private int flowControlWindow = 1024 * 1024;
private long maxConnectionIdleInMillis = Long.MAX_VALUE;
private long maxConnectionAgeInMillis = Long.MAX_VALUE;
- private int maxMessageSize = 4194304;
+ private int maxMessageSize = 4 * 1024 * 1024;
private int maxHeaderListSize = 8192;
private int maxRstStream = 0;
private int secondsPerWindow = 0;
@@ -280,37 +279,22 @@ public void setPBFTPort(int v) {
@Getter
@Setter
public static class JsonRpcConfig {
+
private boolean httpFullNodeEnable = false;
private int httpFullNodePort = 8545;
private boolean httpSolidityEnable = false;
private int httpSolidityPort = 8555;
- // PBFT fields — handled manually
- @Getter(lombok.AccessLevel.NONE)
- @Setter(lombok.AccessLevel.NONE)
+ // httpPBFTEnable/httpPBFTPort: setHttpPBFTEnable → property "httpPBFTEnable" — matches
+ // config key directly, no remapping needed.
private boolean httpPBFTEnable = false;
- @Getter(lombok.AccessLevel.NONE)
- @Setter(lombok.AccessLevel.NONE)
private int httpPBFTPort = 8565;
- public boolean isHttpPBFTEnable() {
- return httpPBFTEnable;
- }
-
- public void setHttpPBFTEnable(boolean v) {
- this.httpPBFTEnable = v;
- }
-
- public int getHttpPBFTPort() {
- return httpPBFTPort;
- }
-
- public void setHttpPBFTPort(int v) {
- this.httpPBFTPort = v;
- }
-
private int maxBlockRange = 5000;
private int maxSubTopics = 1000;
private int maxBlockFilterNum = 50000;
+ private int maxBatchSize = 100;
+ private int maxResponseSize = 25 * 1024 * 1024;
+ private int maxAddressSize = 1000;
private int maxLogFilterNum = 20000;
private long maxMessageSize = 4194304;
}
@@ -350,8 +334,6 @@ public static class DnsConfig {
private String awsHostZoneId = "";
}
- // Defaults come from reference.conf (loaded globally via Configuration.java)
-
// ===========================================================================
// Factory method
// ===========================================================================
@@ -363,22 +345,28 @@ public static class DnsConfig {
* solidity.threads) become nested HOCON objects and cannot be auto-bound to flat
* Java fields. They are read manually after ConfigBeanFactory binding.
*
- * PBFT-named fields in http, rpc, and jsonrpc sub-beans have the same JavaBean
- * naming issue as CommitteeConfig and are patched manually.
+ * <p>pBFT-prefixed fields in http and rpc sub-beans are remapped before binding
+ * (pBFTEnable → PBFTEnable) because consecutive uppercase letters prevent
+ * Introspector from decapitalizing the JavaBean property name.
+ * jsonrpc.httpPBFT* binds directly (httpPBFTEnable → property "httpPBFTEnable" ✓).
*
- * <p>List fields (active, passive, fastForward, disabledApi) are read manually
- * since ConfigBeanFactory expects typed bean lists, not string lists.
+ * <p>List fields (active, passive, fastForward, disabledApi) auto-bind via
+ * ConfigBeanFactory's List<String> support.
*/
public static NodeConfig fromConfig(Config config) {
- // Normalize human-readable size values (e.g. "4m") to numeric bytes so
- // ConfigBeanFactory's primitive int/long binding succeeds; same step
- // enforces non-negative and <= Integer.MAX_VALUE before bean creation
- // so failures point at the user-facing config path.
- Config section = normalizeMaxMessageSizes(config).getConfig("node");
-
- // Auto-bind all fields and sub-beans. ConfigBeanFactory fails fast with a
- // descriptive path on any `= null` value — external configs that use the
- // HOCON null keyword should fix their config rather than rely on silent coercion.
+ Config defaults = BeanDefaults.toConfig(new NodeConfig());
+ Config userSection = config.hasPath("node")
+ ? BeanDefaults.stripNullLeaves(config.getConfig("node"))
+ : ConfigFactory.empty();
+ // pBFTEnable/pBFTPort: config uses lowercase-p prefix; JavaBean derives "PBFTEnable"
+ // (consecutive uppercase prevents Introspector from decapitalizing). Remap so
+ // ConfigBeanFactory binds the user value instead of silently using the default.
+ userSection = BeanDefaults.remapKey(userSection, "http.pBFTEnable", "http.PBFTEnable");
+ userSection = BeanDefaults.remapKey(userSection, "http.pBFTPort", "http.PBFTPort");
+ userSection = BeanDefaults.remapKey(userSection, "rpc.pBFTEnable", "rpc.PBFTEnable");
+ userSection = BeanDefaults.remapKey(userSection, "rpc.pBFTPort", "rpc.PBFTPort");
+ Config section = userSection.withFallback(defaults);
+
NodeConfig nc = ConfigBeanFactory.create(section, NodeConfig.class);
// isOpenFullTcpDisconnect: boolean "is" prefix breaks JavaBean pairing
@@ -400,10 +388,11 @@ public static NodeConfig fromConfig(Config config) {
nc.maxConnectionsWithSameIp = section.getInt("maxActiveNodesWithSameIp");
}
- // Legacy key fallback: node.fullNodeAllowShieldedTransaction -> allowShieldedTransactionApi.
- // reference.conf does not ship the legacy key, so hasPath here reliably means the user
- // set it in their config. When present, it overrides the modern key.
- if (section.hasPath("fullNodeAllowShieldedTransaction")) {
+ // Legacy key fallback: node.fullNodeAllowShieldedTransaction -> allowShieldedTransactionApi.
+ // BeanDefaults does not emit this legacy key, so hasPath here reliably means the user
+ // set it in their config. When both keys are set, the modern key takes priority.
+ if (!userSection.hasPath("allowShieldedTransactionApi") &&
+ userSection.hasPath("fullNodeAllowShieldedTransaction")) {
nc.allowShieldedTransactionApi = section.getBoolean("fullNodeAllowShieldedTransaction");
logger.warn("Configuring [node.fullNodeAllowShieldedTransaction] will be deprecated. "
+ "Please use [node.allowShieldedTransactionApi] instead.");
@@ -416,7 +405,6 @@ public static NodeConfig fromConfig(Config config) {
nc.shutdownBlockCount = config.hasPath("node.shutdown.BlockCount")
? config.getLong("node.shutdown.BlockCount") : -1;
-
nc.postProcess();
return nc;
}
diff --git a/common/src/main/java/org/tron/core/config/args/Overlay.java b/common/src/main/java/org/tron/core/config/args/Overlay.java
deleted file mode 100644
index bdaa40724c7..00000000000
--- a/common/src/main/java/org/tron/core/config/args/Overlay.java
+++ /dev/null
@@ -1,22 +0,0 @@
-package org.tron.core.config.args;
-
-import lombok.Getter;
-import org.apache.commons.lang3.Range;
-
-public class Overlay {
-
- @Getter
- private int port;
-
- /**
- * Monitor port number.
- */
- public void setPort(final int port) {
- Range range = Range.between(0, 65535);
- if (!range.contains(port)) {
- throw new IllegalArgumentException("Port(" + port + ") must in [0, 65535]");
- }
-
- this.port = port;
- }
-}
diff --git a/common/src/main/java/org/tron/core/config/args/RateLimiterConfig.java b/common/src/main/java/org/tron/core/config/args/RateLimiterConfig.java
index eed5ef1898b..33f42569154 100644
--- a/common/src/main/java/org/tron/core/config/args/RateLimiterConfig.java
+++ b/common/src/main/java/org/tron/core/config/args/RateLimiterConfig.java
@@ -2,6 +2,8 @@
import com.typesafe.config.Config;
import com.typesafe.config.ConfigBeanFactory;
+import com.typesafe.config.ConfigFactory;
+import org.tron.core.config.BeanDefaults;
import java.util.ArrayList;
import java.util.List;
import lombok.Getter;
@@ -66,10 +68,12 @@ public static class RpcRateLimitItem {
private String paramString = "";
}
- // Defaults come from reference.conf (loaded globally via Configuration.java)
-
public static RateLimiterConfig fromConfig(Config config) {
- Config section = config.getConfig("rate.limiter");
+ Config defaults = BeanDefaults.toConfig(new RateLimiterConfig());
+ Config userSection = config.hasPath("rate.limiter")
+ ? BeanDefaults.stripNullLeaves(config.getConfig("rate.limiter"))
+ : ConfigFactory.empty();
+ Config section = userSection.withFallback(defaults);
return ConfigBeanFactory.create(section, RateLimiterConfig.class);
}
}
diff --git a/common/src/main/java/org/tron/core/config/args/StorageConfig.java b/common/src/main/java/org/tron/core/config/args/StorageConfig.java
index 3d7046ebae2..6981db23c72 100644
--- a/common/src/main/java/org/tron/core/config/args/StorageConfig.java
+++ b/common/src/main/java/org/tron/core/config/args/StorageConfig.java
@@ -2,7 +2,9 @@
import com.typesafe.config.Config;
import com.typesafe.config.ConfigBeanFactory;
+import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigObject;
+import org.tron.core.config.BeanDefaults;
import java.util.ArrayList;
import java.util.List;
import lombok.Getter;
@@ -31,38 +33,20 @@ public class StorageConfig {
private TxCacheConfig txCache = new TxCacheConfig();
private List properties = new ArrayList<>();
- // merkleRoot is a nested object (e.g. { reward-vi = "hash..." }) not a string.
- // Excluded from auto-binding, handled by Storage class directly.
- @Getter(lombok.AccessLevel.NONE)
- @Setter(lombok.AccessLevel.NONE)
- private Object merkleRoot;
-
// Raw storage config sub-tree, kept for setCacheStrategies/setDbRoots which
// have dynamic keys that ConfigBeanFactory cannot bind.
- @Getter(lombok.AccessLevel.NONE)
@Setter(lombok.AccessLevel.NONE)
private Config rawStorageConfig;
- public Config getRawStorageConfig() {
- return rawStorageConfig;
- }
-
// LevelDB per-database option overrides (default, defaultM, defaultL).
// Excluded from auto-binding: optional partial overrides that ConfigBeanFactory cannot handle.
- @Getter(lombok.AccessLevel.NONE)
@Setter(lombok.AccessLevel.NONE)
private DbOptionOverride defaultDbOption;
- @Getter(lombok.AccessLevel.NONE)
@Setter(lombok.AccessLevel.NONE)
private DbOptionOverride defaultMDbOption;
- @Getter(lombok.AccessLevel.NONE)
@Setter(lombok.AccessLevel.NONE)
private DbOptionOverride defaultLDbOption;
- public DbOptionOverride getDefaultDbOption() { return defaultDbOption; }
- public DbOptionOverride getDefaultMDbOption() { return defaultMDbOption; }
- public DbOptionOverride getDefaultLDbOption() { return defaultLDbOption; }
-
@Getter
@Setter
public static class DbConfig {
@@ -194,11 +178,16 @@ public static class PropertyConfig {
private int maxOpenFiles = 100;
}
- // Defaults come from reference.conf (loaded globally via Configuration.java)
-
public static StorageConfig fromConfig(Config config) {
- Config section = config.getConfig("storage");
-
+ Config defaults = BeanDefaults.toConfig(new StorageConfig());
+ Config userSection = config.hasPath("storage")
+ ? BeanDefaults.stripNullLeaves(config.getConfig("storage"))
+ : ConfigFactory.empty();
+ // User's storage section takes priority; defaults fill in any omitted scalar keys.
+ // readDbOption() uses hasPath() on the merged section, so user-set optional keys
+ // (default, defaultM, defaultL) are still detected correctly because they are
+ // absent from BeanDefaults and only present when the user explicitly set them.
+ Config section = userSection.withFallback(defaults);
StorageConfig sc = ConfigBeanFactory.create(section, StorageConfig.class);
sc.rawStorageConfig = section;
diff --git a/common/src/main/java/org/tron/core/config/args/VmConfig.java b/common/src/main/java/org/tron/core/config/args/VmConfig.java
index 00ba85aa6cc..39b5449d78e 100644
--- a/common/src/main/java/org/tron/core/config/args/VmConfig.java
+++ b/common/src/main/java/org/tron/core/config/args/VmConfig.java
@@ -2,6 +2,8 @@
import com.typesafe.config.Config;
import com.typesafe.config.ConfigBeanFactory;
+import com.typesafe.config.ConfigFactory;
+import org.tron.core.config.BeanDefaults;
import lombok.AccessLevel;
import lombok.Getter;
import lombok.Setter;
@@ -38,15 +40,14 @@ public class VmConfig {
@Setter(AccessLevel.NONE)
private long constantCallTimeoutMs = 0L;
- /**
- * Create VmConfig from the "vm" section of the application config.
- * Defaults come from reference.conf (loaded globally via Configuration.java),
- * so no per-bean DEFAULTS needed.
- */
public static VmConfig fromConfig(Config config) {
- Config vmSection = config.getConfig("vm");
- VmConfig vmConfig = ConfigBeanFactory.create(vmSection, VmConfig.class);
- vmConfig.postProcess(vmSection);
+ Config defaults = BeanDefaults.toConfig(new VmConfig());
+ Config userSection = config.hasPath("vm")
+ ? BeanDefaults.stripNullLeaves(config.getConfig("vm"))
+ : ConfigFactory.empty();
+ Config section = userSection.withFallback(defaults);
+ VmConfig vmConfig = ConfigBeanFactory.create(section, VmConfig.class);
+ vmConfig.postProcess(userSection);
return vmConfig;
}
diff --git a/common/src/main/resources/reference.conf b/common/src/main/resources/reference.conf
index b2e9898f27b..76225aa0bed 100644
--- a/common/src/main/resources/reference.conf
+++ b/common/src/main/resources/reference.conf
@@ -380,18 +380,20 @@ node {
httpPBFTEnable = false
httpPBFTPort = 8565
- # Maximum blocks range for eth_getLogs, >0 otherwise no limit
+ # The maximum blocks range to retrieve logs for eth_getLogs, default: 5000, <=0 means no limit
maxBlockRange = 5000
-
- # Maximum topics within a topic criteria, >0 otherwise no limit
+ # Allowed max address count in filter request, default: 1000, <=0 means no limit
+ maxAddressSize = 1000
+ # The maximum number of allowed topics within a topic criteria, default: 1000, <=0 means no limit
maxSubTopics = 1000
-
- # Maximum number for blockFilter. >0 otherwise no limit
+ # Allowed maximum number for blockFilter, default: 50000, <=0 means no limit
maxBlockFilterNum = 50000
-
- # Maximum number of concurrent eth_newFilter registrations, >0 otherwise no limit
+ # Allowed batch size, default: 100, <=0 means no limit
+ maxBatchSize = 100
+ # Allowed max response byte size, default: 26214400 (25 MB), <=0 means no limit
+ maxResponseSize = 26214400
+ # Allowed maximum number for newFilter, default: 20000, <=0 means no limit
maxLogFilterNum = 20000
-
# Maximum JSON-RPC request body size, default 4MB. Independent from rpc.maxMessageSize.
maxMessageSize = 4M
}
diff --git a/common/src/test/java/org/tron/core/config/BeanDefaultsTest.java b/common/src/test/java/org/tron/core/config/BeanDefaultsTest.java
new file mode 100644
index 00000000000..fff457e3e00
--- /dev/null
+++ b/common/src/test/java/org/tron/core/config/BeanDefaultsTest.java
@@ -0,0 +1,235 @@
+package org.tron.core.config;
+
+import com.typesafe.config.Config;
+import com.typesafe.config.ConfigBeanFactory;
+import com.typesafe.config.ConfigFactory;
+import org.junit.Assert;
+import org.junit.Test;
+import org.tron.core.config.args.CommitteeConfig;
+import org.tron.core.config.args.MetricsConfig;
+import org.tron.core.config.args.NodeConfig;
+import org.tron.core.config.args.RateLimiterConfig;
+import org.tron.core.config.args.StorageConfig;
+import org.tron.core.config.args.VmConfig;
+
+/**
+ * Verifies that BeanDefaults.toConfig() produces a Config that:
+ * 1. Contains the correct default values from Java field initializers.
+ * 2. Satisfies ConfigBeanFactory.create() without ConfigException.Missing.
+ * 3. Is properly overridden when a user value is supplied via withFallback().
+ */
+public class BeanDefaultsTest {
+
+ // ── VmConfig ─────────────────────────────────────────────────────────────
+
+ @Test
+ public void vmConfig_defaultValues() {
+ Config cfg = BeanDefaults.toConfig(new VmConfig());
+
+ Assert.assertFalse(cfg.getBoolean("supportConstant"));
+ Assert.assertEquals(100_000_000L, cfg.getLong("maxEnergyLimitForConstant"));
+ Assert.assertEquals(500, cfg.getInt("lruCacheSize"));
+ Assert.assertEquals(0.0, cfg.getDouble("minTimeRatio"), 0.0);
+ Assert.assertEquals(5.0, cfg.getDouble("maxTimeRatio"), 0.0);
+ Assert.assertEquals(10, cfg.getInt("longRunningTime"));
+ Assert.assertFalse(cfg.getBoolean("estimateEnergy"));
+ Assert.assertEquals(3, cfg.getInt("estimateEnergyMaxRetry"));
+ Assert.assertFalse(cfg.getBoolean("vmTrace"));
+ Assert.assertFalse(cfg.getBoolean("saveInternalTx"));
+ Assert.assertFalse(cfg.getBoolean("saveFeaturedInternalTx"));
+ Assert.assertFalse(cfg.getBoolean("saveCancelAllUnfreezeV2Details"));
+ }
+
+ @Test
+ public void vmConfig_roundTrip_withConfigBeanFactory() {
+ Config defaults = BeanDefaults.toConfig(new VmConfig());
+ // ConfigBeanFactory must not throw ConfigException.Missing
+ VmConfig vm = ConfigBeanFactory.create(defaults, VmConfig.class);
+ Assert.assertFalse(vm.isSupportConstant());
+ Assert.assertEquals(500, vm.getLruCacheSize());
+ }
+
+ @Test
+ public void vmConfig_userValueOverridesDefault() {
+ Config user = ConfigFactory.parseString("lruCacheSize = 999");
+ Config merged = user.withFallback(BeanDefaults.toConfig(new VmConfig()));
+ VmConfig vm = ConfigBeanFactory.create(merged, VmConfig.class);
+ Assert.assertEquals(999, vm.getLruCacheSize());
+ // other fields keep defaults
+ Assert.assertEquals(10, vm.getLongRunningTime());
+ }
+
+ // ── NodeConfig nested bean ────────────────────────────────────────────────
+
+ @Test
+ public void nodeConfig_defaultScalars() {
+ Config cfg = BeanDefaults.toConfig(new NodeConfig());
+
+ Assert.assertEquals(30, cfg.getInt("maxConnections"));
+ Assert.assertEquals(8, cfg.getInt("minConnections"));
+ Assert.assertEquals(1000, cfg.getInt("maxTps"));
+ Assert.assertTrue(cfg.getBoolean("openPrintLog"));
+ Assert.assertFalse(cfg.getBoolean("walletExtensionApi"));
+ }
+
+ @Test
+ public void nodeConfig_nestedBeans_present() {
+ Config cfg = BeanDefaults.toConfig(new NodeConfig());
+
+ // listen.port should exist as a nested object
+ Assert.assertTrue(cfg.hasPath("listen"));
+ Assert.assertEquals(18888, cfg.getInt("listen.port"));
+
+ // discovery.enable
+ Assert.assertTrue(cfg.hasPath("discovery"));
+ Assert.assertFalse(cfg.getBoolean("discovery.enable"));
+
+ // http.fullNodeEnable
+ Assert.assertTrue(cfg.hasPath("http"));
+ Assert.assertTrue(cfg.getBoolean("http.fullNodeEnable"));
+ Assert.assertEquals(8090, cfg.getInt("http.fullNodePort"));
+
+ // rpc.enable
+ Assert.assertTrue(cfg.hasPath("rpc"));
+ Assert.assertTrue(cfg.getBoolean("rpc.enable"));
+ Assert.assertEquals(50051, cfg.getInt("rpc.port"));
+ }
+
+ @Test
+ public void nodeConfig_listFields_empty() {
+ Config cfg = BeanDefaults.toConfig(new NodeConfig());
+ Assert.assertTrue(cfg.getList("active").isEmpty());
+ Assert.assertTrue(cfg.getList("passive").isEmpty());
+ Assert.assertTrue(cfg.getList("fastForward").isEmpty());
+ Assert.assertTrue(cfg.getList("disabledApi").isEmpty());
+ }
+
+ @Test
+ public void nodeConfig_pBFTFields_usePropertyNameAsIs() {
+ Config cfg = BeanDefaults.toConfig(new NodeConfig());
+ // setPBFTEnable → Introspector property name "PBFTEnable" (capital P, two consecutive
+ // uppercase letters → JavaBean spec forbids decapitalization).
+ // ConfigBeanFactory looks up configProps.get("PBFTEnable"), so the map key must match.
+ Assert.assertTrue(cfg.hasPath("http.PBFTEnable"));
+ Assert.assertTrue(cfg.getBoolean("http.PBFTEnable"));
+ Assert.assertEquals(8092, cfg.getInt("http.PBFTPort"));
+
+ Assert.assertTrue(cfg.hasPath("rpc.PBFTEnable"));
+ Assert.assertEquals(50071, cfg.getInt("rpc.PBFTPort"));
+ }
+
+ @Test
+ public void nodeConfig_roundTrip_withConfigBeanFactory() {
+ Config defaults = BeanDefaults.toConfig(new NodeConfig());
+ // Must not throw — all keys present
+ NodeConfig nc = ConfigBeanFactory.create(defaults, NodeConfig.class);
+ Assert.assertEquals(30, nc.getMaxConnections());
+ Assert.assertEquals(18888, nc.getListenPort());
+ Assert.assertTrue(nc.getRpc().isEnable());
+ }
+
+ // ── StorageConfig nested bean ─────────────────────────────────────────────
+
+ @Test
+ public void storageConfig_defaultValues() {
+ Config cfg = BeanDefaults.toConfig(new StorageConfig());
+
+ Assert.assertEquals("LEVELDB", cfg.getString("db.engine"));
+ Assert.assertFalse(cfg.getBoolean("db.sync"));
+ Assert.assertEquals("database", cfg.getString("db.directory"));
+ Assert.assertEquals(7, cfg.getInt("dbSettings.levelNumber"));
+ Assert.assertEquals(1, cfg.getInt("checkpoint.version"));
+ Assert.assertTrue(cfg.getBoolean("checkpoint.sync"));
+ Assert.assertEquals(1, cfg.getInt("snapshot.maxFlushCount"));
+ Assert.assertTrue(cfg.getList("properties").isEmpty());
+ }
+
+ @Test
+ public void storageConfig_roundTrip_withConfigBeanFactory() {
+ Config defaults = BeanDefaults.toConfig(new StorageConfig());
+ StorageConfig sc = ConfigBeanFactory.create(defaults, StorageConfig.class);
+ Assert.assertEquals("LEVELDB", sc.getDb().getEngine());
+ Assert.assertEquals(7, sc.getDbSettings().getLevelNumber());
+ }
+
+ // ── MetricsConfig nested sub-beans ───────────────────────────────────────
+
+ @Test
+ public void metricsConfig_defaultValues() {
+ Config cfg = BeanDefaults.toConfig(new MetricsConfig());
+
+ Assert.assertFalse(cfg.getBoolean("storageEnable"));
+ Assert.assertFalse(cfg.getBoolean("prometheus.enable"));
+ Assert.assertEquals(9527, cfg.getInt("prometheus.port"));
+ Assert.assertEquals("", cfg.getString("influxdb.ip"));
+ Assert.assertEquals(8086, cfg.getInt("influxdb.port"));
+ Assert.assertEquals("metrics", cfg.getString("influxdb.database"));
+ Assert.assertEquals(10, cfg.getInt("influxdb.metricsReportInterval"));
+ }
+
+ @Test
+ public void metricsConfig_roundTrip() {
+ Config defaults = BeanDefaults.toConfig(new MetricsConfig());
+ MetricsConfig mc = ConfigBeanFactory.create(defaults, MetricsConfig.class);
+ Assert.assertEquals(9527, mc.getPrometheus().getPort());
+ }
+
+ // ── RateLimiterConfig ────────────────────────────────────────────────────
+
+ @Test
+ public void rateLimiterConfig_defaultValues() {
+ Config cfg = BeanDefaults.toConfig(new RateLimiterConfig());
+
+ Assert.assertEquals(50000, cfg.getInt("global.qps"));
+ Assert.assertEquals(10000, cfg.getInt("global.ip.qps"));
+ Assert.assertEquals(1000, cfg.getInt("global.api.qps"));
+ Assert.assertTrue(cfg.getList("http").isEmpty());
+ Assert.assertTrue(cfg.getList("rpc").isEmpty());
+ }
+
+ @Test
+ public void rateLimiterConfig_roundTrip() {
+ Config defaults = BeanDefaults.toConfig(new RateLimiterConfig());
+ RateLimiterConfig rl = ConfigBeanFactory.create(defaults, RateLimiterConfig.class);
+ Assert.assertEquals(50000, rl.getGlobal().getQps());
+ Assert.assertTrue(rl.getHttp().isEmpty());
+ }
+
+ // ── CommitteeConfig ───────────────────────────────────────────────────────
+
+ @Test
+ public void committeeConfig_allZeroDefaults() {
+ Config cfg = BeanDefaults.toConfig(new CommitteeConfig());
+
+ Assert.assertEquals(0L, cfg.getLong("allowCreationOfContracts"));
+ Assert.assertEquals(0L, cfg.getLong("allowMultiSign"));
+ Assert.assertEquals(0L, cfg.getLong("allowTvmCancun"));
+ }
+
+ @Test
+ public void committeeConfig_roundTrip() {
+ Config defaults = BeanDefaults.toConfig(new CommitteeConfig());
+ CommitteeConfig cc = ConfigBeanFactory.create(defaults, CommitteeConfig.class);
+ Assert.assertEquals(0L, cc.getAllowCreationOfContracts());
+ }
+
+ @Test
+ public void stripNullLeaves_removesNullPaths() {
+ Config cfg = ConfigFactory.parseString("a = null\nb = 1\nc.d = null\nc.e = 2");
+ Config stripped = BeanDefaults.stripNullLeaves(cfg);
+ Assert.assertFalse(stripped.hasPath("a"));
+ Assert.assertTrue(stripped.hasPath("b"));
+ Assert.assertFalse(stripped.hasPath("c.d"));
+ Assert.assertTrue(stripped.hasPath("c.e"));
+ }
+
+ @Test
+ public void nodeConfig_fromConfig_toleratesNullExternalIp() {
+ // Legacy configs used "node.discovery.external.ip = null" — must not throw.
+ Config cfg = ConfigFactory.parseString(
+ "node { discovery { external { ip = null } } }");
+ NodeConfig nc = NodeConfig.fromConfig(cfg);
+ Assert.assertNotNull(nc);
+ Assert.assertEquals("", nc.getDiscoveryExternalIp());
+ }
+}
diff --git a/common/src/test/java/org/tron/core/config/args/EventConfigTest.java b/common/src/test/java/org/tron/core/config/args/EventConfigTest.java
index 361d9f48581..1aaca42f6fc 100644
--- a/common/src/test/java/org/tron/core/config/args/EventConfigTest.java
+++ b/common/src/test/java/org/tron/core/config/args/EventConfigTest.java
@@ -22,11 +22,11 @@ private static Config withRef() {
public void testDefaults() {
Config empty = withRef();
EventConfig ec = EventConfig.fromConfig(empty);
- // reference.conf has event.subscribe with enable=false, topics with 7 entries
+ // BeanDefaults provides scalar defaults; topics list is empty by default (user must configure)
assertFalse(ec.isEnable());
assertEquals(0, ec.getVersion());
assertEquals("", ec.getPath());
- assertFalse(ec.getTopics().isEmpty()); // reference.conf has default topic entries
+ assertTrue(ec.getTopics().isEmpty());
}
@Test
diff --git a/common/src/test/java/org/tron/core/config/args/GenesisConfigTest.java b/common/src/test/java/org/tron/core/config/args/GenesisConfigTest.java
index 5e653a79b7f..4f3e8829ade 100644
--- a/common/src/test/java/org/tron/core/config/args/GenesisConfigTest.java
+++ b/common/src/test/java/org/tron/core/config/args/GenesisConfigTest.java
@@ -22,10 +22,10 @@ private static Config withRef() {
public void testDefaults() {
Config empty = withRef();
GenesisConfig gc = GenesisConfig.fromConfig(empty);
- // reference.conf has genesis.block with timestamp, parentHash, assets, witnesses
- assertEquals("0", gc.getTimestamp());
- assertFalse(gc.getAssets().isEmpty()); // reference.conf has seed accounts
- assertFalse(gc.getWitnesses().isEmpty()); // reference.conf has seed witnesses
+ // BeanDefaults: timestamp/parentHash default to "", assets/witnesses to empty lists
+ assertEquals("", gc.getTimestamp());
+ assertTrue(gc.getAssets().isEmpty());
+ assertTrue(gc.getWitnesses().isEmpty());
}
@Test
diff --git a/common/src/test/java/org/tron/core/config/args/NodeConfigTest.java b/common/src/test/java/org/tron/core/config/args/NodeConfigTest.java
index a52c51c1ba4..5dbc161c4f8 100644
--- a/common/src/test/java/org/tron/core/config/args/NodeConfigTest.java
+++ b/common/src/test/java/org/tron/core/config/args/NodeConfigTest.java
@@ -105,10 +105,10 @@ public void testRpcDefaultsFromReference() {
NodeConfig.RpcConfig rpc = nc.getRpc();
// reference.conf provides actual final defaults, no sentinel conversion needed
- assertEquals(2147483647, rpc.getMaxConcurrentCallsPerConnection());
+ assertEquals(Integer.MAX_VALUE, rpc.getMaxConcurrentCallsPerConnection());
assertEquals(1048576, rpc.getFlowControlWindow());
- assertEquals(9223372036854775807L, rpc.getMaxConnectionIdleInMillis());
- assertEquals(9223372036854775807L, rpc.getMaxConnectionAgeInMillis());
+ assertEquals(Long.MAX_VALUE, rpc.getMaxConnectionIdleInMillis());
+ assertEquals(Long.MAX_VALUE, rpc.getMaxConnectionAgeInMillis());
assertEquals(4194304, rpc.getMaxMessageSize());
assertEquals(8192, rpc.getMaxHeaderListSize());
assertEquals(1, rpc.getMinEffectiveConnection());
@@ -307,13 +307,13 @@ public void testShieldedApiLegacyKeyRespected() {
}
@Test
- public void testShieldedApiLegacyKeyTakesPriorityOverModern() {
- // Consistent with maxActiveNodesWithSameIp: legacy key presence wins over modern.
+ public void testShieldedApiModernKeyTakesPriorityOverLegacy() {
+ // When both keys are present, allowShieldedTransactionApi (modern) wins.
NodeConfig nc = NodeConfig.fromConfig(
withRef("node {\n"
+ " allowShieldedTransactionApi = false\n"
+ " fullNodeAllowShieldedTransaction = true\n"
+ "}"));
- assertTrue(nc.isAllowShieldedTransactionApi());
+ assertFalse(nc.isAllowShieldedTransactionApi());
}
}
diff --git a/framework/build.gradle b/framework/build.gradle
index fd59d3cc4e7..0ce33f253cf 100644
--- a/framework/build.gradle
+++ b/framework/build.gradle
@@ -56,6 +56,7 @@ dependencies {
}
testImplementation group: 'org.springframework', name: 'spring-test', version: "${springVersion}"
+ testImplementation group: 'javax.portlet', name: 'portlet-api', version: '3.0.1'
implementation group: 'org.zeromq', name: 'jeromq', version: '0.5.3'
api project(":chainbase")
api project(":protocol")
diff --git a/framework/src/main/java/org/tron/core/config/args/Args.java b/framework/src/main/java/org/tron/core/config/args/Args.java
index a97792f1a19..68cb9740f16 100644
--- a/framework/src/main/java/org/tron/core/config/args/Args.java
+++ b/framework/src/main/java/org/tron/core/config/args/Args.java
@@ -61,7 +61,6 @@
import org.tron.p2p.P2pConfig;
import org.tron.p2p.dns.update.DnsType;
import org.tron.p2p.dns.update.PublishConfig;
-import org.tron.p2p.utils.NetUtil;
import org.tron.program.Version;
@Slf4j(topic = "app")
@@ -559,6 +558,9 @@ private static void applyNodeConfig(NodeConfig nc) {
PARAMETER.jsonRpcMaxBlockRange = jsonrpc.getMaxBlockRange();
PARAMETER.jsonRpcMaxSubTopics = jsonrpc.getMaxSubTopics();
PARAMETER.jsonRpcMaxBlockFilterNum = jsonrpc.getMaxBlockFilterNum();
+ PARAMETER.jsonRpcMaxBatchSize = jsonrpc.getMaxBatchSize();
+ PARAMETER.jsonRpcMaxResponseSize = jsonrpc.getMaxResponseSize();
+ PARAMETER.jsonRpcMaxAddressSize = jsonrpc.getMaxAddressSize();
PARAMETER.jsonRpcMaxLogFilterNum = jsonrpc.getMaxLogFilterNum();
PARAMETER.jsonRpcMaxMessageSize = jsonrpc.getMaxMessageSize();
diff --git a/framework/src/main/java/org/tron/core/services/filter/BufferedResponseWrapper.java b/framework/src/main/java/org/tron/core/services/filter/BufferedResponseWrapper.java
new file mode 100644
index 00000000000..7076746b2a0
--- /dev/null
+++ b/framework/src/main/java/org/tron/core/services/filter/BufferedResponseWrapper.java
@@ -0,0 +1,178 @@
+package org.tron.core.services.filter;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.io.PrintWriter;
+import java.nio.charset.StandardCharsets;
+import javax.servlet.ServletOutputStream;
+import javax.servlet.WriteListener;
+import javax.servlet.http.HttpServletResponse;
+import javax.servlet.http.HttpServletResponseWrapper;
+import lombok.Getter;
+
+/**
+ * Buffers the response body without writing to the underlying response,
+ * so the caller can replay it after the handler returns.
+ *
+ * If {@code maxBytes > 0} and the response would exceed that limit, the
+ * {@link #isOverflow()} flag is set instead of throwing. The caller should check this flag after
+ * the handler returns and write its own error response when true.
+ *
+ *
+ * <p>Header-mutating methods ({@code setStatus}, {@code setContentType}) are buffered here and
+ * only forwarded to the real response via {@link #commitToResponse()}.
+ */
+public class BufferedResponseWrapper extends HttpServletResponseWrapper {
+
+ private final HttpServletResponse actual;
+ private final ByteArrayOutputStream buffer = new ByteArrayOutputStream();
+ private final int maxBytes;
+ private int status = HttpServletResponse.SC_OK;
+ private String contentType;
+ private boolean committed = false;
+ @Getter
+ private volatile boolean overflow = false;
+
+ private final ServletOutputStream outputStream = new ServletOutputStream() {
+ @Override
+ public void write(int b) {
+ if (overflow) {
+ return;
+ }
+ if (maxBytes > 0 && buffer.size() >= maxBytes) {
+ markOverflow();
+ return;
+ }
+ buffer.write(b);
+ }
+
+ @Override
+ public void write(byte[] b, int off, int len) {
+ if (overflow) {
+ return;
+ }
+ if (maxBytes > 0 && buffer.size() + len > maxBytes) {
+ markOverflow();
+ return;
+ }
+ buffer.write(b, off, len);
+ }
+
+ @Override
+ public boolean isReady() {
+ return true;
+ }
+
+ @Override
+ public void setWriteListener(WriteListener writeListener) {
+ }
+ };
+
+ private final PrintWriter writer =
+ new PrintWriter(new OutputStreamWriter(outputStream, StandardCharsets.UTF_8), true);
+
+ /**
+ * @param response the wrapped response
+ * @param maxBytes max allowed response bytes; {@code 0} means no limit
+ */
+ public BufferedResponseWrapper(HttpServletResponse response, int maxBytes) {
+ super(response);
+ this.actual = response;
+ this.maxBytes = maxBytes;
+ }
+
+ private void markOverflow() {
+ overflow = true;
+ buffer.reset();
+ }
+
+ /**
+ * Early-detection path: if the framework reports the full content length before writing any
+ * bytes, we can flag overflow without buffering anything.
+ */
+ @Override
+ public void setContentLength(int len) {
+ if (maxBytes > 0 && len > maxBytes) {
+ markOverflow();
+ }
+ }
+
+ @Override
+ public void setContentLengthLong(long len) {
+ if (maxBytes > 0 && len > maxBytes) {
+ markOverflow();
+ }
+ }
+
+ @Override
+ public int getStatus() {
+ return this.status;
+ }
+
+ @Override
+ public void setStatus(int sc) {
+ this.status = sc;
+ }
+
+ @Override
+ public void setHeader(String name, String value) {
+ if ("content-length".equalsIgnoreCase(name)) {
+ try {
+ setContentLengthLong(Long.parseLong(value));
+ } catch (NumberFormatException ignored) {
+ // malformed value, skip overflow check
+ }
+ } else {
+ super.setHeader(name, value);
+ }
+ }
+
+ @Override
+ public void addHeader(String name, String value) {
+ if ("content-length".equalsIgnoreCase(name)) {
+ try {
+ setContentLengthLong(Long.parseLong(value));
+ } catch (NumberFormatException ignored) {
+ // malformed value, skip overflow check
+ }
+ } else {
+ super.addHeader(name, value);
+ }
+ }
+
+ @Override
+ public void setContentType(String type) {
+ this.contentType = type;
+ }
+
+ @Override
+ public ServletOutputStream getOutputStream() {
+ return outputStream;
+ }
+
+ @Override
+ public PrintWriter getWriter() {
+ return writer;
+ }
+
+ public void commitToResponse() throws IOException {
+ if (committed) {
+ throw new IllegalStateException("commitToResponse() already called");
+ }
+ committed = true;
+ // Flush the PrintWriter's OutputStreamWriter encoder into our ByteArrayOutputStream.
+ // PrintWriter(autoFlush=true) only auto-flushes on println/printf/format, not print/write,
+ // so bytes can remain buffered in the encoder until an explicit flush.
+ writer.flush();
+ if (overflow) {
+ return;
+ }
+ if (contentType != null) {
+ actual.setContentType(contentType);
+ }
+ actual.setStatus(status);
+ actual.setContentLength(buffer.size());
+ buffer.writeTo(actual.getOutputStream());
+ actual.getOutputStream().flush();
+ }
+}
diff --git a/framework/src/main/java/org/tron/core/services/filter/CachedBodyRequestWrapper.java b/framework/src/main/java/org/tron/core/services/filter/CachedBodyRequestWrapper.java
new file mode 100644
index 00000000000..683fe849f71
--- /dev/null
+++ b/framework/src/main/java/org/tron/core/services/filter/CachedBodyRequestWrapper.java
@@ -0,0 +1,97 @@
+package org.tron.core.services.filter;
+
+import java.io.BufferedReader;
+import java.io.ByteArrayInputStream;
+import java.io.InputStreamReader;
+import java.nio.charset.Charset;
+import java.nio.charset.IllegalCharsetNameException;
+import java.nio.charset.StandardCharsets;
+import java.nio.charset.UnsupportedCharsetException;
+import javax.servlet.ReadListener;
+import javax.servlet.ServletInputStream;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletRequestWrapper;
+
+/**
+ * Wraps a request to replay a pre-read body from a byte array,
+ * allowing the body to be read more than once.
+ *
+ *
+ * <p>Scope: designed for synchronous, raw-body POST endpoints
+ * (e.g. JSON-RPC). It is NOT compatible with:
+ * <ul>
+ * <li>{@code application/x-www-form-urlencoded} — cached body cannot back
+ * {@code getParameter*}.</li>
+ * <li>multipart — {@code getPart()/getParts()} read from the original
+ * (already-consumed) stream.</li>
+ * <li>async non-blocking I/O — see {@code setReadListener}.</li>
+ * <li>request dispatch / forward chains.</li>
+ * </ul>
+ *
+ * Multiple calls to {@code getInputStream()} (or {@code getReader()})
+ * are allowed and each returns a fresh stream over the same cached body —
+ * a deliberate extension of the standard servlet contract.
+ */
+public class CachedBodyRequestWrapper extends HttpServletRequestWrapper {
+
+ private enum BodyAccessor { NONE, STREAM, READER }
+
+ private final byte[] body;
+ private BodyAccessor accessor = BodyAccessor.NONE;
+
+ public CachedBodyRequestWrapper(HttpServletRequest request, byte[] body) {
+ super(request);
+ this.body = body;
+ }
+
+ @Override
+ public ServletInputStream getInputStream() {
+ if (accessor == BodyAccessor.READER) {
+ throw new IllegalStateException("getReader() has already been called on this request");
+ }
+ accessor = BodyAccessor.STREAM;
+ final ByteArrayInputStream bais = new ByteArrayInputStream(body);
+ return new ServletInputStream() {
+ @Override
+ public int read() {
+ return bais.read();
+ }
+
+ @Override
+ public int read(byte[] b, int off, int len) {
+ return bais.read(b, off, len);
+ }
+
+ @Override
+ public boolean isFinished() {
+ return bais.available() == 0;
+ }
+
+ @Override
+ public boolean isReady() {
+ return true;
+ }
+
+ @Override
+ public void setReadListener(ReadListener readListener) {
+ throw new UnsupportedOperationException(
+ "async I/O is not supported on cached body");
+ }
+ };
+ }
+
+ @Override
+ public BufferedReader getReader() {
+ if (accessor == BodyAccessor.STREAM) {
+ throw new IllegalStateException("getInputStream() has already been called on this request");
+ }
+ accessor = BodyAccessor.READER;
+ String encoding = getCharacterEncoding();
+ Charset charset;
+ try {
+ charset = encoding != null ? Charset.forName(encoding) : StandardCharsets.UTF_8;
+ } catch (IllegalCharsetNameException | UnsupportedCharsetException ex) {
+ charset = StandardCharsets.UTF_8;
+ }
+ return new BufferedReader(new InputStreamReader(new ByteArrayInputStream(body), charset));
+ }
+}
diff --git a/framework/src/main/java/org/tron/core/services/jsonrpc/JsonRpcServlet.java b/framework/src/main/java/org/tron/core/services/jsonrpc/JsonRpcServlet.java
index 104a0e9e470..2093930ca98 100644
--- a/framework/src/main/java/org/tron/core/services/jsonrpc/JsonRpcServlet.java
+++ b/framework/src/main/java/org/tron/core/services/jsonrpc/JsonRpcServlet.java
@@ -1,10 +1,18 @@
package org.tron.core.services.jsonrpc;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.ArrayNode;
+import com.fasterxml.jackson.databind.node.ObjectNode;
import com.googlecode.jsonrpc4j.HttpStatusCodeProvider;
import com.googlecode.jsonrpc4j.JsonRpcInterceptor;
import com.googlecode.jsonrpc4j.JsonRpcServer;
import com.googlecode.jsonrpc4j.ProxyUtil;
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
import java.io.IOException;
+import java.io.InputStream;
import java.util.Collections;
import javax.servlet.ServletConfig;
import javax.servlet.ServletException;
@@ -14,15 +22,30 @@
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import org.tron.common.parameter.CommonParameter;
-import org.tron.core.Wallet;
-import org.tron.core.db.Manager;
-import org.tron.core.services.NodeInfoService;
+import org.tron.core.services.filter.BufferedResponseWrapper;
+import org.tron.core.services.filter.CachedBodyRequestWrapper;
import org.tron.core.services.http.RateLimiterServlet;
@Component
@Slf4j(topic = "API")
public class JsonRpcServlet extends RateLimiterServlet {
+ // Shared Jackson mapper; ObjectMapper is thread-safe once configuration stops,
+ // and no configuration is applied after construction here.
+ private static final ObjectMapper MAPPER = new ObjectMapper();
+
+ // JSON-RPC error codes used by this servlet. -32700/-32600/-32603 are the
+ // standard codes from the JSON-RPC 2.0 spec; -32005 and -32003 are
+ // implementation-defined limit errors (Ethereum-node style) — TODO confirm
+ // they match what downstream clients expect.
+ private enum JsonRpcError {
+ PARSE_ERROR(-32700),
+ INVALID_REQUEST(-32600),
+ INTERNAL_ERROR(-32603),
+ EXCEED_LIMIT(-32005),
+ RESPONSE_TOO_LARGE(-32003);
+
+ private final int code;
+
+ JsonRpcError(int code) {
+ this.code = code;
+ }
+ }
+
private JsonRpcServer rpcServer = null;
@Autowired
@@ -66,6 +89,182 @@ public Integer getJsonRpcCode(int httpStatusCode) {
 @Override
 protected void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {
- rpcServer.handle(req, resp);
+ // Entry point for JSON-RPC over HTTP POST: buffer the body once, parse it,
+ // then route to single-request or batch handling with configured limits.
+ CommonParameter parameter = CommonParameter.getInstance();
+
+ // Transport IOException from readBody propagates as HTTP 500 (genuine IO failure).
+ byte[] body = readBody(req.getInputStream());
+ JsonNode rootNode;
+ try {
+ rootNode = MAPPER.readTree(body);
+ // readTree on an empty body yields MissingNode — treat it as a parse error.
+ if (rootNode == null || rootNode.isMissingNode()) {
+ writeJsonRpcError(resp, JsonRpcError.PARSE_ERROR, "Parse error", null, false);
+ return;
+ }
+ } catch (JsonProcessingException e) {
+ writeJsonRpcError(resp, JsonRpcError.PARSE_ERROR, "Parse error", null, false);
+ return;
+ }
+
+ boolean isBatch = rootNode.isArray();
+ // JSON-RPC 2.0 §6: an empty batch array is itself an Invalid Request
+ // (answered with a single error object, hence isBatch=false).
+ if (isBatch && rootNode.isEmpty()) {
+ writeJsonRpcError(resp, JsonRpcError.INVALID_REQUEST, "Invalid Request", null, false);
+ return;
+ }
+ int batchSize = parameter.getJsonRpcMaxBatchSize();
+ // A non-positive configured limit disables the batch-size check.
+ // NOTE(review): this over-limit error is wrapped in an array (isBatch=true),
+ // whereas spec §6 answers an unprocessable batch with a single error object —
+ // confirm the array form is intentional for client compatibility.
+ if (isBatch && batchSize > 0 && rootNode.size() > batchSize) {
+ writeJsonRpcError(resp, JsonRpcError.EXCEED_LIMIT,
+ "Batch size " + rootNode.size() + " exceeds the limit of " + batchSize, null, true);
+ return;
+ }
+
+ int maxResponseSize = parameter.getJsonRpcMaxResponseSize();
+ if (isBatch) {
+ handleBatch(resp, rootNode, maxResponseSize);
+ } else {
+ // Single requests reuse the raw body via a replayable request wrapper.
+ handleSingle(req, resp, rootNode, body, maxResponseSize);
+ }
+ }
+
+ // Handles one (non-batch) request: replays the cached body through jsonrpc4j
+ // while buffering the response, so an oversized result can be replaced by a
+ // RESPONSE_TOO_LARGE error instead of being streamed to the client.
+ private void handleSingle(HttpServletRequest req, HttpServletResponse resp,
+ JsonNode rootNode, byte[] body, int maxResponseSize) throws IOException {
+ CachedBodyRequestWrapper cachedReq = new CachedBodyRequestWrapper(req, body);
+ BufferedResponseWrapper bufferedResp = new BufferedResponseWrapper(
+ resp, maxResponseSize);
+
+ try {
+ rpcServer.handle(cachedReq, bufferedResp);
+ } catch (RuntimeException e) {
+ logger.error("RPC execution failed", e);
+ // rootNode.get("id") is null for non-object requests; the error then carries id:null.
+ writeJsonRpcError(resp, JsonRpcError.INTERNAL_ERROR, "Internal error",
+ rootNode.get("id"), false);
+ return;
+ }
+
+ // On overflow commitToResponse writes nothing to the real response, leaving
+ // it free for the error below; otherwise it flushes the buffered output.
+ bufferedResp.commitToResponse();
+ if (bufferedResp.isOverflow()) {
+ writeJsonRpcError(resp, JsonRpcError.RESPONSE_TOO_LARGE,
+ "Response exceeds the limit of " + maxResponseSize + " bytes",
+ rootNode.get("id"), false);
+ }
+ }
+
+ // Executes a batch by feeding each sub-request through jsonrpc4j individually
+ // and assembling the responses, tracking the serialized size so the aggregate
+ // response can be capped at maxResponseSize (<=0 disables the cap).
+ private void handleBatch(HttpServletResponse resp, JsonNode rootNode, int maxResponseSize)
+ throws IOException {
+
+ ArrayNode batchResult = MAPPER.createArrayNode();
+ int accumulatedSize = 2; // "[]"
+ boolean overflow = false;
+
+ for (int i = 0; i < rootNode.size(); i++) {
+ JsonNode subRequest = rootNode.get(i);
+
+ // Once over the limit, remaining sub-requests are NOT executed; each
+ // id-bearing one gets a RESPONSE_TOO_LARGE stub instead.
+ // NOTE(review): these stub error nodes are not themselves counted
+ // against maxResponseSize — confirm that is acceptable.
+ if (overflow) {
+ // Notifications (no "id") do not get a response even on overflow.
+ if (subRequest.has("id")) {
+ batchResult.add(buildErrorNode(JsonRpcError.RESPONSE_TOO_LARGE,
+ "Response exceeds the limit of " + maxResponseSize + " bytes",
+ subRequest.get("id")));
+ }
+ continue;
+ }
+
+ // Re-serialize the sub-request so it can be fed to handleRequest as a stream.
+ byte[] subBody;
+ try {
+ subBody = MAPPER.writeValueAsBytes(subRequest);
+ } catch (JsonProcessingException e) {
+ writeJsonRpcError(resp, JsonRpcError.INTERNAL_ERROR, "Internal error", null, true);
+ return;
+ }
+
+ ByteArrayOutputStream subOutput = new ByteArrayOutputStream();
+ try {
+ rpcServer.handleRequest(new ByteArrayInputStream(subBody), subOutput);
+ } catch (RuntimeException e) {
+ logger.error("RPC execution failed for batch sub-request {}", i, e);
+ writeJsonRpcError(resp, JsonRpcError.INTERNAL_ERROR, "Internal error", null, true);
+ return;
+ }
+
+ byte[] responseBytes = subOutput.toByteArray();
+ if (responseBytes.length == 0) {
+ continue; // notification — no response
+ }
+
+ // comma(,) separator between array elements
+ // NOTE(review): accumulatedSize approximates the final serialization —
+ // the response nodes are re-serialized at the end, which may differ by
+ // whitespace from jsonrpc4j's own output; confirm the estimate is safe.
+ int addition = responseBytes.length + (!batchResult.isEmpty() ? 1 : 0);
+ if (maxResponseSize > 0 && accumulatedSize + addition > maxResponseSize) {
+ overflow = true;
+ batchResult.add(buildErrorNode(JsonRpcError.RESPONSE_TOO_LARGE,
+ "Response exceeds the limit of " + maxResponseSize + " bytes",
+ subRequest.get("id")));
+ continue;
+ }
+ accumulatedSize += addition;
+
+ // Parse the sub-response back into a tree so it can join the result array.
+ JsonNode responseNode;
+ try {
+ responseNode = MAPPER.readTree(responseBytes);
+ } catch (IOException e) {
+ writeJsonRpcError(resp, JsonRpcError.INTERNAL_ERROR, "Internal error", null, true);
+ return;
+ }
+ batchResult.add(responseNode);
+ }
+
+ // JSON-RPC 2.0 §6: MUST NOT return an empty Array when there are no response objects.
+ if (batchResult.isEmpty()) {
+ resp.setStatus(HttpServletResponse.SC_OK);
+ resp.setContentLength(0);
+ return;
+ }
+
+ byte[] finalBytes = MAPPER.writeValueAsBytes(batchResult);
+ resp.setContentType("application/json-rpc; charset=utf-8");
+ resp.setStatus(HttpServletResponse.SC_OK);
+ resp.setContentLength(finalBytes.length);
+ resp.getOutputStream().write(finalBytes);
+ resp.getOutputStream().flush();
+ }
+
+ // Drains the servlet input stream fully into memory.
+ // NOTE(review): no upper bound is enforced here — this relies on the
+ // container/jsonrpc maxMessageSize limit to cap request size; confirm that
+ // limit is enforced upstream before this method is reached.
+ private byte[] readBody(InputStream in) throws IOException {
+ ByteArrayOutputStream buffer = new ByteArrayOutputStream();
+ byte[] tmp = new byte[4096];
+ int n;
+ while ((n = in.read(tmp)) != -1) {
+ buffer.write(tmp, 0, n);
+ }
+ return buffer.toByteArray();
+ }
+
+ // Builds a JSON-RPC 2.0 error response object {jsonrpc, error{code,message}, id}.
+ // When the request id is unknown/absent the spec requires id to be JSON null.
+ private ObjectNode buildErrorNode(JsonRpcError error, String message, JsonNode id) {
+ ObjectNode errorObj = MAPPER.createObjectNode();
+ errorObj.put("jsonrpc", "2.0");
+ ObjectNode errNode = errorObj.putObject("error");
+ errNode.put("code", error.code);
+ errNode.put("message", message);
+ if (id != null && !id.isNull() && !id.isMissingNode()) {
+ errorObj.set("id", id);
+ } else {
+ errorObj.putNull("id");
+ }
+ return errorObj;
+ }
+
+ // Serializes one error response and writes it out. isBatch wraps the error in
+ // a one-element array. HTTP status is always 200: JSON-RPC carries its errors
+ // in-band rather than via HTTP status codes.
+ private void writeJsonRpcError(HttpServletResponse resp, JsonRpcError error, String message,
+ JsonNode id, boolean isBatch) throws IOException {
+ ObjectNode errorObj = buildErrorNode(error, message, id);
+ byte[] bytes;
+ if (isBatch) {
+ ArrayNode arr = MAPPER.createArrayNode();
+ arr.add(errorObj);
+ bytes = MAPPER.writeValueAsBytes(arr);
+ } else {
+ bytes = MAPPER.writeValueAsBytes(errorObj);
+ }
+ resp.setContentType("application/json-rpc; charset=utf-8");
+ resp.setStatus(HttpServletResponse.SC_OK);
+ resp.setContentLength(bytes.length);
+ resp.getOutputStream().write(bytes);
+ resp.getOutputStream().flush();
 }
-}
\ No newline at end of file
+}
diff --git a/framework/src/main/java/org/tron/core/services/jsonrpc/filters/LogFilter.java b/framework/src/main/java/org/tron/core/services/jsonrpc/filters/LogFilter.java
index 42bc123d4bc..d2bd58f6c56 100644
--- a/framework/src/main/java/org/tron/core/services/jsonrpc/filters/LogFilter.java
+++ b/framework/src/main/java/org/tron/core/services/jsonrpc/filters/LogFilter.java
@@ -50,6 +50,10 @@ public LogFilter(FilterRequest fr) throws JsonRpcInvalidParamsException {
withContractAddress(addressToByteArray((String) fr.getAddress()));
} else if (fr.getAddress() instanceof ArrayList) {
+ int maxAddressSize = Args.getInstance().getJsonRpcMaxAddressSize();
+ if (maxAddressSize > 0 && ((ArrayList<?>) fr.getAddress()).size() > maxAddressSize) {
+ throw new JsonRpcInvalidParamsException("exceed max addresses: " + maxAddressSize);
+ }
List addr = new ArrayList<>();
int i = 0;
for (Object s : (ArrayList) fr.getAddress()) {
diff --git a/framework/src/main/resources/config.conf b/framework/src/main/resources/config.conf
index 4c26cb90fba..1021637a437 100644
--- a/framework/src/main/resources/config.conf
+++ b/framework/src/main/resources/config.conf
@@ -11,20 +11,22 @@ storage {
# Whether to write transaction result in transactionRetStore
transHistory.switch = "on",
+ index.directory = "index",
+ index.switch = "on",
# setting can improve leveldb performance .... start, deprecated for arm
# node: if this will increase process fds,you may be check your ulimit if 'too many open files' error occurs
# see https://github.com/tronprotocol/tips/blob/master/tip-343.md for detail
# if you find block sync has lower performance, you can try this settings
- # default = {
- # maxOpenFiles = 100
- # }
- # defaultM = {
- # maxOpenFiles = 500
- # }
- # defaultL = {
- # maxOpenFiles = 1000
- # }
+ default = {
+ # maxOpenFiles = 100
+ }
+ defaultM = {
+ # maxOpenFiles = 500
+ }
+ defaultL = {
+ # maxOpenFiles = 1000
+ }
# setting can improve leveldb performance .... end, deprecated for arm
# You can customize the configuration for each database. Otherwise, the database settings will use
@@ -77,18 +79,18 @@ storage {
balance.history.lookup = false
- # checkpoint.version = 2
- # checkpoint.sync = true
+ checkpoint.version = 1
+ checkpoint.sync = true
- # the estimated number of block transactions (default 1000, min 100, max 10000).
- # so the total number of cached transactions is 65536 * txCache.estimatedTransactions
- # txCache.estimatedTransactions = 1000
+ # the estimated number of block transactions (min 100, max 10000).
+ # total cached transactions = 65536 * txCache.estimatedTransactions
+ txCache.estimatedTransactions = 1000
# if true, transaction cache initialization will be faster. Default: false
txCache.initOptimization = true
# The number of blocks flushed to db in each batch during node syncing. Default: 1
- # snapshot.maxFlushCount = 1
+ snapshot.maxFlushCount = 1
# data root setting, for check data, currently, only reward-vi is used.
# merkleRoot = {
@@ -98,8 +100,9 @@ storage {
}
node.discovery = {
- enable = true
- persist = true
+ enable = true # default: false
+ persist = true # default: false
+ # external.ip = "" # default: "" (auto-detect)
}
# custom stop condition
@@ -143,16 +146,16 @@ node.metrics = {
node {
# trust node for solidity node
# trustNode = "ip:port"
- trustNode = "127.0.0.1:50051"
+ trustNode = ""
# expose extension api to public or not
- walletExtensionApi = true
+ walletExtensionApi = false
listen.port = 18888
connection.timeout = 2
- fetchBlock.timeout = 200
+ fetchBlock.timeout = 500
# syncFetchBatchNum = 2000
# Maximum number of blocks allowed in-flight (requested but not yet processed).
@@ -200,6 +203,40 @@ node {
isOpenFullTcpDisconnect = false
inactiveThreshold = 600 //seconds
+ # Maximum number of fast-forward peers. Default: 4
+ maxFastForwardNum = 4
+
+ # Netty work thread counts; 0 = auto (availableProcessors). Default: tcp=0, udp=1
+ tcpNettyWorkThreadNum = 0
+ udpNettyWorkThreadNum = 1
+
+ # Maximum number of shielded transactions in the pending pool. Default: 10
+ shieldedTransInPendingMaxCounts = 10
+
+ # Block cache timeout in seconds. Default: 60
+ blockCacheTimeout = 60
+
+ # Minimum data length (bytes) to read from TCP. Default: 2048
+ receiveTcpMinDataLength = 2048
+
+ # Maximum pending transaction pool size. Default: 2000
+ maxTransactionPendingSize = 2000
+
+ # Timeout for pending transactions in milliseconds. Default: 60000
+ pendingTransactionTimeout = 60000
+
+ # Required agreement count for block consensus; 0 = auto (2/3 of witness count + 1). Default: 0
+ agreeNodeCount = 0
+
+ # Enable node-level metrics collection (prerequisite for prometheus/influxdb reporting). Default: false
+ metricsEnable = false
+
+ # Channel read timeout in seconds; 0 = no timeout. Default: 0
+ channel.read.timeout = 0
+
+ # Threads for contract protobuf validation; 0 = auto (availableProcessors). Default: 0
+ validContractProto.threads = 0
+
p2p {
version = 11111 # Mainnet:11111; Nile:201910292; Shasta:1
}
@@ -281,6 +318,9 @@ node {
# The switch of the reflection service, effective for all gRPC services, used for grpcurl tool. Default: false
reflectionService = false
+
+ # Cache transactions in the RPC layer. Default: false
+ trxCacheEnable = false
}
# number of solidity thread in the FullNode.
@@ -388,17 +428,22 @@ node {
# httpPBFTEnable = false
# httpPBFTPort = 8565
- # The maximum blocks range to retrieve logs for eth_getLogs, default value is 5000,
- # should be > 0, otherwise means no limit.
+ # The maximum blocks range to retrieve logs for eth_getLogs, default: 5000, <=0 means no limit
maxBlockRange = 5000
-
- # The maximum number of allowed topics within a topic criteria, default value is 1000,
- # should be > 0, otherwise means no limit.
+ # Allowed max address count in filter request, default: 1000, <=0 means no limit
+ maxAddressSize = 1000
+ # The maximum number of allowed topics within a topic criteria, default: 1000, <=0 means no limit
maxSubTopics = 1000
- # Allowed maximum number for blockFilter, >0 otherwise no limit
+ # Allowed maximum number for blockFilter, default: 50000, <=0 means no limit
maxBlockFilterNum = 50000
- # Allowed maximum number for newFilter, >0 otherwise no limit
+ # Allowed batch size, default: 100, <=0 means no limit
+ maxBatchSize = 100
+ # Allowed max response byte size, default: 26214400 (25 MB), <=0 means no limit
+ maxResponseSize = 26214400
+ # Allowed maximum number for newFilter, <=0 means no limit
maxLogFilterNum = 20000
+ # Maximum JSON-RPC request body size, default 4MB. Independent from rpc.maxMessageSize.
+ maxMessageSize = 4M
}
# Disabled api list, it will work for http, rpc and pbft, both FullNode and SolidityNode,
@@ -461,15 +506,17 @@ rate.limiter = {
]
p2p = {
- # syncBlockChain = 3.0
- # fetchInvData = 3.0
- # disconnect = 1.0
+ syncBlockChain = 3.0
+ fetchInvData = 3.0
+ disconnect = 1.0
}
# global qps, default 50000
global.qps = 50000
# IP-based global qps, default 10000
global.ip.qps = 10000
+ # API-based global qps, default 1000
+ global.api.qps = 1000
}
@@ -872,3 +919,7 @@ event.subscribe = {
]
}
}
+
+# Block number from which the energy limit calculation applies.
+# Note: the config key contains a legacy typo ("enery") preserved for backward compatibility.
+# enery.limit.block.num = 4727890
diff --git a/framework/src/test/java/org/tron/common/ParameterTest.java b/framework/src/test/java/org/tron/common/ParameterTest.java
index 1e7bbc1545c..9575dfc197a 100644
--- a/framework/src/test/java/org/tron/common/ParameterTest.java
+++ b/framework/src/test/java/org/tron/common/ParameterTest.java
@@ -224,7 +224,6 @@ public void testCommonParameter() {
assertEquals(1000, parameter.getRateLimiterGlobalQps());
parameter.setRateLimiterGlobalIpQps(100);
assertEquals(100, parameter.getRateLimiterGlobalIpQps());
- assertNull(parameter.getOverlay());
assertNull(parameter.getEventPluginConfig());
assertNull(parameter.getEventFilter());
parameter.setCryptoEngine(ECKey_ENGINE);
diff --git a/framework/src/test/java/org/tron/core/config/args/ArgsTest.java b/framework/src/test/java/org/tron/core/config/args/ArgsTest.java
index bb3d1c4b210..9bec7f46eff 100644
--- a/framework/src/test/java/org/tron/core/config/args/ArgsTest.java
+++ b/framework/src/test/java/org/tron/core/config/args/ArgsTest.java
@@ -161,7 +161,7 @@ public void testInitService() {
storage.put("storage.db.directory", "database");
Config config = ConfigFactory.defaultOverrides()
.withFallback(ConfigFactory.parseMap(storage))
- .withFallback(ConfigFactory.defaultReference());
+ .withFallback(ConfigFactory.empty());
// test default value
Args.applyConfigParams(config);
Assert.assertTrue(Args.getInstance().isRpcEnable());
@@ -190,7 +190,7 @@ public void testInitService() {
storage.put("node.jsonrpc.maxSubTopics", "20");
config = ConfigFactory.defaultOverrides()
.withFallback(ConfigFactory.parseMap(storage))
- .withFallback(ConfigFactory.defaultReference());
+ .withFallback(ConfigFactory.empty());
// test value
Args.applyConfigParams(config);
Assert.assertTrue(Args.getInstance().isRpcEnable());
@@ -219,7 +219,7 @@ public void testInitService() {
storage.put("node.jsonrpc.maxSubTopics", "1000");
config = ConfigFactory.defaultOverrides()
.withFallback(ConfigFactory.parseMap(storage))
- .withFallback(ConfigFactory.defaultReference());
+ .withFallback(ConfigFactory.empty());
// test value
Args.applyConfigParams(config);
Assert.assertFalse(Args.getInstance().isRpcEnable());
@@ -248,7 +248,7 @@ public void testInitService() {
storage.put("node.jsonrpc.maxSubTopics", "40");
config = ConfigFactory.defaultOverrides()
.withFallback(ConfigFactory.parseMap(storage))
- .withFallback(ConfigFactory.defaultReference());
+ .withFallback(ConfigFactory.empty());
// test value
Args.applyConfigParams(config);
Assert.assertFalse(Args.getInstance().isRpcEnable());
@@ -268,7 +268,7 @@ public void testInitService() {
storage.put("node.jsonrpc.maxSubTopics", "0");
config = ConfigFactory.defaultOverrides()
.withFallback(ConfigFactory.parseMap(storage))
- .withFallback(ConfigFactory.defaultReference());
+ .withFallback(ConfigFactory.empty());
// check value
Args.applyConfigParams(config);
Assert.assertEquals(0, Args.getInstance().getJsonRpcMaxBlockRange());
@@ -279,7 +279,7 @@ public void testInitService() {
storage.put("node.jsonrpc.maxSubTopics", "-4");
config = ConfigFactory.defaultOverrides()
.withFallback(ConfigFactory.parseMap(storage))
- .withFallback(ConfigFactory.defaultReference());
+ .withFallback(ConfigFactory.empty());
// check value
Args.applyConfigParams(config);
Assert.assertEquals(-2, Args.getInstance().getJsonRpcMaxBlockRange());
@@ -381,7 +381,7 @@ public void testFetchBlockTimeoutClampedBelowMin() {
override.put("storage.db.directory", "database");
override.put("node.fetchBlock.timeout", "50");
Config config = ConfigFactory.parseMap(override)
- .withFallback(ConfigFactory.defaultReference());
+ .withFallback(ConfigFactory.empty());
Args.applyConfigParams(config);
Assert.assertEquals(100, Args.getInstance().getFetchBlockTimeout());
Args.clearParam();
@@ -393,7 +393,7 @@ public void testFetchBlockTimeoutClampedAboveMax() {
override.put("storage.db.directory", "database");
override.put("node.fetchBlock.timeout", "2000");
Config config = ConfigFactory.parseMap(override)
- .withFallback(ConfigFactory.defaultReference());
+ .withFallback(ConfigFactory.empty());
Args.applyConfigParams(config);
Assert.assertEquals(1000, Args.getInstance().getFetchBlockTimeout());
Args.clearParam();
@@ -434,7 +434,7 @@ public void testFetchBlockTimeoutInRangeUnchanged() {
override.put("storage.db.directory", "database");
override.put("node.fetchBlock.timeout", "500");
Config config = ConfigFactory.parseMap(override)
- .withFallback(ConfigFactory.defaultReference());
+ .withFallback(ConfigFactory.empty());
Args.applyConfigParams(config);
Assert.assertEquals(500, Args.getInstance().getFetchBlockTimeout());
Args.clearParam();
diff --git a/framework/src/test/java/org/tron/core/config/args/OverlayTest.java b/framework/src/test/java/org/tron/core/config/args/OverlayTest.java
deleted file mode 100644
index 1b7045c5b21..00000000000
--- a/framework/src/test/java/org/tron/core/config/args/OverlayTest.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * java-tron is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * java-tron is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see .
- */
-
-package org.tron.core.config.args;
-
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-public class OverlayTest {
-
- private Overlay overlay = new Overlay();
-
- @Before
- public void setOverlay() {
- overlay.setPort(8080);
- }
-
- @Test(expected = IllegalArgumentException.class)
- public void whenSetOutOfBoundsPort() {
- overlay.setPort(-1);
- }
-
- @Test
- public void getOverlay() {
- Assert.assertEquals(8080, overlay.getPort());
- }
-}
diff --git a/framework/src/test/java/org/tron/core/jsonrpc/JsonRpcTest.java b/framework/src/test/java/org/tron/core/jsonrpc/JsonRpcTest.java
index bd357101da3..5f577194dff 100644
--- a/framework/src/test/java/org/tron/core/jsonrpc/JsonRpcTest.java
+++ b/framework/src/test/java/org/tron/core/jsonrpc/JsonRpcTest.java
@@ -8,12 +8,14 @@
import java.util.ArrayList;
import java.util.BitSet;
+import java.util.Collections;
import java.util.List;
import org.bouncycastle.util.encoders.Hex;
import org.junit.Assert;
import org.junit.Test;
import org.tron.common.bloom.Bloom;
import org.tron.common.crypto.Hash;
+import org.tron.common.parameter.CommonParameter;
import org.tron.common.runtime.vm.DataWord;
import org.tron.common.utils.ByteArray;
import org.tron.common.utils.ByteUtil;
@@ -242,6 +244,58 @@ public void testLogFilter() {
}
}
+ // Verifies the jsonRpcMaxAddressSize limit in LogFilter: at-limit passes,
+ // over-limit throws JsonRpcInvalidParamsException, and a non-positive limit
+ // disables the check. The global parameter is restored in a finally block so
+ // other tests are not affected.
+ @Test
+ public void testLogFilterAddressSizeLimit() {
+ // Three valid 20-byte addresses (40 hex chars with 0x prefix)
+ String addr1 = "0xaa6612f03443517ced2bdcf27958c22353ceeab9";
+ String addr2 = "0xbb7723a04554628ced3cdf38069b433464ffbc0a";
+ String addr3 = "0xcc8834b15665739def4de049f17a544575aabcd1";
+
+ int savedLimit = CommonParameter.getInstance().jsonRpcMaxAddressSize;
+ try {
+ CommonParameter.getInstance().jsonRpcMaxAddressSize = 2;
+
+ // Exactly at limit — must not throw
+ ArrayList<String> atLimit = new ArrayList<>();
+ atLimit.add(addr1);
+ atLimit.add(addr2);
+ FilterRequest frAtLimit = new FilterRequest();
+ frAtLimit.setAddress(atLimit);
+ try {
+ new LogFilter(frAtLimit);
+ } catch (JsonRpcInvalidParamsException e) {
+ Assert.fail("address list at limit should not throw: " + e.getMessage());
+ }
+
+ // One over limit — must throw with expected message
+ ArrayList<String> overLimit = new ArrayList<>();
+ overLimit.add(addr1);
+ overLimit.add(addr2);
+ overLimit.add(addr3);
+ FilterRequest frOverLimit = new FilterRequest();
+ frOverLimit.setAddress(overLimit);
+ try {
+ new LogFilter(frOverLimit);
+ Assert.fail("address list over limit should have thrown JsonRpcInvalidParamsException");
+ } catch (JsonRpcInvalidParamsException e) {
+ Assert.assertTrue(e.getMessage().contains("exceed max addresses:"));
+ }
+
+ // Limit = 0 means disabled — large list must pass
+ CommonParameter.getInstance().jsonRpcMaxAddressSize = 0;
+ ArrayList<String> largeList = new ArrayList<>(Collections.nCopies(500, addr1));
+ FilterRequest frDisabled = new FilterRequest();
+ frDisabled.setAddress(largeList);
+ try {
+ new LogFilter(frDisabled);
+ } catch (JsonRpcInvalidParamsException e) {
+ Assert.fail("limit=0 should disable the check: " + e.getMessage());
+ }
+ } finally {
+ CommonParameter.getInstance().jsonRpcMaxAddressSize = savedLimit;
+ }
+ }
+
private int[] getBloomIndex(String s) {
Bloom bloom = Bloom.create(Hash.sha3(ByteArray.fromHexString(s)));
BitSet bs = BitSet.valueOf(bloom.getData());
diff --git a/framework/src/test/java/org/tron/core/services/filter/BufferedResponseWrapperTest.java b/framework/src/test/java/org/tron/core/services/filter/BufferedResponseWrapperTest.java
new file mode 100644
index 00000000000..d7828fa5cd0
--- /dev/null
+++ b/framework/src/test/java/org/tron/core/services/filter/BufferedResponseWrapperTest.java
@@ -0,0 +1,287 @@
+package org.tron.core.services.filter;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import org.junit.Before;
+import org.junit.Test;
+import org.springframework.mock.web.MockHttpServletResponse;
+
+/**
+ * Unit tests for BufferedResponseWrapper: overflow detection via writes and
+ * Content-Length (method and header forms), header/status buffering until
+ * commitToResponse(), commit idempotency, and the getWriter() encoder-flush
+ * regression cases. Uses Spring's MockHttpServletResponse as the delegate.
+ */
+public class BufferedResponseWrapperTest {
+
+ private MockHttpServletResponse mockResp;
+
+ @Before
+ public void setUp() {
+ mockResp = new MockHttpServletResponse();
+ }
+
+ // --- isOverflow: false cases ---
+
+ @Test
+ public void noLimit_neverOverflows() throws IOException {
+ BufferedResponseWrapper w = new BufferedResponseWrapper(mockResp, 0);
+ w.getOutputStream().write(new byte[1024 * 1024]);
+ assertFalse(w.isOverflow());
+ }
+
+ @Test
+ public void withinLimit_notOverflow() throws IOException {
+ BufferedResponseWrapper w = new BufferedResponseWrapper(mockResp, 10);
+ w.getOutputStream().write(new byte[10]);
+ assertFalse(w.isOverflow());
+ }
+
+ @Test
+ public void exactlyAtLimit_notOverflow() throws IOException {
+ BufferedResponseWrapper w = new BufferedResponseWrapper(mockResp, 5);
+ w.getOutputStream().write(new byte[]{1, 2, 3, 4, 5});
+ assertFalse(w.isOverflow());
+ }
+
+ // --- isOverflow: true via write ---
+
+ @Test
+ public void oneBytePastLimit_overflow() throws IOException {
+ BufferedResponseWrapper w = new BufferedResponseWrapper(mockResp, 5);
+ w.getOutputStream().write(new byte[]{1, 2, 3, 4, 5, 6});
+ assertTrue(w.isOverflow());
+ }
+
+ @Test
+ public void singleByteWrite_triggerOverflow() throws IOException {
+ BufferedResponseWrapper w = new BufferedResponseWrapper(mockResp, 3);
+ w.getOutputStream().write(1);
+ w.getOutputStream().write(2);
+ w.getOutputStream().write(3);
+ assertFalse(w.isOverflow());
+ w.getOutputStream().write(4);
+ assertTrue(w.isOverflow());
+ }
+
+ @Test
+ public void overflow_bufferIsReleasedOnOverflow() throws IOException {
+ BufferedResponseWrapper w = new BufferedResponseWrapper(mockResp, 4);
+ w.getOutputStream().write(new byte[]{1, 2, 3, 4, 5});
+ assertTrue(w.isOverflow());
+ // After overflow, further writes are silently discarded — no exception
+ w.getOutputStream().write(new byte[100]);
+ assertTrue(w.isOverflow());
+ }
+
+ // --- isOverflow: true via setContentLength ---
+
+ @Test
+ public void setContentLength_exceedsLimit_overflow() {
+ BufferedResponseWrapper w = new BufferedResponseWrapper(mockResp, 100);
+ w.setContentLength(101);
+ assertTrue(w.isOverflow());
+ }
+
+ @Test
+ public void setContentLength_exactlyAtLimit_notOverflow() {
+ BufferedResponseWrapper w = new BufferedResponseWrapper(mockResp, 100);
+ w.setContentLength(100);
+ assertFalse(w.isOverflow());
+ }
+
+ @Test
+ public void setContentLengthLong_exceedsLimit_overflow() {
+ BufferedResponseWrapper w = new BufferedResponseWrapper(mockResp, 100);
+ w.setContentLengthLong(101L);
+ assertTrue(w.isOverflow());
+ }
+
+ @Test
+ public void setContentLength_noLimit_neverOverflows() {
+ BufferedResponseWrapper w = new BufferedResponseWrapper(mockResp, 0);
+ w.setContentLength(Integer.MAX_VALUE);
+ assertFalse(w.isOverflow());
+ }
+
+ // --- setContentLength early detection: writes after early overflow are discarded ---
+
+ @Test
+ public void earlyOverflow_subsequentWritesDiscarded() throws IOException {
+ BufferedResponseWrapper w = new BufferedResponseWrapper(mockResp, 10);
+ w.setContentLength(20);
+ assertTrue(w.isOverflow());
+ w.getOutputStream().write(new byte[5]);
+ // Nothing committed to actual response
+ assertFalse(mockResp.isCommitted());
+ }
+
+ // --- commitToResponse ---
+
+ @Test
+ public void commitToResponse_writesBodyAndHeaders() throws IOException {
+ BufferedResponseWrapper w = new BufferedResponseWrapper(mockResp, 0);
+ byte[] data = "hello".getBytes(StandardCharsets.UTF_8);
+ w.setStatus(200);
+ w.setContentType("application/json");
+ w.getOutputStream().write(data);
+ w.commitToResponse();
+
+ assertEquals(200, mockResp.getStatus());
+ assertEquals("application/json", mockResp.getContentType());
+ assertArrayEquals(data, mockResp.getContentAsByteArray());
+ }
+
+ @Test
+ public void commitToResponse_setsCorrectContentLength() throws IOException {
+ BufferedResponseWrapper w = new BufferedResponseWrapper(mockResp, 0);
+ byte[] data = new byte[]{10, 20, 30};
+ w.getOutputStream().write(data);
+ w.commitToResponse();
+
+ assertEquals(3, mockResp.getContentLength());
+ }
+
+ @Test
+ public void commitToResponse_emptyBuffer_writesZeroBytes() throws IOException {
+ BufferedResponseWrapper w = new BufferedResponseWrapper(mockResp, 100);
+ w.setStatus(200);
+ w.commitToResponse();
+
+ assertEquals(0, mockResp.getContentLength());
+ assertEquals(0, mockResp.getContentAsByteArray().length);
+ }
+
+ // --- header buffering: nothing reaches actual response until commit ---
+
+ @Test
+ public void statusNotForwardedBeforeCommit() throws IOException {
+ BufferedResponseWrapper w = new BufferedResponseWrapper(mockResp, 0);
+ w.setStatus(201);
+ // MockHttpServletResponse defaults to 200
+ assertEquals(200, mockResp.getStatus());
+ w.commitToResponse();
+ assertEquals(201, mockResp.getStatus());
+ }
+
+ // --- getStatus() ---
+
+ @Test
+ public void getStatus_returnsBufferedValue() {
+ BufferedResponseWrapper w = new BufferedResponseWrapper(mockResp, 0);
+ w.setStatus(404);
+ assertEquals(404, w.getStatus());
+ // actual response must still be untouched
+ assertEquals(200, mockResp.getStatus());
+ }
+
+ @Test
+ public void getStatus_defaultIs200() {
+ BufferedResponseWrapper w = new BufferedResponseWrapper(mockResp, 0);
+ assertEquals(200, w.getStatus());
+ }
+
+ // --- setHeader / addHeader for Content-Length ---
+
+ @Test
+ public void setHeader_contentLength_exceedsLimit_overflow() {
+ BufferedResponseWrapper w = new BufferedResponseWrapper(mockResp, 100);
+ w.setHeader("Content-Length", "101");
+ assertTrue(w.isOverflow());
+ // Content-Length must NOT have been forwarded to the actual response
+ assertNull(mockResp.getHeader("Content-Length"));
+ }
+
+ @Test
+ public void setHeader_contentLength_withinLimit_noOverflow() {
+ BufferedResponseWrapper w = new BufferedResponseWrapper(mockResp, 100);
+ w.setHeader("Content-Length", "100");
+ assertFalse(w.isOverflow());
+ }
+
+ @Test
+ public void setHeader_contentLength_caseInsensitive_overflow() {
+ BufferedResponseWrapper w = new BufferedResponseWrapper(mockResp, 50);
+ w.setHeader("content-length", "51");
+ assertTrue(w.isOverflow());
+ }
+
+ @Test
+ public void setHeader_contentLength_malformed_ignored() {
+ BufferedResponseWrapper w = new BufferedResponseWrapper(mockResp, 100);
+ w.setHeader("Content-Length", "not-a-number");
+ assertFalse(w.isOverflow());
+ }
+
+ @Test
+ public void setHeader_nonContentLength_passesThroughToActual() {
+ BufferedResponseWrapper w = new BufferedResponseWrapper(mockResp, 0);
+ w.setHeader("X-Custom-Header", "hello");
+ assertEquals("hello", mockResp.getHeader("X-Custom-Header"));
+ }
+
+ @Test
+ public void addHeader_contentLength_exceedsLimit_overflow() {
+ BufferedResponseWrapper w = new BufferedResponseWrapper(mockResp, 100);
+ w.addHeader("Content-Length", "200");
+ assertTrue(w.isOverflow());
+ assertNull(mockResp.getHeader("Content-Length"));
+ }
+
+ @Test
+ public void addHeader_contentLength_malformed_ignored() {
+ BufferedResponseWrapper w = new BufferedResponseWrapper(mockResp, 100);
+ w.addHeader("Content-Length", "bad");
+ assertFalse(w.isOverflow());
+ }
+
+ @Test
+ public void addHeader_nonContentLength_passesThroughToActual() {
+ BufferedResponseWrapper w = new BufferedResponseWrapper(mockResp, 0);
+ w.addHeader("X-Trace-Id", "abc123");
+ assertEquals("abc123", mockResp.getHeader("X-Trace-Id"));
+ }
+
+ // --- commitToResponse idempotency ---
+
+ @Test(expected = IllegalStateException.class)
+ public void commitToResponse_secondCall_throwsIllegalState() throws IOException {
+ BufferedResponseWrapper w = new BufferedResponseWrapper(mockResp, 0);
+ w.commitToResponse();
+ w.commitToResponse();
+ }
+
+ // --- getWriter path ---
+
+ @Test
+ public void writeViaWriter_commitToResponse_flushesBody() throws IOException {
+ BufferedResponseWrapper w = new BufferedResponseWrapper(mockResp, 0);
+ w.getWriter().print("hello");
+ w.getWriter().flush();
+ w.commitToResponse();
+ assertEquals("hello", mockResp.getContentAsString());
+ }
+
+ @Test
+ public void writeViaWriter_noExplicitFlush_commitToResponse_flushesBody() throws IOException {
+ // Regression: PrintWriter(autoFlush=true) does NOT flush on plain print(); bytes can sit
+ // in the OutputStreamWriter encoder until commitToResponse() flushes the writer internally.
+ BufferedResponseWrapper w = new BufferedResponseWrapper(mockResp, 0);
+ w.getWriter().print("hello");
+ w.commitToResponse();
+ assertEquals("hello", mockResp.getContentAsString());
+ assertEquals(5, mockResp.getContentLength());
+ }
+
+ @Test
+ public void writeViaWriter_noExplicitFlush_flushTripsOverflow() throws IOException {
+ // Regression: bytes buffered in the encoder may push the total past maxBytes when
+ // commitToResponse() flushes — overflow must be detected and nothing written to actual.
+ BufferedResponseWrapper w = new BufferedResponseWrapper(mockResp, 3);
+ w.getWriter().print("hello"); // 5 bytes, not yet in ByteArrayOutputStream
+ assertFalse("overflow must not trigger before flush", w.isOverflow());
+ w.commitToResponse();
+ assertTrue("flush inside commitToResponse must trip overflow", w.isOverflow());
+ assertEquals(0, mockResp.getContentAsByteArray().length);
+ }
+}
diff --git a/framework/src/test/java/org/tron/core/services/filter/CachedBodyRequestWrapperTest.java b/framework/src/test/java/org/tron/core/services/filter/CachedBodyRequestWrapperTest.java
new file mode 100644
index 00000000000..813b1a61bea
--- /dev/null
+++ b/framework/src/test/java/org/tron/core/services/filter/CachedBodyRequestWrapperTest.java
@@ -0,0 +1,109 @@
+package org.tron.core.services.filter;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import org.junit.Test;
+import org.springframework.mock.web.MockHttpServletRequest;
+
+public class CachedBodyRequestWrapperTest { // unit tests for CachedBodyRequestWrapper: cached-body replay, reader/stream mutual exclusion, servlet stream contract
+
+ private static final byte[] BODY = "hello world".getBytes(StandardCharsets.UTF_8);
+
+ private static byte[] readFully(javax.servlet.ServletInputStream in) throws IOException { // drains the stream into a byte[]
+ ByteArrayOutputStream out = new ByteArrayOutputStream();
+ byte[] buf = new byte[128];
+ int n;
+ while ((n = in.read(buf)) != -1) {
+ out.write(buf, 0, n);
+ }
+ return out.toByteArray();
+ }
+
+ // --- getInputStream ---
+
+ @Test
+ public void getInputStream_returnsBodyContent() throws IOException {
+ CachedBodyRequestWrapper w = new CachedBodyRequestWrapper(new MockHttpServletRequest(), BODY);
+ byte[] read = readFully(w.getInputStream());
+ assertEquals(new String(BODY, StandardCharsets.UTF_8),
+ new String(read, StandardCharsets.UTF_8));
+ }
+
+ @Test
+ public void getInputStream_calledTwice_bothSucceed() throws IOException {
+ CachedBodyRequestWrapper w = new CachedBodyRequestWrapper(new MockHttpServletRequest(), BODY);
+ w.getInputStream();
+ // second call of the same accessor is allowed by the servlet spec
+ w.getInputStream();
+ }
+
+ // --- getReader ---
+
+ @Test
+ public void getReader_returnsBodyContent() throws IOException {
+ CachedBodyRequestWrapper w = new CachedBodyRequestWrapper(new MockHttpServletRequest(), BODY);
+ String line = w.getReader().readLine();
+ assertEquals("hello world", line);
+ }
+
+ @Test
+ public void getReader_calledTwice_bothSucceed() throws IOException {
+ CachedBodyRequestWrapper w = new CachedBodyRequestWrapper(new MockHttpServletRequest(), BODY);
+ w.getReader();
+ w.getReader();
+ }
+
+ // --- mutual exclusion ---
+
+ @Test(expected = IllegalStateException.class)
+ public void getReader_afterGetInputStream_throws() throws IOException { // servlet spec: mixing accessors is an ISE
+ CachedBodyRequestWrapper w = new CachedBodyRequestWrapper(new MockHttpServletRequest(), BODY);
+ w.getInputStream();
+ w.getReader();
+ }
+
+ @Test(expected = IllegalStateException.class)
+ public void getInputStream_afterGetReader_throws() throws IOException {
+ CachedBodyRequestWrapper w = new CachedBodyRequestWrapper(new MockHttpServletRequest(), BODY);
+ w.getReader();
+ w.getInputStream();
+ }
+
+ // --- stream contract ---
+
+ @Test
+ public void getInputStream_isFinished_afterFullRead() throws IOException {
+ CachedBodyRequestWrapper w = new CachedBodyRequestWrapper(new MockHttpServletRequest(), BODY);
+ javax.servlet.ServletInputStream in = w.getInputStream();
+ while (in.read() != -1) {
+ // drain
+ }
+ assertTrue(in.isFinished());
+ }
+
+ @Test
+ public void getInputStream_isReady_returnsTrue() { // cached body is always readable without blocking
+ CachedBodyRequestWrapper w = new CachedBodyRequestWrapper(new MockHttpServletRequest(), BODY);
+ assertTrue(w.getInputStream().isReady());
+ }
+
+ @Test
+ public void getInputStream_emptyBody_isFinishedImmediately() {
+ CachedBodyRequestWrapper w = new CachedBodyRequestWrapper(new MockHttpServletRequest(),
+ new byte[0]);
+ assertTrue(w.getInputStream().isFinished());
+ }
+
+ @Test
+ public void getReader_usesRequestCharacterEncoding() throws IOException {
+ MockHttpServletRequest req = new MockHttpServletRequest();
+ req.setCharacterEncoding("UTF-8");
+ byte[] utf8Body = "trøn".getBytes(StandardCharsets.UTF_8); // non-ASCII: decoding these bytes with the wrong charset (e.g. Latin-1 default) would yield mojibake, so this actually exercises the encoding path
+ CachedBodyRequestWrapper w = new CachedBodyRequestWrapper(req, utf8Body);
+ assertEquals("trøn", w.getReader().readLine());
+ }
+}
diff --git a/framework/src/test/java/org/tron/core/services/jsonrpc/JsonRpcServletTest.java b/framework/src/test/java/org/tron/core/services/jsonrpc/JsonRpcServletTest.java
new file mode 100644
index 00000000000..fa45ca48876
--- /dev/null
+++ b/framework/src/test/java/org/tron/core/services/jsonrpc/JsonRpcServletTest.java
@@ -0,0 +1,264 @@
+package org.tron.core.services.jsonrpc;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.googlecode.jsonrpc4j.JsonRpcServer;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.lang.reflect.Field;
+import java.nio.charset.StandardCharsets;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.springframework.mock.web.MockHttpServletRequest;
+import org.springframework.mock.web.MockHttpServletResponse;
+import org.tron.common.parameter.CommonParameter;
+
+public class JsonRpcServletTest { // servlet-level tests: parse errors, batch-size limit, response-size limit, internal-error shaping
+
+ private static final ObjectMapper MAPPER = new ObjectMapper();
+
+ private TestableServlet servlet;
+ private JsonRpcServer mockRpcServer;
+ private int savedMaxBatchSize; // limits are global CommonParameter state — saved here, restored in tearDown
+ private int savedMaxResponseSize;
+
+ @Before
+ public void setUp() throws Exception {
+ servlet = new TestableServlet();
+ mockRpcServer = mock(JsonRpcServer.class);
+ Field f = JsonRpcServlet.class.getDeclaredField("rpcServer"); // inject the mock past the private field
+ f.setAccessible(true);
+ f.set(servlet, mockRpcServer);
+ savedMaxBatchSize = CommonParameter.getInstance().jsonRpcMaxBatchSize;
+ savedMaxResponseSize = CommonParameter.getInstance().jsonRpcMaxResponseSize;
+ }
+
+ @After
+ public void tearDown() { // restore the shared singleton so tests don't leak limits into each other
+ CommonParameter.getInstance().jsonRpcMaxBatchSize = savedMaxBatchSize;
+ CommonParameter.getInstance().jsonRpcMaxResponseSize = savedMaxResponseSize;
+ }
+
+ // --- parse error paths ---
+
+ @Test
+ public void invalidJson_returnsParseError() throws Exception {
+ MockHttpServletResponse resp = doPost("not {{ valid json");
+ assertEquals(200, resp.getStatus()); // JSON-RPC errors ride on HTTP 200
+ JsonNode body = MAPPER.readTree(resp.getContentAsString());
+ assertFalse(body.isArray());
+ assertEquals(-32700, body.get("error").get("code").asInt()); // -32700 = Parse error per JSON-RPC 2.0
+ assertEquals("2.0", body.get("jsonrpc").asText());
+ assertTrue(body.get("id").isNull()); // id unknown when the request can't be parsed
+ }
+
+ @Test
+ public void emptyBody_returnsParseError() throws Exception {
+ MockHttpServletResponse resp = doPost("");
+ assertEquals(200, resp.getStatus());
+ JsonNode body = MAPPER.readTree(resp.getContentAsString());
+ assertEquals(-32700, body.get("error").get("code").asInt());
+ }
+
+ // --- batch size limit ---
+
+ @Test
+ public void batchExceedsLimit_returnsExceedLimitAsArray() throws Exception {
+ CommonParameter.getInstance().jsonRpcMaxBatchSize = 2;
+ MockHttpServletResponse resp = doPost("[{\"id\":1},{\"id\":2},{\"id\":3}]"); // 3 items > limit of 2
+ assertEquals(200, resp.getStatus());
+ JsonNode body = MAPPER.readTree(resp.getContentAsString());
+ assertTrue("batch error response must be a JSON array", body.isArray());
+ assertEquals(1, body.size());
+ assertEquals(-32005, body.get(0).get("error").get("code").asInt()); // -32005: servlet's "limit exceeded" code — confirm against JsonRpcServlet
+ }
+
+ @Test
+ public void batchWithinLimit_proceedsToRpcServer() throws Exception {
+ CommonParameter.getInstance().jsonRpcMaxBatchSize = 5;
+ byte[] singleResp = "{\"jsonrpc\":\"2.0\",\"result\":\"ok\",\"id\":1}"
+ .getBytes(StandardCharsets.UTF_8);
+ doAnswer(inv -> { // stub: every sub-request yields the same canned response
+ OutputStream out = inv.getArgument(1);
+ out.write(singleResp);
+ return 0;
+ }).when(mockRpcServer).handleRequest(any(InputStream.class), any(OutputStream.class));
+
+ MockHttpServletResponse resp = doPost("[{\"id\":1},{\"id\":2}]");
+ assertEquals(200, resp.getStatus());
+ JsonNode body = MAPPER.readTree(resp.getContentAsByteArray());
+ assertTrue("batch response must be a JSON array", body.isArray());
+ assertEquals("each sub-request must produce a response", 2, body.size());
+ assertEquals("ok", body.get(0).get("result").asText());
+ }
+
+ @Test
+ public void emptyBatch_returnsInvalidRequest() throws Exception {
+ MockHttpServletResponse resp = doPost("[]");
+ assertEquals(200, resp.getStatus());
+ JsonNode body = MAPPER.readTree(resp.getContentAsString());
+ assertFalse("empty-batch error response must be a single object, not an array", body.isArray());
+ assertEquals(-32600, body.get("error").get("code").asInt()); // -32600 = Invalid Request per JSON-RPC 2.0
+ assertEquals("2.0", body.get("jsonrpc").asText());
+ assertTrue(body.get("id").isNull());
+ }
+
+ @Test
+ public void batchLimitDisabled_largeBatchAllowed() throws Exception {
+ CommonParameter.getInstance().jsonRpcMaxBatchSize = 0; // 0 = limit disabled — confirm against JsonRpcServlet
+ // write nothing — simulates notifications (no response expected)
+ doAnswer(inv -> 0).when(mockRpcServer)
+ .handleRequest(any(InputStream.class), any(OutputStream.class));
+
+ StringBuilder sb = new StringBuilder("[");
+ for (int i = 0; i < 500; i++) {
+ if (i > 0) {
+ sb.append(',');
+ }
+ sb.append("{}");
+ }
+ sb.append("]");
+ MockHttpServletResponse resp = doPost(sb.toString());
+ assertEquals(200, resp.getStatus());
+ assertEquals("all-notification batch must return empty body per JSON-RPC 2.0 §6",
+ 0, resp.getContentLength());
+ assertEquals("", resp.getContentAsString());
+ }
+
+ // --- rpcServer.handle exceptions ---
+
+ @Test
+ public void rpcServerThrowsRuntimeException_returnsInternalError() throws Exception {
+ doThrow(new RuntimeException("server exploded")).when(mockRpcServer)
+ .handle(any(HttpServletRequest.class), any(HttpServletResponse.class));
+ MockHttpServletResponse resp = doPost("{\"method\":\"eth_blockNumber\",\"id\":42}");
+ assertEquals(200, resp.getStatus());
+ JsonNode body = MAPPER.readTree(resp.getContentAsString());
+ assertFalse(body.isArray());
+ assertEquals(-32603, body.get("error").get("code").asInt()); // -32603 = Internal error per JSON-RPC 2.0
+ }
+
+ @Test
+ public void batchRpcServerThrows_internalErrorIsArray() throws Exception {
+ doThrow(new RuntimeException("boom")).when(mockRpcServer)
+ .handleRequest(any(InputStream.class), any(OutputStream.class));
+ MockHttpServletResponse resp = doPost("[{\"method\":\"eth_blockNumber\"}]");
+ assertEquals(200, resp.getStatus());
+ JsonNode body = MAPPER.readTree(resp.getContentAsString());
+ assertTrue("batch internal error must be an array", body.isArray()); // array in => array out, even on failure
+ assertEquals(-32603, body.get(0).get("error").get("code").asInt());
+ }
+
+ // --- response size limit ---
+
+ @Test
+ public void responseTooLarge_returnsSingleErrorObject() throws Exception {
+ int limit = 50;
+ CommonParameter.getInstance().jsonRpcMaxResponseSize = limit;
+ doAnswer(inv -> { // stub writes one byte past the cap
+ HttpServletResponse r = inv.getArgument(1);
+ r.getOutputStream().write(new byte[limit + 1]);
+ return null;
+ }).when(mockRpcServer).handle(any(HttpServletRequest.class), any(HttpServletResponse.class));
+
+ MockHttpServletResponse resp = doPost("{\"method\":\"eth_getLogs\",\"id\":1}");
+ assertEquals(200, resp.getStatus());
+ JsonNode body = MAPPER.readTree(resp.getContentAsString());
+ assertFalse(body.isArray());
+ assertEquals(-32003, body.get("error").get("code").asInt()); // -32003: response-too-large code — confirm against JsonRpcServlet
+ }
+
+ @Test
+ public void batchResponseTooLarge_returnsErrorArray() throws Exception {
+ int limit = 50;
+ CommonParameter.getInstance().jsonRpcMaxResponseSize = limit;
+ doAnswer(inv -> {
+ OutputStream out = inv.getArgument(1);
+ out.write(new byte[limit + 1]);
+ return 0;
+ }).when(mockRpcServer).handleRequest(any(InputStream.class), any(OutputStream.class));
+
+ MockHttpServletResponse resp = doPost("[{\"method\":\"eth_getLogs\"}]");
+ assertEquals(200, resp.getStatus());
+ JsonNode body = MAPPER.readTree(resp.getContentAsString());
+ assertTrue("batch response-too-large must be an array", body.isArray());
+ assertEquals(-32003, body.get(0).get("error").get("code").asInt());
+ }
+
+ @Test
+ public void batchShortCircuitsOnOverflow() throws Exception {
+ int limit = 50;
+ CommonParameter.getInstance().jsonRpcMaxResponseSize = limit;
+ int[] callCount = {0}; // single-element array: mutable counter capturable by the lambda
+ doAnswer(inv -> {
+ OutputStream out = inv.getArgument(1);
+ callCount[0]++;
+ if (callCount[0] == 1) {
+ out.write("{\"result\":\"ok\"}".getBytes(StandardCharsets.UTF_8)); // first call stays under the cap
+ } else {
+ out.write(new byte[limit]); // triggers overflow when added to accumulated size
+ }
+ return 0;
+ }).when(mockRpcServer).handleRequest(any(InputStream.class), any(OutputStream.class));
+
+ MockHttpServletResponse resp = doPost("[{\"id\":1},{\"id\":2},{\"id\":3}]");
+ assertEquals(200, resp.getStatus());
+ JsonNode body = MAPPER.readTree(resp.getContentAsString());
+ assertTrue("overflow response must be an array", body.isArray());
+ // Geth-compatible: previous successes are preserved; overflow item and remaining
+ // unexecuted items each get a -32003 error with their original id.
+ assertEquals(3, body.size());
+ assertEquals("ok", body.get(0).get("result").asText());
+ assertEquals(-32003, body.get(1).get("error").get("code").asInt());
+ assertEquals(2, body.get(1).get("id").asInt());
+ assertEquals(-32003, body.get(2).get("error").get("code").asInt());
+ assertEquals(3, body.get(2).get("id").asInt());
+ assertEquals("third sub-request must not be executed after overflow", 2, callCount[0]);
+ }
+
+ // --- normal path ---
+
+ @Test
+ public void normalRequest_commitsRpcServerResponse() throws Exception {
+ byte[] rpcResp = "{\"result\":\"0x1\"}".getBytes(StandardCharsets.UTF_8);
+ doAnswer(inv -> {
+ HttpServletResponse r = inv.getArgument(1);
+ r.getOutputStream().write(rpcResp);
+ return null;
+ }).when(mockRpcServer).handle(any(HttpServletRequest.class), any(HttpServletResponse.class));
+
+ MockHttpServletResponse resp = doPost("{\"method\":\"eth_blockNumber\",\"id\":1}");
+ assertEquals(200, resp.getStatus());
+ assertArrayEquals(rpcResp, resp.getContentAsByteArray()); // body passed through byte-for-byte
+ }
+
+ // --- helpers ---
+
+ private MockHttpServletResponse doPost(String body) throws Exception { // builds a POST and runs it through the servlet
+ MockHttpServletRequest req = new MockHttpServletRequest("POST", "/jsonrpc");
+ req.setContent(body.getBytes(StandardCharsets.UTF_8));
+ MockHttpServletResponse resp = new MockHttpServletResponse();
+ servlet.callDoPost(req, resp);
+ return resp;
+ }
+
+ private static class TestableServlet extends JsonRpcServlet { // widens access to the protected doPost for testing
+
+ void callDoPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ doPost(req, resp);
+ }
+ }
+}