@@ -5,7 +5,7 @@ | |||
<parent> | |||
<groupId>com.jd.blockchain</groupId> | |||
<artifactId>jdchain-root</artifactId> | |||
<version>1.1.1-PACK20191209</version> | |||
<version>1.1.2.RELEASE</version> | |||
</parent> | |||
<artifactId>base</artifactId> | |||
@@ -121,12 +121,16 @@ public interface DataCodes { | |||
public static final int ACCOUNT_HEADER = 0x710; | |||
public static final int USER = 0x800; | |||
public static final int USER_ACCOUNT_HEADER = 0x800; | |||
public static final int USER_INFO = 0x801; | |||
public static final int DATA = 0x900; | |||
// contract related; | |||
public static final int CONTRACT = 0xA00; | |||
public static final int CONTRACT_ACCOUNT_HEADER = 0xA00; | |||
public static final int CONTRACT_INFO = 0xA01; | |||
// ...0xA19 | |||
public static final int HASH = 0xB00; | |||
@@ -5,7 +5,7 @@ | |||
<parent> | |||
<groupId>com.jd.blockchain</groupId> | |||
<artifactId>jdchain-root</artifactId> | |||
<version>1.1.1-PACK20191209</version> | |||
<version>1.1.2.RELEASE</version> | |||
</parent> | |||
<artifactId>binary-proto</artifactId> | |||
@@ -21,8 +21,6 @@ class DynamicDataContract implements InvocationHandler { | |||
private DataContractEncoderImpl contractEncoder; | |||
// private BytesSlice contractBytes; | |||
// 字段的数据片段列表,首个是 HeaderSlice,其次是按字段顺序排列的数据片段; | |||
private BytesSlices[] dataSlices; | |||
@@ -5,7 +5,7 @@ | |||
<parent> | |||
<groupId>com.jd.blockchain</groupId> | |||
<artifactId>consensus</artifactId> | |||
<version>1.1.1-PACK20191209</version> | |||
<version>1.1.2.RELEASE</version> | |||
</parent> | |||
<artifactId>consensus-bftsmart</artifactId> | |||
@@ -10,6 +10,7 @@ import bftsmart.consensus.app.BatchAppResultImpl; | |||
import bftsmart.tom.*; | |||
import com.jd.blockchain.binaryproto.BinaryProtocol; | |||
import com.jd.blockchain.consensus.service.*; | |||
import com.jd.blockchain.crypto.HashDigest; | |||
import com.jd.blockchain.ledger.*; | |||
import com.jd.blockchain.transaction.TxResponseMessage; | |||
import com.jd.blockchain.utils.serialize.binary.BinarySerializeUtils; | |||
@@ -354,141 +355,97 @@ public class BftsmartNodeServer extends DefaultRecoverable implements NodeServer | |||
// }); | |||
} | |||
/** | |||
* | |||
* Used by consensus write phase, pre compute new block hash | |||
* | |||
*/ | |||
// public BatchAppResultImpl preComputeAppHash(byte[][] commands) { | |||
// String batchId = messageHandle.beginBatch(realmName); | |||
// List<AsyncFuture<byte[]>> asyncFutureLinkedList = new ArrayList<>(commands.length); | |||
// List<byte[]> responseLinkedList = new ArrayList<>(); | |||
// try { | |||
// int msgId = 0; | |||
// for (byte[] txContent : commands) { | |||
// AsyncFuture<byte[]> asyncFuture = messageHandle.processOrdered(msgId++, txContent, realmName, batchId); | |||
// asyncFutureLinkedList.add(asyncFuture); | |||
// } | |||
// StateSnapshot stateSnapshot = messageHandle.completeBatch(realmName, batchId); | |||
// byte[] blockHashBytes = stateSnapshot.getSnapshot(); | |||
// | |||
// for (int i = 0; i< asyncFutureLinkedList.size(); i++) { | |||
// responseLinkedList.add(asyncFutureLinkedList.get(i).get()); | |||
// } | |||
// | |||
// | |||
// return new BatchAppResultImpl(responseLinkedList, blockHashBytes, batchId); | |||
// | |||
// } catch (Exception e) { | |||
// // todo 需要处理应答码 404 | |||
// LOGGER.error("Error occurred while processing ordered messages! --" + e.getMessage(), e); | |||
// messageHandle.rollbackBatch(realmName, batchId, TransactionState.IGNORED_BY_CONSENSUS_PHASE_PRECOMPUTE_ROLLBACK.CODE); | |||
// } | |||
// | |||
// return null; | |||
// } | |||
/** | |||
* Used by consensus write phase, pre compute new block hash | |||
*/ | |||
public BatchAppResultImpl preComputeAppHash(byte[][] commands) { | |||
String batchId = messageHandle.beginBatch(realmName); | |||
List<AsyncFuture<byte[]>> asyncFutureLinkedList = new ArrayList<>(commands.length); | |||
List<byte[]> responseLinkedList = new ArrayList<>(); | |||
BatchAppResultImpl result; | |||
StateSnapshot newStateSnapshot = null; | |||
StateSnapshot preStateSnapshot = null; | |||
StateSnapshot genisStateSnapshot = null; | |||
BatchAppResultImpl result = null; | |||
String batchId = null; | |||
int msgId = 0; | |||
try { | |||
int msgId = 0; | |||
boolean isOK = true; | |||
TransactionState transactionState = TransactionState.IGNORED_BY_BLOCK_FULL_ROLLBACK; | |||
batchId = messageHandle.beginBatch(realmName); | |||
genisStateSnapshot = messageHandle.getGenisStateSnapshot(realmName); | |||
preStateSnapshot = messageHandle.getStateSnapshot(realmName); | |||
if (preStateSnapshot == null) { | |||
throw new IllegalStateException("Pre block state snapshot is null!"); | |||
} | |||
for (int i = 0; i < commands.length; i++) { | |||
byte[] txContent = commands[i]; | |||
try { | |||
AsyncFuture<byte[]> asyncFuture = messageHandle.processOrdered(msgId++, txContent, realmName, batchId); | |||
asyncFutureLinkedList.add(asyncFuture); | |||
} catch (BlockRollbackException e) { | |||
LOGGER.error("Error occurred while processing ordered messages! --" + e.getMessage(), e); | |||
isOK = false; | |||
// TODO: handle the BlockRollbackException in detail; | |||
if (e instanceof DataVersionConflictException) { | |||
transactionState = TransactionState.DATA_VERSION_CONFLICT; | |||
} | |||
break; | |||
} | |||
AsyncFuture<byte[]> asyncFuture = messageHandle.processOrdered(msgId++, txContent, realmName, batchId); | |||
asyncFutureLinkedList.add(asyncFuture); | |||
} | |||
newStateSnapshot = messageHandle.completeBatch(realmName, batchId); | |||
if (isOK) { | |||
StateSnapshot stateSnapshot = messageHandle.completeBatch(realmName, batchId); | |||
byte[] blockHashBytes = stateSnapshot.getSnapshot(); | |||
for (int i = 0; i < asyncFutureLinkedList.size(); i++) { | |||
responseLinkedList.add(asyncFutureLinkedList.get(i).get()); | |||
} | |||
result = new BatchAppResultImpl(responseLinkedList, blockHashBytes, batchId); | |||
result.setErrorCode((byte) 0); | |||
return result; | |||
} else { | |||
for (int i = 0; i < commands.length; i++) { | |||
responseLinkedList.add(createAppResponse(commands[i],transactionState)); | |||
} | |||
Random random = new Random(); | |||
byte[] rand = new byte[4]; | |||
random.nextBytes(rand); | |||
for (int i = 0; i < asyncFutureLinkedList.size(); i++) { | |||
responseLinkedList.add(asyncFutureLinkedList.get(i).get()); | |||
} | |||
result = new BatchAppResultImpl(responseLinkedList, rand, batchId); | |||
result.setErrorCode((byte) 1); | |||
result = new BatchAppResultImpl(responseLinkedList, newStateSnapshot.getSnapshot(), batchId, genisStateSnapshot.getSnapshot()); | |||
result.setErrorCode((byte) 0); | |||
return result; | |||
} catch (Exception e) { | |||
LOGGER.error("Error occurred while pre compute app! --" + e.getMessage(), e); | |||
for (int i = 0; i < commands.length; i++) { | |||
responseLinkedList.add(createAppResponse(commands[i],TransactionState.IGNORED_BY_BLOCK_FULL_ROLLBACK)); | |||
} | |||
} catch (Exception e) { | |||
LOGGER.error("Error occurred while genearte batch app result! --" + e.getMessage(), e); | |||
throw e; | |||
result = new BatchAppResultImpl(responseLinkedList,preStateSnapshot.getSnapshot(), batchId, genisStateSnapshot.getSnapshot()); | |||
result.setErrorCode((byte) 1); | |||
} | |||
return result; | |||
} | |||
public byte[] createAppResponse(byte[] command, TransactionState transactionState) { | |||
// Block full rollback responses, generated in pre compute phase, due to tx exception | |||
private byte[] createAppResponse(byte[] command, TransactionState transactionState) { | |||
TransactionRequest txRequest = BinaryProtocol.decode(command); | |||
TxResponseMessage resp = new TxResponseMessage(txRequest.getTransactionContent().getHash()); | |||
// resp.setExecutionState(TransactionState.IGNORED_BY_BLOCK_FULL_ROLLBACK); | |||
resp.setExecutionState(transactionState); | |||
return BinaryProtocol.encode(resp, TransactionResponse.class); | |||
} | |||
/** | |||
* | |||
* Consensus write phase will terminate, new block hash values are inconsistent, update batch messages execute state | |||
* | |||
*/ | |||
public List<byte[]> updateAppResponses(List<byte[]> asyncResponseLinkedList) { | |||
public List<byte[]> updateAppResponses(List<byte[]> asyncResponseLinkedList, byte[] commonHash, boolean isConsistent) { | |||
List<byte[]> updatedResponses = new ArrayList<>(); | |||
TxResponseMessage resp = null; | |||
for(int i = 0; i < asyncResponseLinkedList.size(); i++) { | |||
TransactionResponse txResponse = BinaryProtocol.decode(asyncResponseLinkedList.get(i)); | |||
TxResponseMessage resp = new TxResponseMessage(txResponse.getContentHash()); | |||
resp.setExecutionState(TransactionState.IGNORED_BY_CONSENSUS_PHASE_PRECOMPUTE_ROLLBACK); | |||
if (isConsistent) { | |||
resp = new TxResponseMessage(txResponse.getContentHash()); | |||
} | |||
else { | |||
resp = new TxResponseMessage(new HashDigest(commonHash)); | |||
} | |||
resp.setExecutionState(TransactionState.IGNORED_BY_BLOCK_FULL_ROLLBACK); | |||
updatedResponses.add(BinaryProtocol.encode(resp, TransactionResponse.class)); | |||
} | |||
return updatedResponses; | |||
} | |||
return updatedResponses; | |||
} | |||
/** | |||
* | |||
* Decision has been made at the consensus stage, commit block | |||
* | |||
*/ | |||
public void preComputeAppCommit(String batchId) { | |||
messageHandle.commitBatch(realmName, batchId); | |||
try { | |||
messageHandle.commitBatch(realmName, batchId); | |||
} catch (BlockRollbackException e) { | |||
LOGGER.error("Error occurred while pre compute commit --" + e.getMessage(), e); | |||
throw e; | |||
} | |||
} | |||
/** | |||
@@ -497,7 +454,7 @@ public class BftsmartNodeServer extends DefaultRecoverable implements NodeServer | |||
* | |||
*/ | |||
public void preComputeAppRollback(String batchId) { | |||
messageHandle.rollbackBatch(realmName, batchId, TransactionState.IGNORED_BY_CONSENSUS_PHASE_PRECOMPUTE_ROLLBACK.CODE); | |||
messageHandle.rollbackBatch(realmName, batchId, TransactionState.IGNORED_BY_BLOCK_FULL_ROLLBACK.CODE); | |||
LOGGER.debug("Rollback of operations that cause inconsistencies in the ledger"); | |||
} | |||
@@ -5,7 +5,7 @@ | |||
<parent> | |||
<groupId>com.jd.blockchain</groupId> | |||
<artifactId>consensus</artifactId> | |||
<version>1.1.1-PACK20191209</version> | |||
<version>1.1.2.RELEASE</version> | |||
</parent> | |||
<artifactId>consensus-framework</artifactId> | |||
@@ -19,7 +19,8 @@ public class ConsensusProviders { | |||
provider = providers.get(className); | |||
if (provider == null) { | |||
provider = loadProvider(ConsensusProvider.class, className); | |||
providers.put(className, provider); | |||
// providers.put(className, provider); | |||
registerProvider(provider); | |||
} | |||
} | |||
} | |||
@@ -59,4 +59,19 @@ public interface MessageHandle { | |||
*/ | |||
AsyncFuture<byte[]> processUnordered(byte[] message); | |||
/** | |||
* 获得当前最新区块的状态快照 | |||
* | |||
* @param realmName | |||
* @return 最新区块的状态快照 | |||
*/ | |||
StateSnapshot getStateSnapshot(String realmName); | |||
/** | |||
* 获得创世区块的状态快照 | |||
* @param realmName | |||
* @return 创世区块的状态快照 | |||
*/ | |||
StateSnapshot getGenisStateSnapshot(String realmName); | |||
} |
@@ -5,7 +5,7 @@ | |||
<parent> | |||
<groupId>com.jd.blockchain</groupId> | |||
<artifactId>consensus</artifactId> | |||
<version>1.1.1-PACK20191209</version> | |||
<version>1.1.2.RELEASE</version> | |||
</parent> | |||
<artifactId>consensus-mq</artifactId> | |||
@@ -5,7 +5,7 @@ | |||
<parent> | |||
<groupId>com.jd.blockchain</groupId> | |||
<artifactId>jdchain-root</artifactId> | |||
<version>1.1.1-PACK20191209</version> | |||
<version>1.1.2.RELEASE</version> | |||
</parent> | |||
<artifactId>consensus</artifactId> | |||
<packaging>pom</packaging> | |||
@@ -5,7 +5,7 @@ | |||
<parent> | |||
<groupId>com.jd.blockchain</groupId> | |||
<artifactId>contract</artifactId> | |||
<version>1.1.1-PACK20191209</version> | |||
<version>1.1.2.RELEASE</version> | |||
</parent> | |||
<artifactId>contract-framework</artifactId> | |||
@@ -5,7 +5,7 @@ | |||
<parent> | |||
<groupId>com.jd.blockchain</groupId> | |||
<artifactId>contract</artifactId> | |||
<version>1.1.1-PACK20191209</version> | |||
<version>1.1.2.RELEASE</version> | |||
</parent> | |||
<artifactId>contract-jvm</artifactId> | |||
@@ -5,7 +5,7 @@ | |||
<parent> | |||
<groupId>com.jd.blockchain</groupId> | |||
<artifactId>contract</artifactId> | |||
<version>1.1.1-PACK20191209</version> | |||
<version>1.1.2.RELEASE</version> | |||
</parent> | |||
<artifactId>contract-maven-plugin</artifactId> | |||
<packaging>maven-plugin</packaging> | |||
@@ -5,7 +5,7 @@ | |||
<parent> | |||
<artifactId>contract</artifactId> | |||
<groupId>com.jd.blockchain</groupId> | |||
<version>1.1.1-PACK20191209</version> | |||
<version>1.1.2.RELEASE</version> | |||
</parent> | |||
<modelVersion>4.0.0</modelVersion> | |||
@@ -1,7 +1,7 @@ | |||
package com.jd.blockchain.contract; | |||
import com.jd.blockchain.crypto.HashDigest; | |||
import com.jd.blockchain.ledger.KVDataEntry; | |||
import com.jd.blockchain.ledger.TypedKVEntry; | |||
@Contract | |||
public class ReadContractImpl implements EventProcessingAware, ReadContract { | |||
@@ -24,7 +24,7 @@ public class ReadContractImpl implements EventProcessingAware, ReadContract { | |||
@Override | |||
@ContractEvent(name = "read-key") | |||
public String read(String address, String key) { | |||
KVDataEntry[] kvDataEntries = eventContext.getLedger().getDataEntries(ledgerHash, address, key); | |||
TypedKVEntry[] kvDataEntries = eventContext.getLedger().getDataEntries(ledgerHash, address, key); | |||
if (kvDataEntries != null && kvDataEntries.length == 1) { | |||
return kvDataEntries[0].getValue().toString(); | |||
@@ -35,7 +35,7 @@ public class ReadContractImpl implements EventProcessingAware, ReadContract { | |||
@Override | |||
@ContractEvent(name = "version-key") | |||
public Long readVersion(String address, String key) { | |||
KVDataEntry[] kvDataEntries = eventContext.getLedger().getDataEntries(ledgerHash, address, key); | |||
TypedKVEntry[] kvDataEntries = eventContext.getLedger().getDataEntries(ledgerHash, address, key); | |||
if (kvDataEntries != null && kvDataEntries.length == 1) { | |||
return kvDataEntries[0].getVersion(); | |||
@@ -2,7 +2,7 @@ package com.jd.blockchain.contract; | |||
import com.alibaba.fastjson.JSON; | |||
import com.jd.blockchain.crypto.HashDigest; | |||
import com.jd.blockchain.ledger.KVDataEntry; | |||
import com.jd.blockchain.ledger.TypedKVEntry; | |||
import com.jd.blockchain.ledger.KVDataVO; | |||
import com.jd.blockchain.ledger.KVInfoVO; | |||
@@ -14,7 +14,7 @@ public class TransferContractImpl implements EventProcessingAware, TransferContr | |||
@Override | |||
public String create(String address, String account, long money) { | |||
KVDataEntry[] kvDataEntries = eventContext.getLedger().getDataEntries(ledgerHash, address, account); | |||
TypedKVEntry[] kvDataEntries = eventContext.getLedger().getDataEntries(ledgerHash, address, account); | |||
// 肯定有返回值,但若不存在则返回version=-1 | |||
if (kvDataEntries != null && kvDataEntries.length > 0) { | |||
long currVersion = kvDataEntries[0].getVersion(); | |||
@@ -32,13 +32,13 @@ public class TransferContractImpl implements EventProcessingAware, TransferContr | |||
@Override | |||
public String transfer(String address, String from, String to, long money) { | |||
// 首先查询余额 | |||
KVDataEntry[] kvDataEntries = eventContext.getLedger().getDataEntries(ledgerHash, address, from, to); | |||
TypedKVEntry[] kvDataEntries = eventContext.getLedger().getDataEntries(ledgerHash, address, from, to); | |||
if (kvDataEntries == null || kvDataEntries.length != 2) { | |||
throw new IllegalStateException(String.format("%s -> %s - %s may be not created !!!", address, from, to)); | |||
} else { | |||
// 判断from账号中钱数量是否足够 | |||
long fromMoney = 0L, toMoney = 0L, fromVersion = 0L, toVersion = 0L; | |||
for (KVDataEntry kvDataEntry : kvDataEntries) { | |||
for (TypedKVEntry kvDataEntry : kvDataEntries) { | |||
if (kvDataEntry.getKey().equals(from)) { | |||
fromMoney = (long) kvDataEntry.getValue(); | |||
fromVersion = kvDataEntry.getVersion(); | |||
@@ -62,7 +62,7 @@ public class TransferContractImpl implements EventProcessingAware, TransferContr | |||
@Override | |||
public long read(String address, String account) { | |||
KVDataEntry[] kvDataEntries = eventContext.getLedger().getDataEntries(ledgerHash, address, account); | |||
TypedKVEntry[] kvDataEntries = eventContext.getLedger().getDataEntries(ledgerHash, address, account); | |||
if (kvDataEntries == null || kvDataEntries.length == 0) { | |||
return -1; | |||
} | |||
@@ -71,7 +71,7 @@ public class TransferContractImpl implements EventProcessingAware, TransferContr | |||
@Override | |||
public String readAll(String address, String account) { | |||
KVDataEntry[] kvDataEntries = eventContext.getLedger().getDataEntries(ledgerHash, address, account); | |||
TypedKVEntry[] kvDataEntries = eventContext.getLedger().getDataEntries(ledgerHash, address, account); | |||
// 获取最新的版本号 | |||
if (kvDataEntries == null || kvDataEntries.length == 0) { | |||
return ""; | |||
@@ -91,7 +91,7 @@ public class TransferContractImpl implements EventProcessingAware, TransferContr | |||
KVInfoVO kvInfoVO = new KVInfoVO(kvDataVOS); | |||
KVDataEntry[] allEntries = eventContext.getLedger().getDataEntries(ledgerHash, address, kvInfoVO); | |||
TypedKVEntry[] allEntries = eventContext.getLedger().getDataEntries(ledgerHash, address, kvInfoVO); | |||
return JSON.toJSONString(allEntries); | |||
} | |||
@@ -4,7 +4,7 @@ | |||
<parent> | |||
<groupId>com.jd.blockchain</groupId> | |||
<artifactId>jdchain-root</artifactId> | |||
<version>1.1.1-PACK20191209</version> | |||
<version>1.1.2.RELEASE</version> | |||
</parent> | |||
<artifactId>contract</artifactId> | |||
<packaging>pom</packaging> | |||
@@ -5,7 +5,7 @@ | |||
<parent> | |||
<groupId>com.jd.blockchain</groupId> | |||
<artifactId>crypto</artifactId> | |||
<version>1.1.1-PACK20191209</version> | |||
<version>1.1.2.RELEASE</version> | |||
</parent> | |||
<artifactId>crypto-adv</artifactId> | |||
@@ -5,7 +5,7 @@ | |||
<parent> | |||
<groupId>com.jd.blockchain</groupId> | |||
<artifactId>crypto</artifactId> | |||
<version>1.1.1-PACK20191209</version> | |||
<version>1.1.2.RELEASE</version> | |||
</parent> | |||
<artifactId>crypto-classic</artifactId> | |||
@@ -5,12 +5,10 @@ import static com.jd.blockchain.crypto.CryptoBytes.ALGORYTHM_CODE_SIZE; | |||
import java.util.Arrays; | |||
import com.jd.blockchain.crypto.CryptoAlgorithm; | |||
import com.jd.blockchain.crypto.CryptoBytes; | |||
import com.jd.blockchain.crypto.CryptoException; | |||
import com.jd.blockchain.crypto.HashDigest; | |||
import com.jd.blockchain.crypto.HashFunction; | |||
import com.jd.blockchain.crypto.utils.classic.RIPEMD160Utils; | |||
import com.jd.blockchain.utils.security.RipeMD160Utils; | |||
public class RIPEMD160HashFunction implements HashFunction { | |||
@@ -19,7 +17,7 @@ public class RIPEMD160HashFunction implements HashFunction { | |||
private static final int DIGEST_BYTES = 160 / 8; | |||
private static final int DIGEST_LENGTH = ALGORYTHM_CODE_SIZE + DIGEST_BYTES; | |||
RIPEMD160HashFunction() { | |||
} | |||
@@ -30,7 +28,6 @@ public class RIPEMD160HashFunction implements HashFunction { | |||
@Override | |||
public HashDigest hash(byte[] data) { | |||
if (data == null) { | |||
throw new CryptoException("data is null!"); | |||
} | |||
@@ -39,6 +36,16 @@ public class RIPEMD160HashFunction implements HashFunction { | |||
return new HashDigest(RIPEMD160, digestBytes); | |||
} | |||
/**
 * Computes the RIPEMD-160 digest over a sub-range of {@code data}.
 *
 * @param data   source bytes; must not be {@code null}
 * @param offset start index of the range to hash
 * @param len    number of bytes to hash
 * @return the digest wrapped as a {@link HashDigest} tagged with the RIPEMD160 algorithm code
 * @throws CryptoException if {@code data} is {@code null}
 */
@Override
public HashDigest hash(byte[] data, int offset, int len) {
if (data == null) {
throw new CryptoException("data is null!");
}
byte[] digestBytes = RIPEMD160Utils.hash(data, offset, len);
return new HashDigest(RIPEMD160, digestBytes);
}
@Override | |||
public boolean verify(HashDigest digest, byte[] data) { | |||
HashDigest hashDigest = hash(data); | |||
@@ -59,5 +66,5 @@ public class RIPEMD160HashFunction implements HashFunction { | |||
throw new CryptoException("digestBytes is invalid!"); | |||
} | |||
} | |||
} |
@@ -5,12 +5,10 @@ import static com.jd.blockchain.crypto.CryptoBytes.ALGORYTHM_CODE_SIZE; | |||
import java.util.Arrays; | |||
import com.jd.blockchain.crypto.CryptoAlgorithm; | |||
import com.jd.blockchain.crypto.CryptoBytes; | |||
import com.jd.blockchain.crypto.CryptoException; | |||
import com.jd.blockchain.crypto.HashDigest; | |||
import com.jd.blockchain.crypto.HashFunction; | |||
import com.jd.blockchain.crypto.utils.classic.SHA256Utils; | |||
import com.jd.blockchain.utils.security.ShaUtils; | |||
public class SHA256HashFunction implements HashFunction { | |||
@@ -30,7 +28,6 @@ public class SHA256HashFunction implements HashFunction { | |||
@Override | |||
public HashDigest hash(byte[] data) { | |||
if (data == null) { | |||
throw new CryptoException("data is null!"); | |||
} | |||
@@ -38,6 +35,16 @@ public class SHA256HashFunction implements HashFunction { | |||
byte[] digestBytes = SHA256Utils.hash(data); | |||
return new HashDigest(SHA256, digestBytes); | |||
} | |||
/**
 * Computes the SHA-256 digest over a sub-range of {@code data}.
 *
 * @param data   source bytes; must not be {@code null}
 * @param offset start index of the range to hash
 * @param len    number of bytes to hash
 * @return the digest wrapped as a {@link HashDigest} tagged with the SHA256 algorithm code
 * @throws CryptoException if {@code data} is {@code null}
 */
@Override
public HashDigest hash(byte[] data, int offset, int len) {
if (data == null) {
throw new CryptoException("data is null!");
}
byte[] digestBytes = SHA256Utils.hash(data, offset, len);
return new HashDigest(SHA256, digestBytes);
}
@Override | |||
public boolean verify(HashDigest digest, byte[] data) { | |||
@@ -10,16 +10,26 @@ import org.bouncycastle.crypto.digests.RIPEMD160Digest; | |||
*/ | |||
public class RIPEMD160Utils { | |||
// The length of RIPEMD160 output is 20 bytes | |||
private static final int RIPEMD160DIGEST_LENGTH = 160 / 8; | |||
// The length of RIPEMD160 output is 20 bytes | |||
public static final int RIPEMD160DIGEST_LENGTH = 160 / 8; | |||
public static byte[] hash(byte[] data){ | |||
public static byte[] hash(byte[] data) { | |||
byte[] result = new byte[RIPEMD160DIGEST_LENGTH]; | |||
RIPEMD160Digest ripemd160Digest = new RIPEMD160Digest(); | |||
byte[] result = new byte[RIPEMD160DIGEST_LENGTH]; | |||
RIPEMD160Digest ripemd160Digest = new RIPEMD160Digest(); | |||
ripemd160Digest.update(data,0,data.length); | |||
ripemd160Digest.doFinal(result,0); | |||
return result; | |||
} | |||
ripemd160Digest.update(data, 0, data.length); | |||
ripemd160Digest.doFinal(result, 0); | |||
return result; | |||
} | |||
/**
 * Computes the raw 20-byte RIPEMD-160 digest over {@code len} bytes of
 * {@code data} starting at {@code offset}.
 *
 * @param data   source bytes
 * @param offset start index of the range to hash
 * @param len    number of bytes to hash
 * @return a new 20-byte array holding the digest
 */
public static byte[] hash(byte[] data, int offset, int len) {
byte[] result = new byte[RIPEMD160DIGEST_LENGTH];
// A fresh digest instance per call keeps this method stateless and thread-safe.
RIPEMD160Digest ripemd160Digest = new RIPEMD160Digest();
ripemd160Digest.update(data, offset, len);
ripemd160Digest.doFinal(result, 0);
return result;
}
} |
@@ -11,7 +11,7 @@ import org.bouncycastle.crypto.digests.SHA256Digest; | |||
public class SHA256Utils { | |||
// The length of SHA256 output is 32 bytes | |||
private static final int SHA256DIGEST_LENGTH = 256 / 8; | |||
public static final int SHA256DIGEST_LENGTH = 256 / 8; | |||
public static byte[] hash(byte[] data){ | |||
@@ -22,4 +22,14 @@ public class SHA256Utils { | |||
sha256Digest.doFinal(result,0); | |||
return result; | |||
} | |||
/**
 * Computes the raw 32-byte SHA-256 digest over {@code len} bytes of
 * {@code data} starting at {@code offset}.
 *
 * @param data   source bytes
 * @param offset start index of the range to hash
 * @param len    number of bytes to hash
 * @return a new 32-byte array holding the digest
 */
public static byte[] hash(byte[] data, int offset, int len){
byte[] result = new byte[SHA256DIGEST_LENGTH];
// A fresh digest instance per call keeps this method stateless and thread-safe.
SHA256Digest sha256Digest = new SHA256Digest();
sha256Digest.update(data, offset, len);
sha256Digest.doFinal(result,0);
return result;
}
} |
@@ -5,7 +5,7 @@ | |||
<parent> | |||
<groupId>com.jd.blockchain</groupId> | |||
<artifactId>crypto</artifactId> | |||
<version>1.1.1-PACK20191209</version> | |||
<version>1.1.2.RELEASE</version> | |||
</parent> | |||
<artifactId>crypto-framework</artifactId> | |||
@@ -10,6 +10,14 @@ public interface HashFunction extends CryptoFunction { | |||
*/ | |||
HashDigest hash(byte[] data); | |||
/**
* 计算指定数据片段的 hash;
*
* @param data 待计算的数据;
* @param offset 数据的起始偏移量;
* @param len 参与计算的字节长度;
* @return 数据片段的 hash 摘要;
*/
HashDigest hash(byte[] data, int offset, int len); | |||
/** | |||
* 校验 hash 摘要与指定的数据是否匹配; | |||
@@ -5,7 +5,7 @@ | |||
<parent> | |||
<artifactId>crypto</artifactId> | |||
<groupId>com.jd.blockchain</groupId> | |||
<version>1.1.1-PACK20191209</version> | |||
<version>1.1.2.RELEASE</version> | |||
</parent> | |||
<modelVersion>4.0.0</modelVersion> | |||
@@ -5,7 +5,7 @@ | |||
<parent> | |||
<groupId>com.jd.blockchain</groupId> | |||
<artifactId>crypto</artifactId> | |||
<version>1.1.1-PACK20191209</version> | |||
<version>1.1.2.RELEASE</version> | |||
</parent> | |||
<artifactId>crypto-sm</artifactId> | |||
@@ -16,7 +16,7 @@ public class SM3HashFunction implements HashFunction { | |||
private static final int DIGEST_BYTES = 256 / 8; | |||
private static final int DIGEST_LENGTH = CryptoBytes.ALGORYTHM_CODE_SIZE + DIGEST_BYTES; | |||
SM3HashFunction() { | |||
} | |||
@@ -27,7 +27,6 @@ public class SM3HashFunction implements HashFunction { | |||
@Override | |||
public HashDigest hash(byte[] data) { | |||
if (data == null) { | |||
throw new CryptoException("data is null!"); | |||
} | |||
@@ -36,6 +35,16 @@ public class SM3HashFunction implements HashFunction { | |||
return new HashDigest(SM3, digestBytes); | |||
} | |||
/**
 * Computes the SM3 digest over a sub-range of {@code data}.
 *
 * @param data   source bytes; must not be {@code null}
 * @param offset start index of the range to hash
 * @param len    number of bytes to hash
 * @return the digest wrapped as a {@link HashDigest} tagged with the SM3 algorithm code
 * @throws CryptoException if {@code data} is {@code null}
 */
@Override
public HashDigest hash(byte[] data, int offset, int len) {
if (data == null) {
throw new CryptoException("data is null!");
}
byte[] digestBytes = SM3Utils.hash(data, offset, len);
return new HashDigest(SM3, digestBytes);
}
@Override | |||
public boolean verify(HashDigest digest, byte[] data) { | |||
HashDigest hashDigest = hash(data); | |||
@@ -4,19 +4,30 @@ import org.bouncycastle.crypto.digests.SM3Digest; | |||
public class SM3Utils { | |||
// The length of sm3 output is 32 bytes | |||
private static final int SM3DIGEST_LENGTH = 32; | |||
// The length of sm3 output is 32 bytes | |||
public static final int SM3DIGEST_LENGTH = 32; | |||
public static byte[] hash(byte[] data) { | |||
public static byte[] hash(byte[] data) { | |||
byte[] result = new byte[SM3DIGEST_LENGTH]; | |||
byte[] result = new byte[SM3DIGEST_LENGTH]; | |||
SM3Digest sm3digest = new SM3Digest(); | |||
SM3Digest sm3digest = new SM3Digest(); | |||
sm3digest.update(data, 0, data.length); | |||
sm3digest.doFinal(result, 0); | |||
sm3digest.update(data, 0, data.length); | |||
sm3digest.doFinal(result, 0); | |||
return result; | |||
} | |||
} | |||
return result; | |||
} | |||
/**
 * Computes the raw 32-byte SM3 digest over {@code len} bytes of
 * {@code data} starting at {@code offset}.
 *
 * @param data   source bytes
 * @param offset start index of the range to hash
 * @param len    number of bytes to hash
 * @return a new 32-byte array holding the digest
 */
public static byte[] hash(byte[] data, int offset, int len) {
byte[] result = new byte[SM3DIGEST_LENGTH];
// A fresh digest instance per call keeps this method stateless and thread-safe.
SM3Digest sm3digest = new SM3Digest();
sm3digest.update(data, offset, len);
sm3digest.doFinal(result, 0);
return result;
}
} |
@@ -5,7 +5,7 @@ | |||
<parent> | |||
<groupId>com.jd.blockchain</groupId> | |||
<artifactId>jdchain-root</artifactId> | |||
<version>1.1.1-PACK20191209</version> | |||
<version>1.1.2.RELEASE</version> | |||
</parent> | |||
<artifactId>crypto</artifactId> | |||
<packaging>pom</packaging> | |||
@@ -1,20 +0,0 @@ | |||
package com.jd.blockchain; | |||
import static org.junit.Assert.assertTrue; | |||
import org.junit.Test; | |||
/** | |||
* Unit test for simple App. | |||
*/ | |||
public class AppTest | |||
{ | |||
/** | |||
* Rigorous Test :-) | |||
*/ | |||
@Test | |||
public void shouldAnswerWithTrue() | |||
{ | |||
assertTrue( true ); | |||
} | |||
} |
@@ -5,7 +5,7 @@ | |||
<parent> | |||
<groupId>com.jd.blockchain</groupId> | |||
<artifactId>deployment</artifactId> | |||
<version>1.1.1-PACK20191209</version> | |||
<version>1.1.2.RELEASE</version> | |||
</parent> | |||
<artifactId>deployment-gateway</artifactId> | |||
@@ -66,6 +66,28 @@ | |||
</execution> | |||
</executions> | |||
</plugin> | |||
<!-- 生成SHA-256校验文件 -->
<plugin>
	<groupId>net.nicoulaj.maven.plugins</groupId>
	<artifactId>checksum-maven-plugin</artifactId>
	<version>1.8</version>
	<executions>
		<execution>
			<goals>
				<goal>artifacts</goal>
			</goals>
		</execution>
	</executions>
	<configuration>
		<algorithms>
			<algorithm>SHA-256</algorithm> <!-- 采用SHA-256算法,还支持其他算法 -->
		</algorithms>
		<!-- 本模块为 deployment-gateway,校验文件应针对 gateway 的发布包生成(原配置误指向 deployment-peer) -->
		<file>${project.basedir}/target/deployment-gateway-${project.version}.zip</file>
		<xmlSummary>true</xmlSummary><!-- 生成XML格式的SHA-256校验摘要文件 -->
		<xmlSummaryFile>${project.basedir}/target/SHA-256.xml</xmlSummaryFile>
	</configuration>
</plugin>
</plugins> | |||
</build> | |||
</project> |
@@ -12,14 +12,17 @@ | |||
<fileSet> | |||
<directory>src/main/resources/scripts</directory> | |||
<outputDirectory>bin</outputDirectory> | |||
<lineEnding>unix</lineEnding> | |||
</fileSet> | |||
<fileSet> | |||
<directory>src/main/resources/config</directory> | |||
<outputDirectory>config</outputDirectory> | |||
<lineEnding>unix</lineEnding> | |||
</fileSet> | |||
<fileSet> | |||
<directory>src/main/resources/docs</directory> | |||
<outputDirectory>docs</outputDirectory> | |||
<lineEnding>unix</lineEnding> | |||
</fileSet> | |||
</fileSets> | |||
<dependencySets> | |||
@@ -20,7 +20,7 @@ peer.providers=com.jd.blockchain.consensus.bftsmart.BftsmartConsensusProvider | |||
#若该值不配置或配置不正确,则浏览器模糊查询部分无法正常显示 | |||
#数据检索服务模块(Argus)需单独部署,若不部署其他功能仍可正常使用 | |||
data.retrieval.url=http://127.0.0.1:10001 | |||
schema.retrieval.url=http://192.168.151.40:8082 | |||
schema.retrieval.url=http://127.0.0.1:8082 | |||
#默认公钥的内容(Base58编码数据); | |||
keys.default.pubkey= | |||
@@ -1768,7 +1768,7 @@ KVInfoVO对应格式如下: | |||
KVInfoVO说明: | |||
+ 1)支持多个Key作为入参; | |||
+ 2)每个Key支持多个version; | |||
@@ -1808,6 +1808,57 @@ http://localhost/ledgers/657TQAw6ssVoeKniWGwbovk7njvCTvikPambM9eBv6ezs/accounts/ | |||
|type|value类型| | |||
|value|值| | |||
### 6.9 查询某数据账户键数量 | |||
``` | |||
GET /ledgers/{ledger}/accounts/address/{address}/keys/count/search?keyword={keyword} | |||
``` | |||
#### 参数 | |||
请求类型 | 名称 | 是否必需 | 说明 | 数据类型 | |||
--- | --- | --- | --- | --- | |||
path | ledger | 是 | 所要搜索的账本,需要完整的账本哈希 | string | |||
path | address | 是 | 所要搜索的数据账户地址,需要完整的数据账户地址 | string | |||
query | keyword | 否 | 键的部分字符,空表示全部 | string | |||
#### 请求实例 | |||
``` | |||
http://localhost/ledgers/657TQAw6ssVoeKniWGwbovk7njvCTvikPambM9eBv6ezs/accounts/address/5Sm4gWXrNpDWW9Boi4xZCzZMHboRvEDm29Fa/keys/count/search?keyword=j | |||
``` | |||
#### 返回实例 | |||
``` | |||
{ "data": 66, "success": true } | |||
``` | |||
说明 | |||
名称 | 说明 | |||
--- | --- | |||
data | 条件查询键总数 | |||
### 6.10 查询某数据账户键 | |||
``` | |||
GET /ledgers/{ledger}/accounts/address/{address}/keys/search?keyword={keyword}&fromIndex={start_index}&count={count} | |||
``` | |||
#### 参数 | |||
请求类型 | 名称 | 是否必需 | 说明 | 数据类型 | |||
--- | --- | --- | --- | --- | |||
path | ledger | 是 | 所要搜索的账本,需要完整的账本哈希 | string | |||
path | address | 是 | 所要搜索的数据账户地址,需要完整的数据账户地址 | string | |||
query | keyword | 否 | 键的部分字符,空表示全部 | string | |||
query | start_index | 否 | 查询数据账户对应Key的起始序号,默认为0 | 数字 | |||
query | count | 否 | 查询返回数据账户对应Key的数量,默认最大返回值为100,小于0或大于100均返回最大可返回结果集 | 数字 | |||
#### 请求实例 | |||
``` | |||
http://localhost/ledgers/657TQAw6ssVoeKniWGwbovk7njvCTvikPambM9eBv6ezs/accounts/address/5Sm4gWXrNpDWW9Boi4xZCzZMHboRvEDm29Fa/keys/search?keyword=j&fromIndex=0&count=-1 | |||
``` | |||
#### 返回实例 | |||
``` | |||
{ "data": [ { "key": "jd" }, { "key": "jdchain" }], "success": true } | |||
``` | |||
说明 | |||
名称 | 说明 | |||
--- | --- | |||
key | 键 | |||
## 7 搜索 | |||
@@ -2,8 +2,18 @@ | |||
HOME=$(cd `dirname $0`;cd ../; pwd) | |||
GATEWAY=$(ls $HOME/lib | grep deployment-gateway-) | |||
PROC_INFO=$HOME/lib/$GATEWAY" -c "$HOME/config/gateway.conf | |||
#echo $PROC_INFO | |||
#get PID | |||
PID=`ps -ef | grep "$PROC_INFO" | grep -v grep | awk '{print $2}'` | |||
#echo $PID | |||
if [[ ! -z $PID ]] | |||
then | |||
echo "process already exists,please check... If necessary, you should kill the process first." | |||
exit | |||
fi | |||
if [ ! -n "$GATEWAY" ]; then | |||
echo "GateWay Is Null !!!" | |||
else | |||
nohup java -jar -server -Djdchain.log=$HOME $HOME/lib/$GATEWAY -c $HOME/config/gateway.conf $* >$HOME/bin/gw.out 2>&1 & | |||
nohup java -jar -server -Djdchain.log=$HOME $PROC_INFO $* >$HOME/bin/gw.out 2>&1 & | |||
fi |
@@ -5,7 +5,7 @@ | |||
<parent> | |||
<groupId>com.jd.blockchain</groupId> | |||
<artifactId>deployment</artifactId> | |||
<version>1.1.1-PACK20191209</version> | |||
<version>1.1.2.RELEASE</version> | |||
</parent> | |||
<artifactId>deployment-peer</artifactId> | |||
@@ -101,6 +101,28 @@ | |||
</executions> | |||
</plugin> | |||
<!-- 生成SHA-256校验文件 --> | |||
<plugin> | |||
<groupId>net.nicoulaj.maven.plugins</groupId> | |||
<artifactId>checksum-maven-plugin</artifactId> | |||
<version>1.8</version> | |||
<executions> | |||
<execution> | |||
<goals> | |||
<goal>artifacts</goal> | |||
</goals> | |||
</execution> | |||
</executions> | |||
<configuration> | |||
<algorithms> | |||
<algorithm>SHA-256</algorithm> <!--采用SHA-256算法,还支持其他算法--> | |||
</algorithms> | |||
<file>${project.basedir}/target/deployment-peer-${project.version}.zip</file><!-- compute the checksum of the distribution zip (checksum, not encryption) -->
<xmlSummary>true</xmlSummary><!-- emit an XML-format SHA-256 summary file -->
<xmlSummaryFile>${project.basedir}/target/SHA-256.xml</xmlSummaryFile> | |||
</configuration> | |||
</plugin> | |||
</plugins> | |||
</build> | |||
</project> |
@@ -12,14 +12,17 @@ | |||
<fileSet> | |||
<directory>src/main/resources/scripts</directory> | |||
<outputDirectory>bin</outputDirectory> | |||
<lineEnding>unix</lineEnding> | |||
</fileSet> | |||
<fileSet> | |||
<directory>src/main/resources/config</directory> | |||
<outputDirectory>config</outputDirectory> | |||
<lineEnding>unix</lineEnding> | |||
</fileSet> | |||
<fileSet> | |||
<directory>src/main/resources/docs</directory> | |||
<outputDirectory>docs</outputDirectory> | |||
<lineEnding>unix</lineEnding> | |||
</fileSet> | |||
</fileSets> | |||
<dependencySets> | |||
@@ -87,7 +87,7 @@ cons_parti.0.consensus.host=127.0.0.1 | |||
#第0个参与方的共识服务的端口; | |||
cons_parti.0.consensus.port=8900 | |||
#第0个参与方的共识服务是否开启安全连接; | |||
cons_parti.0.consensus.secure=true | |||
cons_parti.0.consensus.secure=false | |||
#第0个参与方的账本初始服务的主机; | |||
cons_parti.0.initializer.host=127.0.0.1 | |||
@@ -2,8 +2,18 @@ | |||
# Start the JD Chain manager (UMP) service if it is not already running.
HOME=$(cd `dirname $0`;cd ../; pwd)
# Locate the manager-booter jar shipped under $HOME/manager.
UMP=$(ls $HOME/manager | grep manager-booter-)
# Full launch signature; also used below to detect an already-running instance.
PROC_INFO=$HOME/manager/$UMP" -home "$HOME" -p 8000"
#echo $PROC_INFO
#get PID
PID=`ps -ef | grep "$PROC_INFO" | grep -v grep | awk '{print $2}'`
#echo $PID
if [[ ! -z $PID ]]
then
    echo "process already exists,please check... If necessary, you should kill the process first."
    exit
fi
# BUGFIX: the test previously checked the literal string "UMP" (always
# non-empty), so the "Is Null" branch could never fire; test the variable.
if [ ! -n "$UMP" ]; then
    echo "JDChain Manager Is Null !!!"
else
    # FIX: diff residue left two nohup lines; keep only the $PROC_INFO launch.
    nohup java -jar -server -Djdchain.log=$HOME $PROC_INFO $* >$HOME/bin/jump.out 2>&1 &
fi
@@ -2,8 +2,18 @@ | |||
# Start the JD Chain peer node if it is not already running.
HOME=$(cd `dirname $0`;cd ../; pwd)
# Locate the peer jar shipped under $HOME/system.
PEER=$(ls $HOME/system | grep deployment-peer-)
# Full launch signature; also used below to detect an already-running instance.
PROC_INFO=$HOME/system/$PEER" -home="$HOME" -c "$HOME/config/ledger-binding.conf" -p 7080"
#echo $PROC_INFO
#get PID
PID=`ps -ef | grep "$PROC_INFO" | grep -v grep | awk '{print $2}'`
#echo $PID
if [[ ! -z $PID ]]
then
    echo "process already exists,please check... If necessary, you should kill the process first."
    exit
fi
if [ ! -n "$PEER" ]; then
    echo "Peer Is Null !!!"
else
    # FIX: diff residue left two nohup lines (old explicit-args/-Xmx2g launch
    # and the new $PROC_INFO/-Xmx1g one); keep only the new launch so the
    # peer is started exactly once.
    nohup java -jar -server -Xmx1g -Xms1g -Djdchain.log=$HOME $PROC_INFO $* >$HOME/bin/peer.out 2>&1 &
fi
@@ -5,7 +5,7 @@ | |||
<parent> | |||
<groupId>com.jd.blockchain</groupId> | |||
<artifactId>jdchain-root</artifactId> | |||
<version>1.1.1-PACK20191209</version> | |||
<version>1.1.2.RELEASE</version> | |||
</parent> | |||
<artifactId>deployment</artifactId> | |||
<packaging>pom</packaging> | |||
@@ -5,7 +5,7 @@ | |||
<parent> | |||
<groupId>com.jd.blockchain</groupId> | |||
<artifactId>jdchain-root</artifactId> | |||
<version>1.1.1-PACK20191209</version> | |||
<version>1.1.2.RELEASE</version> | |||
</parent> | |||
<artifactId>gateway</artifactId> | |||
@@ -5,9 +5,6 @@ import java.util.List; | |||
import javax.servlet.http.HttpServletRequest; | |||
import com.jd.blockchain.gateway.service.GatewayQueryService; | |||
import com.jd.blockchain.sdk.LedgerBaseSettings; | |||
import com.jd.blockchain.utils.decompiler.utils.DecompilerUtils; | |||
import org.slf4j.LoggerFactory; | |||
import org.springframework.beans.factory.annotation.Autowired; | |||
import org.springframework.web.bind.annotation.PathVariable; | |||
@@ -23,9 +20,9 @@ import com.jd.blockchain.crypto.KeyGenUtils; | |||
import com.jd.blockchain.crypto.PubKey; | |||
import com.jd.blockchain.gateway.PeerService; | |||
import com.jd.blockchain.gateway.service.DataRetrievalService; | |||
import com.jd.blockchain.ledger.AccountHeader; | |||
import com.jd.blockchain.gateway.service.GatewayQueryService; | |||
import com.jd.blockchain.ledger.BlockchainIdentity; | |||
import com.jd.blockchain.ledger.ContractInfo; | |||
import com.jd.blockchain.ledger.KVDataEntry; | |||
import com.jd.blockchain.ledger.KVInfoVO; | |||
import com.jd.blockchain.ledger.LedgerAdminInfo; | |||
import com.jd.blockchain.ledger.LedgerBlock; | |||
@@ -34,11 +31,14 @@ import com.jd.blockchain.ledger.LedgerMetadata; | |||
import com.jd.blockchain.ledger.LedgerTransaction; | |||
import com.jd.blockchain.ledger.ParticipantNode; | |||
import com.jd.blockchain.ledger.TransactionState; | |||
import com.jd.blockchain.ledger.TypedKVEntry; | |||
import com.jd.blockchain.ledger.UserInfo; | |||
import com.jd.blockchain.sdk.BlockchainExtendQueryService; | |||
import com.jd.blockchain.sdk.ContractSettings; | |||
import com.jd.blockchain.sdk.LedgerBaseSettings; | |||
import com.jd.blockchain.utils.BaseConstant; | |||
import com.jd.blockchain.utils.ConsoleUtils; | |||
import com.jd.blockchain.utils.decompiler.utils.DecompilerUtils; | |||
@RestController | |||
@RequestMapping(path = "/") | |||
@@ -252,7 +252,7 @@ public class BlockBrowserController implements BlockchainExtendQueryService { | |||
@RequestMapping(method = RequestMethod.GET, path = "ledgers/{ledgerHash}/accounts/address/{address}") | |||
@Override | |||
public AccountHeader getDataAccount(@PathVariable(name = "ledgerHash") HashDigest ledgerHash, | |||
public BlockchainIdentity getDataAccount(@PathVariable(name = "ledgerHash") HashDigest ledgerHash, | |||
@PathVariable(name = "address") String address) { | |||
return peerService.getQueryService().getDataAccount(ledgerHash, address); | |||
@@ -261,7 +261,7 @@ public class BlockBrowserController implements BlockchainExtendQueryService { | |||
@RequestMapping(method = { RequestMethod.GET, | |||
RequestMethod.POST }, path = "ledgers/{ledgerHash}/accounts/{address}/entries") | |||
@Override | |||
public KVDataEntry[] getDataEntries(@PathVariable("ledgerHash") HashDigest ledgerHash, | |||
public TypedKVEntry[] getDataEntries(@PathVariable("ledgerHash") HashDigest ledgerHash, | |||
@PathVariable("address") String address, @RequestParam("keys") String... keys) { | |||
return peerService.getQueryService().getDataEntries(ledgerHash, address, keys); | |||
} | |||
@@ -269,7 +269,7 @@ public class BlockBrowserController implements BlockchainExtendQueryService { | |||
@RequestMapping(method = { RequestMethod.GET, | |||
RequestMethod.POST }, path = "ledgers/{ledgerHash}/accounts/{address}/entries-version") | |||
@Override | |||
public KVDataEntry[] getDataEntries(@PathVariable("ledgerHash") HashDigest ledgerHash, | |||
public TypedKVEntry[] getDataEntries(@PathVariable("ledgerHash") HashDigest ledgerHash, | |||
@PathVariable("address") String address, @RequestBody KVInfoVO kvInfoVO) { | |||
return peerService.getQueryService().getDataEntries(ledgerHash, address, kvInfoVO); | |||
} | |||
@@ -277,7 +277,7 @@ public class BlockBrowserController implements BlockchainExtendQueryService { | |||
@RequestMapping(method = { RequestMethod.GET, | |||
RequestMethod.POST }, path = "ledgers/{ledgerHash}/accounts/address/{address}/entries") | |||
@Override | |||
public KVDataEntry[] getDataEntries(@PathVariable("ledgerHash") HashDigest ledgerHash, | |||
public TypedKVEntry[] getDataEntries(@PathVariable("ledgerHash") HashDigest ledgerHash, | |||
@PathVariable("address") String address, | |||
@RequestParam(name = "fromIndex", required = false, defaultValue = "0") int fromIndex, | |||
@RequestParam(name = "count", required = false, defaultValue = "-1") int count) { | |||
@@ -594,7 +594,7 @@ public class BlockBrowserController implements BlockchainExtendQueryService { | |||
*/ | |||
@RequestMapping(method = RequestMethod.GET, path = "ledgers/{ledgerHash}/users") | |||
@Override | |||
public AccountHeader[] getUsers(@PathVariable(name = "ledgerHash") HashDigest ledgerHash, | |||
public BlockchainIdentity[] getUsers(@PathVariable(name = "ledgerHash") HashDigest ledgerHash, | |||
@RequestParam(name = "fromIndex", required = false, defaultValue = "0") int fromIndex, | |||
@RequestParam(name = "count", required = false, defaultValue = "-1") int count) { | |||
return revertAccountHeader(peerService.getQueryService().getUsers(ledgerHash, fromIndex, count)); | |||
@@ -602,7 +602,7 @@ public class BlockBrowserController implements BlockchainExtendQueryService { | |||
@RequestMapping(method = RequestMethod.GET, path = "ledgers/{ledgerHash}/accounts") | |||
@Override | |||
public AccountHeader[] getDataAccounts(@PathVariable(name = "ledgerHash") HashDigest ledgerHash, | |||
public BlockchainIdentity[] getDataAccounts(@PathVariable(name = "ledgerHash") HashDigest ledgerHash, | |||
@RequestParam(name = "fromIndex", required = false, defaultValue = "0") int fromIndex, | |||
@RequestParam(name = "count", required = false, defaultValue = "-1") int count) { | |||
return revertAccountHeader(peerService.getQueryService().getDataAccounts(ledgerHash, fromIndex, count)); | |||
@@ -610,18 +610,18 @@ public class BlockBrowserController implements BlockchainExtendQueryService { | |||
@RequestMapping(method = RequestMethod.GET, path = "ledgers/{ledgerHash}/contracts") | |||
@Override | |||
public AccountHeader[] getContractAccounts(@PathVariable(name = "ledgerHash") HashDigest ledgerHash, | |||
public BlockchainIdentity[] getContractAccounts(@PathVariable(name = "ledgerHash") HashDigest ledgerHash, | |||
@RequestParam(name = "fromIndex", required = false, defaultValue = "0") int fromIndex, | |||
@RequestParam(name = "count", required = false, defaultValue = "-1") int count) { | |||
return revertAccountHeader(peerService.getQueryService().getContractAccounts(ledgerHash, fromIndex, count)); | |||
} | |||
/** | |||
* reverse the AccountHeader[] content; the latest record show first; | |||
* reverse the BlockchainIdentity[] content; the latest record show first; | |||
* @return | |||
*/ | |||
private AccountHeader[] revertAccountHeader(AccountHeader[] accountHeaders){ | |||
AccountHeader[] accounts = new AccountHeader[accountHeaders.length]; | |||
private BlockchainIdentity[] revertAccountHeader(BlockchainIdentity[] accountHeaders){ | |||
BlockchainIdentity[] accounts = new BlockchainIdentity[accountHeaders.length]; | |||
if(accountHeaders!=null && accountHeaders.length>0){ | |||
for (int i = 0; i < accountHeaders.length; i++) { | |||
accounts[accountHeaders.length-1-i] = accountHeaders[i]; | |||
@@ -5,7 +5,7 @@ | |||
<parent> | |||
<groupId>com.jd.blockchain</groupId> | |||
<artifactId>ledger</artifactId> | |||
<version>1.1.1-PACK20191209</version> | |||
<version>1.1.2.RELEASE</version> | |||
</parent> | |||
<artifactId>ledger-core</artifactId> | |||
@@ -2,7 +2,7 @@ package com.jd.blockchain.ledger.core; | |||
import com.jd.blockchain.crypto.PubKey; | |||
import com.jd.blockchain.ledger.AccountHeader; | |||
import com.jd.blockchain.ledger.BlockchainIdentity; | |||
import com.jd.blockchain.utils.Bytes; | |||
/** | |||
@@ -19,7 +19,7 @@ public interface AccountAccessPolicy { | |||
* @param account | |||
* @return Return true if it satisfies this policy, or false if it doesn't; | |||
*/ | |||
boolean checkDataWriting(AccountHeader account); | |||
boolean checkDataWriting(BlockchainIdentity account); | |||
boolean checkRegistering(Bytes address, PubKey pubKey); | |||
@@ -0,0 +1,45 @@ | |||
package com.jd.blockchain.ledger.core; | |||
import com.jd.blockchain.crypto.HashDigest; | |||
import com.jd.blockchain.ledger.Account; | |||
import com.jd.blockchain.ledger.BlockchainIdentity; | |||
import com.jd.blockchain.ledger.HashProof; | |||
import com.jd.blockchain.ledger.MerkleSnapshot; | |||
import com.jd.blockchain.ledger.TypedValue; | |||
import com.jd.blockchain.utils.Bytes; | |||
import com.jd.blockchain.utils.Dataset; | |||
public class AccountDecorator implements Account, HashProvable, MerkleSnapshot{ | |||
private CompositeAccount mklAccount; | |||
public AccountDecorator(CompositeAccount mklAccount) { | |||
this.mklAccount = mklAccount; | |||
} | |||
protected Dataset<String, TypedValue> getHeaders() { | |||
return mklAccount.getHeaders(); | |||
} | |||
@Override | |||
public HashDigest getRootHash() { | |||
return mklAccount.getRootHash(); | |||
} | |||
@Override | |||
public HashProof getProof(Bytes key) { | |||
return mklAccount.getProof(key); | |||
} | |||
@Override | |||
public BlockchainIdentity getID() { | |||
return mklAccount.getID(); | |||
} | |||
@Override | |||
public Dataset<String, TypedValue> getDataset() { | |||
return mklAccount.getDataset(); | |||
} | |||
} |
@@ -1,12 +1,11 @@ | |||
package com.jd.blockchain.ledger.core; | |||
import com.jd.blockchain.ledger.AccountHeader; | |||
import com.jd.blockchain.ledger.BlockchainIdentity; | |||
import com.jd.blockchain.ledger.MerkleProof; | |||
import com.jd.blockchain.utils.Bytes; | |||
public interface AccountQuery<T> extends MerkleProvable { | |||
AccountHeader[] getHeaders(int fromIndex, int count); | |||
/** | |||
* 返回总数; | |||
* | |||
@@ -14,7 +13,15 @@ public interface AccountQuery<T> extends MerkleProvable { | |||
*/ | |||
long getTotal(); | |||
BlockchainIdentity[] getHeaders(int fromIndex, int count); | |||
boolean contains(Bytes address); | |||
/** | |||
* get proof of specified account; | |||
*/ | |||
@Override | |||
MerkleProof getProof(Bytes address); | |||
/** | |||
* 返回账户实例; | |||
@@ -0,0 +1,12 @@ | |||
package com.jd.blockchain.ledger.core;

import com.jd.blockchain.ledger.Account;
import com.jd.blockchain.ledger.MerkleSnapshot;
import com.jd.blockchain.ledger.TypedValue;
import com.jd.blockchain.utils.Dataset;

/**
 * An {@link Account} that is also a Merkle snapshot and hash-provable, and
 * additionally exposes a typed header dataset alongside the account's data.
 */
public interface CompositeAccount extends Account, MerkleSnapshot, HashProvable{

	// Header key-value set of the account (distinct from Account.getDataset()).
	Dataset<String, TypedValue> getHeaders();

}
@@ -1,81 +1,72 @@ | |||
package com.jd.blockchain.ledger.core; | |||
import com.jd.blockchain.crypto.HashDigest; | |||
import com.jd.blockchain.crypto.PubKey; | |||
import com.jd.blockchain.ledger.BytesData; | |||
import com.jd.blockchain.ledger.BytesValue; | |||
import com.jd.blockchain.ledger.ContractInfo; | |||
import com.jd.blockchain.ledger.MerkleProof; | |||
import com.jd.blockchain.ledger.TypedValue; | |||
import com.jd.blockchain.utils.Bytes; | |||
public class ContractAccount implements ContractInfo { | |||
public class ContractAccount extends AccountDecorator implements ContractInfo { | |||
private static final Bytes CONTRACT_INFO_PREFIX = Bytes.fromString("INFO" + LedgerConsts.KEY_SEPERATOR); | |||
private static final String CONTRACT_INFO_PREFIX = "INFO" + LedgerConsts.KEY_SEPERATOR; | |||
private static final Bytes CHAIN_CODE_KEY = Bytes.fromString("CHAIN-CODE"); | |||
private static final String CHAIN_CODE_KEY = "CHAIN-CODE"; | |||
private MerkleAccount accBase; | |||
public ContractAccount(MerkleAccount accBase) { | |||
this.accBase = accBase; | |||
public ContractAccount(CompositeAccount mklAccount) { | |||
super(mklAccount); | |||
} | |||
@Override | |||
public Bytes getAddress() { | |||
return accBase.getAddress(); | |||
return getID().getAddress(); | |||
} | |||
@Override | |||
public PubKey getPubKey() { | |||
return accBase.getPubKey(); | |||
} | |||
@Override | |||
public HashDigest getRootHash() { | |||
return accBase.getRootHash(); | |||
return getID().getPubKey(); | |||
} | |||
public MerkleProof getChaincodeProof() { | |||
return accBase.getProof(CHAIN_CODE_KEY); | |||
} | |||
public MerkleProof getPropertyProof(Bytes key) { | |||
return accBase.getProof(encodePropertyKey(key)); | |||
} | |||
// public MerkleProof getChaincodeProof() { | |||
// return getHeaders().getProof(CHAIN_CODE_KEY); | |||
// } | |||
// | |||
// public MerkleProof getPropertyProof(Bytes key) { | |||
// return getHeaders().getProof(encodePropertyKey(key)); | |||
// } | |||
public long setChaincode(byte[] chaincode, long version) { | |||
BytesValue bytesValue = BytesData.fromBytes(chaincode); | |||
return accBase.setBytes(CHAIN_CODE_KEY, bytesValue, version); | |||
TypedValue bytesValue = TypedValue.fromBytes(chaincode); | |||
return getHeaders().setValue(CHAIN_CODE_KEY, bytesValue, version); | |||
} | |||
public byte[] getChainCode() { | |||
return accBase.getBytes(CHAIN_CODE_KEY).getValue().toBytes(); | |||
return getHeaders().getValue(CHAIN_CODE_KEY).getBytes().toBytes(); | |||
} | |||
public byte[] getChainCode(long version) { | |||
return accBase.getBytes(CHAIN_CODE_KEY, version).getValue().toBytes(); | |||
return getHeaders().getValue(CHAIN_CODE_KEY, version).getBytes().toBytes(); | |||
} | |||
public long getChaincodeVersion() { | |||
return accBase.getVersion(CHAIN_CODE_KEY); | |||
return getHeaders().getVersion(CHAIN_CODE_KEY); | |||
} | |||
public long setProperty(Bytes key, String value, long version) { | |||
BytesValue bytesValue = BytesData.fromText(value); | |||
return accBase.setBytes(encodePropertyKey(key), bytesValue, version); | |||
public long setProperty(String key, String value, long version) { | |||
TypedValue bytesValue = TypedValue.fromText(value); | |||
return getHeaders().setValue(encodePropertyKey(key), bytesValue, version); | |||
} | |||
public String getProperty(Bytes key) { | |||
BytesValue bytesValue = accBase.getBytes(encodePropertyKey(key)); | |||
return BytesData.toText(bytesValue); | |||
public String getProperty(String key) { | |||
BytesValue bytesValue = getHeaders().getValue(encodePropertyKey(key)); | |||
return TypedValue.wrap(bytesValue).stringValue(); | |||
} | |||
public String getProperty(Bytes key, long version) { | |||
BytesValue bytesValue = accBase.getBytes(encodePropertyKey(key), version); | |||
return BytesData.toText(bytesValue); | |||
public String getProperty(String key, long version) { | |||
BytesValue bytesValue = getHeaders().getValue(encodePropertyKey(key), version); | |||
return TypedValue.wrap(bytesValue).stringValue(); | |||
} | |||
private Bytes encodePropertyKey(Bytes key) { | |||
private String encodePropertyKey(String key) { | |||
return CONTRACT_INFO_PREFIX.concat(key); | |||
} | |||
@@ -2,7 +2,7 @@ package com.jd.blockchain.ledger.core; | |||
import com.jd.blockchain.crypto.HashDigest; | |||
import com.jd.blockchain.crypto.PubKey; | |||
import com.jd.blockchain.ledger.AccountHeader; | |||
import com.jd.blockchain.ledger.BlockchainIdentity; | |||
import com.jd.blockchain.ledger.CryptoSetting; | |||
import com.jd.blockchain.ledger.DigitalSignature; | |||
import com.jd.blockchain.ledger.MerkleProof; | |||
@@ -17,17 +17,18 @@ public class ContractAccountSet implements Transactional, ContractAccountQuery { | |||
public ContractAccountSet(CryptoSetting cryptoSetting, String prefix, ExPolicyKVStorage exStorage, | |||
VersioningKVStorage verStorage, AccountAccessPolicy accessPolicy) { | |||
accountSet = new MerkleAccountSet(cryptoSetting, prefix, exStorage, verStorage, accessPolicy); | |||
accountSet = new MerkleAccountSet(cryptoSetting, Bytes.fromString(prefix), exStorage, verStorage, accessPolicy); | |||
} | |||
public ContractAccountSet(HashDigest dataRootHash, CryptoSetting cryptoSetting, String prefix, | |||
ExPolicyKVStorage exStorage, VersioningKVStorage verStorage, boolean readonly, | |||
AccountAccessPolicy accessPolicy) { | |||
accountSet = new MerkleAccountSet(dataRootHash, cryptoSetting, prefix, exStorage, verStorage, readonly, accessPolicy); | |||
accountSet = new MerkleAccountSet(dataRootHash, cryptoSetting, Bytes.fromString(prefix), exStorage, verStorage, | |||
readonly, accessPolicy); | |||
} | |||
@Override | |||
public AccountHeader[] getHeaders(int fromIndex, int count) { | |||
public BlockchainIdentity[] getHeaders(int fromIndex, int count) { | |||
return accountSet.getHeaders(fromIndex, count); | |||
} | |||
@@ -66,7 +67,7 @@ public class ContractAccountSet implements Transactional, ContractAccountQuery { | |||
@Override | |||
public ContractAccount getAccount(Bytes address) { | |||
MerkleAccount accBase = accountSet.getAccount(address); | |||
CompositeAccount accBase = accountSet.getAccount(address); | |||
return new ContractAccount(accBase); | |||
} | |||
@@ -77,7 +78,7 @@ public class ContractAccountSet implements Transactional, ContractAccountQuery { | |||
@Override | |||
public ContractAccount getAccount(Bytes address, long version) { | |||
MerkleAccount accBase = accountSet.getAccount(address, version); | |||
CompositeAccount accBase = accountSet.getAccount(address, version); | |||
return new ContractAccount(accBase); | |||
} | |||
@@ -92,7 +93,7 @@ public class ContractAccountSet implements Transactional, ContractAccountQuery { | |||
*/ | |||
public ContractAccount deploy(Bytes address, PubKey pubKey, DigitalSignature addressSignature, byte[] chaincode) { | |||
// TODO: 校验和记录合约地址签名; | |||
MerkleAccount accBase = accountSet.register(address, pubKey); | |||
CompositeAccount accBase = accountSet.register(address, pubKey); | |||
ContractAccount contractAcc = new ContractAccount(accBase); | |||
contractAcc.setChaincode(chaincode, -1); | |||
return contractAcc; | |||
@@ -107,7 +108,7 @@ public class ContractAccountSet implements Transactional, ContractAccountQuery { | |||
* @return 返回链码的新版本号; | |||
*/ | |||
public long update(Bytes address, byte[] chaincode, long version) { | |||
MerkleAccount accBase = accountSet.getAccount(address); | |||
CompositeAccount accBase = accountSet.getAccount(address); | |||
ContractAccount contractAcc = new ContractAccount(accBase); | |||
return contractAcc.setChaincode(chaincode, version); | |||
} | |||
@@ -1,268 +1,234 @@ | |||
package com.jd.blockchain.ledger.core; | |||
import com.jd.blockchain.binaryproto.BinaryProtocol; | |||
import com.jd.blockchain.crypto.HashDigest; | |||
import com.jd.blockchain.crypto.PubKey; | |||
import com.jd.blockchain.ledger.AccountHeader; | |||
import com.jd.blockchain.ledger.BytesData; | |||
import com.jd.blockchain.ledger.BytesValue; | |||
import com.jd.blockchain.ledger.KVDataEntry; | |||
import com.jd.blockchain.ledger.KVDataObject; | |||
import com.jd.blockchain.ledger.MerkleProof; | |||
import com.jd.blockchain.utils.Bytes; | |||
public class DataAccount implements AccountHeader, MerkleProvable { | |||
private MerkleAccount baseAccount; | |||
public DataAccount(MerkleAccount accBase) { | |||
this.baseAccount = accBase; | |||
} | |||
@Override | |||
public Bytes getAddress() { | |||
return baseAccount.getAddress(); | |||
} | |||
@Override | |||
public PubKey getPubKey() { | |||
return baseAccount.getPubKey(); | |||
} | |||
@Override | |||
public HashDigest getRootHash() { | |||
return baseAccount.getRootHash(); | |||
} | |||
/** | |||
* 返回指定数据的存在性证明; | |||
*/ | |||
@Override | |||
public MerkleProof getProof(Bytes key) { | |||
return baseAccount.getProof(key); | |||
} | |||
public class DataAccount extends AccountDecorator { | |||
public DataAccount(CompositeAccount mklAccount) { | |||
super(mklAccount); | |||
} | |||
// /** | |||
// * Create or update the value associated the specified key if the version | |||
// * checking is passed.<br> | |||
// * | |||
// * The value of the key will be updated only if it's latest version equals the | |||
// * specified version argument. <br> | |||
// * If the key doesn't exist, the version checking will be ignored, and key will | |||
// * be created with a new sequence number as id. <br> | |||
// * It also could specify the version argument to -1 to ignore the version | |||
// * checking. | |||
// * <p> | |||
// * If updating is performed, the version of the key increase by 1. <br> | |||
// * If creating is performed, the version of the key initialize by 0. <br> | |||
// * | |||
// * @param key The key of data; | |||
// * @param value The value of data; | |||
// * @param version The expected version of the key. | |||
// * @return The new version of the key. <br> | |||
// * If the key is new created success, then return 0; <br> | |||
// * If the key is updated success, then return the new version;<br> | |||
// * If this operation fail by version checking or other reason, then | |||
// * return -1; | |||
// */ | |||
// public long setBytes(Bytes key, BytesValue value, long version) { | |||
// return super.getDataset().setValue(key, value, version); | |||
// } | |||
// | |||
// /** | |||
// * Create or update the value associated the specified key if the version | |||
// * checking is passed.<br> | |||
// * | |||
// * The value of the key will be updated only if it's latest version equals the | |||
// * specified version argument. <br> | |||
// * If the key doesn't exist, the version checking will be ignored, and key will | |||
// * be created with a new sequence number as id. <br> | |||
// * It also could specify the version argument to -1 to ignore the version | |||
// * checking. | |||
// * <p> | |||
// * If updating is performed, the version of the key increase by 1. <br> | |||
// * If creating is performed, the version of the key initialize by 0. <br> | |||
// * | |||
// * @param key The key of data; | |||
// * @param value The value of data; | |||
// * @param version The expected version of the key. | |||
// * @return The new version of the key. <br> | |||
// * If the key is new created success, then return 0; <br> | |||
// * If the key is updated success, then return the new version;<br> | |||
// * If this operation fail by version checking or other reason, then | |||
// * return -1; | |||
// */ | |||
// public long setBytes(Bytes key, String value, long version) { | |||
// BytesValue bytesValue = TypedValue.fromText(value); | |||
// return baseAccount.setValue(key, bytesValue, version); | |||
// } | |||
// | |||
// /** | |||
// * Create or update the value associated the specified key if the version | |||
// * checking is passed.<br> | |||
// * | |||
// * The value of the key will be updated only if it's latest version equals the | |||
// * specified version argument. <br> | |||
// * If the key doesn't exist, the version checking will be ignored, and key will | |||
// * be created with a new sequence number as id. <br> | |||
// * It also could specify the version argument to -1 to ignore the version | |||
// * checking. | |||
// * <p> | |||
// * If updating is performed, the version of the key increase by 1. <br> | |||
// * If creating is performed, the version of the key initialize by 0. <br> | |||
// * | |||
// * @param key The key of data; | |||
// * @param value The value of data; | |||
// * @param version The expected version of the key. | |||
// * @return The new version of the key. <br> | |||
// * If the key is new created success, then return 0; <br> | |||
// * If the key is updated success, then return the new version;<br> | |||
// * If this operation fail by version checking or other reason, then | |||
// * return -1; | |||
// */ | |||
// public long setBytes(Bytes key, byte[] value, long version) { | |||
// BytesValue bytesValue = TypedValue.fromBytes(value); | |||
// return baseAccount.setValue(key, bytesValue, version); | |||
// } | |||
// | |||
// /** | |||
// * Return the latest version entry associated the specified key; If the key | |||
// * doesn't exist, then return -1; | |||
// * | |||
// * @param key | |||
// * @return | |||
// */ | |||
// public long getDataVersion(String key) { | |||
// return baseAccount.getVersion(Bytes.fromString(key)); | |||
// } | |||
// | |||
// /** | |||
// * Return the latest version entry associated the specified key; If the key | |||
// * doesn't exist, then return -1; | |||
// * | |||
// * @param key | |||
// * @return | |||
// */ | |||
// public long getDataVersion(Bytes key) { | |||
// return baseAccount.getVersion(key); | |||
// } | |||
// | |||
// /** | |||
// * return the latest version's value; | |||
// * | |||
// * @param key | |||
// * @return return null if not exist; | |||
// */ | |||
// public BytesValue getBytes(String key) { | |||
// return baseAccount.getValue(Bytes.fromString(key)); | |||
// } | |||
// | |||
// /** | |||
// * return the latest version's value; | |||
// * | |||
// * @param key | |||
// * @return return null if not exist; | |||
// */ | |||
// public BytesValue getBytes(Bytes key) { | |||
// return baseAccount.getValue(key); | |||
// } | |||
// | |||
// /** | |||
// * return the specified version's value; | |||
// * | |||
// * @param key | |||
// * @param version | |||
// * @return return null if not exist; | |||
// */ | |||
// public BytesValue getBytes(String key, long version) { | |||
// return baseAccount.getValue(Bytes.fromString(key), version); | |||
// } | |||
// | |||
// /** | |||
// * return the specified version's value; | |||
// * | |||
// * @param key | |||
// * @param version | |||
// * @return return null if not exist; | |||
// */ | |||
// public BytesValue getBytes(Bytes key, long version) { | |||
// return baseAccount.getValue(key, version); | |||
// } | |||
/** | |||
* Create or update the value associated the specified key if the version | |||
* checking is passed.<br> | |||
* | |||
* The value of the key will be updated only if it's latest version equals the | |||
* specified version argument. <br> | |||
* If the key doesn't exist, the version checking will be ignored, and key will | |||
* be created with a new sequence number as id. <br> | |||
* It also could specify the version argument to -1 to ignore the version | |||
* checking. | |||
* <p> | |||
* If updating is performed, the version of the key increase by 1. <br> | |||
* If creating is performed, the version of the key initialize by 0. <br> | |||
* | |||
* @param key The key of data; | |||
* @param value The value of data; | |||
* @param version The expected version of the key. | |||
* @return The new version of the key. <br> | |||
* If the key is new created success, then return 0; <br> | |||
* If the key is updated success, then return the new version;<br> | |||
* If this operation fail by version checking or other reason, then | |||
* return -1; | |||
*/ | |||
public long setBytes(Bytes key, BytesValue value, long version) { | |||
return baseAccount.setBytes(key, value, version); | |||
} | |||
/** | |||
* Create or update the value associated the specified key if the version | |||
* checking is passed.<br> | |||
* | |||
* The value of the key will be updated only if it's latest version equals the | |||
* specified version argument. <br> | |||
* If the key doesn't exist, the version checking will be ignored, and key will | |||
* be created with a new sequence number as id. <br> | |||
* It also could specify the version argument to -1 to ignore the version | |||
* checking. | |||
* <p> | |||
* If updating is performed, the version of the key increase by 1. <br> | |||
* If creating is performed, the version of the key initialize by 0. <br> | |||
* | |||
* @param key The key of data; | |||
* @param value The value of data; | |||
* @param version The expected version of the key. | |||
* @return The new version of the key. <br> | |||
* If the key is new created success, then return 0; <br> | |||
* If the key is updated success, then return the new version;<br> | |||
* If this operation fail by version checking or other reason, then | |||
* return -1; | |||
*/ | |||
public long setBytes(Bytes key, String value, long version) { | |||
BytesValue bytesValue = BytesData.fromText(value); | |||
return baseAccount.setBytes(key, bytesValue, version); | |||
} | |||
/** | |||
* Create or update the value associated the specified key if the version | |||
* checking is passed.<br> | |||
* | |||
* The value of the key will be updated only if it's latest version equals the | |||
* specified version argument. <br> | |||
* If the key doesn't exist, the version checking will be ignored, and key will | |||
* be created with a new sequence number as id. <br> | |||
* It also could specify the version argument to -1 to ignore the version | |||
* checking. | |||
* <p> | |||
* If updating is performed, the version of the key increase by 1. <br> | |||
* If creating is performed, the version of the key initialize by 0. <br> | |||
* | |||
* @param key The key of data; | |||
* @param value The value of data; | |||
* @param version The expected version of the key. | |||
* @return The new version of the key. <br> | |||
* If the key is new created success, then return 0; <br> | |||
* If the key is updated success, then return the new version;<br> | |||
* If this operation fail by version checking or other reason, then | |||
* return -1; | |||
*/ | |||
public long setBytes(Bytes key, byte[] value, long version) { | |||
BytesValue bytesValue = BytesData.fromBytes(value); | |||
return baseAccount.setBytes(key, bytesValue, version); | |||
} | |||
/** | |||
* Return the latest version entry associated the specified key; If the key | |||
* doesn't exist, then return -1; | |||
* | |||
* @param key | |||
* @return | |||
*/ | |||
public long getDataVersion(String key) { | |||
return baseAccount.getVersion(Bytes.fromString(key)); | |||
} | |||
/** | |||
* Return the latest version entry associated the specified key; If the key | |||
* doesn't exist, then return -1; | |||
* | |||
* @param key | |||
* @return | |||
*/ | |||
public long getDataVersion(Bytes key) { | |||
return baseAccount.getVersion(key); | |||
} | |||
/** | |||
* return the latest version's value; | |||
* | |||
* @param key | |||
* @return return null if not exist; | |||
*/ | |||
public BytesValue getBytes(String key) { | |||
return baseAccount.getBytes(Bytes.fromString(key)); | |||
} | |||
/** | |||
* return the latest version's value; | |||
* | |||
* @param key | |||
* @return return null if not exist; | |||
*/ | |||
public BytesValue getBytes(Bytes key) { | |||
return baseAccount.getBytes(key); | |||
} | |||
/** | |||
* return the specified version's value; | |||
* | |||
* @param key | |||
* @param version | |||
* @return return null if not exist; | |||
*/ | |||
public BytesValue getBytes(String key, long version) { | |||
return baseAccount.getBytes(Bytes.fromString(key), version); | |||
} | |||
/** | |||
* return the specified version's value; | |||
* | |||
* @param key | |||
* @param version | |||
* @return return null if not exist; | |||
*/ | |||
public BytesValue getBytes(Bytes key, long version) { | |||
return baseAccount.getBytes(key, version); | |||
} | |||
/** | |||
* @param key | |||
* @param version | |||
* @return | |||
*/ | |||
public KVDataEntry getDataEntry(String key, long version) { | |||
return getDataEntry(Bytes.fromString(key), version); | |||
} | |||
/** | |||
* @param key | |||
* @param version | |||
* @return | |||
*/ | |||
public KVDataEntry getDataEntry(Bytes key, long version) { | |||
BytesValue value = baseAccount.getBytes(key, version); | |||
if (value == null) { | |||
return new KVDataObject(key.toUTF8String(), -1, null); | |||
}else { | |||
return new KVDataObject(key.toUTF8String(), version, value); | |||
} | |||
} | |||
/** | |||
* return the specified index's KVDataEntry; | |||
* | |||
* @param fromIndex | |||
* @param count | |||
* @return return null if not exist; | |||
*/ | |||
public KVDataEntry[] getDataEntries(int fromIndex, int count) { | |||
if (count == 0 || getDataEntriesTotalCount() == 0) { | |||
return null; | |||
} | |||
if (count == -1 || count > getDataEntriesTotalCount()) { | |||
fromIndex = 0; | |||
count = (int)getDataEntriesTotalCount(); | |||
} | |||
if (fromIndex < 0 || fromIndex > getDataEntriesTotalCount() - 1) { | |||
fromIndex = 0; | |||
} | |||
KVDataEntry[] kvDataEntries = new KVDataEntry[count]; | |||
byte[] value; | |||
String key; | |||
long ver; | |||
for (int i = 0; i < count; i++) { | |||
value = baseAccount.dataset.getValuesAtIndex(fromIndex); | |||
key = baseAccount.dataset.getKeyAtIndex(fromIndex); | |||
ver = baseAccount.dataset.getVersion(key); | |||
BytesValue decodeData = BinaryProtocol.decode(value); | |||
kvDataEntries[i] = new KVDataObject(key, ver, decodeData); | |||
fromIndex++; | |||
} | |||
return kvDataEntries; | |||
} | |||
/** | |||
* return the dataAccount's kv total count; | |||
* | |||
* @param | |||
* @param | |||
* @return return total count; | |||
*/ | |||
public long getDataEntriesTotalCount() { | |||
if(baseAccount == null){ | |||
return 0; | |||
} | |||
return baseAccount.dataset.getDataCount(); | |||
} | |||
// /** | |||
// * @param key | |||
// * @param version | |||
// * @return | |||
// */ | |||
// public KVDataEntry getDataEntry(String key, long version) { | |||
// return getDataEntry(Bytes.fromString(key), version); | |||
// } | |||
// | |||
// /** | |||
// * @param key | |||
// * @param version | |||
// * @return | |||
// */ | |||
// public KVDataEntry getDataEntry(Bytes key, long version) { | |||
// BytesValue value = baseAccount.getValue(key, version); | |||
// if (value == null) { | |||
// return new KVDataObject(key.toUTF8String(), -1, null); | |||
// }else { | |||
// return new KVDataObject(key.toUTF8String(), version, value); | |||
// } | |||
// } | |||
// | |||
// /** | |||
// * return the specified index's KVDataEntry; | |||
// * | |||
// * @param fromIndex | |||
// * @param count | |||
// * @return return null if not exist; | |||
// */ | |||
// | |||
// public KVDataEntry[] getDataEntries(int fromIndex, int count) { | |||
// if (count == 0 || getDataEntriesTotalCount() == 0) { | |||
// return null; | |||
// } | |||
// | |||
// if (count == -1 || count > getDataEntriesTotalCount()) { | |||
// fromIndex = 0; | |||
// count = (int)getDataEntriesTotalCount(); | |||
// } | |||
// | |||
// if (fromIndex < 0 || fromIndex > getDataEntriesTotalCount() - 1) { | |||
// fromIndex = 0; | |||
// } | |||
// | |||
// KVDataEntry[] kvDataEntries = new KVDataEntry[count]; | |||
// byte[] value; | |||
// String key; | |||
// long ver; | |||
// for (int i = 0; i < count; i++) { | |||
// value = baseAccount.dataset.getValuesAtIndex(fromIndex); | |||
// key = baseAccount.dataset.getKeyAtIndex(fromIndex); | |||
// ver = baseAccount.dataset.getVersion(key); | |||
// BytesValue decodeData = BinaryProtocol.decode(value); | |||
// kvDataEntries[i] = new KVDataObject(key, ver, decodeData); | |||
// fromIndex++; | |||
// } | |||
// | |||
// return kvDataEntries; | |||
// } | |||
// | |||
// /** | |||
// * return the dataAccount's kv total count; | |||
// * | |||
// * @param | |||
// * @param | |||
// * @return return total count; | |||
// */ | |||
// public long getDataEntriesTotalCount() { | |||
// if(baseAccount == null){ | |||
// return 0; | |||
// } | |||
// return baseAccount.dataset.getDataCount(); | |||
// } | |||
} |
@@ -2,7 +2,7 @@ package com.jd.blockchain.ledger.core; | |||
import com.jd.blockchain.crypto.HashDigest; | |||
import com.jd.blockchain.crypto.PubKey; | |||
import com.jd.blockchain.ledger.AccountHeader; | |||
import com.jd.blockchain.ledger.BlockchainIdentity; | |||
import com.jd.blockchain.ledger.CryptoSetting; | |||
import com.jd.blockchain.ledger.DigitalSignature; | |||
import com.jd.blockchain.ledger.MerkleProof; | |||
@@ -17,17 +17,18 @@ public class DataAccountSet implements Transactional, DataAccountQuery { | |||
public DataAccountSet(CryptoSetting cryptoSetting, String prefix, ExPolicyKVStorage exStorage, | |||
VersioningKVStorage verStorage, AccountAccessPolicy accessPolicy) { | |||
accountSet = new MerkleAccountSet(cryptoSetting, prefix, exStorage, verStorage, accessPolicy); | |||
accountSet = new MerkleAccountSet(cryptoSetting, Bytes.fromString(prefix), exStorage, verStorage, accessPolicy); | |||
} | |||
public DataAccountSet(HashDigest dataRootHash, CryptoSetting cryptoSetting, String prefix, | |||
ExPolicyKVStorage exStorage, VersioningKVStorage verStorage, boolean readonly, | |||
AccountAccessPolicy accessPolicy) { | |||
accountSet = new MerkleAccountSet(dataRootHash, cryptoSetting, prefix, exStorage, verStorage, readonly, accessPolicy); | |||
accountSet = new MerkleAccountSet(dataRootHash, cryptoSetting, Bytes.fromString(prefix), exStorage, verStorage, | |||
readonly, accessPolicy); | |||
} | |||
@Override | |||
public AccountHeader[] getHeaders(int fromIndex, int count) { | |||
public BlockchainIdentity[] getHeaders(int fromIndex, int count) { | |||
return accountSet.getHeaders(fromIndex, count); | |||
} | |||
@@ -64,7 +65,7 @@ public class DataAccountSet implements Transactional, DataAccountQuery { | |||
public DataAccount register(Bytes address, PubKey pubKey, DigitalSignature addressSignature) { | |||
// TODO: 未实现对地址签名的校验和记录; | |||
MerkleAccount accBase = accountSet.register(address, pubKey); | |||
CompositeAccount accBase = accountSet.register(address, pubKey); | |||
return new DataAccount(accBase); | |||
} | |||
@@ -82,7 +83,7 @@ public class DataAccountSet implements Transactional, DataAccountQuery { | |||
*/ | |||
@Override | |||
public DataAccount getAccount(Bytes address) { | |||
MerkleAccount accBase = accountSet.getAccount(address); | |||
CompositeAccount accBase = accountSet.getAccount(address); | |||
if (accBase == null) { | |||
return null; | |||
} | |||
@@ -91,7 +92,7 @@ public class DataAccountSet implements Transactional, DataAccountQuery { | |||
@Override | |||
public DataAccount getAccount(Bytes address, long version) { | |||
MerkleAccount accBase = accountSet.getAccount(address, version); | |||
CompositeAccount accBase = accountSet.getAccount(address, version); | |||
return new DataAccount(accBase); | |||
} | |||
@@ -1,13 +1,13 @@ | |||
package com.jd.blockchain.ledger.core; | |||
import com.jd.blockchain.crypto.HashDigest; | |||
import com.jd.blockchain.ledger.AccountHeader; | |||
import com.jd.blockchain.ledger.BlockchainIdentity; | |||
import com.jd.blockchain.ledger.MerkleProof; | |||
import com.jd.blockchain.utils.Bytes; | |||
public class EmptyAccountSet<T> implements AccountQuery<T> { | |||
private static final AccountHeader[] EMPTY = {}; | |||
private static final BlockchainIdentity[] EMPTY = {}; | |||
@Override | |||
public HashDigest getRootHash() { | |||
@@ -20,7 +20,7 @@ public class EmptyAccountSet<T> implements AccountQuery<T> { | |||
} | |||
@Override | |||
public AccountHeader[] getHeaders(int fromIndex, int count) { | |||
public BlockchainIdentity[] getHeaders(int fromIndex, int count) { | |||
return EMPTY; | |||
} | |||
@@ -0,0 +1,92 @@ | |||
//package com.jd.blockchain.ledger.core; | |||
// | |||
//import com.jd.blockchain.crypto.HashDigest; | |||
//import com.jd.blockchain.ledger.AccountHeader; | |||
//import com.jd.blockchain.ledger.CryptoSetting; | |||
//import com.jd.blockchain.ledger.MerkleProof; | |||
//import com.jd.blockchain.storage.service.ExPolicyKVStorage; | |||
//import com.jd.blockchain.storage.service.VersioningKVStorage; | |||
//import com.jd.blockchain.utils.Bytes; | |||
//import com.jd.blockchain.utils.Transactional; | |||
// | |||
//public class GenericAccountSet<H extends AccountHeader, T extends GenericAccount<H>> implements AccountQuery<H, T>, Transactional { | |||
// | |||
// private Class<H> headerType; | |||
// | |||
// private MerkleAccountSet merkleAccountSet; | |||
// | |||
// public GenericAccountSet(Class<H> headerType, CryptoSetting cryptoSetting, String keyPrefix, ExPolicyKVStorage exStorage, | |||
// VersioningKVStorage verStorage, AccountAccessPolicy accessPolicy) { | |||
// this(headerType, null, cryptoSetting, keyPrefix, exStorage, verStorage, false, accessPolicy); | |||
// } | |||
// | |||
// public GenericAccountSet(Class<H> headerType, HashDigest rootHash, CryptoSetting cryptoSetting, String keyPrefix, | |||
// ExPolicyKVStorage exStorage, VersioningKVStorage verStorage, boolean readonly, | |||
// AccountAccessPolicy accessPolicy) { | |||
// this.headerType = headerType; | |||
// this.merkleAccountSet = new MerkleAccountSet(rootHash, cryptoSetting, keyPrefix, exStorage, verStorage, readonly, accessPolicy); | |||
// } | |||
// | |||
// @Override | |||
// public MerkleProof getProof(Bytes address) { | |||
// return merkleAccountSet.getProof(address); | |||
// } | |||
// | |||
// @Override | |||
// public HashDigest getRootHash() { | |||
// return merkleAccountSet.getRootHash(); | |||
// } | |||
// | |||
// @Override | |||
// public boolean isUpdated() { | |||
// return merkleAccountSet.isUpdated(); | |||
// } | |||
// | |||
// @Override | |||
// public void commit() { | |||
// merkleAccountSet.commit(); | |||
// } | |||
// | |||
// @Override | |||
// public void cancel() { | |||
// merkleAccountSet.cancel(); | |||
// } | |||
// | |||
// @Override | |||
// public H[] getHeaders(int fromIndex, int count) { | |||
// merkleAccountSet.getHeaders(fromIndex, count) | |||
// return null; | |||
// } | |||
// | |||
// @Override | |||
// public long getTotal() { | |||
// // TODO Auto-generated method stub | |||
// return 0; | |||
// } | |||
// | |||
// @Override | |||
// public boolean contains(Bytes address) { | |||
// // TODO Auto-generated method stub | |||
// return false; | |||
// } | |||
// | |||
// @Override | |||
// public T getAccount(String address) { | |||
// // TODO Auto-generated method stub | |||
// return null; | |||
// } | |||
// | |||
// @Override | |||
// public T getAccount(Bytes address) { | |||
// // TODO Auto-generated method stub | |||
// return null; | |||
// } | |||
// | |||
// @Override | |||
// public T getAccount(Bytes address, long version) { | |||
// // TODO Auto-generated method stub | |||
// return null; | |||
// } | |||
// | |||
// | |||
//} |
@@ -0,0 +1,42 @@ | |||
package com.jd.blockchain.ledger.core; | |||
import java.util.ArrayList; | |||
import java.util.List; | |||
import com.jd.blockchain.crypto.HashDigest; | |||
import com.jd.blockchain.ledger.HashProof; | |||
/** | |||
* | |||
* @author huanghaiquan | |||
* | |||
*/ | |||
public class HashDigestList implements HashProof { | |||
private List<HashDigest> proofs = new ArrayList<HashDigest>(); | |||
public HashDigestList() { | |||
} | |||
public HashDigestList(HashProof proof) { | |||
concat(proof); | |||
} | |||
public void concat(HashProof proof) { | |||
int levels = proof.getLevels(); | |||
for (int i = levels; i > -1; i--) { | |||
proofs.add(proof.getHash(i)); | |||
} | |||
} | |||
@Override | |||
public int getLevels() { | |||
return proofs.size(); | |||
} | |||
@Override | |||
public HashDigest getHash(int level) { | |||
return proofs.get(level); | |||
} | |||
} |
@@ -0,0 +1,10 @@ | |||
package com.jd.blockchain.ledger.core; | |||
import com.jd.blockchain.ledger.HashProof; | |||
import com.jd.blockchain.utils.Bytes; | |||
public interface HashProvable { | |||
HashProof getProof(Bytes key); | |||
} |
@@ -5,11 +5,12 @@ import java.util.List; | |||
import com.jd.blockchain.contract.ContractException; | |||
import com.jd.blockchain.crypto.HashDigest; | |||
import com.jd.blockchain.ledger.AccountHeader; | |||
import com.jd.blockchain.ledger.BlockchainIdentity; | |||
import com.jd.blockchain.ledger.BytesValue; | |||
import com.jd.blockchain.ledger.ContractInfo; | |||
import com.jd.blockchain.ledger.KVDataEntry; | |||
import com.jd.blockchain.ledger.KVDataObject; | |||
import com.jd.blockchain.ledger.TypedKVEntry; | |||
import com.jd.blockchain.ledger.TypedValue; | |||
import com.jd.blockchain.ledger.TypedKVData; | |||
import com.jd.blockchain.ledger.KVDataVO; | |||
import com.jd.blockchain.ledger.KVInfoVO; | |||
import com.jd.blockchain.ledger.LedgerAdminInfo; | |||
@@ -22,22 +23,25 @@ import com.jd.blockchain.ledger.ParticipantNode; | |||
import com.jd.blockchain.ledger.TransactionState; | |||
import com.jd.blockchain.ledger.UserInfo; | |||
import com.jd.blockchain.transaction.BlockchainQueryService; | |||
import com.jd.blockchain.utils.ArrayUtils; | |||
import com.jd.blockchain.utils.Bytes; | |||
import com.jd.blockchain.utils.DataEntry; | |||
import com.jd.blockchain.utils.DataIterator; | |||
import com.jd.blockchain.utils.QueryUtil; | |||
public class LedgerQueryService implements BlockchainQueryService { | |||
private static final KVDataEntry[] EMPTY_ENTRIES = new KVDataEntry[0]; | |||
private static final TypedKVEntry[] EMPTY_ENTRIES = new TypedKVEntry[0]; | |||
private HashDigest[] ledgerHashs; | |||
private LedgerQuery ledger; | |||
public LedgerQueryService(LedgerQuery ledger) { | |||
this.ledger = ledger; | |||
this.ledgerHashs = new HashDigest[] {ledger.getHash()}; | |||
this.ledgerHashs = new HashDigest[] { ledger.getHash() }; | |||
} | |||
private void checkLedgerHash(HashDigest ledgerHash) { | |||
if (!ledgerHashs[0].equals(ledgerHash)) { | |||
throw new LedgerException("Unsupport cross chain query!"); | |||
@@ -58,7 +62,7 @@ public class LedgerQueryService implements BlockchainQueryService { | |||
ledgerInfo.setLatestBlockHeight(ledger.getLatestBlockHeight()); | |||
return ledgerInfo; | |||
} | |||
@Override | |||
public LedgerAdminInfo getLedgerAdminInfo(HashDigest ledgerHash) { | |||
checkLedgerHash(ledgerHash); | |||
@@ -270,15 +274,15 @@ public class LedgerQueryService implements BlockchainQueryService { | |||
} | |||
@Override | |||
public AccountHeader getDataAccount(HashDigest ledgerHash, String address) { | |||
public BlockchainIdentity getDataAccount(HashDigest ledgerHash, String address) { | |||
checkLedgerHash(ledgerHash); | |||
LedgerBlock block = ledger.getLatestBlock(); | |||
DataAccountQuery dataAccountSet = ledger.getDataAccountSet(block); | |||
return dataAccountSet.getAccount(Bytes.fromBase58(address)); | |||
return dataAccountSet.getAccount(Bytes.fromBase58(address)).getID(); | |||
} | |||
@Override | |||
public KVDataEntry[] getDataEntries(HashDigest ledgerHash, String address, String... keys) { | |||
public TypedKVEntry[] getDataEntries(HashDigest ledgerHash, String address, String... keys) { | |||
if (keys == null || keys.length == 0) { | |||
return EMPTY_ENTRIES; | |||
} | |||
@@ -287,25 +291,26 @@ public class LedgerQueryService implements BlockchainQueryService { | |||
DataAccountQuery dataAccountSet = ledger.getDataAccountSet(block); | |||
DataAccount dataAccount = dataAccountSet.getAccount(Bytes.fromBase58(address)); | |||
KVDataEntry[] entries = new KVDataEntry[keys.length]; | |||
TypedKVEntry[] entries = new TypedKVEntry[keys.length]; | |||
long ver; | |||
for (int i = 0; i < entries.length; i++) { | |||
final String currKey = keys[i]; | |||
ver = dataAccount == null ? -1 : dataAccount.getDataVersion(Bytes.fromString(currKey)); | |||
ver = dataAccount == null ? -1 : dataAccount.getDataset().getVersion(currKey); | |||
if (ver < 0) { | |||
entries[i] = new KVDataObject(currKey, -1, null); | |||
entries[i] = new TypedKVData(currKey, -1, null); | |||
} else { | |||
BytesValue value = dataAccount.getBytes(Bytes.fromString(currKey), ver); | |||
entries[i] = new KVDataObject(currKey, ver, value); | |||
BytesValue value = dataAccount.getDataset().getValue(currKey, ver); | |||
entries[i] = new TypedKVData(currKey, ver, value); | |||
} | |||
} | |||
return entries; | |||
} | |||
public KVDataEntry[] getDataEntries(HashDigest ledgerHash, String address, KVInfoVO kvInfoVO) { | |||
public TypedKVEntry[] getDataEntries(HashDigest ledgerHash, String address, KVInfoVO kvInfoVO) { | |||
// parse kvInfoVO; | |||
List<String> keyList = new ArrayList<>(); | |||
List<Long> versionList = new ArrayList<>(); | |||
@@ -335,22 +340,22 @@ public class LedgerQueryService implements BlockchainQueryService { | |||
DataAccountQuery dataAccountSet = ledger.getDataAccountSet(block); | |||
DataAccount dataAccount = dataAccountSet.getAccount(Bytes.fromBase58(address)); | |||
KVDataEntry[] entries = new KVDataEntry[keys.length]; | |||
TypedKVEntry[] entries = new TypedKVEntry[keys.length]; | |||
long ver = -1; | |||
for (int i = 0; i < entries.length; i++) { | |||
// ver = dataAccount.getDataVersion(Bytes.fromString(keys[i])); | |||
// dataAccount.getBytes(Bytes.fromString(keys[i]),1); | |||
ver = versions[i]; | |||
if (ver < 0) { | |||
entries[i] = new KVDataObject(keys[i], -1, null); | |||
entries[i] = new TypedKVData(keys[i], -1, null); | |||
} else { | |||
if (dataAccount.getDataEntriesTotalCount() == 0 | |||
|| dataAccount.getBytes(Bytes.fromString(keys[i]), ver) == null) { | |||
if (dataAccount.getDataset().getDataCount() == 0 | |||
|| dataAccount.getDataset().getValue(keys[i], ver) == null) { | |||
// is the address is not exist; the result is null; | |||
entries[i] = new KVDataObject(keys[i], -1, null); | |||
entries[i] = new TypedKVData(keys[i], -1, null); | |||
} else { | |||
BytesValue value = dataAccount.getBytes(Bytes.fromString(keys[i]), ver); | |||
entries[i] = new KVDataObject(keys[i], ver, value); | |||
BytesValue value = dataAccount.getDataset().getValue(keys[i], ver); | |||
entries[i] = new TypedKVData(keys[i], ver, value); | |||
} | |||
} | |||
} | |||
@@ -359,14 +364,22 @@ public class LedgerQueryService implements BlockchainQueryService { | |||
} | |||
@Override | |||
public KVDataEntry[] getDataEntries(HashDigest ledgerHash, String address, int fromIndex, int count) { | |||
public TypedKVEntry[] getDataEntries(HashDigest ledgerHash, String address, int fromIndex, int count) { | |||
checkLedgerHash(ledgerHash); | |||
LedgerBlock block = ledger.getLatestBlock(); | |||
DataAccountQuery dataAccountSet = ledger.getDataAccountSet(block); | |||
DataAccount dataAccount = dataAccountSet.getAccount(Bytes.fromBase58(address)); | |||
int pages[] = QueryUtil.calFromIndexAndCount(fromIndex, count, (int) dataAccount.getDataEntriesTotalCount()); | |||
return dataAccount.getDataEntries(pages[0], pages[1]); | |||
// int pages[] = QueryUtil.calFromIndexAndCount(fromIndex, count, (int) dataAccount.getDataset().getDataCount()); | |||
// return dataAccount.getDataset().getDataEntry(key, version).getDataEntries(pages[0], pages[1]); | |||
DataIterator<String, TypedValue> iterator = dataAccount.getDataset().iterator(); | |||
iterator.skip(fromIndex); | |||
DataEntry<String, TypedValue>[] dataEntries = iterator.next(count); | |||
TypedKVEntry[] typedKVEntries = ArrayUtils.castTo(dataEntries, TypedKVEntry.class, | |||
e -> e == null ? null : new TypedKVData(e.getKey(), e.getVersion(), e.getValue())); | |||
return typedKVEntries; | |||
} | |||
@Override | |||
@@ -376,7 +389,7 @@ public class LedgerQueryService implements BlockchainQueryService { | |||
DataAccountQuery dataAccountSet = ledger.getDataAccountSet(block); | |||
DataAccount dataAccount = dataAccountSet.getAccount(Bytes.fromBase58(address)); | |||
return dataAccount.getDataEntriesTotalCount(); | |||
return dataAccount.getDataset().getDataCount(); | |||
} | |||
@Override | |||
@@ -388,7 +401,7 @@ public class LedgerQueryService implements BlockchainQueryService { | |||
} | |||
@Override | |||
public AccountHeader[] getUsers(HashDigest ledgerHash, int fromIndex, int count) { | |||
public BlockchainIdentity[] getUsers(HashDigest ledgerHash, int fromIndex, int count) { | |||
checkLedgerHash(ledgerHash); | |||
LedgerBlock block = ledger.getLatestBlock(); | |||
UserAccountQuery userAccountSet = ledger.getUserAccountSet(block); | |||
@@ -397,7 +410,7 @@ public class LedgerQueryService implements BlockchainQueryService { | |||
} | |||
@Override | |||
public AccountHeader[] getDataAccounts(HashDigest ledgerHash, int fromIndex, int count) { | |||
public BlockchainIdentity[] getDataAccounts(HashDigest ledgerHash, int fromIndex, int count) { | |||
checkLedgerHash(ledgerHash); | |||
LedgerBlock block = ledger.getLatestBlock(); | |||
DataAccountQuery dataAccountSet = ledger.getDataAccountSet(block); | |||
@@ -406,7 +419,7 @@ public class LedgerQueryService implements BlockchainQueryService { | |||
} | |||
@Override | |||
public AccountHeader[] getContractAccounts(HashDigest ledgerHash, int fromIndex, int count) { | |||
public BlockchainIdentity[] getContractAccounts(HashDigest ledgerHash, int fromIndex, int count) { | |||
checkLedgerHash(ledgerHash); | |||
LedgerBlock block = ledger.getLatestBlock(); | |||
ContractAccountQuery contractAccountSet = ledger.getContractAccountSet(block); | |||
@@ -374,6 +374,10 @@ class LedgerRepositoryImpl implements LedgerRepository { | |||
return new LedgerDataset(adminDataset, userAccountSet, dataAccountSet, contractAccountSet, true); | |||
} | |||
public synchronized void resetNextBlockEditor() { | |||
this.nextBlockEditor = null; | |||
} | |||
@Override | |||
public synchronized LedgerEditor createNextBlock() { | |||
if (closed) { | |||
@@ -355,7 +355,11 @@ public class LedgerTransactionalEditor implements LedgerEditor { | |||
throw new IllegalStateException("The current block is not ready yet!"); | |||
} | |||
baseStorage.flush(); | |||
try { | |||
baseStorage.flush(); | |||
} catch (Exception e) { | |||
throw new BlockRollbackException(e.getMessage(), e); | |||
} | |||
committed = true; | |||
} | |||
@@ -3,15 +3,21 @@ package com.jd.blockchain.ledger.core; | |||
import com.jd.blockchain.binaryproto.BinaryProtocol; | |||
import com.jd.blockchain.crypto.HashDigest; | |||
import com.jd.blockchain.crypto.PubKey; | |||
import com.jd.blockchain.ledger.AccountHeader; | |||
import com.jd.blockchain.ledger.BlockchainIdentity; | |||
import com.jd.blockchain.ledger.BlockchainIdentityData; | |||
import com.jd.blockchain.ledger.BytesValue; | |||
import com.jd.blockchain.ledger.CryptoSetting; | |||
import com.jd.blockchain.ledger.HashProof; | |||
import com.jd.blockchain.ledger.LedgerException; | |||
import com.jd.blockchain.ledger.MerkleProof; | |||
import com.jd.blockchain.ledger.MerkleSnapshot; | |||
import com.jd.blockchain.ledger.TypedValue; | |||
import com.jd.blockchain.storage.service.ExPolicyKVStorage; | |||
import com.jd.blockchain.storage.service.VersioningKVStorage; | |||
import com.jd.blockchain.utils.Bytes; | |||
import com.jd.blockchain.utils.Dataset; | |||
import com.jd.blockchain.utils.DatasetHelper; | |||
import com.jd.blockchain.utils.DatasetHelper.DataChangedListener; | |||
import com.jd.blockchain.utils.DatasetHelper.TypeMapper; | |||
import com.jd.blockchain.utils.Transactional; | |||
/** | |||
@@ -20,94 +26,166 @@ import com.jd.blockchain.utils.Transactional; | |||
* @author huanghaiquan | |||
* | |||
*/ | |||
public class MerkleAccount implements AccountHeader, MerkleProvable, Transactional { | |||
public class MerkleAccount implements CompositeAccount, HashProvable, MerkleSnapshot, Transactional { | |||
private BlockchainIdentity bcid; | |||
private static final Bytes HEADER_PREFIX = Bytes.fromString("HD/"); | |||
private static final Bytes DATA_PREFIX = Bytes.fromString("DT/"); | |||
protected MerkleDataSet dataset; | |||
private static final Bytes KEY_HEADER_ROOT = Bytes.fromString("HEADER"); | |||
private static final Bytes KEY_DATA_ROOT = Bytes.fromString("DATA"); | |||
private static final String KEY_PUBKEY = "PUBKEY"; | |||
private BlockchainIdentity accountID; | |||
private MerkleDataSet rootDataset; | |||
private MerkleDataSet headerDataset; | |||
private MerkleDataSet dataDataset; | |||
private Dataset<String, TypedValue> typedHeader; | |||
private Dataset<String, TypedValue> typedData; | |||
// private long version; | |||
/** | |||
* Create a new Account with the specified address and pubkey; <br> | |||
* Create a new Account with the specified identity(address and pubkey); <br> | |||
* | |||
* At the same time, a empty merkle dataset is also created for this account, | |||
* which is used for storing data of this account.<br> | |||
* | |||
* Note that, the blockchain identity of the account is not stored in the | |||
* account's merkle dataset, but is stored by the outer invoker; | |||
* | |||
* @param address | |||
* @param pubKey | |||
* This new account will be writable. <br> | |||
* | |||
* @param accountID Identity of this new account; | |||
* @param cryptoSetting Settings about crypto operations; | |||
* @param keyPrefix Prefix of all keys in this account's dataset; | |||
* @param exStorage The base storage for existance operation; | |||
* @param verStorage The base storage for versioning operation; | |||
*/ | |||
public MerkleAccount(Bytes address, PubKey pubKey, CryptoSetting cryptoSetting, String keyPrefix, | |||
public MerkleAccount(BlockchainIdentity accountID, CryptoSetting cryptoSetting, Bytes keyPrefix, | |||
ExPolicyKVStorage exStorage, VersioningKVStorage verStorage) { | |||
this(address, pubKey, null, cryptoSetting, keyPrefix, exStorage, verStorage, false); | |||
// 初始化数据集; | |||
initializeDatasets(null, cryptoSetting, keyPrefix, exStorage, verStorage, false); | |||
initPubKey(accountID.getPubKey()); | |||
this.accountID = accountID; | |||
} | |||
/** | |||
* Create a new Account with the specified address and pubkey; <br> | |||
* Create a account instance with the specified address and root hash; load it's | |||
* merkle dataset from the specified root hash. This merkle dateset is used for | |||
* storing data of this account.<br> | |||
* | |||
* At the same time, a empty merkle dataset is also created for this account, | |||
* which is used for storing data of this account.<br> | |||
* | |||
* Note that, the blockchain identity of the account is not stored in the | |||
* account's merkle dataset, but is stored by the outer invoker; | |||
* | |||
* @param bcid | |||
* @param cryptoSetting | |||
* @param exStorage | |||
* @param verStorage | |||
* @param accessPolicy | |||
* @param address Address of this account; | |||
* @param rootHash Merkle root hash of this account; It can not be null; | |||
* @param cryptoSetting Settings about crypto operations; | |||
* @param keyPrefix Prefix of all keys in this account's dataset; | |||
* @param exStorage The base storage for existance operation; | |||
* @param verStorage The base storage for versioning operation; | |||
* @param readonly Readonly about this account's dataset; | |||
*/ | |||
public MerkleAccount(BlockchainIdentity bcid, CryptoSetting cryptoSetting, String keyPrefix, | |||
ExPolicyKVStorage exStorage, VersioningKVStorage verStorage) { | |||
this(bcid, null, cryptoSetting, keyPrefix, exStorage, verStorage, false); | |||
public MerkleAccount(Bytes address, HashDigest rootHash, CryptoSetting cryptoSetting, Bytes keyPrefix, | |||
ExPolicyKVStorage exStorage, VersioningKVStorage verStorage, boolean readonly) { | |||
if (rootHash == null) { | |||
throw new IllegalArgumentException("Specified a null root hash for account[" + address.toBase58() + "]!"); | |||
} | |||
// 初始化数据集; | |||
initializeDatasets(rootHash, cryptoSetting, keyPrefix, exStorage, verStorage, readonly); | |||
// 初始化账户的身份; | |||
PubKey pubKey = loadPubKey(); | |||
this.accountID = new AccountID(address, pubKey); | |||
} | |||
/** | |||
* Create a account instance with the specified address and pubkey and load it's | |||
* merkle dataset from the specified root hash. This merkle dateset is used for storing data | |||
* of this account.<br> | |||
* | |||
* @param address | |||
* @param pubKey | |||
* @param dataRootHash merkle root hash of account's data; if set to a null value, | |||
* an empty merkle dataset is created; | |||
* @param cryptoSetting | |||
* @param exStorage | |||
* @param verStorage | |||
* @param readonly | |||
* @param accessPolicy | |||
*/ | |||
public MerkleAccount(Bytes address, PubKey pubKey, HashDigest dataRootHash, CryptoSetting cryptoSetting, | |||
String keyPrefix, ExPolicyKVStorage exStorage, VersioningKVStorage verStorage, boolean readonly) { | |||
this(new BlockchainIdentityData(address, pubKey), dataRootHash, cryptoSetting, keyPrefix, exStorage, verStorage, | |||
/**
 * Initializes the three merkle datasets of this account: the "root dataset",
 * which records the root hashes of the other two; the "header dataset", which
 * stores account metadata (e.g. the public key); and the "data dataset", which
 * stores the account's user data.
 *
 * @param rootHash      root hash of the root dataset; null creates empty datasets;
 * @param cryptoSetting settings about crypto operations;
 * @param keyPrefix     prefix of all keys in this account's datasets;
 * @param exStorage     the base storage for existence operations;
 * @param verStorage    the base storage for versioning operations;
 * @param readonly      whether the datasets are readonly;
 */
private void initializeDatasets(HashDigest rootHash, CryptoSetting cryptoSetting, Bytes keyPrefix,
		ExPolicyKVStorage exStorage, VersioningKVStorage verStorage, boolean readonly) {
	// Load the "root dataset" first: loadHeaderRoot()/loadDataRoot() below read
	// from it, so it must be created before the other two datasets;
	this.rootDataset = new MerkleDataSet(rootHash, cryptoSetting, keyPrefix, exStorage, verStorage, readonly);
	// Listener that forwards every write of the typed datasets to onUpdated(...);
	DataChangedListener<String, TypedValue> dataChangedListener = new DataChangedListener<String, TypedValue>() {
		@Override
		public void onChanged(String key, TypedValue value, long expectedVersion, long newVersion) {
			onUpdated(key, value, expectedVersion, newVersion);
		}
	};
	// Mapper between the raw byte values of the underlying storage and TypedValue,
	// using the BytesValue binary contract;
	TypeMapper<byte[], TypedValue> valueMapper = new TypeMapper<byte[], TypedValue>() {
		@Override
		public byte[] encode(TypedValue t2) {
			return BinaryProtocol.encode(t2, BytesValue.class);
		}
		@Override
		public TypedValue decode(byte[] t1) {
			BytesValue v = BinaryProtocol.decodeAs(t1, BytesValue.class);
			return TypedValue.wrap(v);
		}
	};
	// Load the "header dataset"; its root hash is recorded in the root dataset;
	HashDigest headerRoot = loadHeaderRoot();
	Bytes headerPrefix = keyPrefix.concat(HEADER_PREFIX);
	this.headerDataset = new MerkleDataSet(headerRoot, cryptoSetting, headerPrefix, exStorage, verStorage,
			readonly);
	this.typedHeader = DatasetHelper.listen(DatasetHelper.map(headerDataset, valueMapper), dataChangedListener);
	// Load the "data dataset" in the same way;
	HashDigest dataRoot = loadDataRoot();
	Bytes dataPrefix = keyPrefix.concat(DATA_PREFIX);
	this.dataDataset = new MerkleDataSet(dataRoot, cryptoSetting, dataPrefix, exStorage, verStorage, readonly);
	this.typedData = DatasetHelper.listen(DatasetHelper.map(dataDataset, valueMapper), dataChangedListener);
}
public MerkleAccount(BlockchainIdentity bcid, HashDigest dataRootHash, CryptoSetting cryptoSetting, String keyPrefix, | |||
ExPolicyKVStorage exStorage, VersioningKVStorage verStorage, boolean readonly) { | |||
this.bcid = bcid; | |||
this.dataset = new MerkleDataSet(dataRootHash, cryptoSetting, keyPrefix, exStorage, verStorage, readonly); | |||
private HashDigest loadHeaderRoot() { | |||
byte[] hashBytes = rootDataset.getValue(KEY_HEADER_ROOT); | |||
if (hashBytes == null) { | |||
return null; | |||
} | |||
return new HashDigest(hashBytes); | |||
} | |||
private HashDigest loadDataRoot() { | |||
byte[] hashBytes = rootDataset.getValue(KEY_DATA_ROOT); | |||
if (hashBytes == null) { | |||
return null; | |||
} | |||
return new HashDigest(hashBytes); | |||
} | |||
/**
 * Current version of the header-root entry in the root dataset; -1 if the
 * entry does not exist yet.
 */
private long getHeaderRootVersion() {
	return rootDataset.getVersion(KEY_HEADER_ROOT);
}
/**
 * Current version of the data-root entry in the root dataset; -1 if the entry
 * does not exist yet.
 */
private long getDataRootVersion() {
	return rootDataset.getVersion(KEY_DATA_ROOT);
}
/* | |||
* (non-Javadoc) | |||
* | |||
* @see com.jd.blockchain.ledger.core.AccountDataSet#getAddress() | |||
*/ | |||
@Override | |||
public Bytes getAddress() { | |||
return bcid.getAddress(); | |||
return accountID.getAddress(); | |||
} | |||
/* | |||
* (non-Javadoc) | |||
* | |||
* @see com.jd.blockchain.ledger.core.AccountDataSet#getPubKey() | |||
*/ | |||
@Override | |||
public PubKey getPubKey() { | |||
return bcid.getPubKey(); | |||
return accountID.getPubKey(); | |||
} | |||
/**
 * The blockchain identity (address + public key) of this account.
 */
@Override
public BlockchainIdentity getID() {
	return accountID;
}
/**
 * The typed view of the "header dataset", which stores account metadata such
 * as the public key ({@code KEY_PUBKEY}).
 */
public Dataset<String, TypedValue> getHeaders() {
	return typedHeader;
}
/**
 * The typed view of the "data dataset", which stores this account's user data.
 */
@Override
public Dataset<String, TypedValue> getDataset() {
	return typedData;
}
/* | |||
@@ -117,12 +195,22 @@ public class MerkleAccount implements AccountHeader, MerkleProvable, Transaction | |||
*/ | |||
@Override | |||
public HashDigest getRootHash() { | |||
return dataset.getRootHash(); | |||
return rootDataset.getRootHash(); | |||
} | |||
@Override | |||
public MerkleProof getProof(Bytes key) { | |||
return dataset.getProof(key); | |||
public HashProof getProof(Bytes key) { | |||
MerkleProof dataProof = dataDataset.getProof(key); | |||
if (dataProof == null) { | |||
return null; | |||
} | |||
MerkleProof rootProof = rootDataset.getProof(KEY_DATA_ROOT); | |||
if (rootProof == null) { | |||
return null; | |||
} | |||
HashDigestList proof = new HashDigestList(rootProof); | |||
proof.concat(dataProof); | |||
return proof; | |||
} | |||
/** | |||
@@ -131,90 +219,298 @@ public class MerkleAccount implements AccountHeader, MerkleProvable, Transaction | |||
* @return | |||
*/ | |||
public boolean isReadonly() { | |||
return dataset.isReadonly(); | |||
return dataDataset.isReadonly() || headerDataset.isReadonly(); | |||
} | |||
/** | |||
* Create or update the value associated the specified key if the version | |||
* checking is passed.<br> | |||
* | |||
* The value of the key will be updated only if it's latest version equals the | |||
* specified version argument. <br> | |||
* If the key doesn't exist, the version checking will be ignored, and key will | |||
* be created with a new sequence number as id. <br> | |||
* It also could specify the version argument to -1 to ignore the version | |||
* checking. | |||
* <p> | |||
* If updating is performed, the version of the key increase by 1. <br> | |||
* If creating is performed, the version of the key initialize by 0. <br> | |||
* 初始化账户的公钥; | |||
* | |||
* @param key The key of data; | |||
* @param value The value of data; | |||
* @param version The expected version of the key. | |||
* @return The new version of the key. <br> | |||
* If the key is new created success, then return 0; <br> | |||
* If the key is updated success, then return the new version;<br> | |||
* If this operation fail by version checking or other reason, then | |||
* return -1; | |||
* @param pubKey | |||
*/ | |||
public long setBytes(Bytes key, BytesValue value, long version) { | |||
byte[] bytesValue = BinaryProtocol.encode(value, BytesValue.class); | |||
return dataset.setValue(key, bytesValue, version); | |||
private void initPubKey(PubKey pubKey) { | |||
long v = typedHeader.setValue(KEY_PUBKEY, TypedValue.fromPubKey(pubKey), -1); | |||
if (v < 0) { | |||
throw new LedgerException("PubKey storage conflict!"); | |||
} | |||
} | |||
/** | |||
* Return the latest version entry associated the specified key; If the key | |||
* doesn't exist, then return -1; | |||
* 加载公钥; | |||
* | |||
* @param key | |||
* @return | |||
*/ | |||
public long getVersion(Bytes key) { | |||
return dataset.getVersion(key); | |||
private PubKey loadPubKey() { | |||
TypedValue value = typedHeader.getValue(KEY_PUBKEY); | |||
if (value == null) { | |||
return null; | |||
} | |||
return value.pubKeyValue(); | |||
} | |||
/** | |||
* return the latest version's value; | |||
* 当写入新值时触发此方法; | |||
* | |||
* @param key | |||
* @return return null if not exist; | |||
* @param value | |||
* @param newVersion | |||
*/ | |||
public BytesValue getBytes(Bytes key) { | |||
byte[] bytesValue = dataset.getValue(key); | |||
if (bytesValue == null) { | |||
return null; | |||
} | |||
return BinaryProtocol.decodeAs(bytesValue, BytesValue.class); | |||
/**
 * Hook invoked whenever a value of the header dataset or the data dataset is
 * written; the default implementation does nothing — subclasses may override.
 *
 * @param key             the key that was written;
 * @param value           the new value;
 * @param expectedVersion the version expected by the writer;
 * @param newVersion      the version actually assigned;
 */
protected void onUpdated(String key, TypedValue value, long expectedVersion, long newVersion) {
}
/** | |||
* Return the specified version's value; | |||
* 当账户数据提交后触发此方法;<br> | |||
* | |||
* @param key | |||
* @param version | |||
* @return return null if not exist; | |||
* 此方法默认会返回新的账户版本号,等于当前版本号加 1 ; | |||
* | |||
* @param previousRootHash 提交前的根哈希;如果是新账户的首次提交,则为 null; | |||
* @param newRootHash 新的根哈希; | |||
*/ | |||
public BytesValue getBytes(Bytes key, long version) { | |||
byte[] bytesValue = dataset.getValue(key, version); | |||
if (bytesValue == null) { | |||
return null; | |||
} | |||
return BinaryProtocol.decodeAs(bytesValue, BytesValue.class); | |||
/**
 * Hook invoked after the root dataset of this account has been committed; the
 * default implementation does nothing — subclasses may override.
 *
 * @param previousRootHash the root hash before the commit; null on the first
 *                         commit of a newly created account;
 * @param newRootHash      the new root hash after the commit;
 */
protected void onCommited(HashDigest previousRootHash, HashDigest newRootHash) {
}
@Override | |||
public boolean isUpdated() { | |||
return dataset.isUpdated(); | |||
return headerDataset.isUpdated() || dataDataset.isUpdated() || rootDataset.isUpdated(); | |||
} | |||
@Override | |||
public void commit() { | |||
dataset.commit(); | |||
if (headerDataset.isUpdated()) { | |||
headerDataset.commit(); | |||
long version = getHeaderRootVersion(); | |||
rootDataset.setValue(KEY_HEADER_ROOT, headerDataset.getRootHash().toBytes(), version); | |||
} | |||
if (dataDataset.isUpdated()) { | |||
long version = getDataRootVersion(); | |||
dataDataset.commit(); | |||
rootDataset.setValue(KEY_DATA_ROOT, dataDataset.getRootHash().toBytes(), version); | |||
} | |||
if (rootDataset.isUpdated()) { | |||
HashDigest previousRootHash = rootDataset.getRootHash(); | |||
rootDataset.commit(); | |||
onCommited(previousRootHash, rootDataset.getRootHash()); | |||
} | |||
} | |||
@Override | |||
public void cancel() { | |||
dataset.cancel(); | |||
headerDataset.cancel(); | |||
dataDataset.cancel(); | |||
rootDataset.cancel(); | |||
} | |||
// ---------------------- | |||
private class AccountID implements BlockchainIdentity { | |||
private Bytes address; | |||
private PubKey pubKey; | |||
public AccountID(Bytes address, PubKey pubKey) { | |||
this.address = address; | |||
this.pubKey = pubKey; | |||
} | |||
@Override | |||
public Bytes getAddress() { | |||
return address; | |||
} | |||
@Override | |||
public PubKey getPubKey() { | |||
return pubKey; | |||
} | |||
} | |||
// private static class MerkleDatasetAdapter implements Dataset<String, BytesValue> { | |||
// | |||
// private static DataChangedListener NULL_LISTENER = new DataChangedListener() { | |||
// @Override | |||
// public void onChanged(Bytes key, BytesValue value, long newVersion) { | |||
// } | |||
// }; | |||
// | |||
// private DataChangedListener changedListener; | |||
// | |||
// private MerkleDataSet dataset; | |||
// | |||
// public MerkleDataSet getDataset() { | |||
// return dataset; | |||
// } | |||
// | |||
// @SuppressWarnings("unused") | |||
// public MerkleDatasetAdapter(MerkleDataSet dataset) { | |||
// this(dataset, NULL_LISTENER); | |||
// } | |||
// | |||
// public MerkleDatasetAdapter(MerkleDataSet dataset, DataChangedListener listener) { | |||
// this.dataset = dataset; | |||
// this.changedListener = listener == null ? NULL_LISTENER : listener; | |||
// } | |||
// | |||
// @Override | |||
// public DataEntry<String, BytesValue> getDataEntry(String key) { | |||
// return new VersioningKVEntryWraper(dataset.getDataEntry(Bytes.fromString(key))); | |||
// } | |||
// | |||
// @Override | |||
// public DataEntry<String, BytesValue> getDataEntry(String key, long version) { | |||
// return new VersioningKVEntryWraper(dataset.getDataEntry(Bytes.fromString(key), version)); | |||
// } | |||
// | |||
// /** | |||
// * Create or update the value associated the specified key if the version | |||
// * checking is passed.<br> | |||
// * | |||
// * The value of the key will be updated only if it's latest version equals the | |||
// * specified version argument. <br> | |||
// * If the key doesn't exist, the version checking will be ignored, and key will | |||
// * be created with a new sequence number as id. <br> | |||
// * It also could specify the version argument to -1 to ignore the version | |||
// * checking. | |||
// * <p> | |||
// * If updating is performed, the version of the key increase by 1. <br> | |||
// * If creating is performed, the version of the key initialize by 0. <br> | |||
// * | |||
// * @param key The key of data; | |||
// * @param value The value of data; | |||
// * @param version The expected version of the key. | |||
// * @return The new version of the key. <br> | |||
// * If the key is new created success, then return 0; <br> | |||
// * If the key is updated success, then return the new version;<br> | |||
// * If this operation fail by version checking or other reason, then | |||
// * return -1; | |||
// */ | |||
// @Override | |||
// public long setValue(Bytes key, BytesValue value, long version) { | |||
// byte[] bytesValue = BinaryProtocol.encode(value, BytesValue.class); | |||
// long v = dataset.setValue(key, bytesValue, version); | |||
// if (v > -1) { | |||
// changedListener.onChanged(key, value, v); | |||
// } | |||
// return v; | |||
// } | |||
// | |||
// /** | |||
// * Return the latest version entry associated the specified key; If the key | |||
// * doesn't exist, then return -1; | |||
// * | |||
// * @param key | |||
// * @return | |||
// */ | |||
// @Override | |||
// public long getVersion(Bytes key) { | |||
// return dataset.getVersion(key); | |||
// } | |||
// | |||
// /** | |||
// * return the latest version's value; | |||
// * | |||
// * @param key | |||
// * @return return null if not exist; | |||
// */ | |||
// @Override | |||
// public BytesValue getValue(Bytes key) { | |||
// byte[] bytesValue = dataset.getValue(key); | |||
// if (bytesValue == null) { | |||
// return null; | |||
// } | |||
// return BinaryProtocol.decodeAs(bytesValue, BytesValue.class); | |||
// } | |||
// | |||
// /** | |||
// * Return the specified version's value; | |||
// * | |||
// * @param key | |||
// * @param version | |||
// * @return return null if not exist; | |||
// */ | |||
// @Override | |||
// public BytesValue getValue(Bytes key, long version) { | |||
// byte[] bytesValue = dataset.getValue(key, version); | |||
// if (bytesValue == null) { | |||
// return null; | |||
// } | |||
// return BinaryProtocol.decodeAs(bytesValue, BytesValue.class); | |||
// } | |||
// | |||
// @Override | |||
// public long getDataCount() { | |||
// return dataset.getDataCount(); | |||
// } | |||
// | |||
// @Override | |||
// public long setValue(String key, BytesValue value, long version) { | |||
// byte[] bytesValue = BinaryProtocol.encode(value, BytesValue.class); | |||
// return dataset.setValue(key, bytesValue, version); | |||
// } | |||
// | |||
// @Override | |||
// public BytesValue getValue(String key, long version) { | |||
// byte[] bytesValue = dataset.getValue(key, version); | |||
// if (bytesValue == null) { | |||
// return null; | |||
// } | |||
// return BinaryProtocol.decodeAs(bytesValue, BytesValue.class); | |||
// } | |||
// | |||
// @Override | |||
// public BytesValue getValue(String key) { | |||
// byte[] bytesValue = dataset.getValue(key); | |||
// if (bytesValue == null) { | |||
// return null; | |||
// } | |||
// return BinaryProtocol.decodeAs(bytesValue, BytesValue.class); | |||
// } | |||
// | |||
// @Override | |||
// public long getVersion(String key) { | |||
// return dataset.getVersion(key); | |||
// } | |||
// | |||
// @Override | |||
// public DataEntry<String, BytesValue> getDataEntry(String key) { | |||
// return new VersioningKVEntryWraper<String>(dataset.getDataEntry(key)); | |||
// } | |||
// | |||
// @Override | |||
// public DataEntry<String, BytesValue> getDataEntry(String key, long version) { | |||
// return new VersioningKVEntryWraper<String>(dataset.getDataEntry(key, version)); | |||
// } | |||
// } | |||
// private static interface DataChangedListener { | |||
// | |||
// void onChanged(Bytes key, BytesValue value, long newVersion); | |||
// | |||
// } | |||
// private static class VersioningKVEntryWraper implements DataEntry<String, BytesValue> { | |||
// | |||
// private DataEntry<Bytes, byte[]> kv; | |||
// | |||
// public VersioningKVEntryWraper(DataEntry<Bytes, byte[]> kv) { | |||
// this.kv = kv; | |||
// } | |||
// | |||
// @Override | |||
// public String getKey() { | |||
// return kv.getKey().toUTF8String(); | |||
// } | |||
// | |||
// @Override | |||
// public long getVersion() { | |||
// return kv.getVersion(); | |||
// } | |||
// | |||
// @Override | |||
// public BytesValue getValue() { | |||
// return BinaryProtocol.decodeAs(kv.getValue(), BytesValue.class); | |||
// } | |||
// | |||
// } | |||
} |
@@ -0,0 +1,12 @@ | |||
//package com.jd.blockchain.ledger.core; | |||
// | |||
//import com.jd.blockchain.binaryproto.DataField; | |||
//import com.jd.blockchain.ledger.BlockchainIdentity; | |||
//import com.jd.blockchain.ledger.MerkleSnapshot; | |||
// | |||
//public interface MerkleAccountHeader extends MerkleSnapshot { | |||
// | |||
// @DataField(order = 1, refContract = true) | |||
// BlockchainIdentity getID(); | |||
// | |||
//} |
@@ -3,31 +3,35 @@ package com.jd.blockchain.ledger.core; | |||
import java.util.HashMap; | |||
import java.util.Map; | |||
import com.jd.blockchain.binaryproto.BinaryProtocol; | |||
import com.jd.blockchain.binaryproto.DataContractRegistry; | |||
import com.jd.blockchain.crypto.AddressEncoding; | |||
import com.jd.blockchain.crypto.HashDigest; | |||
import com.jd.blockchain.crypto.PubKey; | |||
import com.jd.blockchain.ledger.AccountHeader; | |||
import com.jd.blockchain.ledger.BytesValue; | |||
import com.jd.blockchain.ledger.BlockchainIdentity; | |||
import com.jd.blockchain.ledger.BlockchainIdentityData; | |||
import com.jd.blockchain.ledger.CryptoSetting; | |||
import com.jd.blockchain.ledger.LedgerException; | |||
import com.jd.blockchain.ledger.MerkleProof; | |||
import com.jd.blockchain.ledger.MerkleSnapshot; | |||
import com.jd.blockchain.ledger.TypedValue; | |||
import com.jd.blockchain.storage.service.ExPolicyKVStorage; | |||
import com.jd.blockchain.storage.service.VersioningKVStorage; | |||
import com.jd.blockchain.utils.Bytes; | |||
import com.jd.blockchain.utils.DataEntry; | |||
import com.jd.blockchain.utils.Transactional; | |||
public class MerkleAccountSet implements Transactional, MerkleProvable, AccountQuery<MerkleAccount> { | |||
public class MerkleAccountSet implements Transactional, MerkleProvable, AccountQuery<CompositeAccount> { | |||
static { | |||
DataContractRegistry.register(MerkleSnapshot.class); | |||
DataContractRegistry.register(AccountHeader.class); | |||
DataContractRegistry.register(BlockchainIdentity.class); | |||
} | |||
private final String keyPrefix; | |||
private final Bytes keyPrefix; | |||
/** | |||
* 账户根哈希的数据集; | |||
*/ | |||
private MerkleDataSet merkleDataset; | |||
/** | |||
@@ -36,7 +40,7 @@ public class MerkleAccountSet implements Transactional, MerkleProvable, AccountQ | |||
* | |||
*/ | |||
// TODO:未考虑大数据量时,由于缺少过期策略,会导致内存溢出的问题; | |||
private Map<Bytes, InnerVersioningAccount> latestAccountsCache = new HashMap<>(); | |||
private Map<Bytes, InnerMerkleAccount> latestAccountsCache = new HashMap<>(); | |||
private ExPolicyKVStorage baseExStorage; | |||
@@ -44,7 +48,7 @@ public class MerkleAccountSet implements Transactional, MerkleProvable, AccountQ | |||
private CryptoSetting cryptoSetting; | |||
private boolean updated; | |||
private volatile boolean updated; | |||
private AccountAccessPolicy accessPolicy; | |||
@@ -56,12 +60,12 @@ public class MerkleAccountSet implements Transactional, MerkleProvable, AccountQ | |||
merkleDataset.setReadonly(); | |||
} | |||
public MerkleAccountSet(CryptoSetting cryptoSetting, String keyPrefix, ExPolicyKVStorage exStorage, | |||
public MerkleAccountSet(CryptoSetting cryptoSetting, Bytes keyPrefix, ExPolicyKVStorage exStorage, | |||
VersioningKVStorage verStorage, AccountAccessPolicy accessPolicy) { | |||
this(null, cryptoSetting, keyPrefix, exStorage, verStorage, false, accessPolicy); | |||
} | |||
public MerkleAccountSet(HashDigest rootHash, CryptoSetting cryptoSetting, String keyPrefix, | |||
public MerkleAccountSet(HashDigest rootHash, CryptoSetting cryptoSetting, Bytes keyPrefix, | |||
ExPolicyKVStorage exStorage, VersioningKVStorage verStorage, boolean readonly, | |||
AccountAccessPolicy accessPolicy) { | |||
this.keyPrefix = keyPrefix; | |||
@@ -70,6 +74,7 @@ public class MerkleAccountSet implements Transactional, MerkleProvable, AccountQ | |||
this.baseVerStorage = verStorage; | |||
this.merkleDataset = new MerkleDataSet(rootHash, cryptoSetting, keyPrefix, this.baseExStorage, | |||
this.baseVerStorage, readonly); | |||
this.accessPolicy = accessPolicy; | |||
} | |||
@@ -83,29 +88,17 @@ public class MerkleAccountSet implements Transactional, MerkleProvable, AccountQ | |||
return merkleDataset.getProof(key); | |||
} | |||
public AccountHeader[] getHeaders(int fromIndex, int count) { | |||
byte[][] results = merkleDataset.getLatestValues(fromIndex, count); | |||
AccountHeader[] accounts = new AccountHeader[results.length]; | |||
@Override | |||
public BlockchainIdentity[] getHeaders(int fromIndex, int count) { | |||
DataEntry<Bytes, byte[]>[] results = merkleDataset.getLatestDataEntries(fromIndex, count); | |||
BlockchainIdentity[] ids = new BlockchainIdentity[results.length]; | |||
for (int i = 0; i < results.length; i++) { | |||
accounts[i] = deserialize(results[i]); | |||
InnerMerkleAccount account = createAccount(results[i].getKey(), new HashDigest(results[i].getValue()), | |||
results[i].getVersion(), true); | |||
ids[i] = account.getID(); | |||
} | |||
return accounts; | |||
} | |||
// private VersioningAccount deserialize(byte[] txBytes) { | |||
//// return BinaryEncodingUtils.decode(txBytes, null, Account.class); | |||
// AccountHeaderData accInfo = BinaryEncodingUtils.decode(txBytes); | |||
//// return new BaseAccount(accInfo.getAddress(), accInfo.getPubKey(), null, | |||
// cryptoSetting, | |||
//// baseExStorage, baseVerStorage, true, accessPolicy); | |||
// return new VersioningAccount(accInfo.getAddress(), accInfo.getPubKey(), | |||
// accInfo.getRootHash(), cryptoSetting, | |||
// keyPrefix, baseExStorage, baseVerStorage, true, accessPolicy, accInfo.); | |||
// } | |||
private AccountHeader deserialize(byte[] txBytes) { | |||
return BinaryProtocol.decode(txBytes); | |||
return ids; | |||
} | |||
/** | |||
@@ -118,7 +111,7 @@ public class MerkleAccountSet implements Transactional, MerkleProvable, AccountQ | |||
} | |||
@Override | |||
public MerkleAccount getAccount(String address) { | |||
public CompositeAccount getAccount(String address) { | |||
return getAccount(Bytes.fromBase58(address)); | |||
} | |||
@@ -128,7 +121,8 @@ public class MerkleAccountSet implements Transactional, MerkleProvable, AccountQ | |||
* @param address | |||
* @return | |||
*/ | |||
public MerkleAccount getAccount(Bytes address) { | |||
@Override | |||
public CompositeAccount getAccount(Bytes address) { | |||
return this.getAccount(address, -1); | |||
} | |||
@@ -142,25 +136,30 @@ public class MerkleAccountSet implements Transactional, MerkleProvable, AccountQ | |||
* @return | |||
*/ | |||
public boolean contains(Bytes address) { | |||
long latestVersion = getVersion(address); | |||
InnerMerkleAccount acc = latestAccountsCache.get(address); | |||
if (acc != null) { | |||
// 无论是新注册未提交的,还是缓存已提交的账户实例,都认为是存在; | |||
return true; | |||
} | |||
long latestVersion = merkleDataset.getVersion(address); | |||
return latestVersion > -1; | |||
} | |||
/** | |||
* 返回指定账户的版本; <br> | |||
* 如果账户已经注册,则返回该账户的最新版本,值大于等于 0; <br> | |||
* 如果账户不存在,则返回 -1; <br> | |||
* 如果指定的账户已经注册(通过 {@link #register(String, PubKey)} 方法),但尚未提交(通过 | |||
* {@link #commit()} 方法),此方法对该账户仍然返回 0; | |||
* 如果账户不存在,则返回 -1;<br> | |||
* 如果账户已经注册(通过 {@link #register(String, PubKey)} 方法),但尚未提交(通过 {@link #commit()} | |||
* 方法),则返回 -1; <br> | |||
* | |||
* @param address | |||
* @return | |||
*/ | |||
public long getVersion(Bytes address) { | |||
InnerVersioningAccount acc = latestAccountsCache.get(address); | |||
InnerMerkleAccount acc = latestAccountsCache.get(address); | |||
if (acc != null) { | |||
// 已注册尚未提交,也返回 -1; | |||
return acc.version == -1 ? 0 : acc.version; | |||
return acc.getVersion(); | |||
} | |||
return merkleDataset.getVersion(address); | |||
@@ -175,12 +174,12 @@ public class MerkleAccountSet implements Transactional, MerkleProvable, AccountQ | |||
* @param version 账户版本;如果指定为 -1,则返回最新版本; | |||
* @return | |||
*/ | |||
public MerkleAccount getAccount(Bytes address, long version) { | |||
public CompositeAccount getAccount(Bytes address, long version) { | |||
version = version < 0 ? -1 : version; | |||
InnerVersioningAccount acc = latestAccountsCache.get(address); | |||
InnerMerkleAccount acc = latestAccountsCache.get(address); | |||
if (acc != null && version == -1) { | |||
return acc; | |||
} else if (acc != null && acc.version == version) { | |||
} else if (acc != null && acc.getVersion() == version) { | |||
return acc; | |||
} | |||
@@ -194,7 +193,7 @@ public class MerkleAccountSet implements Transactional, MerkleProvable, AccountQ | |||
} | |||
// 如果是不存在的,或者刚刚新增未提交的账户,则前面一步查询到的 latestVersion 小于 0, 代码不会执行到此; | |||
if (acc != null && acc.version != latestVersion) { | |||
if (acc != null && acc.getVersion() != latestVersion) { | |||
// 当执行到此处时,并且缓冲列表中缓存了最新的版本, | |||
// 如果当前缓存的最新账户的版本和刚刚从存储中检索得到的最新版本不一致,可能存在外部的并发更新,这超出了系统设计的逻辑; | |||
@@ -205,21 +204,15 @@ public class MerkleAccountSet implements Transactional, MerkleProvable, AccountQ | |||
} | |||
// Now, be sure that "acc == null", so get account from storage; | |||
byte[] bytes = merkleDataset.getValue(address, version); | |||
if (bytes == null) { | |||
return null; | |||
} | |||
// Set readonly for the old version account; | |||
boolean readonly = (version > -1 && version < latestVersion) || isReadonly(); | |||
// String prefix = address.concat(LedgerConsts.KEY_SEPERATOR); | |||
// ExPolicyKVStorage ss = PrefixAppender.prefix(prefix, baseExStorage); | |||
// VersioningKVStorage vs = PrefixAppender.prefix(prefix, baseVerStorage); | |||
// BaseAccount accDS = deserialize(bytes, cryptoSetting, ss, vs, readonly); | |||
String prefix = keyPrefix + address; | |||
acc = deserialize(bytes, cryptoSetting, prefix, baseExStorage, baseVerStorage, readonly, latestVersion); | |||
long qVersion = version == -1 ? latestVersion : version; | |||
// load account from storage; | |||
acc = loadAccount(address, readonly, qVersion); | |||
if (acc == null) { | |||
return null; | |||
} | |||
if (!readonly) { | |||
// cache the latest version witch enable reading and writing; | |||
// readonly version of account not necessary to be cached; | |||
@@ -228,6 +221,10 @@ public class MerkleAccountSet implements Transactional, MerkleProvable, AccountQ | |||
return acc; | |||
} | |||
/**
 * Registers a new account with the given address and public key; convenience
 * overload that delegates to {@code register(BlockchainIdentity)}.
 *
 * @param address address of the new account;
 * @param pubKey  public key of the new account;
 * @return the registered account;
 */
public CompositeAccount register(Bytes address, PubKey pubKey) {
	return register(new BlockchainIdentityData(address, pubKey));
}
/** | |||
* 注册一个新账户; <br> | |||
* | |||
@@ -239,16 +236,18 @@ public class MerkleAccountSet implements Transactional, MerkleProvable, AccountQ | |||
* @param pubKey 公钥; | |||
* @return 注册成功的账户对象; | |||
*/ | |||
public MerkleAccount register(Bytes address, PubKey pubKey) { | |||
public CompositeAccount register(BlockchainIdentity accountId) { | |||
if (isReadonly()) { | |||
throw new IllegalArgumentException("This AccountSet is readonly!"); | |||
} | |||
Bytes address = accountId.getAddress(); | |||
PubKey pubKey = accountId.getPubKey(); | |||
verifyAddressEncoding(address, pubKey); | |||
InnerVersioningAccount cachedAcc = latestAccountsCache.get(address); | |||
InnerMerkleAccount cachedAcc = latestAccountsCache.get(address); | |||
if (cachedAcc != null) { | |||
if (cachedAcc.version < 0) { | |||
if (cachedAcc.getVersion() < 0) { | |||
// 同一个新账户已经注册,但尚未提交,所以重复注册不会引起任何变化; | |||
return cachedAcc; | |||
} | |||
@@ -264,17 +263,8 @@ public class MerkleAccountSet implements Transactional, MerkleProvable, AccountQ | |||
throw new LedgerException("Account Registering was rejected for the access policy!"); | |||
} | |||
// String prefix = address.concat(LedgerConsts.KEY_SEPERATOR); | |||
// ExPolicyKVStorage accExStorage = PrefixAppender.prefix(prefix, | |||
// baseExStorage); | |||
// VersioningKVStorage accVerStorage = PrefixAppender.prefix(prefix, | |||
// baseVerStorage); | |||
// BaseAccount accDS = createInstance(address, pubKey, cryptoSetting, | |||
// accExStorage, accVerStorage); | |||
String prefix = keyPrefix + address; | |||
InnerVersioningAccount acc = createInstance(address, pubKey, cryptoSetting, prefix, baseExStorage, baseVerStorage, | |||
-1); | |||
Bytes prefix = keyPrefix.concat(address); | |||
InnerMerkleAccount acc = createInstance(accountId, cryptoSetting, prefix); | |||
latestAccountsCache.put(address, acc); | |||
updated = true; | |||
@@ -288,20 +278,50 @@ public class MerkleAccountSet implements Transactional, MerkleProvable, AccountQ | |||
} | |||
} | |||
private InnerVersioningAccount createInstance(Bytes address, PubKey pubKey, CryptoSetting cryptoSetting, | |||
String keyPrefix, ExPolicyKVStorage exStorage, VersioningKVStorage verStorage, long version) { | |||
return new InnerVersioningAccount(address, pubKey, cryptoSetting, keyPrefix, exStorage, verStorage, version); | |||
/**
 * Creates a brand-new, not-yet-registered account instance (version -1).
 *
 * @param header        the identity (address + public key) of the new account;
 * @param cryptoSetting settings about crypto operations;
 * @param keyPrefix     prefix of all keys of the new account's datasets;
 * @return the new account instance;
 */
private InnerMerkleAccount createInstance(BlockchainIdentity header, CryptoSetting cryptoSetting, Bytes keyPrefix) {
	return new InnerMerkleAccount(header, cryptoSetting, keyPrefix, baseExStorage, baseVerStorage);
}
/** | |||
* 加载指定版本的账户; | |||
* | |||
* @param address 账户地址; | |||
* @param readonly 是否只读; | |||
* @param version 账户的版本;大于等于 0 ; | |||
* @return | |||
*/ | |||
private InnerMerkleAccount loadAccount(Bytes address, boolean readonly, long version) { | |||
byte[] rootHashBytes = merkleDataset.getValue(address, version); | |||
if (rootHashBytes == null) { | |||
return null; | |||
} | |||
HashDigest rootHash = new HashDigest(rootHashBytes); | |||
return createAccount(address, rootHash, version, readonly); | |||
} | |||
private InnerVersioningAccount deserialize(byte[] bytes, CryptoSetting cryptoSetting, String keyPrefix, | |||
ExPolicyKVStorage exStorage, VersioningKVStorage verStorage, boolean readonly, long version) { | |||
AccountHeader accInfo = BinaryProtocol.decode(bytes); | |||
return new InnerVersioningAccount(accInfo.getAddress(), accInfo.getPubKey(), accInfo.getRootHash(), cryptoSetting, | |||
keyPrefix, exStorage, verStorage, readonly, version); | |||
private InnerMerkleAccount createAccount(Bytes address, HashDigest rootHash, long version, boolean readonly) { | |||
// prefix; | |||
Bytes prefix = keyPrefix.concat(address); | |||
return new InnerMerkleAccount(address, version, rootHash, cryptoSetting, prefix, baseExStorage, baseVerStorage, | |||
readonly); | |||
} | |||
private byte[] serialize(AccountHeader account) { | |||
return BinaryProtocol.encode(account, AccountHeader.class); | |||
// TODO: optimization: store the blockchain identity (address + public key)
// separately from its merkle tree root hash, instead of as one whole block;
// this avoids rewriting the public key on every state-data write — public keys
// of some algorithms can be quite large;
/**
 * Saves the account's root hash and returns the account's new version.
 *
 * @param account the account to save;
 * @return the new version of the account;
 */
private long saveAccount(InnerMerkleAccount account) {
	// Commit the pending changes, which updates the account's root hash;
	account.commit();
	return account.getVersion();
}
@Override | |||
@@ -315,17 +335,10 @@ public class MerkleAccountSet implements Transactional, MerkleProvable, AccountQ | |||
return; | |||
} | |||
try { | |||
for (InnerVersioningAccount acc : latestAccountsCache.values()) { | |||
for (InnerMerkleAccount acc : latestAccountsCache.values()) { | |||
// updated or new created; | |||
if (acc.isUpdated() || acc.version < 0) { | |||
// 提交更改,更新哈希; | |||
acc.commit(); | |||
byte[] value = serialize(acc); | |||
long ver = merkleDataset.setValue(acc.getAddress(), value, acc.version); | |||
if (ver < 0) { | |||
// Update fail; | |||
throw new LedgerException("Account updating fail! --[Address=" + acc.getAddress() + "]"); | |||
} | |||
if (acc.isUpdated() || acc.getVersion() < 0) { | |||
saveAccount(acc); | |||
} | |||
} | |||
merkleDataset.commit(); | |||
@@ -343,7 +356,7 @@ public class MerkleAccountSet implements Transactional, MerkleProvable, AccountQ | |||
Bytes[] addresses = new Bytes[latestAccountsCache.size()]; | |||
latestAccountsCache.keySet().toArray(addresses); | |||
for (Bytes address : addresses) { | |||
InnerVersioningAccount acc = latestAccountsCache.remove(address); | |||
InnerMerkleAccount acc = latestAccountsCache.remove(address); | |||
// cancel; | |||
if (acc.isUpdated()) { | |||
acc.cancel(); | |||
@@ -352,107 +365,46 @@ public class MerkleAccountSet implements Transactional, MerkleProvable, AccountQ | |||
updated = false; | |||
} | |||
public static class AccountHeaderData implements AccountHeader { | |||
private Bytes address; | |||
private PubKey pubKey; | |||
private HashDigest rootHash; | |||
public AccountHeaderData(Bytes address, PubKey pubKey, HashDigest rootHash) { | |||
this.address = address; | |||
this.pubKey = pubKey; | |||
this.rootHash = rootHash; | |||
} | |||
@Override | |||
public Bytes getAddress() { | |||
return address; | |||
} | |||
/** | |||
* 内部实现的账户,监听和同步账户数据的变更; | |||
* | |||
* @author huanghaiquan | |||
* | |||
*/ | |||
private class InnerMerkleAccount extends MerkleAccount { | |||
@Override | |||
public PubKey getPubKey() { | |||
return pubKey; | |||
} | |||
private long version; | |||
@Override | |||
public HashDigest getRootHash() { | |||
return rootHash; | |||
public InnerMerkleAccount(BlockchainIdentity accountID, CryptoSetting cryptoSetting, Bytes keyPrefix, | |||
ExPolicyKVStorage exStorage, VersioningKVStorage verStorage) { | |||
super(accountID, cryptoSetting, keyPrefix, exStorage, verStorage); | |||
this.version = -1; | |||
} | |||
} | |||
private class InnerVersioningAccount extends MerkleAccount { | |||
// private final BaseAccount account; | |||
private final long version; | |||
// public VersioningAccount(BaseAccount account, long version) { | |||
// this.account = account; | |||
// this.version = version; | |||
// } | |||
public InnerVersioningAccount(Bytes address, PubKey pubKey, HashDigest rootHash, CryptoSetting cryptoSetting, | |||
String keyPrefix, ExPolicyKVStorage exStorage, VersioningKVStorage verStorage, boolean readonly, | |||
long version) { | |||
super(address, pubKey, rootHash, cryptoSetting, keyPrefix, exStorage, verStorage, readonly); | |||
public InnerMerkleAccount(Bytes address, long version, HashDigest dataRootHash, CryptoSetting cryptoSetting, | |||
Bytes keyPrefix, ExPolicyKVStorage exStorage, VersioningKVStorage verStorage, boolean readonly) { | |||
super(address, dataRootHash, cryptoSetting, keyPrefix, exStorage, verStorage, readonly); | |||
this.version = version; | |||
} | |||
public InnerVersioningAccount(Bytes address, PubKey pubKey, CryptoSetting cryptoSetting, String keyPrefix, | |||
ExPolicyKVStorage exStorage, VersioningKVStorage verStorage, long version) { | |||
super(address, pubKey, cryptoSetting, keyPrefix, exStorage, verStorage); | |||
this.version = version; | |||
@Override | |||
protected void onUpdated(String key, TypedValue value, long expectedVersion, long newVersion) { | |||
updated = true; | |||
} | |||
// @Override | |||
// public Bytes getAddress() { | |||
// return account.getAddress(); | |||
// } | |||
// | |||
// @Override | |||
// public PubKey getPubKey() { | |||
// return account.getPubKey(); | |||
// } | |||
// | |||
// @Override | |||
// public HashDigest getRootHash() { | |||
// return account.getRootHash(); | |||
// } | |||
// | |||
// @Override | |||
// public MerkleProof getProof(Bytes key) { | |||
// return account.getProof(key); | |||
// } | |||
// | |||
// @Override | |||
// public boolean isReadonly() { | |||
// return account.isReadonly(); | |||
// } | |||
@Override | |||
public long setBytes(Bytes key, BytesValue value, long version) { | |||
long v = super.setBytes(key, value, version); | |||
if (v > -1) { | |||
updated = true; | |||
protected void onCommited(HashDigest previousRootHash, HashDigest newRootHash) { | |||
long newVersion = merkleDataset.setValue(this.getAddress(), newRootHash.toBytes(), version); | |||
if (newVersion < 0) { | |||
// Update fail; | |||
throw new LedgerException("Account updating fail! --[Address=" + this.getAddress() + "]"); | |||
} | |||
return v; | |||
this.version = newVersion; | |||
} | |||
// @Override | |||
// public long getKeyVersion(Bytes key) { | |||
// return account.getKeyVersion(key); | |||
// } | |||
// | |||
// @Override | |||
// public byte[] getBytes(Bytes key) { | |||
// return account.getBytes(key); | |||
// } | |||
// | |||
// @Override | |||
// public byte[] getBytes(Bytes key, long version) { | |||
// return account.getBytes(key, version); | |||
// } | |||
public long getVersion() { | |||
return version; | |||
} | |||
} | |||
@@ -0,0 +1,74 @@ | |||
package com.jd.blockchain.ledger.core; | |||
import java.util.Map; | |||
import com.jd.blockchain.crypto.HashDigest; | |||
import com.jd.blockchain.ledger.CryptoSetting; | |||
import com.jd.blockchain.ledger.MerkleSnapshot; | |||
import com.jd.blockchain.storage.service.ExPolicyKVStorage; | |||
import com.jd.blockchain.storage.service.VersioningKVStorage; | |||
import com.jd.blockchain.utils.Bytes; | |||
import com.jd.blockchain.utils.Transactional; | |||
public class MerkleDataCluster implements Transactional, MerkleSnapshot { | |||
private boolean readonly; | |||
private MerkleDataSet rootDS; | |||
private Map<Bytes, MerkleDataSet> partitions; | |||
/** | |||
* Create an empty readable {@link MerkleDataCluster} instance; | |||
*/ | |||
public MerkleDataCluster(CryptoSetting setting, Bytes keyPrefix, ExPolicyKVStorage exPolicyStorage, | |||
VersioningKVStorage versioningStorage) { | |||
this(null, setting, keyPrefix, exPolicyStorage, versioningStorage, false); | |||
} | |||
/** | |||
* Create an {@link MerkleDataCluster} instance; | |||
* | |||
* @param rootHash root hash of this {@link MerkleDataCluster} instance; | |||
* @param readonly whether read only; | |||
*/ | |||
public MerkleDataCluster(HashDigest rootHash, CryptoSetting setting, Bytes keyPrefix, | |||
ExPolicyKVStorage exPolicyStorage, VersioningKVStorage versioningStorage, boolean readonly) { | |||
this.rootDS = new MerkleDataSet(rootHash, setting, keyPrefix, exPolicyStorage, versioningStorage, readonly); | |||
} | |||
@Override | |||
public HashDigest getRootHash() { | |||
return rootDS.getRootHash(); | |||
} | |||
@Override | |||
public boolean isUpdated() { | |||
return rootDS.isUpdated(); | |||
} | |||
// public VersioningMap<Bytes, byte[]> getPartition(Bytes name) { | |||
// return getPartition(name, false); | |||
// } | |||
// | |||
// public VersioningMap<Bytes, byte[]> getPartition(Bytes name, boolean create) { | |||
// | |||
// } | |||
// | |||
// public VersioningMap<Bytes, byte[]> createPartition(Bytes name) { | |||
// | |||
// } | |||
@Override | |||
public void commit() { | |||
// TODO Auto-generated method stub | |||
} | |||
@Override | |||
public void cancel() { | |||
// TODO Auto-generated method stub | |||
} | |||
} |
@@ -1,11 +1,12 @@ | |||
package com.jd.blockchain.ledger.core; | |||
import com.jd.blockchain.ledger.MerkleProof; | |||
import com.jd.blockchain.storage.service.VersioningKVEntry; | |||
import com.jd.blockchain.utils.Bytes; | |||
import com.jd.blockchain.utils.DataEntry; | |||
public interface MerkleDataEntry { | |||
VersioningKVEntry getData(); | |||
DataEntry<Bytes, byte[]> getData(); | |||
MerkleProof getProof(); | |||
} |
@@ -0,0 +1,14 @@ | |||
package com.jd.blockchain.ledger.core; | |||
import com.jd.blockchain.ledger.core.MerkleTree.DataNode; | |||
import com.jd.blockchain.utils.Bytes; | |||
/**
 * Encoder/decoder of the binary representation of a merkle tree
 * {@link DataNode}; each implementation handles exactly one binary format
 * version, identified by the first byte of the encoded sequence.
 */
public interface MerkleDataNodeEncoder {

	/**
	 * The binary format version handled by this encoder; also the value of the
	 * first byte of every node encoded by it;
	 */
	byte getFormatVersion();

	/**
	 * Create a new {@link DataNode} from the given fields, computing its hash
	 * with the specified hash algorithm;
	 *
	 * @param hashAlgorithm code of the hash algorithm;
	 * @param sn            sequence number of the node;
	 * @param key           key of the data;
	 * @param version       version of the key;
	 * @param hashedData    the data bytes to be hashed into the node;
	 */
	DataNode create(short hashAlgorithm, long sn, Bytes key, long version, byte[] hashedData);

	/**
	 * Parse a {@link DataNode} from its binary sequence; the first byte must
	 * match {@link #getFormatVersion()};
	 */
	DataNode resolve(byte[] bytes);
}
@@ -0,0 +1,128 @@ | |||
package com.jd.blockchain.ledger.core; | |||
import com.jd.blockchain.crypto.Crypto; | |||
import com.jd.blockchain.crypto.HashDigest; | |||
import com.jd.blockchain.crypto.HashFunction; | |||
import com.jd.blockchain.ledger.core.MerkleTree.DataNode; | |||
import com.jd.blockchain.utils.Bytes; | |||
import com.jd.blockchain.utils.io.BytesUtils; | |||
import com.jd.blockchain.utils.io.NumberMask; | |||
/** | |||
* The first version of the DataNode binary sequence encoder, which's version | |||
* number is 0. | |||
* | |||
* <p> | |||
* This version of DataNode binary sequence is composed of sn(8 bytes), | |||
* key(variable size), version(8 bytes) and node hash(32 bytes for SHA256); | |||
* | |||
* <p> | |||
* In this version, the node hash is computed from bytes sequence composing of | |||
* sn, key, version and original value of the key; | |||
* | |||
* <p> | |||
* For the purpose of upgrading the version of DataNode binary format, we use | |||
* the first byte of the binary sequence as the tag to identify the version of | |||
* DataNode binary format, and reduce the maximum value of the valid range of SN | |||
* to 2^56. <br> | |||
* Other versions of the implementation also follow the above rules, the version | |||
* of the data node binary format is marked from 0, incremented by 1. | |||
* | |||
* @author huanghaiquan | |||
* | |||
*/ | |||
class MerkleDataNodeEncoder_V0 implements MerkleDataNodeEncoder {

	@Override
	public byte getFormatVersion() {
		// Version tag 0; also the expected first byte of every encoded node.
		// Since SN < 2^56 (see class javadoc), the top byte of the 8-byte SN
		// field doubles as this tag;
		return 0;
	}

	/**
	 * Parse a {@link DataNode} from its V0 bytes sequence:
	 * sn(8, first byte = version tag) + key(masked size + bytes) + version(8)
	 * + node hash(masked size + bytes);
	 */
	@Override
	public DataNode resolve(byte[] bytes) {
		if (bytes[0] != getFormatVersion()) {
			throw new IllegalArgumentException("Unsupported version of data node bytes sequence[" + bytes[0] + "]! ");
		}

		// resolve SN; zero the top byte (the version tag) and copy the
		// remaining 7 bytes, restoring the SN in the range [0, 2^56);
		byte[] snBytes = new byte[8];
		snBytes[0] = 0x0;
		System.arraycopy(bytes, 1, snBytes, 1, 7);
		long sn = BytesUtils.toLong(snBytes);

		// skip bytes of SN;
		int offset = 8;

		// resolve the key: read the masked key size, then the key bytes;
		// byte[] keyBytes = BytesEncoding.read(NumberMask.SHORT, in);
		// String key = BytesUtils.toString(keyBytes);
		int keySize = NumberMask.SHORT.resolveMaskedNumber(bytes, offset);
		offset += NumberMask.SHORT.getMaskLength(keySize);
		byte[] keyBytes = new byte[keySize];
		System.arraycopy(bytes, offset, keyBytes, 0, keySize);
		offset += keySize;
		// String key = BytesUtils.toString(keyBytes);
		Bytes key = new Bytes(keyBytes);

		// resolve the 8-byte version of the key;
		// long version = BytesUtils.readLong(in);
		long version = BytesUtils.toLong(bytes, offset);
		offset += 8;

		// resolve the node hash: masked hash size, then the hash bytes;
		// byte[] dataHashBytes = BytesEncoding.read(NumberMask.SHORT, in);
		int hashSize = NumberMask.TINY.resolveMaskedNumber(bytes, offset);
		offset += NumberMask.TINY.getMaskLength(hashSize);
		byte[] nodeHashBytes = new byte[hashSize];
		System.arraycopy(bytes, offset, nodeHashBytes, 0, hashSize);
		offset += hashSize;
		HashDigest nodeHash = new HashDigest(nodeHashBytes);

		// V0 nodes record no separate hash of the original value, hence the
		// null dataHash argument;
		return new DataNode(nodeHash, sn, key, version, null, bytes);
	}

	/**
	 * Build a V0 {@link DataNode}; the node hash is computed over the header
	 * (sn + key + version) concatenated with the original value.
	 *
	 * @deprecated superseded by the V1 format, which additionally records the
	 *             hash of the original value;
	 */
	@Deprecated
	@Override
	public DataNode create(short hashAlgorithm, long sn, Bytes key, long version, byte[] value) {
		// Header is composed of sn, key and version;
		// So the size of header is: 8 + "mask of key size" + "key bytes" + 8;
		int keySize = key.size();
		int maskSize = NumberMask.SHORT.getMaskLength(keySize);
		int headerSize = 8 + maskSize + keySize + 8;
		byte[] headerBytes = new byte[headerSize];

		int offset = 0;
		// write sn; for sn < 2^56 the top byte is 0, which is also the V0
		// format-version tag;
		offset += BytesUtils.toBytes(sn, headerBytes, 0);
		// write the size of key bytes;
		NumberMask.SHORT.writeMask(keySize, headerBytes, offset);
		offset += maskSize;
		// write the key bytes;
		offset += key.copyTo(headerBytes, offset, keySize);
		// version;
		offset += BytesUtils.toBytes(version, headerBytes, offset);

		// compute node hash from the combination of header and data value;
		byte[] dataBytes = BytesUtils.concat(headerBytes, value);
		HashFunction hashFunc = Crypto.getHashFunction(hashAlgorithm);
		HashDigest dataNodeHash = hashFunc.hash(dataBytes);

		// build bytes of data node, which is composed of sn, key, version and node
		// hash;
		int hashMaskSize = NumberMask.TINY.getMaskLength(dataNodeHash.size());
		int dataNodeSize = headerSize + hashMaskSize + dataNodeHash.size();
		byte[] nodeBytes = new byte[dataNodeSize];
		offset = 0;
		System.arraycopy(headerBytes, 0, nodeBytes, offset, headerSize);
		offset += headerSize;
		NumberMask.TINY.writeMask(dataNodeHash.size(), nodeBytes, offset);
		offset += hashMaskSize;
		System.arraycopy(dataNodeHash.toBytes(), 0, nodeBytes, offset, dataNodeHash.size());

		// No data hash has been computed and record in this old version of
		// implementation;
		return new DataNode(dataNodeHash, sn, key, version, null, nodeBytes);
	}
}
@@ -0,0 +1,200 @@ | |||
package com.jd.blockchain.ledger.core; | |||
import com.jd.blockchain.crypto.Crypto; | |||
import com.jd.blockchain.crypto.HashDigest; | |||
import com.jd.blockchain.crypto.HashFunction; | |||
import com.jd.blockchain.ledger.core.MerkleTree.DataNode; | |||
import com.jd.blockchain.utils.Bytes; | |||
import com.jd.blockchain.utils.io.BytesUtils; | |||
import com.jd.blockchain.utils.io.NumberMask; | |||
/** | |||
* The second version of the DataNode binary sequence encoder, which's version | |||
* number is 1. | |||
* | |||
* <p> | |||
* This version of DataNode binary sequence is composed of sn(8 bytes), | |||
* key(variable size), version(8 bytes), hash of original value the key, and | |||
* node hash; | |||
* | |||
* <p> | |||
* In this version, the node hash is computed from bytes sequence composing of | |||
* sn, key, version , hash of original value of the key; | |||
* | |||
* <p> | |||
* For the purpose of upgrading the version of DataNode binary format, we use | |||
* the first byte of the binary sequence as the tag to identify the version of | |||
* DataNode binary format, and reduce the maximum value of the valid range of SN | |||
* to 2^56. <br> | |||
* Other versions of the implementation also follow the above rules, the version | |||
* of the data node binary format is marked from 0, incremented by 1. | |||
* | |||
* @author huanghaiquan | |||
* | |||
*/ | |||
class MerkleDataNodeEncoder_V1 implements MerkleDataNodeEncoder {

	@Override
	public byte getFormatVersion() {
		// Version tag 1; written into the first byte of every encoded node;
		return 1;
	}

	/**
	 * Parse DataNode from it's bytes sequence;
	 * <p>
	 * the bytes sequence is: sn + key + version + data_hash + node_hash;
	 *
	 * @param bytes the encoded node; the first byte must equal 1;
	 * @return the resolved node, carrying both the data hash and node hash;
	 */
	@Override
	public DataNode resolve(byte[] bytes) {
		if (bytes[0] != getFormatVersion()) {
			throw new IllegalArgumentException("Unsupported version of data node bytes sequence[" + bytes[0] + "]! ");
		}

		// resolve SN; zero the top byte (the version tag) and copy the
		// remaining 7 bytes, restoring the SN in the range [0, 2^56);
		byte[] snBytes = new byte[8];
		snBytes[0] = 0x0;
		System.arraycopy(bytes, 1, snBytes, 1, 7);
		long sn = BytesUtils.toLong(snBytes);

		// skip bytes of SN;
		int offset = 8;

		// resolve key of data;
		// First, resolve the number mask of the key size;
		// Second, read the key bytes;
		int keySize = NumberMask.SHORT.resolveMaskedNumber(bytes, offset);
		offset += NumberMask.SHORT.getMaskLength(keySize);
		byte[] keyBytes = new byte[keySize];
		System.arraycopy(bytes, offset, keyBytes, 0, keySize);
		offset += keySize;
		Bytes key = new Bytes(keyBytes);

		// Resolve version of key;
		long version = BytesUtils.toLong(bytes, offset);
		offset += 8;

		// resolve data hash (hash of the original value of the key);
		int dataHashSize = NumberMask.TINY.resolveMaskedNumber(bytes, offset);
		offset += NumberMask.TINY.getMaskLength(dataHashSize);
		byte[] dataHashBytes = new byte[dataHashSize];
		System.arraycopy(bytes, offset, dataHashBytes, 0, dataHashSize);
		offset += dataHashSize;
		HashDigest dataHash = new HashDigest(dataHashBytes);

		// resolve node hash;
		int nodeHashSize = NumberMask.TINY.resolveMaskedNumber(bytes, offset);
		offset += NumberMask.TINY.getMaskLength(nodeHashSize);
		byte[] nodeHashBytes = new byte[nodeHashSize];
		System.arraycopy(bytes, offset, nodeHashBytes, 0, nodeHashSize);
		offset += nodeHashSize;
		HashDigest nodeHash = new HashDigest(nodeHashBytes);

		return new DataNode(nodeHash, sn, key, version, dataHash, bytes);
	}

	/**
	 * Create a node from a pre-computed hash of the original value, avoiding a
	 * second hashing of the data itself;
	 */
	public DataNode newDataNode(short hashAlgorithm, long sn, Bytes key, long version, HashDigest dataHash) {
		HashFunction hashFunc = Crypto.getHashFunction(hashAlgorithm);
		return create(hashFunc, sn, key, version, dataHash);
	}

	/**
	 * Create a node from the original data bytes; the data hash is computed
	 * here with the specified algorithm;
	 */
	@Override
	public DataNode create(short hashAlgorithm, long sn, Bytes key, long version, byte[] data) {
		HashFunction hashFunc = Crypto.getHashFunction(hashAlgorithm);
		HashDigest dataHash = hashFunc.hash(data);
		return create(hashFunc, sn, key, version, dataHash);
	}

	/**
	 * Data node's bytes sequence is composited by header( reference:
	 * {@link #buildKeyHeaderBytes(long, Bytes, long)} ) and data hash;
	 *
	 * <p>
	 * In general, the bytes sequence is: sn + key + version + data_hash +
	 * node_hash;
	 *
	 * @param hashFunc hash function used for both header and node hashes;
	 * @param sn       sequence number of the node;
	 * @param key      key of the data;
	 * @param version  version of the key;
	 * @param dataHash hash of the original value of the key;
	 * @return the new node, carrying its full encoded bytes sequence;
	 */
	private DataNode create(HashFunction hashFunc, long sn, Bytes key, long version, HashDigest dataHash) {
		byte[] headerBytes = buildKeyHeaderBytes(sn, key, version);
		int headerSize = headerBytes.length;

		// Hash the header and the data separately, so that a merkle proof can
		// be hash-verified without transferring the original data;
		HashDigest headerHash = hashFunc.hash(headerBytes);

		byte[] dataHashBytes = BytesUtils.concat(headerHash.getRawDigest(), dataHash.getRawDigest());
		HashDigest dataNodeHash = hashFunc.hash(dataHashBytes);

		// layout: header + masked data hash + masked node hash;
		int dataHashSize = dataHash.size();
		int nodeHashSize = dataNodeHash.size();
		int dataHashMaskSize = NumberMask.TINY.getMaskLength(dataHashSize);
		int nodeHashMaskSize = NumberMask.TINY.getMaskLength(nodeHashSize);
		int nodeSize = headerSize + dataHashMaskSize + dataHashSize + nodeHashMaskSize + nodeHashSize;
		byte[] nodeBytes = new byte[nodeSize];

		// write header;
		int offset = 0;
		System.arraycopy(headerBytes, 0, nodeBytes, offset, headerSize);
		offset += headerSize;

		// write data hash;
		NumberMask.TINY.writeMask(dataHashSize, nodeBytes, offset);
		offset += dataHashMaskSize;
		System.arraycopy(dataHash.toBytes(), 0, nodeBytes, offset, dataHashSize);
		offset += dataHashSize;

		// write node hash;
		NumberMask.TINY.writeMask(nodeHashSize, nodeBytes, offset);
		offset += nodeHashMaskSize;
		System.arraycopy(dataNodeHash.toBytes(), 0, nodeBytes, offset, nodeHashSize);

		// set format version; overwrites the top byte of the SN field, which
		// is 0 for any valid SN (< 2^56);
		nodeBytes[0] = getFormatVersion();

		return new DataNode(dataNodeHash, sn, key, version, dataHash, nodeBytes);
	}

	/**
	 * Header is composited by sn + key + version; Bytes sequence: sn_size(8) +
	 * number_mask_of_key_size + key_bytes + version_size(8);
	 *
	 * @param sn      sequence number of the node;
	 * @param key     key of the data;
	 * @param version version of the key;
	 * @return the encoded header bytes;
	 */
	private static byte[] buildKeyHeaderBytes(long sn, Bytes key, long version) {
		int keySize = key.size();
		int maskSize = NumberMask.SHORT.getMaskLength(keySize);

		// Size Of header = sn + key + version;
		// sn_size(8) + mask_size + key_size + version_size(8);
		int headerSize = 8 + maskSize + keySize + 8;
		byte[] headerBytes = new byte[headerSize];

		// write bytes of sn;
		int offset = 0;
		offset += BytesUtils.toBytes(sn, headerBytes, 0);
		// write bytes of key mask;
		NumberMask.SHORT.writeMask(keySize, headerBytes, offset);
		offset += maskSize;
		// write bytes of key;
		offset += key.copyTo(headerBytes, offset, keySize);
		// write bytes of version;
		offset += BytesUtils.toBytes(version, headerBytes, offset);

		return headerBytes;
	}
}
@@ -7,11 +7,14 @@ import com.jd.blockchain.ledger.MerkleDataNode; | |||
import com.jd.blockchain.ledger.MerkleProof; | |||
import com.jd.blockchain.storage.service.ExPolicyKVStorage; | |||
import com.jd.blockchain.storage.service.ExPolicyKVStorage.ExPolicy; | |||
import com.jd.blockchain.storage.service.VersioningKVEntry; | |||
import com.jd.blockchain.storage.service.VersioningKVStorage; | |||
import com.jd.blockchain.storage.service.utils.BufferedKVStorage; | |||
import com.jd.blockchain.storage.service.utils.VersioningKVData; | |||
import com.jd.blockchain.utils.ArrayUtils; | |||
import com.jd.blockchain.utils.Bytes; | |||
import com.jd.blockchain.utils.DataEntry; | |||
import com.jd.blockchain.utils.DataIterator; | |||
import com.jd.blockchain.utils.Dataset; | |||
import com.jd.blockchain.utils.Transactional; | |||
import com.jd.blockchain.utils.io.BytesUtils; | |||
@@ -23,23 +26,24 @@ import com.jd.blockchain.utils.io.BytesUtils; | |||
* @author huanghaiquan | |||
* | |||
*/ | |||
public class MerkleDataSet implements Transactional, MerkleProvable { | |||
public class MerkleDataSet implements Transactional, MerkleProvable, Dataset<Bytes, byte[]> { | |||
/** | |||
* 4 MB MaxSize of value; | |||
*/ | |||
public static final int MAX_SIZE_OF_VALUE = 4 * 1024 * 1024; | |||
public static final String ORIG_KEY_SEPERATOR = LedgerConsts.KEY_SEPERATOR; | |||
public static final String SN_PREFIX = "SN" + ORIG_KEY_SEPERATOR; | |||
public static final String DATA_PREFIX = "KV" + ORIG_KEY_SEPERATOR; | |||
public static final String MERKLE_TREE_PREFIX = "MKL" + ORIG_KEY_SEPERATOR; | |||
public static final Bytes SN_PREFIX = Bytes.fromString("SN" + LedgerConsts.KEY_SEPERATOR); | |||
public static final Bytes DATA_PREFIX = Bytes.fromString("KV" + LedgerConsts.KEY_SEPERATOR); | |||
public static final Bytes MERKLE_TREE_PREFIX = Bytes.fromString("MKL" + LedgerConsts.KEY_SEPERATOR); | |||
private final Bytes snKeyPrefix; | |||
private final Bytes dataKeyPrefix; | |||
private final Bytes merkleKeyPrefix; | |||
@SuppressWarnings("unchecked") | |||
private static final DataEntry<Bytes, byte[]>[] EMPTY_ENTRIES = new DataEntry[0]; | |||
private BufferedKVStorage bufferedStorage; | |||
private VersioningKVStorage valueStorage; | |||
@@ -71,6 +75,18 @@ public class MerkleDataSet implements Transactional, MerkleProvable { | |||
*/ | |||
public MerkleDataSet(CryptoSetting setting, String keyPrefix, ExPolicyKVStorage exPolicyStorage, | |||
VersioningKVStorage versioningStorage) { | |||
this(setting, Bytes.fromString(keyPrefix), exPolicyStorage, versioningStorage); | |||
} | |||
/** | |||
* 创建一个新的 MerkleDataSet; | |||
* | |||
* @param setting 密码设置; | |||
* @param exPolicyStorage 默克尔树的存储; | |||
* @param versioningStorage 数据的存储; | |||
*/ | |||
public MerkleDataSet(CryptoSetting setting, Bytes keyPrefix, ExPolicyKVStorage exPolicyStorage, | |||
VersioningKVStorage versioningStorage) { | |||
// 缓冲对KV的写入; | |||
this.bufferedStorage = new BufferedKVStorage(exPolicyStorage, versioningStorage, false); | |||
@@ -79,15 +95,15 @@ public class MerkleDataSet implements Transactional, MerkleProvable { | |||
// bufferedStorage); | |||
// this.snStorage = PrefixAppender.prefix(SN_PREFIX, (ExPolicyKVStorage) | |||
// bufferedStorage); | |||
snKeyPrefix = Bytes.fromString(keyPrefix + SN_PREFIX); | |||
dataKeyPrefix = Bytes.fromString(keyPrefix + DATA_PREFIX); | |||
snKeyPrefix = keyPrefix.concat(SN_PREFIX); | |||
dataKeyPrefix = keyPrefix.concat(DATA_PREFIX); | |||
this.valueStorage = bufferedStorage; | |||
this.snStorage = bufferedStorage; | |||
// MerkleTree 本身是可缓冲的; | |||
// ExPolicyKVStorage merkleTreeStorage = | |||
// PrefixAppender.prefix(MERKLE_TREE_PREFIX, exPolicyStorage); | |||
merkleKeyPrefix = Bytes.fromString(keyPrefix + MERKLE_TREE_PREFIX); | |||
merkleKeyPrefix = keyPrefix.concat(MERKLE_TREE_PREFIX); | |||
ExPolicyKVStorage merkleTreeStorage = exPolicyStorage; | |||
this.merkleTree = new MerkleTree(setting, merkleKeyPrefix, merkleTreeStorage); | |||
this.snGenerator = new MerkleSequenceSNGenerator(merkleTree); | |||
@@ -104,17 +120,33 @@ public class MerkleDataSet implements Transactional, MerkleProvable { | |||
*/ | |||
public MerkleDataSet(HashDigest merkleRootHash, CryptoSetting setting, String keyPrefix, | |||
ExPolicyKVStorage exPolicyStorage, VersioningKVStorage versioningStorage, boolean readonly) { | |||
this(merkleRootHash, setting, Bytes.fromString(keyPrefix), exPolicyStorage, versioningStorage, readonly); | |||
} | |||
/** | |||
* 从指定的 Merkle 根构建的 MerkleDataSet; | |||
* | |||
* @param dataStorage | |||
* @param defaultMerkleHashAlgorithm | |||
* @param verifyMerkleHashOnLoad | |||
* @param merkleTreeStorage | |||
* @param snGenerator | |||
*/ | |||
public MerkleDataSet(HashDigest merkleRootHash, CryptoSetting setting, Bytes keyPrefix, | |||
ExPolicyKVStorage exPolicyStorage, VersioningKVStorage versioningStorage, boolean readonly) { | |||
// 缓冲对KV的写入; | |||
this.bufferedStorage = new BufferedKVStorage(exPolicyStorage, versioningStorage, false); | |||
// 把存储数据值、SN、Merkle节点的 key 分别加入独立的前缀,避免针对 key 的注入攻击; | |||
snKeyPrefix = Bytes.fromString(keyPrefix + SN_PREFIX); | |||
dataKeyPrefix = Bytes.fromString(keyPrefix + DATA_PREFIX); | |||
// snKeyPrefix = Bytes.fromString(keyPrefix + SN_PREFIX); | |||
// dataKeyPrefix = Bytes.fromString(keyPrefix + DATA_PREFIX); | |||
snKeyPrefix = keyPrefix.concat(SN_PREFIX); | |||
dataKeyPrefix = keyPrefix.concat(DATA_PREFIX); | |||
this.valueStorage = bufferedStorage; | |||
this.snStorage = bufferedStorage; | |||
// MerkleTree 本身是可缓冲的; | |||
merkleKeyPrefix = Bytes.fromString(keyPrefix + MERKLE_TREE_PREFIX); | |||
merkleKeyPrefix = keyPrefix.concat(MERKLE_TREE_PREFIX); | |||
ExPolicyKVStorage merkleTreeStorage = exPolicyStorage; | |||
this.merkleTree = new MerkleTree(merkleRootHash, setting, merkleKeyPrefix, merkleTreeStorage, readonly); | |||
@@ -130,15 +162,21 @@ public class MerkleDataSet implements Transactional, MerkleProvable { | |||
this.readonly = true; | |||
} | |||
@Override
public long getDataCount() {
	// Total count of data keys recorded in the underlying merkle tree;
	return merkleTree.getDataCount();
}

/**
 * Returns the theoretical maximum data index allowed (the maximum SN of the
 * underlying merkle tree);
 *
 * @return the maximum index;
 */
public long getMaxIndex() {
	return merkleTree.getMaxSn();
}
public byte[][] getLatestValues(int fromIndex, int count) { | |||
public byte[][] getLatestValues(long fromIndex, int count) { | |||
if (count > LedgerConsts.MAX_LIST_COUNT) { | |||
throw new IllegalArgumentException("Count exceed the upper limit[" + LedgerConsts.MAX_LIST_COUNT + "]!"); | |||
} | |||
@@ -154,24 +192,41 @@ public class MerkleDataSet implements Transactional, MerkleProvable { | |||
return values; | |||
} | |||
public VersioningKVEntry[] getLatestDataEntries(int fromIndex, int count) { | |||
public DataEntry<Bytes, byte[]>[] getLatestDataEntries(long fromIndex, int count) { | |||
if (count > LedgerConsts.MAX_LIST_COUNT) { | |||
throw new IllegalArgumentException("Count exceed the upper limit[" + LedgerConsts.MAX_LIST_COUNT + "]!"); | |||
} | |||
if (fromIndex < 0 || (fromIndex + count) > merkleTree.getDataCount()) { | |||
throw new IllegalArgumentException("Index out of bound!"); | |||
} | |||
VersioningKVEntry[] values = new VersioningKVEntry[count]; | |||
if (count == 0) { | |||
return EMPTY_ENTRIES; | |||
} | |||
@SuppressWarnings("unchecked") | |||
DataEntry<Bytes, byte[]>[] values = new DataEntry[count]; | |||
byte[] bytesValue; | |||
for (int i = 0; i < count; i++) { | |||
MerkleDataNode dataNode = merkleTree.getData(fromIndex + i); | |||
Bytes dataKey = encodeDataKey(dataNode.getKey()); | |||
bytesValue = valueStorage.get(dataKey, dataNode.getVersion()); | |||
values[i] = new VersioningKVData(dataNode.getKey(), dataNode.getVersion(), bytesValue); | |||
values[i] = new VersioningKVData<Bytes, byte[]>(dataNode.getKey(), dataNode.getVersion(), bytesValue); | |||
} | |||
return values; | |||
} | |||
/**
 * Return the data entry at the specified index, with the version recorded in
 * the merkle tree's data node (the latest version, as with
 * {@code getLatestDataEntries});
 *
 * @param index index of the data node; must be in [0, getDataCount());
 * @return the key, version and value of the entry;
 * @throws IllegalArgumentException if the index is out of bound;
 */
public DataEntry<Bytes, byte[]> getLatestDataEntry(long index) {
	if (index < 0 || index + 1 > merkleTree.getDataCount()) {
		throw new IllegalArgumentException("Index out of bound!");
	}
	byte[] bytesValue;
	MerkleDataNode dataNode = merkleTree.getData(index);
	// re-apply the data-key prefix before reading from the value storage;
	Bytes dataKey = encodeDataKey(dataNode.getKey());
	bytesValue = valueStorage.get(dataKey, dataNode.getVersion());
	DataEntry<Bytes, byte[]> entry = new VersioningKVData<Bytes, byte[]>(dataNode.getKey(), dataNode.getVersion(),
			bytesValue);
	return entry;
}
/** | |||
* get the data at the specific index; | |||
* | |||
@@ -192,32 +247,34 @@ public class MerkleDataSet implements Transactional, MerkleProvable { | |||
*/ | |||
public String getKeyAtIndex(int fromIndex) { | |||
MerkleDataNode dataNode = merkleTree.getData(fromIndex); | |||
return dataNode.getKey().toUTF8String(); | |||
} | |||
/** | |||
* Create or update the value associated the specified key if the version | |||
* checking is passed.<br> | |||
* | |||
* The value of the key will be updated only if it's latest version equals the | |||
* specified version argument. <br> | |||
* If the key doesn't exist, it will be created when the version arg was -1. | |||
* <p> | |||
* If updating is performed, the version of the key increase by 1. <br> | |||
* If creating is performed, the version of the key initialize by 0. <br> | |||
* | |||
* @param key The key of data; | |||
* @param value The value of data; | |||
* @param version The expected latest version of the key. | |||
* @return The new version of the key. <br> | |||
* If the key is new created success, then return 0; <br> | |||
* If the key is updated success, then return the new version;<br> | |||
* If this operation fail by version checking or other reason, then | |||
* return -1; | |||
*/ | |||
public long setValue(String key, byte[] value, long version) { | |||
return setValue(Bytes.fromString(key), value, version); | |||
} | |||
// TODO: 未去掉前缀; | |||
return dataNode.getKey().toUTF8String(); | |||
} | |||
// /** | |||
// * Create or update the value associated the specified key if the version | |||
// * checking is passed.<br> | |||
// * | |||
// * The value of the key will be updated only if it's latest version equals the | |||
// * specified version argument. <br> | |||
// * If the key doesn't exist, it will be created when the version arg was -1. | |||
// * <p> | |||
// * If updating is performed, the version of the key increase by 1. <br> | |||
// * If creating is performed, the version of the key initialize by 0. <br> | |||
// * | |||
// * @param key The key of data; | |||
// * @param value The value of data; | |||
// * @param version The expected latest version of the key. | |||
// * @return The new version of the key. <br> | |||
// * If the key is new created success, then return 0; <br> | |||
// * If the key is updated success, then return the new version;<br> | |||
// * If this operation fail by version checking or other reason, then | |||
// * return -1; | |||
// */ | |||
// @Override | |||
// public long setValue(String key, byte[] value, long version) { | |||
// return setValue(Bytes.fromString(key), value, version); | |||
// } | |||
/** | |||
* Create or update the value associated the specified key if the version | |||
@@ -239,6 +296,7 @@ public class MerkleDataSet implements Transactional, MerkleProvable { | |||
* If this operation fail by version checking or other reason, then | |||
* return -1; | |||
*/ | |||
@Override | |||
public long setValue(Bytes key, byte[] value, long version) { | |||
if (readonly) { | |||
throw new IllegalArgumentException("This merkle dataset is readonly!"); | |||
@@ -336,18 +394,19 @@ public class MerkleDataSet implements Transactional, MerkleProvable { | |||
return mdn.getVersion(); | |||
} | |||
/** | |||
* Return the specified version's value;<br> | |||
* | |||
* If the key with the specified version doesn't exist, then return null;<br> | |||
* If the version is specified to -1, then return the latest version's value; | |||
* | |||
* @param key | |||
* @param version | |||
*/ | |||
public byte[] getValue(String key, long version) { | |||
return getValue(Bytes.fromString(key), version); | |||
} | |||
// /** | |||
// * Return the specified version's value;<br> | |||
// * | |||
// * If the key with the specified version doesn't exist, then return null;<br> | |||
// * If the version is specified to -1, then return the latest version's value; | |||
// * | |||
// * @param key | |||
// * @param version | |||
// */ | |||
// @Override | |||
// public byte[] getValue(String key, long version) { | |||
// return getValue(Bytes.fromString(key), version); | |||
// } | |||
/** | |||
* Return the specified version's value;<br> | |||
@@ -358,6 +417,7 @@ public class MerkleDataSet implements Transactional, MerkleProvable { | |||
* @param key | |||
* @param version | |||
*/ | |||
@Override | |||
public byte[] getValue(Bytes key, long version) { | |||
long latestVersion = getMerkleVersion(key); | |||
if (latestVersion < 0 || version > latestVersion) { | |||
@@ -370,15 +430,16 @@ public class MerkleDataSet implements Transactional, MerkleProvable { | |||
return valueStorage.get(dataKey, version); | |||
} | |||
/** | |||
* Return the latest version's value; | |||
* | |||
* @param key | |||
* @return return null if not exist; | |||
*/ | |||
public byte[] getValue(String key) { | |||
return getValue(Bytes.fromString(key)); | |||
} | |||
// /** | |||
// * Return the latest version's value; | |||
// * | |||
// * @param key | |||
// * @return return null if not exist; | |||
// */ | |||
// @Override | |||
// public byte[] getValue(String key) { | |||
// return getValue(Bytes.fromString(key)); | |||
// } | |||
/** | |||
* Return the latest version's value; | |||
@@ -386,6 +447,7 @@ public class MerkleDataSet implements Transactional, MerkleProvable { | |||
* @param key | |||
* @return return null if not exist; | |||
*/ | |||
@Override | |||
public byte[] getValue(Bytes key) { | |||
long latestVersion = getMerkleVersion(key); | |||
if (latestVersion < 0) { | |||
@@ -395,16 +457,17 @@ public class MerkleDataSet implements Transactional, MerkleProvable { | |||
return valueStorage.get(dataKey, latestVersion); | |||
} | |||
/** | |||
* Return the latest version entry associated the specified key; If the key | |||
* doesn't exist, then return -1; | |||
* | |||
* @param key | |||
* @return | |||
*/ | |||
public long getVersion(String key) { | |||
return getMerkleVersion(Bytes.fromString(key)); | |||
} | |||
// /** | |||
// * Return the latest version entry associated the specified key; If the key | |||
// * doesn't exist, then return -1; | |||
// * | |||
// * @param key | |||
// * @return | |||
// */ | |||
// @Override | |||
// public long getVersion(String key) { | |||
// return getMerkleVersion(Bytes.fromString(key)); | |||
// } | |||
/** | |||
* Return the latest version entry associated the specified key; If the key | |||
@@ -413,33 +476,46 @@ public class MerkleDataSet implements Transactional, MerkleProvable { | |||
* @param key | |||
* @return | |||
*/ | |||
@Override | |||
public long getVersion(Bytes key) { | |||
return getMerkleVersion(key); | |||
} | |||
public VersioningKVEntry getDataEntry(String key) { | |||
return getDataEntry(Bytes.fromString(key)); | |||
} | |||
// @Override | |||
// public VersioningKVEntry<String, byte[]> getDataEntry(String key) { | |||
// return getDataEntry(key, -1); | |||
// } | |||
/** | |||
* | |||
* @param key | |||
* @return Null if the key doesn't exist! | |||
*/ | |||
public VersioningKVEntry getDataEntry(Bytes key) { | |||
long latestVersion = getMerkleVersion(key); | |||
if (latestVersion < 0) { | |||
return null; | |||
} | |||
Bytes dataKey = encodeDataKey(key); | |||
byte[] value = valueStorage.get(dataKey, latestVersion); | |||
if (value == null) { | |||
return null; | |||
} | |||
return new VersioningKVData(key, latestVersion, value); | |||
} | |||
@Override | |||
public DataEntry<Bytes, byte[]> getDataEntry(Bytes key) { | |||
return getDataEntry(key, -1); | |||
} | |||
// @Override | |||
// public VersioningKVEntry<String, byte[]> getDataEntry(String key, long version) { | |||
// Bytes keyBytes = Bytes.fromString(key); | |||
// long latestVersion = getMerkleVersion(keyBytes); | |||
// if (latestVersion < 0 || version > latestVersion) { | |||
// // key not exist, or the specified version is out of the latest version indexed | |||
// // by the current merkletree; | |||
// return null; | |||
// } | |||
// version = version < 0 ? latestVersion : version; | |||
// Bytes dataKey = encodeDataKey(keyBytes); | |||
// byte[] value = valueStorage.get(dataKey, version); | |||
// if (value == null) { | |||
// return null; | |||
// } | |||
// return new VersioningKVData<String, byte[]>(key, version, value); | |||
// } | |||
public VersioningKVEntry getDataEntry(Bytes key, long version) { | |||
@Override | |||
public DataEntry<Bytes, byte[]> getDataEntry(Bytes key, long version) { | |||
long latestVersion = getMerkleVersion(key); | |||
if (latestVersion < 0 || version > latestVersion) { | |||
// key not exist, or the specified version is out of the latest version indexed | |||
@@ -452,11 +528,21 @@ public class MerkleDataSet implements Transactional, MerkleProvable { | |||
if (value == null) { | |||
return null; | |||
} | |||
return new VersioningKVData(key, version, value); | |||
return new VersioningKVData<Bytes, byte[]>(key, version, value); | |||
} | |||
@Override | |||
public DataIterator<Bytes, byte[]> iterator() { | |||
return new AscDataInterator(getDataCount()); | |||
} | |||
@Override | |||
public DataIterator<Bytes, byte[]> iteratorDesc() { | |||
return new DescDataInterator(getDataCount()); | |||
} | |||
public MerkleDataEntry getMerkleEntry(Bytes key, long version) { | |||
VersioningKVEntry dataEntry = getDataEntry(key, version); | |||
DataEntry<Bytes, byte[]> dataEntry = getDataEntry(key, version); | |||
if (dataEntry == null) { | |||
return null; | |||
} | |||
@@ -465,7 +551,7 @@ public class MerkleDataSet implements Transactional, MerkleProvable { | |||
} | |||
public MerkleDataEntry getMerkleEntry(Bytes key) { | |||
VersioningKVEntry dataEntry = getDataEntry(key); | |||
DataEntry<Bytes, byte[]> dataEntry = getDataEntry(key); | |||
if (dataEntry == null) { | |||
return null; | |||
} | |||
@@ -492,23 +578,23 @@ public class MerkleDataSet implements Transactional, MerkleProvable { | |||
} | |||
/** | |||
* A wrapper for {@link VersioningKVEntry} and {@link MerkleProof}; | |||
* A wrapper for {@link DataEntry} and {@link MerkleProof}; | |||
* | |||
* @author huanghaiquan | |||
* | |||
*/ | |||
private static class MerkleDataEntryWrapper implements MerkleDataEntry { | |||
private VersioningKVEntry data; | |||
private DataEntry<Bytes, byte[]> data; | |||
private MerkleProof proof; | |||
public MerkleDataEntryWrapper(VersioningKVEntry data, MerkleProof proof) { | |||
public MerkleDataEntryWrapper(DataEntry<Bytes, byte[]> data, MerkleProof proof) { | |||
this.data = data; | |||
this.proof = proof; | |||
} | |||
@Override | |||
public VersioningKVEntry getData() { | |||
public DataEntry<Bytes, byte[]> getData() { | |||
return data; | |||
} | |||
@@ -536,4 +622,119 @@ public class MerkleDataSet implements Transactional, MerkleProvable { | |||
merkleTree.cancel(); | |||
snGenerator = new MerkleSequenceSNGenerator(merkleTree); | |||
} | |||
// ---------------------------------------------------------- | |||
private class AscDataInterator implements DataIterator<Bytes, byte[]> { | |||
private final long total; | |||
private long cursor = 0; | |||
public AscDataInterator(long total) { | |||
this.total = total; | |||
} | |||
@Override | |||
public void skip(long count) { | |||
cursor = nextCursor(count); | |||
} | |||
private long nextCursor(long skippingCount) { | |||
long c = cursor + skippingCount; | |||
return c > total ? total : c; | |||
} | |||
@Override | |||
public DataEntry<Bytes, byte[]> next() { | |||
if (hasNext()) { | |||
DataEntry<Bytes, byte[]> entry = getLatestDataEntry(cursor); | |||
cursor = nextCursor(1); | |||
return entry; | |||
} | |||
return null; | |||
} | |||
@Override | |||
public DataEntry<Bytes, byte[]>[] next(int count) { | |||
if (hasNext()) { | |||
long from = cursor; | |||
long nextCursor = nextCursor(count); | |||
long c = nextCursor - cursor; | |||
if (c > LedgerConsts.MAX_LIST_COUNT) { | |||
throw new IllegalArgumentException( | |||
"Count exceed the upper limit[" + LedgerConsts.MAX_LIST_COUNT + "]!"); | |||
} | |||
DataEntry<Bytes, byte[]>[] entries = getLatestDataEntries(from, (int) c); | |||
cursor = nextCursor; | |||
return entries; | |||
} | |||
return EMPTY_ENTRIES; | |||
} | |||
@Override | |||
public boolean hasNext() { | |||
return cursor < total; | |||
} | |||
} | |||
private class DescDataInterator implements DataIterator<Bytes, byte[]> { | |||
private final long total; | |||
private long cursor; | |||
public DescDataInterator(long total) { | |||
this.total = total; | |||
this.cursor = total - 1; | |||
} | |||
@Override | |||
public void skip(long count) { | |||
cursor = nextCursor(count); | |||
} | |||
private long nextCursor(long skippingCount) { | |||
long c = cursor - skippingCount; | |||
return c < 0 ? -1 : c; | |||
} | |||
@Override | |||
public DataEntry<Bytes, byte[]> next() { | |||
if (hasNext()) { | |||
DataEntry<Bytes, byte[]> entry = getLatestDataEntry(cursor); | |||
cursor = nextCursor(1); | |||
return entry; | |||
} | |||
return null; | |||
} | |||
@Override | |||
public DataEntry<Bytes, byte[]>[] next(int count) { | |||
if (hasNext()) { | |||
long nextCursor = nextCursor(count); | |||
long from = nextCursor + 1; | |||
long c = cursor - nextCursor; | |||
if (c > LedgerConsts.MAX_LIST_COUNT) { | |||
throw new IllegalArgumentException( | |||
"Count exceed the upper limit[" + LedgerConsts.MAX_LIST_COUNT + "]!"); | |||
} | |||
DataEntry<Bytes, byte[]>[] entries = getLatestDataEntries(from, (int) c); | |||
// reverse; | |||
ArrayUtils.reverse(entries); | |||
cursor = nextCursor; | |||
return entries; | |||
} | |||
return EMPTY_ENTRIES; | |||
} | |||
@Override | |||
public boolean hasNext() { | |||
return cursor < total; | |||
} | |||
} | |||
} |
@@ -52,8 +52,9 @@ public class MerkleTree implements Transactional { | |||
public static final int TREE_DEGREE = 16; | |||
public static final int MAX_LEVEL = 15; | |||
public static final int MAX_LEVEL = 14; | |||
// 正好是 2 的 56 次方(7字节),将 SN 8个字节中的首个字节预留作为 DataNode 的编码格式版本标记; | |||
public static final long MAX_DATACOUNT = power(TREE_DEGREE, MAX_LEVEL); | |||
public static final long MAX_SN = MAX_DATACOUNT - 1; | |||
@@ -147,14 +148,10 @@ public class MerkleTree implements Transactional { | |||
/** | |||
* 创建 Merkle 树; | |||
* | |||
* @param rootHash | |||
* 节点的根Hash; 如果指定为 null,则实际上创建一个空的 Merkle Tree; | |||
* @param verifyOnLoad | |||
* 从外部存储加载节点时是否校验节点的哈希; | |||
* @param kvStorage | |||
* 保存 Merkle 节点的存储服务; | |||
* @param readonly | |||
* 是否只读; | |||
* @param rootHash 节点的根Hash; 如果指定为 null,则实际上创建一个空的 Merkle Tree; | |||
* @param verifyOnLoad 从外部存储加载节点时是否校验节点的哈希; | |||
* @param kvStorage 保存 Merkle 节点的存储服务; | |||
* @param readonly 是否只读; | |||
*/ | |||
public MerkleTree(HashDigest rootHash, CryptoSetting setting, String keyPrefix, ExPolicyKVStorage kvStorage, | |||
boolean readonly) { | |||
@@ -164,14 +161,10 @@ public class MerkleTree implements Transactional { | |||
/** | |||
* 创建 Merkle 树; | |||
* | |||
* @param rootHash | |||
* 节点的根Hash; 如果指定为 null,则实际上创建一个空的 Merkle Tree; | |||
* @param verifyOnLoad | |||
* 从外部存储加载节点时是否校验节点的哈希; | |||
* @param kvStorage | |||
* 保存 Merkle 节点的存储服务; | |||
* @param readonly | |||
* 是否只读; | |||
* @param rootHash 节点的根Hash; 如果指定为 null,则实际上创建一个空的 Merkle Tree; | |||
* @param verifyOnLoad 从外部存储加载节点时是否校验节点的哈希; | |||
* @param kvStorage 保存 Merkle 节点的存储服务; | |||
* @param readonly 是否只读; | |||
*/ | |||
public MerkleTree(HashDigest rootHash, CryptoSetting setting, Bytes keyPrefix, ExPolicyKVStorage kvStorage, | |||
boolean readonly) { | |||
@@ -205,8 +198,7 @@ public class MerkleTree implements Transactional { | |||
* <p> | |||
* 如果 sn 超出范围,则引发 {@link IndexOutOfBoundsException} ; | |||
* | |||
* @param sn | |||
* 数据的序列号; | |||
* @param sn 数据的序列号; | |||
* @return 默克尔证明的实例; | |||
*/ | |||
public MerkleProof getProof(long sn) { | |||
@@ -242,13 +234,10 @@ public class MerkleTree implements Transactional { | |||
* 注:默克尔树只保存指定数据的哈希以及关联的键,而不会保存数据原文,因此调用者需要自己处理对数据的存储; <br> | |||
* 此外,哈希计算是把键和数据内容拼接一起进行计算的; | |||
* | |||
* @param sn | |||
* 与此数据唯一相关的序列号;sn 必须大于等于 0 ; | |||
* @param key | |||
* 与此数据唯一相关的键; | |||
* @param sn 与此数据唯一相关的序列号;sn 必须大于等于 0 ; | |||
* @param key 与此数据唯一相关的键; | |||
* @param version | |||
* @param hashedData | |||
* 要参与哈希计算的数据内容;注:此参数值并不会被默克尔树保存; | |||
* @param hashedData 要参与哈希计算的数据内容;注:此参数值并不会被默克尔树保存; | |||
* @return | |||
*/ | |||
public MerkleDataNode setData(long sn, String key, long version, byte[] hashedData) { | |||
@@ -266,13 +255,10 @@ public class MerkleTree implements Transactional { | |||
* 注:默克尔树只保存指定数据的哈希以及关联的键,而不会保存数据原文,因此调用者需要自己处理对数据的存储; <br> | |||
* 此外,哈希计算是把键和数据内容拼接一起进行计算的; | |||
* | |||
* @param sn | |||
* 与此数据唯一相关的序列号;sn 必须大于等于 0 ; | |||
* @param key | |||
* 与此数据唯一相关的键; | |||
* @param sn 与此数据唯一相关的序列号;sn 必须大于等于 0 ; | |||
* @param key 与此数据唯一相关的键; | |||
* @param version | |||
* @param hashedData | |||
* 要参与哈希计算的数据内容;注:此参数值并不会被默克尔树保存; | |||
* @param hashedData 要参与哈希计算的数据内容;注:此参数值并不会被默克尔树保存; | |||
* @return | |||
*/ | |||
public MerkleDataNode setData(long sn, Bytes key, long version, byte[] hashedData) { | |||
@@ -285,7 +271,8 @@ public class MerkleTree implements Transactional { | |||
if (sn > MAX_SN) { | |||
throw new IllegalArgumentException("The sn is great than MAX[" + MAX_SN + "]!"); | |||
} | |||
DataNode dataNode = DataNode.newDataNode(setting.getHashAlgorithm(), sn, key, version, hashedData); | |||
DataNode dataNode = MerkleTreeEncoder.LATEST_DATANODE_ENCODER.create(setting.getHashAlgorithm(), sn, key, | |||
version, hashedData); | |||
updatedDataNodes.put(sn, dataNode); | |||
return dataNode; | |||
} | |||
@@ -591,10 +578,8 @@ public class MerkleTree implements Transactional { | |||
/** | |||
* 重新计算所有子节点以及自身的哈希,并返回新加入的数据节点的数量; | |||
* | |||
* @param pathNode | |||
* 需要重新计算 hash 的路径节点; | |||
* @param updatedNodes | |||
* 用于记录已更新节点的列表; | |||
* @param pathNode 需要重新计算 hash 的路径节点; | |||
* @param updatedNodes 用于记录已更新节点的列表; | |||
* @return | |||
*/ | |||
@SuppressWarnings("unused") | |||
@@ -732,12 +717,10 @@ public class MerkleTree implements Transactional { | |||
* | |||
* 如果 sn 超出范围,则引发 {@link IndexOutOfBoundsException} ; | |||
* | |||
* @param sn | |||
* 数据节点的序列号; | |||
* @param path | |||
* 用于记录节点路径的列表,长度必须大于等于当前默克尔树的总的层级(即 path.length 大于等于 root.level + | |||
* 1);<br> | |||
* 如果参数为 null,则不记录; | |||
* @param sn 数据节点的序列号; | |||
* @param path 用于记录节点路径的列表,长度必须大于等于当前默克尔树的总的层级(即 path.length 大于等于 root.level + | |||
* 1);<br> | |||
* 如果参数为 null,则不记录; | |||
* @return 序列号对应的数据节点;<br> | |||
* 如果不存在,则返回 null,注意,此时指定的路径参数 path 依然写入了查找过程的路径; | |||
*/ | |||
@@ -844,7 +827,8 @@ public class MerkleTree implements Transactional { | |||
if (bytes == null || bytes.length == 0) { | |||
return null; | |||
} | |||
DataNode dataNode = DataNode.parse(bytes); | |||
DataNode dataNode = MerkleTreeEncoder.resolve(bytes); | |||
if (verify && !hashBytes.equals(dataNode.nodeHash)) { | |||
String keyStr = hashBytes.toBase58(); | |||
String actualHashStr = dataNode.nodeHash.toBase58(); | |||
@@ -861,8 +845,7 @@ public class MerkleTree implements Transactional { | |||
* 注:此方法不处理溢出;调用者需要自行规避; | |||
* | |||
* @param value | |||
* @param x | |||
* 大于等于 0 的整数; | |||
* @param x 大于等于 0 的整数; | |||
* @return | |||
*/ | |||
private static long power(long value, int x) { | |||
@@ -1140,14 +1123,10 @@ public class MerkleTree implements Transactional { | |||
/** | |||
* 创建一个路径节点; | |||
* | |||
* @param hashAlgorithm | |||
* 生成节点采用的哈希算法; | |||
* @param startingSN | |||
* 路径节点表示的子树的起始序列号; | |||
* @param level | |||
* 路径节点的层级深度;路径节点的深度从 1 开始往上递增(数据节点作为树的深度为 0); | |||
* @param dataCount | |||
* 路径节点表示的子树所包含的数据节点的数量; | |||
* @param hashAlgorithm 生成节点采用的哈希算法; | |||
* @param startingSN 路径节点表示的子树的起始序列号; | |||
* @param level 路径节点的层级深度;路径节点的深度从 1 开始往上递增(数据节点作为树的深度为 0); | |||
* @param dataCount 路径节点表示的子树所包含的数据节点的数量; | |||
*/ | |||
private PathNode(CryptoAlgorithm hashAlgorithm, long startingSN, int level, long dataCount) { | |||
this(hashAlgorithm, startingSN, level, dataCount, new HashDigest[TREE_DEGREE], null); | |||
@@ -1338,10 +1317,8 @@ public class MerkleTree implements Transactional { | |||
/** | |||
* 从指定的字节数组反序列化节点; | |||
* | |||
* @param bytes | |||
* 字节数组;合法的输入应等同于 {@link #toBytes()} 方法的输出; | |||
* @param checkHash | |||
* 是否重新计算并校验节点的哈希; | |||
* @param bytes 字节数组;合法的输入应等同于 {@link #toBytes()} 方法的输出; | |||
* @param checkHash 是否重新计算并校验节点的哈希; | |||
* @return | |||
*/ | |||
private static PathNode parse(byte[] bytes, boolean checkHash) { | |||
@@ -1429,7 +1406,7 @@ public class MerkleTree implements Transactional { | |||
* @author huanghaiquan | |||
* | |||
*/ | |||
private static class DataNode extends AbstractMerkleNode implements MerkleDataNode { | |||
static class DataNode extends AbstractMerkleNode implements MerkleDataNode { | |||
private long sn; | |||
@@ -1437,64 +1414,17 @@ public class MerkleTree implements Transactional { | |||
private long version; | |||
private byte[] dataNodeBytes; | |||
private byte[] nodeBytes; | |||
private HashDigest valueHash; | |||
private DataNode(long sn, Bytes key, long version, HashDigest dataHash, byte[] dataBytes) { | |||
DataNode(HashDigest nodeHash, long sn, Bytes key, long version, HashDigest valueHash, byte[] nodeBytes) { | |||
this.sn = sn; | |||
this.key = key; | |||
this.version = version; | |||
this.nodeHash = dataHash; | |||
this.dataNodeBytes = dataBytes; | |||
} | |||
@SuppressWarnings("unused") | |||
private static DataNode newDataNode(CryptoAlgorithm hashAlgorithm, long sn, Bytes key, long version, | |||
byte[] hashedData) { | |||
return newDataNode(hashAlgorithm.code(), sn, key, version, hashedData); | |||
} | |||
private static DataNode newDataNode(short hashAlgorithm, long sn, Bytes key, long version, byte[] hashedData) { | |||
// byte[] keyStrBytes = BytesUtils.toBytes(key); | |||
// int maskSize = NumberMask.SHORT.getMaskLength(keyStrBytes.length); | |||
int keySize = key.size(); | |||
int maskSize = NumberMask.SHORT.getMaskLength(keySize); | |||
// int bodySize = 8 + maskSize + keyStrBytes.length + 8;// sn + key + version; | |||
int bodySize = 8 + maskSize + keySize + 8;// sn + key + version; | |||
byte[] bodyBytes = new byte[bodySize]; | |||
int offset = 0; | |||
offset += BytesUtils.toBytes(sn, bodyBytes, 0); | |||
// NumberMask.SHORT.writeMask(keyStrBytes.length, bodyBytes, offset); | |||
NumberMask.SHORT.writeMask(keySize, bodyBytes, offset); | |||
offset += maskSize; | |||
// System.arraycopy(keyStrBytes, 0, bodyBytes, offset, keyStrBytes.length); | |||
// System.arraycopy(keyStrBytes, 0, bodyBytes, offset, keyStrBytes.length); | |||
// offset += keyStrBytes.length; | |||
offset += key.copyTo(bodyBytes, offset, keySize); | |||
// TODO: version; | |||
offset += BytesUtils.toBytes(version, bodyBytes, offset); | |||
byte[] dataBytes = BytesUtils.concat(bodyBytes, hashedData); | |||
HashFunction hashFunc = Crypto.getHashFunction(hashAlgorithm); | |||
HashDigest dataHash = hashFunc.hash(dataBytes); | |||
int hashMaskSize = NumberMask.TINY.getMaskLength(dataHash.size()); | |||
int dataNodeSize = bodySize + hashMaskSize + dataHash.size(); | |||
byte[] dataNodeBytes = new byte[dataNodeSize]; | |||
offset = 0; | |||
System.arraycopy(bodyBytes, 0, dataNodeBytes, offset, bodySize); | |||
offset += bodySize; | |||
NumberMask.TINY.writeMask(dataHash.size(), dataNodeBytes, offset); | |||
offset += hashMaskSize; | |||
System.arraycopy(dataHash.toBytes(), 0, dataNodeBytes, offset, dataHash.size()); | |||
return new DataNode(sn, key, version, dataHash, dataNodeBytes); | |||
this.nodeHash = nodeHash; | |||
this.valueHash = valueHash; | |||
this.nodeBytes = nodeBytes; | |||
} | |||
@Override | |||
@@ -1547,6 +1477,11 @@ public class MerkleTree implements Transactional { | |||
return version; | |||
} | |||
@Override | |||
public HashDigest getValueHash() { | |||
return valueHash; | |||
} | |||
@Override | |||
public byte[] toBytes() { | |||
// ByteArrayOutputStream out = new ByteArrayOutputStream(); | |||
@@ -1574,38 +1509,7 @@ public class MerkleTree implements Transactional { | |||
// | |||
// System.arraycopy(nodeHash.toBytes(), 0, totalBytes, offset, hashSize); | |||
return dataNodeBytes; | |||
} | |||
private static DataNode parse(byte[] bytes) { | |||
// InputStream in = new ByteArrayInputStream(bytes); | |||
int offset = 0; | |||
long sn = BytesUtils.toLong(bytes, offset); | |||
offset += 8; | |||
// byte[] keyBytes = BytesEncoding.read(NumberMask.SHORT, in); | |||
// String key = BytesUtils.toString(keyBytes); | |||
int keySize = NumberMask.SHORT.resolveMaskedNumber(bytes, offset); | |||
offset += NumberMask.SHORT.getMaskLength(keySize); | |||
byte[] keyBytes = new byte[keySize]; | |||
System.arraycopy(bytes, offset, keyBytes, 0, keySize); | |||
offset += keySize; | |||
// String key = BytesUtils.toString(keyBytes); | |||
Bytes key = new Bytes(keyBytes); | |||
// long version = BytesUtils.readLong(in); | |||
long version = BytesUtils.toLong(bytes, offset); | |||
offset += 8; | |||
// byte[] dataHashBytes = BytesEncoding.read(NumberMask.SHORT, in); | |||
int hashSize = NumberMask.TINY.resolveMaskedNumber(bytes, offset); | |||
offset += NumberMask.TINY.getMaskLength(hashSize); | |||
byte[] dataHashBytes = new byte[hashSize]; | |||
System.arraycopy(bytes, offset, dataHashBytes, 0, hashSize); | |||
offset += hashSize; | |||
HashDigest dataHash = new HashDigest(dataHashBytes); | |||
return new DataNode(sn, key, version, dataHash, bytes); | |||
return nodeBytes; | |||
} | |||
@Override | |||
@@ -0,0 +1,31 @@ | |||
package com.jd.blockchain.ledger.core; | |||
import java.util.Arrays; | |||
import java.util.Collections; | |||
import java.util.List; | |||
import com.jd.blockchain.ledger.core.MerkleTree.DataNode; | |||
class MerkleTreeEncoder { | |||
static final MerkleDataNodeEncoder LATEST_DATANODE_ENCODER = new MerkleDataNodeEncoder_V1(); | |||
static final MerkleDataNodeEncoder V0_DATANODE_ENCODER = new MerkleDataNodeEncoder_V0(); | |||
static final List<MerkleDataNodeEncoder> DATANODE_ENCODERS = Collections | |||
.unmodifiableList(Arrays.asList(LATEST_DATANODE_ENCODER, V0_DATANODE_ENCODER)); | |||
/** | |||
* @param bytes | |||
* @return | |||
*/ | |||
static DataNode resolve(byte[] bytes) { | |||
for (MerkleDataNodeEncoder encoder : MerkleTreeEncoder.DATANODE_ENCODERS) { | |||
if (encoder.getFormatVersion() == bytes[0]) { | |||
return encoder.resolve(bytes); | |||
} | |||
} | |||
throw new IllegalStateException("Unsupported version of DataNode bytes sequence[" + bytes[0] + "]!"); | |||
} | |||
} |
@@ -1,7 +1,7 @@ | |||
package com.jd.blockchain.ledger.core; | |||
import com.jd.blockchain.crypto.PubKey; | |||
import com.jd.blockchain.ledger.AccountHeader; | |||
import com.jd.blockchain.ledger.BlockchainIdentity; | |||
import com.jd.blockchain.utils.Bytes; | |||
/** | |||
@@ -15,7 +15,7 @@ import com.jd.blockchain.utils.Bytes; | |||
public class OpeningAccessPolicy implements AccountAccessPolicy { | |||
@Override | |||
public boolean checkDataWriting(AccountHeader account) { | |||
public boolean checkDataWriting(BlockchainIdentity account) { | |||
return true; | |||
} | |||
@@ -28,7 +28,7 @@ public class ParticipantDataset implements Transactional, MerkleProvable, Partic | |||
public ParticipantDataset(HashDigest merkleRootHash, CryptoSetting cryptoSetting, String prefix, | |||
ExPolicyKVStorage exPolicyStorage, VersioningKVStorage verStorage, boolean readonly) { | |||
dataset = new MerkleDataSet(merkleRootHash, cryptoSetting, prefix, exPolicyStorage, verStorage, readonly); | |||
dataset = new MerkleDataSet(merkleRootHash, cryptoSetting, Bytes.fromString(prefix), exPolicyStorage, verStorage, readonly); | |||
} | |||
@Override | |||
@@ -14,10 +14,10 @@ import com.jd.blockchain.ledger.RolePrivileges; | |||
import com.jd.blockchain.ledger.TransactionPermission; | |||
import com.jd.blockchain.ledger.TransactionPrivilege; | |||
import com.jd.blockchain.storage.service.ExPolicyKVStorage; | |||
import com.jd.blockchain.storage.service.VersioningKVEntry; | |||
import com.jd.blockchain.storage.service.VersioningKVStorage; | |||
import com.jd.blockchain.utils.Bytes; | |||
import com.jd.blockchain.utils.Transactional; | |||
import com.jd.blockchain.utils.DataEntry; | |||
public class RolePrivilegeDataset implements Transactional, MerkleProvable, RolePrivilegeSettings { | |||
@@ -30,7 +30,8 @@ public class RolePrivilegeDataset implements Transactional, MerkleProvable, Role | |||
public RolePrivilegeDataset(HashDigest merkleRootHash, CryptoSetting cryptoSetting, String prefix, | |||
ExPolicyKVStorage exPolicyStorage, VersioningKVStorage verStorage, boolean readonly) { | |||
dataset = new MerkleDataSet(merkleRootHash, cryptoSetting, prefix, exPolicyStorage, verStorage, readonly); | |||
dataset = new MerkleDataSet(merkleRootHash, cryptoSetting, Bytes.fromString(prefix), exPolicyStorage, | |||
verStorage, readonly); | |||
} | |||
@Override | |||
@@ -255,7 +256,7 @@ public class RolePrivilegeDataset implements Transactional, MerkleProvable, Role | |||
public RolePrivileges getRolePrivilege(String roleName) { | |||
// 只返回最新版本; | |||
Bytes key = encodeKey(roleName); | |||
VersioningKVEntry kv = dataset.getDataEntry(key); | |||
DataEntry<Bytes, byte[]> kv = dataset.getDataEntry(key); | |||
if (kv == null) { | |||
return null; | |||
} | |||
@@ -265,7 +266,7 @@ public class RolePrivilegeDataset implements Transactional, MerkleProvable, Role | |||
@Override | |||
public RolePrivileges[] getRolePrivileges(int index, int count) { | |||
VersioningKVEntry[] kvEntries = dataset.getLatestDataEntries(index, count); | |||
DataEntry<Bytes, byte[]>[] kvEntries = dataset.getLatestDataEntries(index, count); | |||
RolePrivileges[] pns = new RolePrivileges[kvEntries.length]; | |||
PrivilegeSet privilege; | |||
for (int i = 0; i < pns.length; i++) { | |||
@@ -1,12 +1,5 @@ | |||
package com.jd.blockchain.ledger.core; | |||
import java.util.Set; | |||
import com.jd.blockchain.ledger.BlockchainIdentity; | |||
import com.jd.blockchain.ledger.LedgerPermission; | |||
import com.jd.blockchain.ledger.LedgerSecurityException; | |||
import com.jd.blockchain.ledger.TransactionPermission; | |||
public class SecurityContext { | |||
private static ThreadLocal<SecurityPolicy> policyHolder = new ThreadLocal<SecurityPolicy>(); | |||
@@ -39,6 +39,18 @@ public class TransactionBatchProcessor implements TransactionBatchProcess { | |||
private TransactionBatchResult batchResult; | |||
public byte[] getPrevLatestBlockHash() { | |||
return ledger.getLatestBlockHash().toBytes(); | |||
} | |||
public byte[] getGenisBlockHash() { | |||
return ledger.getBlockHash(0).toBytes(); | |||
} | |||
public long getPreLatestBlockHeight() { | |||
return ledger.getLatestBlockHeight(); | |||
} | |||
public HashDigest getLedgerHash() { | |||
return ledger.getHash(); | |||
} | |||
@@ -273,9 +285,6 @@ public class TransactionBatchProcessor implements TransactionBatchProcess { | |||
// rollback all the block; | |||
// TODO: handle the BlockRollbackException in detail; | |||
result = TransactionState.IGNORED_BY_BLOCK_FULL_ROLLBACK; | |||
if (e instanceof DataVersionConflictException) { | |||
result = TransactionState.DATA_VERSION_CONFLICT; | |||
} | |||
txCtx.rollback(); | |||
LOGGER.error( | |||
String.format("Transaction was rolled back! --[BlockHeight=%s][RequestHash=%s][TxHash=%s] --%s", | |||
@@ -295,6 +304,8 @@ public class TransactionBatchProcessor implements TransactionBatchProcess { | |||
result = TransactionState.CONTRACT_DOES_NOT_EXIST; | |||
} else if (e instanceof ParticipantDoesNotExistException) { | |||
result = TransactionState.PARTICIPANT_DOES_NOT_EXIST; | |||
} else if (e instanceof DataVersionConflictException) { | |||
result = TransactionState.DATA_VERSION_CONFLICT; | |||
} | |||
txCtx.discardAndCommit(result, operationResults); | |||
LOGGER.error(String.format( | |||
@@ -48,6 +48,16 @@ public class TransactionEngineImpl implements TransactionEngine { | |||
return batchs.get(ledgerHash); | |||
} | |||
public void freeBatch(HashDigest ledgerHash) { | |||
finishBatch(ledgerHash); | |||
} | |||
public void resetNewBlockEditor(HashDigest ledgerHash) { | |||
LedgerRepository ledgerRepo = ledgerService.getLedger(ledgerHash); | |||
((LedgerRepositoryImpl)ledgerRepo).resetNextBlockEditor(); | |||
} | |||
private void finishBatch(HashDigest ledgerHash) { | |||
batchs.remove(ledgerHash); | |||
} | |||
@@ -88,7 +88,8 @@ public class TransactionSet implements Transactional, TransactionQuery { | |||
public TransactionSet(HashDigest txRootHash, CryptoSetting setting, String keyPrefix, | |||
ExPolicyKVStorage merkleTreeStorage, VersioningKVStorage dataStorage, boolean readonly) { | |||
this.txStatePrefix = Bytes.fromString(keyPrefix + TX_STATE_PREFIX); | |||
this.txSet = new MerkleDataSet(txRootHash, setting, keyPrefix, merkleTreeStorage, dataStorage, readonly); | |||
this.txSet = new MerkleDataSet(txRootHash, setting, Bytes.fromString(keyPrefix), merkleTreeStorage, dataStorage, | |||
readonly); | |||
} | |||
/** | |||
@@ -1,9 +1,9 @@ | |||
package com.jd.blockchain.ledger.core; | |||
import com.jd.blockchain.crypto.HashDigest; | |||
import com.jd.blockchain.crypto.PubKey; | |||
import com.jd.blockchain.ledger.BytesValue; | |||
import com.jd.blockchain.ledger.BytesData; | |||
import com.jd.blockchain.ledger.LedgerException; | |||
import com.jd.blockchain.ledger.TypedValue; | |||
import com.jd.blockchain.ledger.UserInfo; | |||
import com.jd.blockchain.utils.Bytes; | |||
@@ -13,72 +13,73 @@ import com.jd.blockchain.utils.Bytes; | |||
* @author huanghaiquan | |||
* | |||
*/ | |||
public class UserAccount implements UserInfo { | |||
public class UserAccount extends AccountDecorator implements UserInfo { // implements UserInfo { | |||
private static final Bytes USER_INFO_PREFIX = Bytes.fromString("PROP" + LedgerConsts.KEY_SEPERATOR); | |||
private static final String USER_INFO_PREFIX = "PROP" + LedgerConsts.KEY_SEPERATOR; | |||
private static final Bytes DATA_PUB_KEY = Bytes.fromString("DATA-PUBKEY"); | |||
private static final String DATA_PUB_KEY = "DATA-PUBKEY"; | |||
private MerkleAccount baseAccount; | |||
/**
 * Creates a user account decorating the given base account.
 *
 * @param baseAccount the underlying composite account to decorate;
 */
public UserAccount(CompositeAccount baseAccount) {
	super(baseAccount);
}
private PubKey dataPubKey; | |||
@Override | |||
public Bytes getAddress() { | |||
return baseAccount.getAddress(); | |||
return getID().getAddress(); | |||
} | |||
@Override | |||
public PubKey getPubKey() { | |||
return baseAccount.getPubKey(); | |||
return getID().getPubKey(); | |||
} | |||
@Override | |||
public HashDigest getRootHash() { | |||
return baseAccount.getRootHash(); | |||
} | |||
public UserAccount(MerkleAccount baseAccount) { | |||
this.baseAccount = baseAccount; | |||
} | |||
public PubKey getDataPubKey() { | |||
BytesValue pkBytes = baseAccount.getBytes(DATA_PUB_KEY); | |||
if (pkBytes == null) { | |||
return null; | |||
if (dataPubKey == null) { | |||
BytesValue pkBytes = getHeaders().getValue(DATA_PUB_KEY); | |||
if (pkBytes == null) { | |||
return null; | |||
} | |||
dataPubKey = new PubKey(pkBytes.getBytes().toBytes()); | |||
} | |||
return new PubKey(pkBytes.getValue().toBytes()); | |||
return dataPubKey; | |||
} | |||
public long setDataPubKey(PubKey pubKey) { | |||
byte[] pkBytes = pubKey.toBytes(); | |||
return baseAccount.setBytes(DATA_PUB_KEY, BytesData.fromBytes(pkBytes), -1); | |||
public void setDataPubKey(PubKey pubKey) { | |||
long version = getHeaders().getVersion(DATA_PUB_KEY); | |||
setDataPubKey(pubKey, version); | |||
} | |||
public long setDataPubKey(PubKey pubKey, long version) { | |||
byte[] pkBytes = pubKey.toBytes(); | |||
return baseAccount.setBytes(DATA_PUB_KEY, BytesData.fromBytes(pkBytes), version); | |||
public void setDataPubKey(PubKey pubKey, long version) { | |||
TypedValue value = TypedValue.fromPubKey(dataPubKey); | |||
long newVersion = getHeaders().setValue(DATA_PUB_KEY, value, version); | |||
if (newVersion > -1) { | |||
dataPubKey = pubKey; | |||
} else { | |||
throw new LedgerException("Data public key was updated failed!"); | |||
} | |||
} | |||
public long setProperty(String key, String value, long version) { | |||
return setProperty(Bytes.fromString(key), value, version); | |||
return getHeaders().setValue(encodePropertyKey(key), TypedValue.fromText(value), version); | |||
} | |||
public long setProperty(Bytes key, String value, long version) { | |||
return baseAccount.setBytes(encodePropertyKey(key), BytesData.fromText(value), version); | |||
public String getProperty(String key) { | |||
BytesValue value = getHeaders().getValue(encodePropertyKey(key)); | |||
return value == null ? null : value.getBytes().toUTF8String(); | |||
} | |||
public String getProperty(Bytes key) { | |||
BytesValue value = baseAccount.getBytes(encodePropertyKey(key)); | |||
return value == null ? null : value.getValue().toUTF8String(); | |||
public String getProperty(String key, long version) { | |||
BytesValue value = getHeaders().getValue(encodePropertyKey(key), version); | |||
return value == null ? null : value.getBytes().toUTF8String(); | |||
} | |||
public String getProperty(Bytes key, long version) { | |||
BytesValue value = baseAccount.getBytes(encodePropertyKey(key), version); | |||
return value == null ? null : value.getValue().toUTF8String(); | |||
private String encodePropertyKey(String key) { | |||
return USER_INFO_PREFIX+key; | |||
} | |||
private Bytes encodePropertyKey(Bytes key) { | |||
// return key.concatTo(USER_INFO_PREFIX); | |||
return USER_INFO_PREFIX.concat(key); | |||
} | |||
} |
@@ -2,7 +2,7 @@ package com.jd.blockchain.ledger.core; | |||
import com.jd.blockchain.crypto.HashDigest; | |||
import com.jd.blockchain.crypto.PubKey; | |||
import com.jd.blockchain.ledger.AccountHeader; | |||
import com.jd.blockchain.ledger.BlockchainIdentity; | |||
import com.jd.blockchain.ledger.CryptoSetting; | |||
import com.jd.blockchain.ledger.LedgerException; | |||
import com.jd.blockchain.ledger.MerkleProof; | |||
@@ -21,19 +21,20 @@ public class UserAccountSet implements Transactional, UserAccountQuery { | |||
public UserAccountSet(CryptoSetting cryptoSetting, String keyPrefix, ExPolicyKVStorage simpleStorage, | |||
VersioningKVStorage versioningStorage, AccountAccessPolicy accessPolicy) { | |||
accountSet = new MerkleAccountSet(cryptoSetting, keyPrefix, simpleStorage, versioningStorage, accessPolicy); | |||
accountSet = new MerkleAccountSet(cryptoSetting, Bytes.fromString(keyPrefix), simpleStorage, versioningStorage, | |||
accessPolicy); | |||
} | |||
public UserAccountSet(HashDigest dataRootHash, CryptoSetting cryptoSetting, String keyPrefix, | |||
ExPolicyKVStorage exStorage, VersioningKVStorage verStorage, boolean readonly, | |||
AccountAccessPolicy accessPolicy) { | |||
accountSet = new MerkleAccountSet(dataRootHash, cryptoSetting, keyPrefix, exStorage, verStorage, readonly, | |||
accessPolicy); | |||
accountSet = new MerkleAccountSet(dataRootHash, cryptoSetting, Bytes.fromString(keyPrefix), exStorage, | |||
verStorage, readonly, accessPolicy); | |||
} | |||
@Override | |||
public AccountHeader[] getHeaders(int fromIndex, int count) { | |||
return accountSet.getHeaders(fromIndex,count); | |||
public BlockchainIdentity[] getHeaders(int fromIndex, int count) { | |||
return accountSet.getHeaders(fromIndex, count); | |||
} | |||
/** | |||
@@ -49,7 +50,7 @@ public class UserAccountSet implements Transactional, UserAccountQuery { | |||
public boolean isReadonly() { | |||
return accountSet.isReadonly(); | |||
} | |||
void setReadonly() { | |||
accountSet.setReadonly(); | |||
} | |||
@@ -63,7 +64,7 @@ public class UserAccountSet implements Transactional, UserAccountQuery { | |||
public MerkleProof getProof(Bytes key) { | |||
return accountSet.getProof(key); | |||
} | |||
@Override | |||
public UserAccount getAccount(String address) { | |||
return getAccount(Bytes.fromBase58(address)); | |||
@@ -71,7 +72,7 @@ public class UserAccountSet implements Transactional, UserAccountQuery { | |||
@Override | |||
public UserAccount getAccount(Bytes address) { | |||
MerkleAccount baseAccount = accountSet.getAccount(address); | |||
CompositeAccount baseAccount = accountSet.getAccount(address); | |||
return new UserAccount(baseAccount); | |||
} | |||
@@ -82,7 +83,7 @@ public class UserAccountSet implements Transactional, UserAccountQuery { | |||
@Override | |||
public UserAccount getAccount(Bytes address, long version) { | |||
MerkleAccount baseAccount = accountSet.getAccount(address, version); | |||
CompositeAccount baseAccount = accountSet.getAccount(address, version); | |||
return new UserAccount(baseAccount); | |||
} | |||
@@ -93,14 +94,12 @@ public class UserAccountSet implements Transactional, UserAccountQuery { | |||
* | |||
* 如果指定的地址和公钥不匹配,则会引发 {@link LedgerException} 异常; | |||
* | |||
* @param address | |||
* 区块链地址; | |||
* @param pubKey | |||
* 公钥; | |||
* @param address 区块链地址; | |||
* @param pubKey 公钥; | |||
* @return 注册成功的用户对象; | |||
*/ | |||
public UserAccount register(Bytes address, PubKey pubKey) { | |||
MerkleAccount baseAccount = accountSet.register(address, pubKey); | |||
CompositeAccount baseAccount = accountSet.register(address, pubKey); | |||
return new UserAccount(baseAccount); | |||
} | |||
@@ -13,10 +13,10 @@ import com.jd.blockchain.ledger.RolesPolicy; | |||
import com.jd.blockchain.ledger.UserRoles; | |||
import com.jd.blockchain.ledger.UserAuthorizationSettings; | |||
import com.jd.blockchain.storage.service.ExPolicyKVStorage; | |||
import com.jd.blockchain.storage.service.VersioningKVEntry; | |||
import com.jd.blockchain.storage.service.VersioningKVStorage; | |||
import com.jd.blockchain.utils.Bytes; | |||
import com.jd.blockchain.utils.Transactional; | |||
import com.jd.blockchain.utils.DataEntry; | |||
/** | |||
* User-Role authorization data set; | |||
@@ -35,7 +35,7 @@ public class UserRoleDataset implements Transactional, MerkleProvable, UserAutho | |||
public UserRoleDataset(HashDigest merkleRootHash, CryptoSetting cryptoSetting, String prefix, | |||
ExPolicyKVStorage exPolicyStorage, VersioningKVStorage verStorage, boolean readonly) { | |||
dataset = new MerkleDataSet(merkleRootHash, cryptoSetting, prefix, exPolicyStorage, verStorage, readonly); | |||
dataset = new MerkleDataSet(merkleRootHash, cryptoSetting, Bytes.fromString(prefix), exPolicyStorage, verStorage, readonly); | |||
} | |||
@Override | |||
@@ -168,7 +168,7 @@ public class UserRoleDataset implements Transactional, MerkleProvable, UserAutho | |||
@Override | |||
public UserRoles getUserRoles(Bytes userAddress) { | |||
// 只返回最新版本; | |||
VersioningKVEntry kv = dataset.getDataEntry(userAddress); | |||
DataEntry<Bytes, byte[]> kv = dataset.getDataEntry(userAddress); | |||
if (kv == null) { | |||
return null; | |||
} | |||
@@ -178,7 +178,7 @@ public class UserRoleDataset implements Transactional, MerkleProvable, UserAutho | |||
@Override | |||
public UserRoles[] getUserRoles() { | |||
VersioningKVEntry[] kvEntries = dataset.getLatestDataEntries(0, (int) dataset.getDataCount()); | |||
DataEntry<Bytes, byte[]>[] kvEntries = dataset.getLatestDataEntries(0, (int) dataset.getDataCount()); | |||
UserRoles[] pns = new UserRoles[kvEntries.length]; | |||
RoleSet roleset; | |||
for (int i = 0; i < pns.length; i++) { | |||
@@ -5,7 +5,24 @@ import java.util.List; | |||
import com.jd.blockchain.contract.LedgerContext; | |||
import com.jd.blockchain.crypto.HashDigest; | |||
import com.jd.blockchain.ledger.*; | |||
import com.jd.blockchain.ledger.BlockchainIdentity; | |||
import com.jd.blockchain.ledger.BytesValue; | |||
import com.jd.blockchain.ledger.ContractInfo; | |||
import com.jd.blockchain.ledger.DataAccountKVSetOperation; | |||
import com.jd.blockchain.ledger.DataAccountRegisterOperation; | |||
import com.jd.blockchain.ledger.TypedKVEntry; | |||
import com.jd.blockchain.ledger.KVInfoVO; | |||
import com.jd.blockchain.ledger.LedgerAdminInfo; | |||
import com.jd.blockchain.ledger.LedgerBlock; | |||
import com.jd.blockchain.ledger.LedgerInfo; | |||
import com.jd.blockchain.ledger.LedgerMetadata; | |||
import com.jd.blockchain.ledger.LedgerTransaction; | |||
import com.jd.blockchain.ledger.Operation; | |||
import com.jd.blockchain.ledger.ParticipantNode; | |||
import com.jd.blockchain.ledger.TransactionState; | |||
import com.jd.blockchain.ledger.TypedValue; | |||
import com.jd.blockchain.ledger.UserInfo; | |||
import com.jd.blockchain.ledger.UserRegisterOperation; | |||
import com.jd.blockchain.ledger.core.OperationHandleContext; | |||
import com.jd.blockchain.transaction.BlockchainQueryService; | |||
import com.jd.blockchain.transaction.DataAccountKVSetOperationBuilder; | |||
@@ -150,22 +167,22 @@ public class ContractLedgerContext implements LedgerContext { | |||
} | |||
@Override | |||
public AccountHeader getDataAccount(HashDigest ledgerHash, String address) { | |||
public BlockchainIdentity getDataAccount(HashDigest ledgerHash, String address) { | |||
return innerQueryService.getDataAccount(ledgerHash, address); | |||
} | |||
@Override | |||
public KVDataEntry[] getDataEntries(HashDigest ledgerHash, String address, String... keys) { | |||
public TypedKVEntry[] getDataEntries(HashDigest ledgerHash, String address, String... keys) { | |||
return innerQueryService.getDataEntries(ledgerHash, address, keys); | |||
} | |||
@Override | |||
public KVDataEntry[] getDataEntries(HashDigest ledgerHash, String address, KVInfoVO kvInfoVO) { | |||
public TypedKVEntry[] getDataEntries(HashDigest ledgerHash, String address, KVInfoVO kvInfoVO) { | |||
return innerQueryService.getDataEntries(ledgerHash, address, kvInfoVO); | |||
} | |||
@Override | |||
public KVDataEntry[] getDataEntries(HashDigest ledgerHash, String address, int fromIndex, int count) { | |||
public TypedKVEntry[] getDataEntries(HashDigest ledgerHash, String address, int fromIndex, int count) { | |||
return innerQueryService.getDataEntries(ledgerHash, address, fromIndex, count); | |||
} | |||
@@ -182,17 +199,17 @@ public class ContractLedgerContext implements LedgerContext { | |||
// ---------------------------user()---------------------------- | |||
@Override | |||
public AccountHeader[] getUsers(HashDigest ledgerHash, int fromIndex, int count) { | |||
public BlockchainIdentity[] getUsers(HashDigest ledgerHash, int fromIndex, int count) { | |||
return innerQueryService.getUsers(ledgerHash, fromIndex, count); | |||
} | |||
@Override | |||
public AccountHeader[] getDataAccounts(HashDigest ledgerHash, int fromIndex, int count) { | |||
public BlockchainIdentity[] getDataAccounts(HashDigest ledgerHash, int fromIndex, int count) { | |||
return innerQueryService.getDataAccounts(ledgerHash, fromIndex, count); | |||
} | |||
@Override | |||
public AccountHeader[] getContractAccounts(HashDigest ledgerHash, int fromIndex, int count) { | |||
public BlockchainIdentity[] getContractAccounts(HashDigest ledgerHash, int fromIndex, int count) { | |||
return innerQueryService.getContractAccounts(ledgerHash, fromIndex, count); | |||
} | |||
@@ -268,7 +285,7 @@ public class ContractLedgerContext implements LedgerContext { | |||
@Override | |||
public DataAccountKVSetOperationBuilder setText(String key, String value, long expVersion) { | |||
BytesValue bytesValue = BytesData.fromText(value); | |||
BytesValue bytesValue = TypedValue.fromText(value); | |||
this.op = new SingleKVSetOpTemplate(key, bytesValue, expVersion); | |||
handle(op); | |||
return this; | |||
@@ -276,7 +293,7 @@ public class ContractLedgerContext implements LedgerContext { | |||
@Override | |||
public DataAccountKVSetOperationBuilder setBytes(String key, Bytes value, long expVersion) { | |||
BytesValue bytesValue = BytesData.fromBytes(value); | |||
BytesValue bytesValue = TypedValue.fromBytes(value); | |||
this.op = new SingleKVSetOpTemplate(key, bytesValue, expVersion); | |||
handle(op); | |||
return this; | |||
@@ -284,7 +301,7 @@ public class ContractLedgerContext implements LedgerContext { | |||
@Override | |||
public DataAccountKVSetOperationBuilder setInt64(String key, long value, long expVersion) { | |||
BytesValue bytesValue = BytesData.fromInt64(value); | |||
BytesValue bytesValue = TypedValue.fromInt64(value); | |||
this.op = new SingleKVSetOpTemplate(key, bytesValue, expVersion); | |||
handle(op); | |||
return this; | |||
@@ -301,7 +318,7 @@ public class ContractLedgerContext implements LedgerContext { | |||
@Override | |||
public DataAccountKVSetOperationBuilder setJSON(String key, String value, long expVersion) { | |||
BytesValue bytesValue = BytesData.fromJSON(value); | |||
BytesValue bytesValue = TypedValue.fromJSON(value); | |||
this.op = new SingleKVSetOpTemplate(key, bytesValue, expVersion); | |||
handle(op); | |||
return this; | |||
@@ -309,7 +326,7 @@ public class ContractLedgerContext implements LedgerContext { | |||
@Override | |||
public DataAccountKVSetOperationBuilder setXML(String key, String value, long expVersion) { | |||
BytesValue bytesValue = BytesData.fromXML(value); | |||
BytesValue bytesValue = TypedValue.fromXML(value); | |||
this.op = new SingleKVSetOpTemplate(key, bytesValue, expVersion); | |||
handle(op); | |||
return this; | |||
@@ -317,7 +334,7 @@ public class ContractLedgerContext implements LedgerContext { | |||
@Override | |||
public DataAccountKVSetOperationBuilder setBytes(String key, byte[] value, long expVersion) { | |||
BytesValue bytesValue = BytesData.fromBytes(value); | |||
BytesValue bytesValue = TypedValue.fromBytes(value); | |||
this.op = new SingleKVSetOpTemplate(key, bytesValue, expVersion); | |||
handle(op); | |||
return this; | |||
@@ -325,7 +342,7 @@ public class ContractLedgerContext implements LedgerContext { | |||
@Override | |||
public DataAccountKVSetOperationBuilder setImage(String key, byte[] value, long expVersion) { | |||
BytesValue bytesValue = BytesData.fromImage(value); | |||
BytesValue bytesValue = TypedValue.fromImage(value); | |||
this.op = new SingleKVSetOpTemplate(key, bytesValue, expVersion); | |||
handle(op); | |||
return this; | |||
@@ -333,7 +350,7 @@ public class ContractLedgerContext implements LedgerContext { | |||
@Override | |||
public DataAccountKVSetOperationBuilder setTimestamp(String key, long value, long expVersion) { | |||
BytesValue bytesValue = BytesData.fromTimestamp(value); | |||
BytesValue bytesValue = TypedValue.fromTimestamp(value); | |||
this.op = new SingleKVSetOpTemplate(key, bytesValue, expVersion); | |||
handle(op); | |||
return this; | |||
@@ -5,6 +5,7 @@ import com.jd.blockchain.ledger.DataAccountKVSetOperation; | |||
import com.jd.blockchain.ledger.DataAccountKVSetOperation.KVWriteEntry; | |||
import com.jd.blockchain.ledger.DataVersionConflictException; | |||
import com.jd.blockchain.ledger.LedgerPermission; | |||
import com.jd.blockchain.ledger.TypedValue; | |||
import com.jd.blockchain.ledger.core.DataAccount; | |||
import com.jd.blockchain.ledger.core.LedgerDataset; | |||
import com.jd.blockchain.ledger.core.LedgerQuery; | |||
@@ -37,7 +38,7 @@ public class DataAccountKVSetOperationHandle extends AbstractLedgerOperationHand | |||
KVWriteEntry[] writeSet = kvWriteOp.getWriteSet(); | |||
long v = -1L; | |||
for (KVWriteEntry kvw : writeSet) { | |||
v = account.setBytes(Bytes.fromString(kvw.getKey()), kvw.getValue(), kvw.getExpectedVersion()); | |||
v = account.getDataset().setValue(kvw.getKey(), TypedValue.wrap(kvw.getValue()), kvw.getExpectedVersion()); | |||
if (v < 0) { | |||
throw new DataVersionConflictException(); | |||
} | |||
@@ -0,0 +1,153 @@ | |||
package com.jd.blockchain.ledger.core; | |||
import static org.junit.Assert.assertEquals; | |||
import static org.junit.Assert.assertNull; | |||
import java.util.Random; | |||
import org.junit.Test; | |||
import com.jd.blockchain.crypto.Crypto; | |||
import com.jd.blockchain.crypto.HashDigest; | |||
import com.jd.blockchain.crypto.HashFunction; | |||
import com.jd.blockchain.crypto.service.classic.ClassicAlgorithm; | |||
import com.jd.blockchain.ledger.core.MerkleTree.DataNode; | |||
import com.jd.blockchain.utils.Bytes; | |||
public class MerkleDataNodeEncoderTest { | |||
@Test | |||
public void testEnocoderV0() { | |||
Random rand = new Random(); | |||
byte[] data = new byte[512]; | |||
byte[] keyBytes = new byte[256]; | |||
rand.nextBytes(data); | |||
rand.nextBytes(keyBytes); | |||
Bytes key = new Bytes(keyBytes); | |||
long sn = 1024; | |||
long version = 1; | |||
doTestV0(sn, version, key, data); | |||
sn = 0; | |||
version = 1000; | |||
doTestV0(sn, version, key, data); | |||
sn = (1 << 56) -1; | |||
version = 1000; | |||
doTestV0(sn, version, key, data); | |||
} | |||
private void doTestV0(long sn, long version, Bytes key, byte[] data) { | |||
MerkleDataNodeEncoder encoderV0 = new MerkleDataNodeEncoder_V0(); | |||
DataNode nodeV0 = encoderV0.create(ClassicAlgorithm.SHA256.code(), sn, key, version, data); | |||
assertNull(nodeV0.getValueHash()); | |||
assertEquals(sn, nodeV0.getSN()); | |||
assertEquals(version, nodeV0.getVersion()); | |||
assertEquals(key, nodeV0.getKey()); | |||
byte[] nodeBytes = nodeV0.toBytes(); | |||
DataNode nodeV0_reversed = encoderV0.resolve(nodeBytes); | |||
assertNull(nodeV0_reversed.getValueHash()); | |||
assertEquals(nodeV0.getNodeHash(), nodeV0_reversed.getNodeHash()); | |||
assertEquals(encoderV0.getFormatVersion(), nodeBytes[0]); | |||
assertEquals(sn, nodeV0_reversed.getSN()); | |||
assertEquals(version, nodeV0_reversed.getVersion()); | |||
assertEquals(key, nodeV0_reversed.getKey()); | |||
} | |||
@Test | |||
public void testEnocoderV1() { | |||
Random rand = new Random(); | |||
byte[] data = new byte[512]; | |||
byte[] keyBytes = new byte[256]; | |||
rand.nextBytes(data); | |||
rand.nextBytes(keyBytes); | |||
Bytes key = new Bytes(keyBytes); | |||
long sn = 1024; | |||
long version = 1; | |||
doTestV1(sn, version, key, data); | |||
sn = 0; | |||
version = 10088; | |||
doTestV1(sn, version, key, data); | |||
sn = (1 << 56) -1; | |||
version = 1000; | |||
doTestV1(sn, version, key, data); | |||
} | |||
private void doTestV1(long sn, long version, Bytes key, byte[] data) { | |||
HashFunction hashFunc = Crypto.getHashFunction(ClassicAlgorithm.SHA256); | |||
HashDigest dataHash = hashFunc.hash(data); | |||
MerkleDataNodeEncoder encoderV1 = new MerkleDataNodeEncoder_V1(); | |||
DataNode node = encoderV1.create(ClassicAlgorithm.SHA256.code(), sn, key, version, data); | |||
assertEquals(dataHash, node.getValueHash()); | |||
assertEquals(sn, node.getSN()); | |||
assertEquals(version, node.getVersion()); | |||
assertEquals(key, node.getKey()); | |||
byte[] nodeBytes = node.toBytes(); | |||
DataNode node_reversed = encoderV1.resolve(nodeBytes); | |||
assertEquals(dataHash, node_reversed.getValueHash()); | |||
assertEquals(node.getNodeHash(), node_reversed.getNodeHash()); | |||
assertEquals(encoderV1.getFormatVersion(), nodeBytes[0]); | |||
assertEquals(sn, node_reversed.getSN()); | |||
assertEquals(version, node_reversed.getVersion()); | |||
assertEquals(key, node_reversed.getKey()); | |||
} | |||
@Test | |||
public void testCompatibility() { | |||
Random rand = new Random(); | |||
byte[] data = new byte[512]; | |||
byte[] keyBytes = new byte[256]; | |||
rand.nextBytes(data); | |||
rand.nextBytes(keyBytes); | |||
Bytes key = new Bytes(keyBytes); | |||
long sn = 1024; | |||
long version = 1; | |||
PreviousDataNode pdataNode = PreviousDataNode.newDataNode(ClassicAlgorithm.SHA256.code(), sn, key, version, | |||
data); | |||
MerkleDataNodeEncoder encoderV0 = new MerkleDataNodeEncoder_V0(); | |||
DataNode dataNode = encoderV0.create(ClassicAlgorithm.SHA256.code(), sn, key, version, data); | |||
assertEquals(pdataNode.getNodeHash(), dataNode.getNodeHash()); | |||
assertEquals(pdataNode.getSN(), dataNode.getSN()); | |||
assertEquals(pdataNode.getVersion(), dataNode.getVersion()); | |||
assertEquals(pdataNode.getKey(), dataNode.getKey()); | |||
DataNode dataNode_reversed = encoderV0.resolve(pdataNode.toBytes()); | |||
assertNull(dataNode_reversed.getValueHash()); | |||
assertEquals(pdataNode.getNodeHash(), dataNode_reversed.getNodeHash()); | |||
assertEquals(pdataNode.getSN(), dataNode_reversed.getSN()); | |||
assertEquals(pdataNode.getVersion(), dataNode_reversed.getVersion()); | |||
assertEquals(pdataNode.getKey(), dataNode_reversed.getKey()); | |||
} | |||
} |
@@ -0,0 +1,191 @@ | |||
package com.jd.blockchain.ledger.core; | |||
import com.jd.blockchain.crypto.Crypto; | |||
import com.jd.blockchain.crypto.CryptoAlgorithm; | |||
import com.jd.blockchain.crypto.HashDigest; | |||
import com.jd.blockchain.crypto.HashFunction; | |||
import com.jd.blockchain.utils.Bytes; | |||
import com.jd.blockchain.utils.io.BytesUtils; | |||
import com.jd.blockchain.utils.io.NumberMask; | |||
/** | |||
* A copy of previous version of com.jd.blockchain.ledger.core.MerkleTree.DataNode; | |||
* | |||
* @author huanghaiquan | |||
* | |||
*/ | |||
public class PreviousDataNode { | |||
private HashDigest nodeHash; | |||
private long sn; | |||
private Bytes key; | |||
private long version; | |||
private byte[] dataNodeBytes; | |||
private PreviousDataNode(long sn, Bytes key, long version, HashDigest dataHash, byte[] dataBytes) { | |||
this.sn = sn; | |||
this.key = key; | |||
this.version = version; | |||
this.nodeHash = dataHash; | |||
this.dataNodeBytes = dataBytes; | |||
} | |||
static PreviousDataNode newDataNode(CryptoAlgorithm hashAlgorithm, long sn, Bytes key, long version, | |||
byte[] hashedData) { | |||
return newDataNode(hashAlgorithm.code(), sn, key, version, hashedData); | |||
} | |||
static PreviousDataNode newDataNode(short hashAlgorithm, long sn, Bytes key, long version, byte[] hashedData) { | |||
// byte[] keyStrBytes = BytesUtils.toBytes(key); | |||
// int maskSize = NumberMask.SHORT.getMaskLength(keyStrBytes.length); | |||
int keySize = key.size(); | |||
int maskSize = NumberMask.SHORT.getMaskLength(keySize); | |||
// int bodySize = 8 + maskSize + keyStrBytes.length + 8;// sn + key + version; | |||
int bodySize = 8 + maskSize + keySize + 8;// sn + key + version; | |||
byte[] bodyBytes = new byte[bodySize]; | |||
int offset = 0; | |||
offset += BytesUtils.toBytes(sn, bodyBytes, 0); | |||
// NumberMask.SHORT.writeMask(keyStrBytes.length, bodyBytes, offset); | |||
NumberMask.SHORT.writeMask(keySize, bodyBytes, offset); | |||
offset += maskSize; | |||
// System.arraycopy(keyStrBytes, 0, bodyBytes, offset, keyStrBytes.length); | |||
// System.arraycopy(keyStrBytes, 0, bodyBytes, offset, keyStrBytes.length); | |||
// offset += keyStrBytes.length; | |||
offset += key.copyTo(bodyBytes, offset, keySize); | |||
// TODO: version; | |||
offset += BytesUtils.toBytes(version, bodyBytes, offset); | |||
byte[] dataBytes = BytesUtils.concat(bodyBytes, hashedData); | |||
HashFunction hashFunc = Crypto.getHashFunction(hashAlgorithm); | |||
HashDigest dataHash = hashFunc.hash(dataBytes); | |||
int hashMaskSize = NumberMask.TINY.getMaskLength(dataHash.size()); | |||
int dataNodeSize = bodySize + hashMaskSize + dataHash.size(); | |||
byte[] dataNodeBytes = new byte[dataNodeSize]; | |||
offset = 0; | |||
System.arraycopy(bodyBytes, 0, dataNodeBytes, offset, bodySize); | |||
offset += bodySize; | |||
NumberMask.TINY.writeMask(dataHash.size(), dataNodeBytes, offset); | |||
offset += hashMaskSize; | |||
System.arraycopy(dataHash.toBytes(), 0, dataNodeBytes, offset, dataHash.size()); | |||
return new PreviousDataNode(sn, key, version, dataHash, dataNodeBytes); | |||
} | |||
public HashDigest getNodeHash() { | |||
return nodeHash; | |||
} | |||
protected long getStartingSN() { | |||
return sn; | |||
} | |||
protected long getDataCount() { | |||
return 1; | |||
} | |||
/* | |||
* (non-Javadoc) | |||
* | |||
* @see com.jd.blockchain.ledger.core.MerkleDataNode#getLevel() | |||
*/ | |||
public int getLevel() { | |||
return 0; | |||
} | |||
/* | |||
* (non-Javadoc) | |||
* | |||
* @see com.jd.blockchain.ledger.core.MerkleDataNode#getSN() | |||
*/ | |||
public long getSN() { | |||
return sn; | |||
} | |||
/* | |||
* (non-Javadoc) | |||
* | |||
* @see com.jd.blockchain.ledger.core.MerkleDataNode#getKey() | |||
*/ | |||
public Bytes getKey() { | |||
return key; | |||
} | |||
/* | |||
* (non-Javadoc) | |||
* | |||
* @see com.jd.blockchain.ledger.core.MerkleDataNode#getVersion() | |||
*/ | |||
public long getVersion() { | |||
return version; | |||
} | |||
public byte[] toBytes() { | |||
return dataNodeBytes; | |||
} | |||
static PreviousDataNode parse(byte[] bytes) { | |||
// InputStream in = new ByteArrayInputStream(bytes); | |||
int offset = 0; | |||
long sn = BytesUtils.toLong(bytes, offset); | |||
offset += 8; | |||
// byte[] keyBytes = BytesEncoding.read(NumberMask.SHORT, in); | |||
// String key = BytesUtils.toString(keyBytes); | |||
int keySize = NumberMask.SHORT.resolveMaskedNumber(bytes, offset); | |||
offset += NumberMask.SHORT.getMaskLength(keySize); | |||
byte[] keyBytes = new byte[keySize]; | |||
System.arraycopy(bytes, offset, keyBytes, 0, keySize); | |||
offset += keySize; | |||
// String key = BytesUtils.toString(keyBytes); | |||
Bytes key = new Bytes(keyBytes); | |||
// long version = BytesUtils.readLong(in); | |||
long version = BytesUtils.toLong(bytes, offset); | |||
offset += 8; | |||
// byte[] dataHashBytes = BytesEncoding.read(NumberMask.SHORT, in); | |||
int hashSize = NumberMask.TINY.resolveMaskedNumber(bytes, offset); | |||
offset += NumberMask.TINY.getMaskLength(hashSize); | |||
byte[] dataHashBytes = new byte[hashSize]; | |||
System.arraycopy(bytes, offset, dataHashBytes, 0, hashSize); | |||
offset += hashSize; | |||
HashDigest dataHash = new HashDigest(dataHashBytes); | |||
return new PreviousDataNode(sn, key, version, dataHash, bytes); | |||
} | |||
@Override | |||
public int hashCode() { | |||
return nodeHash.hashCode(); | |||
} | |||
@Override | |||
public boolean equals(Object obj) { | |||
if (obj == null) { | |||
return false; | |||
} | |||
if (obj == this) { | |||
return true; | |||
} | |||
if (obj instanceof PreviousDataNode) { | |||
PreviousDataNode node1 = (PreviousDataNode) obj; | |||
return this.nodeHash.equals(node1.nodeHash); | |||
} | |||
return false; | |||
} | |||
} |
@@ -3,7 +3,7 @@ package test.com.jd.blockchain.ledger; | |||
import com.jd.blockchain.contract.ContractEventContext; | |||
import com.jd.blockchain.contract.ContractLifecycleAware; | |||
import com.jd.blockchain.contract.EventProcessingAware; | |||
import com.jd.blockchain.ledger.KVDataEntry; | |||
import com.jd.blockchain.ledger.TypedKVEntry; | |||
import com.jd.blockchain.utils.Bytes; | |||
public class TxTestContractImpl implements TxTestContract, ContractLifecycleAware, EventProcessingAware { | |||
@@ -16,7 +16,7 @@ public class TxTestContractImpl implements TxTestContract, ContractLifecycleAwar | |||
@Override | |||
public boolean testReadable() { | |||
KVDataEntry v1 = eventContext.getLedger().getDataEntries(eventContext.getCurrentLedgerHash(), | |||
TypedKVEntry v1 = eventContext.getLedger().getDataEntries(eventContext.getCurrentLedgerHash(), | |||
dataAddress.toBase58(), KEY)[0]; | |||
String text1 = (String) v1.getValue(); | |||
System.out.printf("k1=%s, version=%s \r\n", text1, v1.getVersion()); | |||
@@ -26,7 +26,7 @@ public class TxTestContractImpl implements TxTestContract, ContractLifecycleAwar | |||
System.out.printf("new value = %s\r\n", newValue); | |||
eventContext.getLedger().dataAccount(dataAddress).setText(KEY, newValue, v1.getVersion()); | |||
KVDataEntry v2 = eventContext.getLedger().getDataEntries(eventContext.getCurrentLedgerHash(), | |||
TypedKVEntry v2 = eventContext.getLedger().getDataEntries(eventContext.getCurrentLedgerHash(), | |||
dataAddress.toBase58(), KEY)[0]; | |||
System.out.printf("---- read new value ----\r\nk1=%s, version=%s \r\n", v2.getValue(), v2.getVersion()); | |||
@@ -14,11 +14,12 @@ import com.jd.blockchain.crypto.service.classic.ClassicCryptoService; | |||
import com.jd.blockchain.crypto.service.sm.SMCryptoService; | |||
import com.jd.blockchain.ledger.BlockchainKeyGenerator; | |||
import com.jd.blockchain.ledger.BlockchainKeypair; | |||
import com.jd.blockchain.ledger.core.MerkleAccountSet; | |||
import com.jd.blockchain.ledger.core.MerkleAccount; | |||
import com.jd.blockchain.ledger.core.CompositeAccount; | |||
import com.jd.blockchain.ledger.core.CryptoConfig; | |||
import com.jd.blockchain.ledger.core.MerkleAccountSet; | |||
import com.jd.blockchain.ledger.core.OpeningAccessPolicy; | |||
import com.jd.blockchain.storage.service.utils.MemoryKVStorage; | |||
import com.jd.blockchain.utils.Bytes; | |||
public class AccountSetTest { | |||
@@ -43,12 +44,13 @@ public class AccountSetTest { | |||
cryptoConf.setHashAlgorithm(ClassicAlgorithm.SHA256); | |||
String keyPrefix = ""; | |||
MerkleAccountSet accset = new MerkleAccountSet(cryptoConf, keyPrefix, storage, storage, accessPolicy); | |||
MerkleAccountSet accset = new MerkleAccountSet(cryptoConf, Bytes.fromString(keyPrefix), storage, storage, accessPolicy); | |||
BlockchainKeypair userKey = BlockchainKeyGenerator.getInstance().generate(); | |||
accset.register(userKey.getAddress(), userKey.getPubKey()); | |||
MerkleAccount userAcc = accset.getAccount(userKey.getAddress()); | |||
//尚未提交之前,可以检索到账户的存在,但版本仍然标记为 -1; | |||
CompositeAccount userAcc = accset.getAccount(userKey.getAddress()); | |||
assertNotNull(userAcc); | |||
assertTrue(accset.contains(userKey.getAddress())); | |||
@@ -56,13 +58,13 @@ public class AccountSetTest { | |||
HashDigest rootHash = accset.getRootHash(); | |||
assertNotNull(rootHash); | |||
MerkleAccountSet reloadAccSet = new MerkleAccountSet(rootHash, cryptoConf, keyPrefix, storage, storage, true, accessPolicy); | |||
MerkleAccount reloadUserAcc = reloadAccSet.getAccount(userKey.getAddress()); | |||
MerkleAccountSet reloadAccSet = new MerkleAccountSet(rootHash, cryptoConf, Bytes.fromString(keyPrefix), storage, storage, true, accessPolicy); | |||
CompositeAccount reloadUserAcc = reloadAccSet.getAccount(userKey.getAddress()); | |||
assertNotNull(reloadUserAcc); | |||
assertTrue(reloadAccSet.contains(userKey.getAddress())); | |||
assertEquals(userAcc.getAddress(), reloadUserAcc.getAddress()); | |||
assertEquals(userAcc.getPubKey(), reloadUserAcc.getPubKey()); | |||
assertEquals(userAcc.getID().getAddress(), reloadUserAcc.getID().getAddress()); | |||
assertEquals(userAcc.getID().getPubKey(), reloadUserAcc.getID().getPubKey()); | |||
} | |||
} |
@@ -2,6 +2,7 @@ package test.com.jd.blockchain.ledger.core; | |||
import static org.junit.Assert.assertEquals; | |||
import static org.junit.Assert.assertFalse; | |||
import static org.junit.Assert.assertTrue; | |||
import org.junit.Test; | |||
@@ -12,9 +13,9 @@ import com.jd.blockchain.crypto.service.classic.ClassicCryptoService; | |||
import com.jd.blockchain.crypto.service.sm.SMCryptoService; | |||
import com.jd.blockchain.ledger.BlockchainKeyGenerator; | |||
import com.jd.blockchain.ledger.BlockchainKeypair; | |||
import com.jd.blockchain.ledger.BytesData; | |||
import com.jd.blockchain.ledger.core.MerkleAccount; | |||
import com.jd.blockchain.ledger.TypedValue; | |||
import com.jd.blockchain.ledger.core.CryptoConfig; | |||
import com.jd.blockchain.ledger.core.MerkleAccount; | |||
import com.jd.blockchain.storage.service.utils.MemoryKVStorage; | |||
import com.jd.blockchain.utils.Bytes; | |||
@@ -48,38 +49,39 @@ public class BaseAccountTest { | |||
BlockchainKeypair bck = BlockchainKeyGenerator.getInstance().generate(); | |||
// 新建账户; | |||
MerkleAccount baseAccount = new MerkleAccount(bck.getIdentity(), cryptoConf, keyPrefix, testStorage, testStorage); | |||
assertFalse(baseAccount.isUpdated());// 空的账户; | |||
MerkleAccount baseAccount = new MerkleAccount(bck.getIdentity(), cryptoConf, Bytes.fromString(keyPrefix), | |||
testStorage, testStorage); | |||
assertTrue(baseAccount.isUpdated());//初始化新账户时,先写入PubKey; | |||
assertFalse(baseAccount.isReadonly()); | |||
// 在空白状态下写入数据; | |||
long v = baseAccount.setBytes(Bytes.fromString("A"), BytesData.fromText("VALUE_A"), 0); | |||
long v = baseAccount.getDataset().setValue("A", TypedValue.fromText("VALUE_A"), 0); | |||
// 预期失败; | |||
assertEquals(-1, v); | |||
v = baseAccount.setBytes(Bytes.fromString("A"), BytesData.fromText("VALUE_A"), 1); | |||
v = baseAccount.getDataset().setValue("A", TypedValue.fromText("VALUE_A"), 1); | |||
// 预期失败; | |||
assertEquals(-1, v); | |||
v = baseAccount.setBytes(Bytes.fromString("A"), BytesData.fromText("VALUE_A"), -1); | |||
v = baseAccount.getDataset().setValue("A", TypedValue.fromText("VALUE_A"), -1); | |||
// 预期成功; | |||
assertEquals(0, v); | |||
v = baseAccount.setBytes(Bytes.fromString("A"), BytesData.fromText("VALUE_A-1"), -1); | |||
v = baseAccount.getDataset().setValue("A", TypedValue.fromText("VALUE_A-1"), -1); | |||
// 已经存在版本,指定版本号-1,预期导致失败; | |||
assertEquals(-1, v); | |||
baseAccount.commit(); | |||
v = 0; | |||
for (int i = 0; i < 10; i++) { | |||
long s = baseAccount.setBytes(Bytes.fromString("A"), BytesData.fromText("VALUE_A_" + i), v); | |||
long s = baseAccount.getDataset().setValue("A", TypedValue.fromText("VALUE_A_" + i), v); | |||
baseAccount.commit(); | |||
// 预期成功; | |||
assertEquals(v + 1, s); | |||
v++; | |||
} | |||
v = baseAccount.setBytes(Bytes.fromString("A"), BytesData.fromText("VALUE_A_" + v), v + 1); | |||
v = baseAccount.getDataset().setValue("A", TypedValue.fromText("VALUE_A_" + v), v + 1); | |||
// 预期成功; | |||
assertEquals(-1, v); | |||
@@ -0,0 +1,188 @@ | |||
package test.com.jd.blockchain.ledger.core; | |||
import com.jd.blockchain.binaryproto.DataContractRegistry; | |||
import com.jd.blockchain.crypto.HashDigest; | |||
import com.jd.blockchain.ledger.*; | |||
import com.jd.blockchain.ledger.core.*; | |||
import com.jd.blockchain.storage.service.utils.MemoryKVStorage; | |||
import org.junit.Test; | |||
import org.mockito.Mockito; | |||
import static org.junit.Assert.*; | |||
import static org.junit.Assert.assertFalse; | |||
import static org.junit.Assert.assertTrue; | |||
import static org.mockito.Matchers.any; | |||
import static org.mockito.Matchers.anyLong; | |||
import static org.mockito.Mockito.doCallRealMethod; | |||
import static org.mockito.Mockito.doThrow; | |||
import static org.mockito.Mockito.when; | |||
public class BlockFullRollBackTest { | |||
static { | |||
DataContractRegistry.register(TransactionContent.class); | |||
DataContractRegistry.register(TransactionContentBody.class); | |||
DataContractRegistry.register(TransactionRequest.class); | |||
DataContractRegistry.register(NodeRequest.class); | |||
DataContractRegistry.register(EndpointRequest.class); | |||
DataContractRegistry.register(TransactionResponse.class); | |||
DataContractRegistry.register(UserRegisterOperation.class); | |||
DataContractRegistry.register(DataAccountRegisterOperation.class); | |||
} | |||
private static final String LEDGER_KEY_PREFIX = "LDG://"; | |||
private HashDigest ledgerHash = null; | |||
private BlockchainKeypair parti0 = BlockchainKeyGenerator.getInstance().generate(); | |||
private BlockchainKeypair parti1 = BlockchainKeyGenerator.getInstance().generate(); | |||
private BlockchainKeypair parti2 = BlockchainKeyGenerator.getInstance().generate(); | |||
private BlockchainKeypair parti3 = BlockchainKeyGenerator.getInstance().generate(); | |||
private BlockchainKeypair[] participants = { parti0, parti1, parti2, parti3 }; | |||
@Test | |||
public void testBlockFullkRollBack() { | |||
final MemoryKVStorage STORAGE = new MemoryKVStorage(); | |||
final MemoryKVStorage STORAGE_Mock = Mockito.spy(STORAGE); | |||
// 初始化账本到指定的存储库; | |||
ledgerHash = initLedger(STORAGE_Mock, parti0, parti1, parti2, parti3); | |||
System.out.println("---------- Ledger init OK !!! ----------"); | |||
// 加载账本; | |||
LedgerManager ledgerManager = new LedgerManager(); | |||
LedgerRepository ledgerRepo = ledgerManager.register(ledgerHash, STORAGE_Mock); | |||
// 构造存储错误,并产生区块回滚 | |||
doThrow(BlockRollbackException.class).when(STORAGE_Mock).set(any(), any(), anyLong()); | |||
LedgerEditor newBlockEditor = ledgerRepo.createNextBlock(); | |||
OperationHandleRegisteration opReg = new DefaultOperationHandleRegisteration(); | |||
LedgerSecurityManager securityManager = getSecurityManager(); | |||
TransactionBatchProcessor txbatchProcessor = new TransactionBatchProcessor(securityManager, newBlockEditor, | |||
ledgerRepo, opReg); | |||
// 注册新用户; | |||
BlockchainKeypair userKeypair = BlockchainKeyGenerator.getInstance().generate(); | |||
TransactionRequest transactionRequest = LedgerTestUtils.createTxRequest_UserReg(userKeypair, ledgerHash, | |||
parti0, parti0); | |||
TransactionResponse txResp = txbatchProcessor.schedule(transactionRequest); | |||
LedgerBlock newBlock = newBlockEditor.prepare(); | |||
try { | |||
newBlockEditor.commit(); | |||
} catch (BlockRollbackException e) { | |||
newBlockEditor.cancel(); | |||
} | |||
// 验证正确性; | |||
ledgerManager = new LedgerManager(); | |||
ledgerRepo = ledgerManager.register(ledgerHash, STORAGE_Mock); | |||
LedgerBlock latestBlock = ledgerRepo.getLatestBlock(); | |||
assertEquals(ledgerRepo.getBlockHash(0), latestBlock.getHash()); | |||
assertEquals(0, latestBlock.getHeight()); | |||
LedgerDataQuery ledgerDS = ledgerRepo.getLedgerData(latestBlock); | |||
boolean existUser = ledgerDS.getUserAccountSet().contains(userKeypair.getAddress()); | |||
assertFalse(existUser); | |||
doCallRealMethod().when(STORAGE_Mock).set(any(), any(), anyLong()); | |||
//区块正常提交 | |||
// 生成新区块; | |||
LedgerEditor newBlockEditor1 = ledgerRepo.createNextBlock(); | |||
OperationHandleRegisteration opReg1 = new DefaultOperationHandleRegisteration(); | |||
LedgerSecurityManager securityManager1 = getSecurityManager(); | |||
TransactionBatchProcessor txbatchProcessor1 = new TransactionBatchProcessor(securityManager1, newBlockEditor1, | |||
ledgerRepo, opReg1); | |||
// 注册新用户; | |||
BlockchainKeypair userKeypair1 = BlockchainKeyGenerator.getInstance().generate(); | |||
TransactionRequest transactionRequest1 = LedgerTestUtils.createTxRequest_UserReg(userKeypair1, ledgerHash, | |||
parti0, parti0); | |||
TransactionResponse txResp1 = txbatchProcessor1.schedule(transactionRequest1); | |||
LedgerBlock newBlock1 = newBlockEditor1.prepare(); | |||
try { | |||
newBlockEditor1.commit(); | |||
} catch (BlockRollbackException e) { | |||
newBlockEditor1.cancel(); | |||
} | |||
ledgerManager = new LedgerManager(); | |||
ledgerRepo = ledgerManager.register(ledgerHash, STORAGE_Mock); | |||
LedgerBlock latestBlock1 = ledgerRepo.getLatestBlock(); | |||
assertEquals(newBlock1.getHash(), latestBlock1.getHash()); | |||
assertEquals(1, latestBlock1.getHeight()); | |||
LedgerDataQuery ledgerDS1 = ledgerRepo.getLedgerData(latestBlock1); | |||
boolean existUser1 = ledgerDS1.getUserAccountSet().contains(userKeypair1.getAddress()); | |||
assertTrue(existUser1); | |||
} | |||
private static LedgerSecurityManager getSecurityManager() { | |||
LedgerSecurityManager securityManager = Mockito.mock(LedgerSecurityManager.class); | |||
SecurityPolicy securityPolicy = Mockito.mock(SecurityPolicy.class); | |||
when(securityPolicy.isEndpointEnable(any(LedgerPermission.class), any())).thenReturn(true); | |||
when(securityPolicy.isEndpointEnable(any(TransactionPermission.class), any())).thenReturn(true); | |||
when(securityPolicy.isNodeEnable(any(LedgerPermission.class), any())).thenReturn(true); | |||
when(securityPolicy.isNodeEnable(any(TransactionPermission.class), any())).thenReturn(true); | |||
when(securityManager.createSecurityPolicy(any(), any())).thenReturn(securityPolicy); | |||
return securityManager; | |||
} | |||
private HashDigest initLedger(MemoryKVStorage storage, BlockchainKeypair... partiKeys) { | |||
// 创建初始化配置; | |||
LedgerInitSetting initSetting = LedgerTestUtils.createLedgerInitSetting(partiKeys); | |||
// 创建账本; | |||
LedgerEditor ldgEdt = LedgerTransactionalEditor.createEditor(initSetting, LEDGER_KEY_PREFIX, storage, storage); | |||
TransactionRequest genesisTxReq = LedgerTestUtils.createLedgerInitTxRequest(partiKeys); | |||
LedgerTransactionContext genisisTxCtx = ldgEdt.newTransaction(genesisTxReq); | |||
LedgerDataset ldgDS = genisisTxCtx.getDataset(); | |||
for (int i = 0; i < partiKeys.length; i++) { | |||
UserAccount userAccount = ldgDS.getUserAccountSet().register(partiKeys[i].getAddress(), | |||
partiKeys[i].getPubKey()); | |||
userAccount.setProperty("Name", "参与方-" + i, -1); | |||
userAccount.setProperty("Share", "" + (10 + i), -1); | |||
} | |||
LedgerTransaction tx = genisisTxCtx.commit(TransactionState.SUCCESS); | |||
assertEquals(genesisTxReq.getTransactionContent().getHash(), tx.getTransactionContent().getHash()); | |||
assertEquals(0, tx.getBlockHeight()); | |||
LedgerBlock block = ldgEdt.prepare(); | |||
assertEquals(0, block.getHeight()); | |||
assertNotNull(block.getHash()); | |||
assertNull(block.getPreviousHash()); | |||
// 创世区块的账本哈希为 null; | |||
assertNull(block.getLedgerHash()); | |||
assertNotNull(block.getHash()); | |||
// 提交数据,写入存储; | |||
ldgEdt.commit(); | |||
HashDigest ledgerHash = block.getHash(); | |||
return ledgerHash; | |||
} | |||
} |
@@ -16,14 +16,35 @@ import static org.mockito.Mockito.when; | |||
import java.io.InputStream; | |||
import java.util.Random; | |||
import com.jd.blockchain.utils.io.BytesUtils; | |||
import com.jd.blockchain.ledger.*; | |||
import org.junit.Test; | |||
import org.mockito.Mockito; | |||
import com.jd.blockchain.binaryproto.BinaryProtocol; | |||
import com.jd.blockchain.binaryproto.DataContractRegistry; | |||
import com.jd.blockchain.crypto.HashDigest; | |||
import com.jd.blockchain.ledger.BlockchainKeyGenerator; | |||
import com.jd.blockchain.ledger.BlockchainKeypair; | |||
import com.jd.blockchain.ledger.BytesValue; | |||
import com.jd.blockchain.ledger.DataAccountRegisterOperation; | |||
import com.jd.blockchain.ledger.EndpointRequest; | |||
import com.jd.blockchain.ledger.LedgerBlock; | |||
import com.jd.blockchain.ledger.LedgerInitSetting; | |||
import com.jd.blockchain.ledger.LedgerPermission; | |||
import com.jd.blockchain.ledger.LedgerTransaction; | |||
import com.jd.blockchain.ledger.NodeRequest; | |||
import com.jd.blockchain.ledger.OperationResult; | |||
import com.jd.blockchain.ledger.ParticipantNode; | |||
import com.jd.blockchain.ledger.ParticipantRegisterOperation; | |||
import com.jd.blockchain.ledger.ParticipantStateUpdateOperation; | |||
import com.jd.blockchain.ledger.TransactionContent; | |||
import com.jd.blockchain.ledger.TransactionContentBody; | |||
import com.jd.blockchain.ledger.TransactionPermission; | |||
import com.jd.blockchain.ledger.TransactionRequest; | |||
import com.jd.blockchain.ledger.TransactionRequestBuilder; | |||
import com.jd.blockchain.ledger.TransactionResponse; | |||
import com.jd.blockchain.ledger.TransactionState; | |||
import com.jd.blockchain.ledger.TypedValue; | |||
import com.jd.blockchain.ledger.UserRegisterOperation; | |||
import com.jd.blockchain.ledger.core.DefaultOperationHandleRegisteration; | |||
import com.jd.blockchain.ledger.core.LedgerDataQuery; | |||
import com.jd.blockchain.ledger.core.LedgerDataset; | |||
@@ -43,6 +64,8 @@ import com.jd.blockchain.storage.service.utils.MemoryKVStorage; | |||
import com.jd.blockchain.transaction.BooleanValueHolder; | |||
import com.jd.blockchain.transaction.TxBuilder; | |||
import com.jd.blockchain.utils.Bytes; | |||
import com.jd.blockchain.utils.DataEntry; | |||
import com.jd.blockchain.utils.io.BytesUtils; | |||
import test.com.jd.blockchain.ledger.TxTestContract; | |||
import test.com.jd.blockchain.ledger.TxTestContractImpl; | |||
@@ -132,7 +155,7 @@ public class ContractInvokingTest { | |||
assertEquals(1, opResults.length); | |||
assertEquals(0, opResults[0].getIndex()); | |||
byte[] expectedRetnBytes = BinaryProtocol.encode(BytesData.fromInt64(issueAmount), BytesValue.class); | |||
byte[] expectedRetnBytes = BinaryProtocol.encode(TypedValue.fromInt64(issueAmount), BytesValue.class); | |||
byte[] reallyRetnBytes = BinaryProtocol.encode(opResults[0].getResult(), BytesValue.class); | |||
assertArrayEquals(expectedRetnBytes, reallyRetnBytes); | |||
@@ -218,9 +241,9 @@ public class ContractInvokingTest { | |||
TransactionBatchResultHandle txResultHandle = txbatchProcessor.prepare(); | |||
txResultHandle.commit(); | |||
BytesValue latestValue = ledgerRepo.getDataAccountSet().getAccount(kpDataAccount.getAddress()).getBytes(key, | |||
BytesValue latestValue = ledgerRepo.getDataAccountSet().getAccount(kpDataAccount.getAddress()).getDataset().getValue(key, | |||
-1); | |||
System.out.printf("latest value=[%s] %s \r\n", latestValue.getType(), latestValue.getValue().toUTF8String()); | |||
System.out.printf("latest value=[%s] %s \r\n", latestValue.getType(), latestValue.getBytes().toUTF8String()); | |||
boolean readable = readableHolder.get(); | |||
assertTrue(readable); | |||
@@ -278,14 +301,14 @@ public class ContractInvokingTest { | |||
} | |||
}); | |||
// 预期数据都能够正常写入; | |||
KVDataEntry kv1 = ledgerRepo.getDataAccountSet().getAccount(kpDataAccount.getAddress()).getDataEntry("K1", | |||
DataEntry<String, TypedValue> kv1 = ledgerRepo.getDataAccountSet().getAccount(kpDataAccount.getAddress()).getDataset().getDataEntry("K1", | |||
0); | |||
KVDataEntry kv2 = ledgerRepo.getDataAccountSet().getAccount(kpDataAccount.getAddress()).getDataEntry("K2", | |||
DataEntry<String, TypedValue> kv2 = ledgerRepo.getDataAccountSet().getAccount(kpDataAccount.getAddress()).getDataset().getDataEntry("K2", | |||
0); | |||
assertEquals(0, kv1.getVersion()); | |||
assertEquals(0, kv2.getVersion()); | |||
assertEquals("V1-0", kv1.getValue()); | |||
assertEquals("V2-0", kv2.getValue()); | |||
assertEquals("V1-0", kv1.getValue().stringValue()); | |||
assertEquals("V2-0", kv2.getValue().stringValue()); | |||
// 构建基于接口调用合约的交易请求,用于测试合约调用; | |||
buildBlock(ledgerRepo, ledgerManager, opReg, new TxDefinitor() { | |||
@@ -299,12 +322,12 @@ public class ContractInvokingTest { | |||
} | |||
}); | |||
// 预期数据都能够正常写入; | |||
kv1 = ledgerRepo.getDataAccountSet().getAccount(kpDataAccount.getAddress()).getDataEntry("K1", 1); | |||
kv2 = ledgerRepo.getDataAccountSet().getAccount(kpDataAccount.getAddress()).getDataEntry("K2", 1); | |||
kv1 = ledgerRepo.getDataAccountSet().getAccount(kpDataAccount.getAddress()).getDataset().getDataEntry("K1", 1); | |||
kv2 = ledgerRepo.getDataAccountSet().getAccount(kpDataAccount.getAddress()).getDataset().getDataEntry("K2", 1); | |||
assertEquals(1, kv1.getVersion()); | |||
assertEquals(1, kv2.getVersion()); | |||
assertEquals("V1-1", kv1.getValue()); | |||
assertEquals("V2-1", kv2.getValue()); | |||
assertEquals("V1-1", kv1.getValue().stringValue()); | |||
assertEquals("V2-1", kv2.getValue().stringValue()); | |||
// 构建基于接口调用合约的交易请求,用于测试合约调用; | |||
buildBlock(ledgerRepo, ledgerManager, opReg, new TxDefinitor() { | |||
@@ -314,16 +337,17 @@ public class ContractInvokingTest { | |||
contractProxy.testRollbackWhileVersionConfliction(kpDataAccount.getAddress().toBase58(), "K1", "V1-2", | |||
1); | |||
contractProxy.testRollbackWhileVersionConfliction(kpDataAccount.getAddress().toBase58(), "K2", "V2-2", | |||
0); | |||
0);//预期会回滚; | |||
} | |||
}); | |||
// 预期数据都能够正常写入; | |||
kv1 = ledgerRepo.getDataAccountSet().getAccount(kpDataAccount.getAddress()).getDataEntry("K1", 1); | |||
// 预期数据回滚,账本没有发生变更; | |||
kv1 = ledgerRepo.getDataAccountSet().getAccount(kpDataAccount.getAddress()).getDataset().getDataEntry("K1", 1); | |||
assertEquals(1, kv1.getVersion()); | |||
assertEquals("V1-1", kv1.getValue()); | |||
kv1 = ledgerRepo.getDataAccountSet().getAccount(kpDataAccount.getAddress()).getDataEntry("K1", 2); | |||
assertEquals(-1, kv1.getVersion()); | |||
assertEquals(null, kv1.getValue()); | |||
assertEquals("V1-1", kv1.getValue().stringValue()); | |||
kv1 = ledgerRepo.getDataAccountSet().getAccount(kpDataAccount.getAddress()).getDataset().getDataEntry("K1", 2); | |||
assertNull(kv1); | |||
kv2 = ledgerRepo.getDataAccountSet().getAccount(kpDataAccount.getAddress()).getDataset().getDataEntry("K2", 1); | |||
assertEquals(1, kv2.getVersion()); | |||
} | |||
@@ -1,22 +1,12 @@ | |||
package test.com.jd.blockchain.ledger.core; | |||
import static org.junit.Assert.assertEquals; | |||
import java.util.Random; | |||
import org.junit.Before; | |||
import org.junit.Test; | |||
import com.jd.blockchain.binaryproto.BinaryProtocol; | |||
import com.jd.blockchain.binaryproto.DataContractRegistry; | |||
import com.jd.blockchain.crypto.HashDigest; | |||
import com.jd.blockchain.crypto.PubKey; | |||
import com.jd.blockchain.crypto.service.classic.ClassicAlgorithm; | |||
import com.jd.blockchain.crypto.service.sm.SMAlgorithm; | |||
import com.jd.blockchain.ledger.AccountHeader; | |||
import com.jd.blockchain.ledger.BlockchainIdentity; | |||
import com.jd.blockchain.ledger.UserInfo; | |||
import com.jd.blockchain.ledger.core.MerkleAccountSet; | |||
import com.jd.blockchain.utils.Bytes; | |||
/** | |||
* Created by zhangshuang3 on 2018/9/3. | |||
@@ -35,27 +25,27 @@ public class LedgerAccountTest { | |||
rand.nextBytes(seed); | |||
rand.nextBytes(settingValue); | |||
rand.nextBytes(rawDigestBytes); | |||
DataContractRegistry.register(AccountHeader.class); | |||
DataContractRegistry.register(BlockchainIdentity.class); | |||
DataContractRegistry.register(UserInfo.class); | |||
} | |||
@Test | |||
public void testSerialize_AccountHeader() { | |||
String address = "xxxxxxxxxxxx"; | |||
PubKey pubKey = new PubKey(SMAlgorithm.SM2, rawDigestBytes); | |||
HashDigest hashDigest = new HashDigest(ClassicAlgorithm.SHA256, rawDigestBytes); | |||
MerkleAccountSet.AccountHeaderData accountHeaderData = new MerkleAccountSet.AccountHeaderData(Bytes.fromString(address), | |||
pubKey, hashDigest); | |||
// encode and decode | |||
byte[] encodeBytes = BinaryProtocol.encode(accountHeaderData, AccountHeader.class); | |||
AccountHeader deAccountHeaderData = BinaryProtocol.decode(encodeBytes); | |||
// verify start | |||
assertEquals(accountHeaderData.getAddress(), deAccountHeaderData.getAddress()); | |||
assertEquals(accountHeaderData.getPubKey(), deAccountHeaderData.getPubKey()); | |||
assertEquals(accountHeaderData.getRootHash(), deAccountHeaderData.getRootHash()); | |||
} | |||
// @Test | |||
// public void testSerialize_AccountHeader() { | |||
// String address = "xxxxxxxxxxxx"; | |||
// PubKey pubKey = new PubKey(SMAlgorithm.SM2, rawDigestBytes); | |||
// HashDigest hashDigest = new HashDigest(ClassicAlgorithm.SHA256, rawDigestBytes); | |||
// MerkleAccountSet.AccountHeaderData accountHeaderData = new MerkleAccountSet.AccountHeaderData(Bytes.fromString(address), | |||
// pubKey, hashDigest); | |||
// | |||
// // encode and decode | |||
// byte[] encodeBytes = BinaryProtocol.encode(accountHeaderData, AccountHeader.class); | |||
// AccountHeader deAccountHeaderData = BinaryProtocol.decode(encodeBytes); | |||
// | |||
// // verify start | |||
// assertEquals(accountHeaderData.getAddress(), deAccountHeaderData.getAddress()); | |||
// assertEquals(accountHeaderData.getPubKey(), deAccountHeaderData.getPubKey()); | |||
// assertEquals(accountHeaderData.getRootHash(), deAccountHeaderData.getRootHash()); | |||
// } | |||
} |
@@ -8,12 +8,9 @@ import org.junit.Before; | |||
import org.junit.Test; | |||
import com.jd.blockchain.binaryproto.DataContractRegistry; | |||
import com.jd.blockchain.crypto.AddressEncoding; | |||
import com.jd.blockchain.crypto.AsymmetricKeypair; | |||
import com.jd.blockchain.crypto.Crypto; | |||
import com.jd.blockchain.crypto.CryptoProvider; | |||
import com.jd.blockchain.crypto.SignatureFunction; | |||
import com.jd.blockchain.crypto.service.classic.ClassicAlgorithm; | |||
import com.jd.blockchain.crypto.service.classic.ClassicCryptoService; | |||
import com.jd.blockchain.crypto.service.sm.SMCryptoService; | |||
import com.jd.blockchain.ledger.BlockchainKeyGenerator; | |||
@@ -25,7 +22,7 @@ import com.jd.blockchain.ledger.LedgerInitSetting; | |||
import com.jd.blockchain.ledger.LedgerTransaction; | |||
import com.jd.blockchain.ledger.TransactionRequest; | |||
import com.jd.blockchain.ledger.TransactionState; | |||
import com.jd.blockchain.ledger.core.CryptoConfig; | |||
import com.jd.blockchain.ledger.TypedValue; | |||
import com.jd.blockchain.ledger.core.DataAccount; | |||
import com.jd.blockchain.ledger.core.LedgerDataset; | |||
import com.jd.blockchain.ledger.core.LedgerEditor; | |||
@@ -33,11 +30,6 @@ import com.jd.blockchain.ledger.core.LedgerTransactionContext; | |||
import com.jd.blockchain.ledger.core.LedgerTransactionalEditor; | |||
import com.jd.blockchain.ledger.core.UserAccount; | |||
import com.jd.blockchain.storage.service.utils.MemoryKVStorage; | |||
import com.jd.blockchain.transaction.ConsensusParticipantData; | |||
import com.jd.blockchain.transaction.LedgerInitData; | |||
import com.jd.blockchain.utils.Bytes; | |||
import com.jd.blockchain.utils.io.BytesUtils; | |||
import com.jd.blockchain.utils.net.NetworkAddress; | |||
public class LedgerEditorTest { | |||
@@ -102,7 +94,7 @@ public class LedgerEditorTest { | |||
DataAccount dataAccount = ldgDS.getDataAccountSet().register(dataKP.getAddress(), dataKP.getPubKey(), null); | |||
dataAccount.setBytes(Bytes.fromString("A"), "abc", -1); | |||
dataAccount.getDataset().setValue("A", TypedValue.fromText("abc"), -1); | |||
LedgerTransaction tx = genisisTxCtx.commit(TransactionState.SUCCESS); | |||
LedgerBlock block = ldgEdt.prepare(); | |||
@@ -115,9 +107,9 @@ public class LedgerEditorTest { | |||
assertEquals(0, block.getHeight()); | |||
// 验证数据读写的一致性; | |||
BytesValue bytes = dataAccount.getBytes("A"); | |||
BytesValue bytes = dataAccount.getDataset().getValue("A"); | |||
assertEquals(DataType.TEXT, bytes.getType()); | |||
String textValue = bytes.getValue().toUTF8String(); | |||
String textValue = bytes.getBytes().toUTF8String(); | |||
assertEquals("abc", textValue); | |||
} | |||
@@ -44,12 +44,18 @@ public class LedgerTestUtils { | |||
partiKeys[1] = BlockchainKeyGenerator.getInstance().generate(); | |||
return createLedgerInitSetting(partiKeys); | |||
} | |||
public static LedgerInitSetting createLedgerInitSetting(BlockchainKeypair[] partiKeys) { | |||
public static CryptoProvider[] getContextProviders() { | |||
CryptoProvider[] supportedProviders = new CryptoProvider[SUPPORTED_PROVIDERS.length]; | |||
for (int i = 0; i < SUPPORTED_PROVIDERS.length; i++) { | |||
supportedProviders[i] = Crypto.getProvider(SUPPORTED_PROVIDERS[i]); | |||
} | |||
return supportedProviders; | |||
} | |||
public static LedgerInitSetting createLedgerInitSetting(BlockchainKeypair[] partiKeys) { | |||
CryptoProvider[] supportedProviders =getContextProviders(); | |||
CryptoConfig defCryptoSetting = new CryptoConfig(); | |||
defCryptoSetting.setSupportedProviders(supportedProviders); | |||
@@ -0,0 +1,58 @@ | |||
package test.com.jd.blockchain.ledger.core; | |||
import static org.junit.Assert.assertEquals; | |||
import static org.junit.Assert.assertNotNull; | |||
import org.junit.Test; | |||
import com.jd.blockchain.crypto.Crypto; | |||
import com.jd.blockchain.ledger.BlockchainKeyGenerator; | |||
import com.jd.blockchain.ledger.BlockchainKeypair; | |||
import com.jd.blockchain.ledger.TypedValue; | |||
import com.jd.blockchain.ledger.core.CompositeAccount; | |||
import com.jd.blockchain.ledger.core.CryptoConfig; | |||
import com.jd.blockchain.ledger.core.MerkleAccountSet; | |||
import com.jd.blockchain.ledger.core.OpeningAccessPolicy; | |||
import com.jd.blockchain.storage.service.utils.MemoryKVStorage; | |||
import com.jd.blockchain.utils.Bytes; | |||
public class MerkleAccountSetTest { | |||
@Test | |||
public void testRegister() { | |||
final OpeningAccessPolicy POLICY = new OpeningAccessPolicy(); | |||
final MemoryKVStorage STORAGE = new MemoryKVStorage(); | |||
Bytes KEY_PREFIX = Bytes.fromString("/ACCOUNT"); | |||
CryptoConfig cryptoConfig = new CryptoConfig(); | |||
cryptoConfig.setSupportedProviders(LedgerTestUtils.getContextProviders()); | |||
cryptoConfig.setAutoVerifyHash(true); | |||
cryptoConfig.setHashAlgorithm(Crypto.getAlgorithm("SHA256")); | |||
MerkleAccountSet accountset = new MerkleAccountSet(cryptoConfig, KEY_PREFIX, STORAGE, STORAGE, POLICY); | |||
BlockchainKeypair key1 = BlockchainKeyGenerator.getInstance().generate(); | |||
accountset.register(key1.getIdentity()); | |||
accountset.commit(); | |||
CompositeAccount acc1 = accountset.getAccount(key1.getAddress()); | |||
assertNotNull(acc1); | |||
assertEquals(0, accountset.getVersion(key1.getAddress())); | |||
acc1.getDataset().setValue("K1", TypedValue.fromText("V0"), -1); | |||
TypedValue v1 = acc1.getDataset().getValue("K1"); | |||
assertNotNull(v1); | |||
assertEquals(0, acc1.getDataset().getVersion("K1")); | |||
accountset.commit(); | |||
v1 = acc1.getDataset().getValue("K1"); | |||
assertNotNull(v1); | |||
assertEquals(0, acc1.getDataset().getVersion("K1")); | |||
} | |||
} |
@@ -23,9 +23,11 @@ import com.jd.blockchain.crypto.service.sm.SMCryptoService; | |||
import com.jd.blockchain.ledger.MerkleProof; | |||
import com.jd.blockchain.ledger.core.CryptoConfig; | |||
import com.jd.blockchain.ledger.core.MerkleDataSet; | |||
import com.jd.blockchain.storage.service.VersioningKVEntry; | |||
import com.jd.blockchain.storage.service.utils.MemoryKVStorage; | |||
import com.jd.blockchain.utils.Bytes; | |||
import com.jd.blockchain.utils.DataEntry; | |||
import com.jd.blockchain.utils.Dataset; | |||
import com.jd.blockchain.utils.DatasetHelper; | |||
import com.jd.blockchain.utils.io.BytesUtils; | |||
public class MerkleDataSetTest { | |||
@@ -53,9 +55,9 @@ public class MerkleDataSetTest { | |||
MemoryKVStorage storage = new MemoryKVStorage(); | |||
MerkleDataSet mds = new MerkleDataSet(cryptoConfig, keyPrefix, storage, storage); | |||
mds.setValue("A", "A".getBytes(), -1); | |||
mds.setValue("B", "B".getBytes(), -1); | |||
mds.setValue("C", "C".getBytes(), -1); | |||
mds.setValue(Bytes.fromString("A"), "A".getBytes(), -1); | |||
mds.setValue(Bytes.fromString("B"), "B".getBytes(), -1); | |||
mds.setValue(Bytes.fromString("C"), "C".getBytes(), -1); | |||
mds.commit(); | |||
@@ -85,22 +87,23 @@ public class MerkleDataSetTest { | |||
MemoryKVStorage storage = new MemoryKVStorage(); | |||
MerkleDataSet mds = new MerkleDataSet(cryptoConfig, keyPrefix, storage, storage); | |||
mds.setValue("A", "A".getBytes(), -1); | |||
mds.setValue("B", "B".getBytes(), -1); | |||
mds.setValue("C", "C".getBytes(), -1); | |||
Dataset<String, byte[]> ds = DatasetHelper.map(mds); | |||
ds.setValue("A", "A".getBytes(), -1); | |||
ds.setValue("B", "B".getBytes(), -1); | |||
ds.setValue("C", "C".getBytes(), -1); | |||
mds.commit(); | |||
byte[] va = mds.getValue("A"); | |||
byte[] va = ds.getValue("A"); | |||
assertNotNull(va); | |||
assertEquals("A", new String(va)); | |||
byte[] vc = mds.getValue("C"); | |||
VersioningKVEntry ventry = mds.getDataEntry("C"); | |||
byte[] vc = ds.getValue("C"); | |||
DataEntry<String, byte[]> ventry = ds.getDataEntry("C"); | |||
assertNotNull(vc); | |||
assertNotNull(ventry); | |||
assertEquals("C", new String(vc)); | |||
assertEquals("C", ventry.getKey().toUTF8String()); | |||
assertEquals("C", ventry.getKey()); | |||
HashDigest root1 = mds.getRootHash(); | |||
@@ -111,8 +114,8 @@ public class MerkleDataSetTest { | |||
int expStorageCount = 10; | |||
assertEquals(expStorageCount, storage.getStorageCount()); | |||
mds.setValue("B", "B".getBytes(), 0); | |||
mds.setValue("C", "C".getBytes(), 0); | |||
ds.setValue("B", "B".getBytes(), 0); | |||
ds.setValue("C", "C".getBytes(), 0); | |||
mds.commit(); | |||
HashDigest root2 = mds.getRootHash(); | |||
assertNotEquals(root1, root2); | |||
@@ -122,7 +125,7 @@ public class MerkleDataSetTest { | |||
expStorageCount = expStorageCount + 3; | |||
assertEquals(expStorageCount, storage.getStorageCount()); | |||
mds.setValue("D", "DValue".getBytes(), -1); | |||
ds.setValue("D", "DValue".getBytes(), -1); | |||
mds.commit(); | |||
HashDigest root3 = mds.getRootHash(); | |||
assertNotEquals(root2, root3); | |||
@@ -135,31 +138,31 @@ public class MerkleDataSetTest { | |||
assertEquals(expStorageCount, storage.getStorageCount()); | |||
// Check rollback function: Add some keys, and then rollback; | |||
long v = mds.setValue("E", "E-values".getBytes(), -1); | |||
long v = ds.setValue("E", "E-values".getBytes(), -1); | |||
assertEquals(v, 0); | |||
String expEValue = new String(mds.getValue("E")); | |||
String expEValue = new String(ds.getValue("E")); | |||
assertEquals(expEValue, "E-values"); | |||
v = mds.setValue("F", "F-values".getBytes(), -1); | |||
v = ds.setValue("F", "F-values".getBytes(), -1); | |||
assertEquals(v, 0); | |||
String expFValue = new String(mds.getValue("F")); | |||
String expFValue = new String(ds.getValue("F")); | |||
assertEquals(expFValue, "F-values"); | |||
v = mds.setValue("E", "E-values-1".getBytes(), 0); | |||
v = ds.setValue("E", "E-values-1".getBytes(), 0); | |||
assertEquals(v, 1); | |||
expEValue = new String(mds.getValue("E")); | |||
expEValue = new String(ds.getValue("E")); | |||
assertEquals(expEValue, "E-values-1"); | |||
mds.cancel(); | |||
byte[] bv = mds.getValue("E"); | |||
byte[] bv = ds.getValue("E"); | |||
assertNull(bv); | |||
bv = mds.getValue("F"); | |||
bv = ds.getValue("F"); | |||
assertNull(bv); | |||
v = mds.getVersion("E"); | |||
v = ds.getVersion("E"); | |||
assertEquals(-1, v); | |||
v = mds.getVersion("F"); | |||
v = ds.getVersion("F"); | |||
assertEquals(-1, v); | |||
// Expect that states has been recover; | |||
@@ -194,10 +197,11 @@ public class MerkleDataSetTest { | |||
MemoryKVStorage storage = new MemoryKVStorage(); | |||
MerkleDataSet mds = new MerkleDataSet(cryptoConfig, keyPrefix, storage, storage); | |||
Dataset<String, byte[]> ds = DatasetHelper.map(mds); | |||
// 初始的时候没有任何数据,总是返回 null; | |||
VersioningKVEntry verKVEntry = mds.getDataEntry("NULL_KEY"); | |||
byte[] vbytes = mds.getValue("NULL_KEY"); | |||
DataEntry verKVEntry = ds.getDataEntry("NULL_KEY"); | |||
byte[] vbytes = ds.getValue("NULL_KEY"); | |||
assertNull(verKVEntry); | |||
assertNull(vbytes); | |||
@@ -217,7 +221,7 @@ public class MerkleDataSetTest { | |||
for (int i = 0; i < count; i++) { | |||
key = "data" + i; | |||
rand.nextBytes(data); | |||
v = mds.setValue(key, data, -1); | |||
v = ds.setValue(key, data, -1); | |||
dataVersions.put(key, v); | |||
dataValues.put(key + "_" + v, data); | |||
assertEquals(v, 0); | |||
@@ -237,7 +241,7 @@ public class MerkleDataSetTest { | |||
KeySnapshot ks = new KeySnapshot(); | |||
ks.proof = proof; | |||
ks.maxVersion = mds.getVersion(key); | |||
ks.maxVersion = ds.getVersion(key); | |||
snapshot.put(key, ks); | |||
} | |||
@@ -271,7 +275,7 @@ public class MerkleDataSetTest { | |||
key = "data" + i; | |||
rand.nextBytes(data); | |||
expVer = dataVersions.get(key); | |||
v = mds.setValue(key, data, expVer); | |||
v = ds.setValue(key, data, expVer); | |||
assertEquals(v, expVer + 1); | |||
@@ -300,7 +304,7 @@ public class MerkleDataSetTest { | |||
KeySnapshot ks = new KeySnapshot(); | |||
ks.proof = proof; | |||
ks.maxVersion = mds.getVersion(key); | |||
ks.maxVersion = ds.getVersion(key); | |||
snapshot.put(key, ks); | |||
} | |||
history.put(rootHash, snapshot); | |||
@@ -316,6 +320,7 @@ public class MerkleDataSetTest { | |||
MerkleDataSet mdsReload = new MerkleDataSet(hisRootHash, cryptoConfig, keyPrefix, storage, storage, | |||
true); | |||
Dataset<String, byte[]> dsReload = DatasetHelper.map(mdsReload); | |||
assertEquals(hisRootHash, mdsReload.getRootHash()); | |||
// verify every keys; | |||
@@ -323,7 +328,7 @@ public class MerkleDataSetTest { | |||
key = "data" + i; | |||
// 最新版本一致; | |||
long expLatestVersion = snapshot.get(key).maxVersion; | |||
long actualLatestVersion = mdsReload.getVersion(key); | |||
long actualLatestVersion = dsReload.getVersion(key); | |||
assertEquals(expLatestVersion, actualLatestVersion); | |||
// 数据证明一致; | |||
@@ -339,7 +344,7 @@ public class MerkleDataSetTest { | |||
for (long j = 0; j < actualLatestVersion; j++) { | |||
String keyver = key + "_" + j; | |||
byte[] expValue = dataValues.get(keyver); | |||
byte[] actualValue = mdsReload.getValue(key, j); | |||
byte[] actualValue = dsReload.getValue(key, j); | |||
assertTrue(BytesUtils.equals(expValue, actualValue)); | |||
} | |||
} | |||
@@ -365,10 +370,11 @@ public class MerkleDataSetTest { | |||
MemoryKVStorage storage = new MemoryKVStorage(); | |||
MerkleDataSet mds = new MerkleDataSet(cryptoConfig, keyPrefix, storage, storage); | |||
Dataset<String, byte[]> ds = DatasetHelper.map(mds); | |||
// 初始的时候没有任何数据,总是返回 null; | |||
VersioningKVEntry verKVEntry = mds.getDataEntry("NULL_KEY"); | |||
byte[] vbytes = mds.getValue("NULL_KEY"); | |||
DataEntry verKVEntry = ds.getDataEntry("NULL_KEY"); | |||
byte[] vbytes = ds.getValue("NULL_KEY"); | |||
assertNull(verKVEntry); | |||
assertNull(vbytes); | |||
@@ -388,7 +394,7 @@ public class MerkleDataSetTest { | |||
MerkleProof proof; | |||
for (int i = 0; i < count; i++) { | |||
key = "data" + i; | |||
v = mds.setValue(key, data, -1); | |||
v = ds.setValue(key, data, -1); | |||
dataVersions.put(key, v); | |||
// dataValues.put(key + "_" + v, data); | |||
assertEquals(v, 0); | |||
@@ -408,7 +414,7 @@ public class MerkleDataSetTest { | |||
KeySnapshot ks = new KeySnapshot(); | |||
ks.proof = proof; | |||
ks.maxVersion = mds.getVersion(key); | |||
ks.maxVersion = ds.getVersion(key); | |||
snapshot.put(key, ks); | |||
} | |||
@@ -418,6 +424,7 @@ public class MerkleDataSetTest { | |||
// verify; | |||
{ | |||
MerkleDataSet mdsReload = new MerkleDataSet(rootHash, cryptoConfig, keyPrefix, storage, storage, true); | |||
Dataset<String, byte[]> dsReload = DatasetHelper.map(mdsReload); | |||
// verify every keys; | |||
Map<String, KeySnapshot> snapshot = history.get(rootHash); | |||
MerkleProof expProof; | |||
@@ -429,7 +436,7 @@ public class MerkleDataSetTest { | |||
expProof = snapshot.get(key).proof; | |||
assertEquals(expProof.toString(), proof.toString()); | |||
byte[] value = mdsReload.getValue(key); | |||
byte[] value = dsReload.getValue(key); | |||
assertTrue(BytesUtils.equals(data, value)); | |||
} | |||
} | |||
@@ -556,6 +556,9 @@ public class MerkleTreeTest { | |||
/** | |||
* 测试从存储重新加载 Merkle 树的正确性; | |||
*/ | |||
/** | |||
* | |||
*/ | |||
@Test | |||
public void testMerkleReload() { | |||
CryptoSetting setting = Mockito.mock(CryptoSetting.class); | |||
@@ -563,7 +566,7 @@ public class MerkleTreeTest { | |||
when(setting.getAutoVerifyHash()).thenReturn(true); | |||
// 保存所有写入的数据节点的 SN-Hash 映射表; | |||
TreeMap<Long, HashDigest> dataNodes = new TreeMap<>(); | |||
TreeMap<Long, HashDigest> expectedDataNodes = new TreeMap<>(); | |||
MerkleNode nd; | |||
// 测试从空的树开始,顺序增加数据节点; | |||
@@ -580,7 +583,7 @@ public class MerkleTreeTest { | |||
for (int i = 0; i < count; i++) { | |||
rand.nextBytes(dataBuf); | |||
nd = mkt.setData(sn, "KEY-" + sn, 0, dataBuf); | |||
dataNodes.put(sn, nd.getNodeHash()); | |||
expectedDataNodes.put(sn, nd.getNodeHash()); | |||
sn++; | |||
} | |||
mkt.commit(); | |||
@@ -610,6 +613,24 @@ public class MerkleTreeTest { | |||
// 预期扩展为 4 层16叉树,由 3 层满16叉树扩展 1 新分支(4个路径节点)而形成; | |||
long expectedNodes = getMaxPathNodeCount(3) + 4 + 4097; | |||
assertEquals(expectedNodes, storage.getCount()); | |||
//重新加载,判断数据是否正确; | |||
MerkleTree r1_mkt = new MerkleTree(r1_rootHash, setting, keyPrefix, storage, true); | |||
{ | |||
// 验证每一个数据节点都产生了存在性证明; | |||
MerkleProof proof = null; | |||
HashDigest expectedNodeHash = null; | |||
MerkleDataNode reallyDataNode = null; | |||
for (long n = 0; n < maxSN; n++) { | |||
expectedNodeHash = expectedDataNodes.get(n); | |||
reallyDataNode = r1_mkt.getData(n); | |||
assertEquals(expectedNodeHash, reallyDataNode.getNodeHash()); | |||
proof = r1_mkt.getProof(n); | |||
assertNotNull(proof); | |||
assertEquals(expectedNodeHash, proof.getHash(0)); | |||
} | |||
} | |||
} | |||
// 覆盖到每一路分支修改数据节点; | |||
@@ -621,7 +642,7 @@ public class MerkleTreeTest { | |||
rand.nextBytes(dataBuf); | |||
sn = i; | |||
nd = mkt.setData(sn, "KEY-" + sn, 0, dataBuf); | |||
dataNodes.put(sn, nd.getNodeHash()); | |||
expectedDataNodes.put(sn, nd.getNodeHash()); | |||
} | |||
mkt.commit(); | |||
@@ -658,16 +679,18 @@ public class MerkleTreeTest { | |||
rand.nextBytes(dataBuf); | |||
sn = maxSN + 1 + i; | |||
nd = mkt.setData(sn, "KEY-" + sn, 0, dataBuf); | |||
dataNodes.put(sn, nd.getNodeHash()); | |||
expectedDataNodes.put(sn, nd.getNodeHash()); | |||
} | |||
mkt.commit(); | |||
// 验证每一个数据节点都产生了存在性证明; | |||
MerkleProof proof = null; | |||
for (Long n : dataNodes.keySet()) { | |||
proof = mkt.getProof(n.longValue()); | |||
assertNotNull(proof); | |||
assertEquals(dataNodes.get(n), proof.getHash(0)); | |||
{ | |||
// 验证每一个数据节点都产生了存在性证明; | |||
MerkleProof proof = null; | |||
for (Long n : expectedDataNodes.keySet()) { | |||
proof = mkt.getProof(n.longValue()); | |||
assertNotNull(proof); | |||
assertEquals(expectedDataNodes.get(n), proof.getHash(0)); | |||
} | |||
} | |||
// 记录一次提交的根哈希以及部分节点信息,用于后续的加载校验; | |||
@@ -700,6 +723,7 @@ public class MerkleTreeTest { | |||
assertEquals(r1_proof1, r1_mkt.getProof(r1_sn1).toString()); | |||
assertEquals(r1_proof2, r1_mkt.getProof(r1_sn2).toString()); | |||
// 从第 2 轮提交的 Merkle 根哈希加载; | |||
// 第 2 轮生成的 Merkle 树是对第 1 轮的数据的全部节点的修改,因此同一个 SN 的节点的证明是不同的; | |||
MerkleTree r2_mkt = new MerkleTree(r2_rootHash, setting, keyPrefix, storage, true); | |||
@@ -730,13 +754,16 @@ public class MerkleTreeTest { | |||
assertEquals(r3_proof3, r3_mkt.getProof(r3_sn3).toString()); | |||
// 验证每一个数据节点都产生了存在性证明; | |||
for (Long n : dataNodes.keySet()) { | |||
proof = r3_mkt.getProof(n.longValue()); | |||
assertNotNull(proof); | |||
assertEquals(dataNodes.get(n), proof.getHash(0)); | |||
{ | |||
MerkleProof proof = null; | |||
for (Long n : expectedDataNodes.keySet()) { | |||
proof = r3_mkt.getProof(n.longValue()); | |||
assertNotNull(proof); | |||
assertEquals(expectedDataNodes.get(n), proof.getHash(0)); | |||
} | |||
} | |||
} | |||
@SuppressWarnings("unused") | |||
private static int getLevel(long dataCount) { | |||
if (dataCount < 0) { | |||
@@ -772,8 +799,7 @@ public class MerkleTreeTest { | |||
* 注:此方法不处理溢出;调用者需要自行规避; | |||
* | |||
* @param value | |||
* @param x | |||
* 大于等于 0 的整数; | |||
* @param x 大于等于 0 的整数; | |||
* @return | |||
*/ | |||
private static long power(long value, int x) { | |||
@@ -332,24 +332,25 @@ public class TransactionBatchProcessorTest { | |||
newBlock = newBlockEditor.prepare(); | |||
newBlockEditor.commit(); | |||
BytesValue v1_0 = ledgerRepo.getDataAccountSet().getAccount(dataAccountKeypair.getAddress()).getBytes("K1", | |||
BytesValue v1_0 = ledgerRepo.getDataAccountSet().getAccount(dataAccountKeypair.getAddress()).getDataset().getValue("K1", | |||
0); | |||
BytesValue v1_1 = ledgerRepo.getDataAccountSet().getAccount(dataAccountKeypair.getAddress()).getBytes("K1", | |||
BytesValue v1_1 = ledgerRepo.getDataAccountSet().getAccount(dataAccountKeypair.getAddress()).getDataset().getValue("K1", | |||
1); | |||
BytesValue v2 = ledgerRepo.getDataAccountSet().getAccount(dataAccountKeypair.getAddress()).getBytes("K2", | |||
BytesValue v2 = ledgerRepo.getDataAccountSet().getAccount(dataAccountKeypair.getAddress()).getDataset().getValue("K2", | |||
0); | |||
BytesValue v3 = ledgerRepo.getDataAccountSet().getAccount(dataAccountKeypair.getAddress()).getBytes("K3", | |||
BytesValue v3 = ledgerRepo.getDataAccountSet().getAccount(dataAccountKeypair.getAddress()).getDataset().getValue("K3", | |||
0); | |||
assertNotNull(v1_0); | |||
assertNotNull(v1_1); | |||
assertNotNull(v2); | |||
assertNotNull(v3); | |||
assertEquals("V-1-1", v1_0.getValue().toUTF8String()); | |||
assertEquals("V-1-2", v1_1.getValue().toUTF8String()); | |||
assertEquals("V-2-1", v2.getValue().toUTF8String()); | |||
assertEquals("V-3-1", v3.getValue().toUTF8String()); | |||
assertEquals("V-1-1", v1_0.getBytes().toUTF8String()); | |||
assertEquals("V-1-2", v1_1.getBytes().toUTF8String()); | |||
assertEquals("V-2-1", v2.getBytes().toUTF8String()); | |||
assertEquals("V-3-1", v3.getBytes().toUTF8String()); | |||
// 提交多笔数据写入的交易,包含存在数据版本冲突的交易,验证交易是否正确回滚; | |||
// 先写一笔正确的交易; k3 的版本将变为 1 ; | |||
@@ -371,27 +372,27 @@ public class TransactionBatchProcessorTest { | |||
} catch (DataVersionConflictException e) { | |||
versionConflictionException = e; | |||
} | |||
assertNotNull(versionConflictionException); | |||
// assertNotNull(versionConflictionException); | |||
newBlock = newBlockEditor.prepare(); | |||
newBlockEditor.commit(); | |||
BytesValue v1 = ledgerRepo.getDataAccountSet().getAccount(dataAccountKeypair.getAddress()).getBytes("K1"); | |||
v3 = ledgerRepo.getDataAccountSet().getAccount(dataAccountKeypair.getAddress()).getBytes("K3"); | |||
BytesValue v1 = ledgerRepo.getDataAccountSet().getAccount(dataAccountKeypair.getAddress()).getDataset().getValue("K1"); | |||
v3 = ledgerRepo.getDataAccountSet().getAccount(dataAccountKeypair.getAddress()).getDataset().getValue("K3"); | |||
// k1 的版本仍然为1,没有更新; | |||
long k1_version = ledgerRepo.getDataAccountSet().getAccount(dataAccountKeypair.getAddress()) | |||
.getDataVersion("K1"); | |||
.getDataset().getVersion("K1"); | |||
assertEquals(1, k1_version); | |||
long k3_version = ledgerRepo.getDataAccountSet().getAccount(dataAccountKeypair.getAddress()) | |||
.getDataVersion("K3"); | |||
.getDataset().getVersion("K3"); | |||
assertEquals(1, k3_version); | |||
assertNotNull(v1); | |||
assertNotNull(v3); | |||
assertEquals("V-1-2", v1.getValue().toUTF8String()); | |||
assertEquals("V-3-2", v3.getValue().toUTF8String()); | |||
assertEquals("V-1-2", v1.getBytes().toUTF8String()); | |||
assertEquals("V-3-2", v3.getBytes().toUTF8String()); | |||
// // 验证正确性; | |||
// ledgerManager = new LedgerManager(); | |||
@@ -164,8 +164,8 @@ public class TransactionSetTest { | |||
for (int i = 0; i < acutualKVWriteSet.length; i++) { | |||
assertEquals(expKVWriteSet[i].getKey(), acutualKVWriteSet[i].getKey()); | |||
assertEquals(expKVWriteSet[i].getExpectedVersion(), acutualKVWriteSet[i].getExpectedVersion()); | |||
assertTrue(BytesUtils.equals(expKVWriteSet[i].getValue().getValue().toBytes(), | |||
acutualKVWriteSet[i].getValue().getValue().toBytes())); | |||
assertTrue(BytesUtils.equals(expKVWriteSet[i].getValue().getBytes().toBytes(), | |||
acutualKVWriteSet[i].getValue().getBytes().toBytes())); | |||
} | |||
ContractCodeDeployOperation actualContractDplOp = (ContractCodeDeployOperation) actualOperations[3]; | |||
@@ -6,7 +6,7 @@ | |||
<parent> | |||
<groupId>com.jd.blockchain</groupId> | |||
<artifactId>ledger</artifactId> | |||
<version>1.1.1-PACK20191209</version> | |||
<version>1.1.2.RELEASE</version> | |||
</parent> | |||
<artifactId>ledger-model</artifactId> | |||