From a0687ce02bac73aad9e0d7ddc85625016604f0db Mon Sep 17 00:00:00 2001 From: jsimomaa Date: Wed, 17 Aug 2016 08:59:14 +0300 Subject: [PATCH] Sharing org.simantics.acorn for everyone to use Change-Id: I3e110dd01c31dd0e0885887cfa66e42c3849bc2e --- bundles/org.simantics.acorn/.classpath | 7 + bundles/org.simantics.acorn/.project | 33 + .../.settings/org.eclipse.jdt.core.prefs | 7 + bundles/org.simantics.acorn/.svn/wc.db | Bin 0 -> 152576 bytes .../org.simantics.acorn/META-INF/MANIFEST.MF | 22 + .../OSGI-INF/component.xml | 7 + .../org.simantics.acorn.AcornDriver.xml | 7 + bundles/org.simantics.acorn/build.properties | 17 + bundles/org.simantics.acorn/log4j.properties | 63 + .../simantics/acorn/AcornDatabaseManager.java | 40 + .../src/org/simantics/acorn/AcornDriver.java | 108 ++ .../org/simantics/acorn/AcornManagement.java | 51 + .../acorn/AcornSessionManagerImpl.java | 125 ++ .../org/simantics/acorn/ClusterManager.java | 584 ++++++++ .../acorn/ExternalizableExample.java | 43 + .../src/org/simantics/acorn/FileIO.java | 142 ++ .../org/simantics/acorn/GraphClientImpl2.java | 708 +++++++++ .../src/org/simantics/acorn/HeadState.java | 105 ++ .../acorn/InvalidHeadStateException.java | 27 + .../src/org/simantics/acorn/MainProgram.java | 342 +++++ .../src/org/simantics/acorn/MainState.java | 135 ++ .../src/org/simantics/acorn/Persistable.java | 11 + .../simantics/acorn/UndoClusterSupport.java | 170 +++ .../acorn/backup/AcornBackupProvider.java | 316 ++++ .../simantics/acorn/cluster/ClusterBig.java | 1104 ++++++++++++++ .../simantics/acorn/cluster/ClusterImpl.java | 226 +++ .../simantics/acorn/cluster/ClusterSmall.java | 1304 +++++++++++++++++ .../acorn/internal/AcornDatabase.java | 229 +++ .../simantics/acorn/internal/Activator.java | 62 + .../acorn/internal/BijectionMap.java | 119 ++ .../org/simantics/acorn/internal/Change.java | 70 + .../acorn/internal/ClusterChange.java | 735 ++++++++++ .../acorn/internal/ClusterChange2.java | 70 + .../acorn/internal/ClusterStream.java | 437 ++++++ .../acorn/internal/ClusterSupport2.java | 340 +++++ .../internal/ClusterUpdateProcessor.java | 86 ++ .../internal/ClusterUpdateProcessor2.java | 30 + .../internal/ClusterUpdateProcessorBase.java | 475 ++++++ .../internal/ClusterUpdateProcessorBase2.java | 63 + .../simantics/acorn/internal/DebugPolicy.java | 19 + .../internal/UndoClusterUpdateProcessor.java | 112 ++ .../org/simantics/acorn/lru/AccessTime.java | 23 + .../acorn/lru/CachingClusterSupport.java | 160 ++ .../simantics/acorn/lru/ChangeSetInfo.java | 113 ++ .../simantics/acorn/lru/ClusterChangeSet.java | 120 ++ .../org/simantics/acorn/lru/ClusterInfo.java | 346 +++++ .../org/simantics/acorn/lru/ClusterLRU.java | 315 ++++ .../acorn/lru/ClusterStreamChunk.java | 302 ++++ .../acorn/lru/ClusterUpdateOperation.java | 91 ++ .../acorn/lru/ClusterUpdateState.java | 51 + .../src/org/simantics/acorn/lru/FileInfo.java | 139 ++ .../src/org/simantics/acorn/lru/LRU.java | 624 ++++++++ .../org/simantics/acorn/lru/LRUObject.java | 236 +++ .../org/simantics/db/javacore/HeadState.java | 73 + 54 files changed, 11144 insertions(+) create mode 100644 bundles/org.simantics.acorn/.classpath create mode 100644 bundles/org.simantics.acorn/.project create mode 100644 bundles/org.simantics.acorn/.settings/org.eclipse.jdt.core.prefs create mode 100644 bundles/org.simantics.acorn/.svn/wc.db create mode 100644 bundles/org.simantics.acorn/META-INF/MANIFEST.MF create mode 100644 bundles/org.simantics.acorn/OSGI-INF/component.xml create mode 100644 
bundles/org.simantics.acorn/OSGI-INF/org.simantics.acorn.AcornDriver.xml create mode 100644 bundles/org.simantics.acorn/build.properties create mode 100644 bundles/org.simantics.acorn/log4j.properties create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/AcornDatabaseManager.java create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/AcornDriver.java create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/AcornManagement.java create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/AcornSessionManagerImpl.java create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/ClusterManager.java create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/ExternalizableExample.java create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/FileIO.java create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/GraphClientImpl2.java create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/HeadState.java create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/InvalidHeadStateException.java create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/MainProgram.java create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/MainState.java create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/Persistable.java create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/UndoClusterSupport.java create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/backup/AcornBackupProvider.java create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterBig.java create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterImpl.java create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterSmall.java create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/internal/AcornDatabase.java create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/internal/Activator.java create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/internal/BijectionMap.java create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/internal/Change.java create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterChange.java create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterChange2.java create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterStream.java create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterSupport2.java create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessor.java create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessor2.java create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessorBase.java create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessorBase2.java create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/internal/DebugPolicy.java create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/internal/UndoClusterUpdateProcessor.java create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/lru/AccessTime.java create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/lru/CachingClusterSupport.java create mode 100644 
bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ChangeSetInfo.java
 create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterChangeSet.java
 create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterInfo.java
 create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterLRU.java
 create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterStreamChunk.java
 create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterUpdateOperation.java
 create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterUpdateState.java
 create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/lru/FileInfo.java
 create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/lru/LRU.java
 create mode 100644 bundles/org.simantics.acorn/src/org/simantics/acorn/lru/LRUObject.java
 create mode 100644 bundles/org.simantics.acorn/src/org/simantics/db/javacore/HeadState.java

diff --git a/bundles/org.simantics.acorn/.classpath b/bundles/org.simantics.acorn/.classpath
new file mode 100644
index 000000000..22f30643c
--- /dev/null
+++ b/bundles/org.simantics.acorn/.classpath
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<classpath>
+	<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.8"/>
+	<classpathentry kind="con" path="org.eclipse.pde.core.requiredPlugins"/>
+	<classpathentry kind="src" path="src"/>
+	<classpathentry kind="output" path="bin"/>
+</classpath>
diff --git a/bundles/org.simantics.acorn/.project b/bundles/org.simantics.acorn/.project
new file mode 100644
index 000000000..9726c0b94
--- /dev/null
+++ b/bundles/org.simantics.acorn/.project
@@ -0,0 +1,33 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+	<name>org.simantics.acorn</name>
+	<comment></comment>
+	<projects>
+	</projects>
+	<buildSpec>
+		<buildCommand>
+			<name>org.eclipse.jdt.core.javabuilder</name>
+			<arguments>
+			</arguments>
+		</buildCommand>
+		<buildCommand>
+			<name>org.eclipse.pde.ManifestBuilder</name>
+			<arguments>
+			</arguments>
+		</buildCommand>
+		<buildCommand>
+			<name>org.eclipse.pde.SchemaBuilder</name>
+			<arguments>
+			</arguments>
+		</buildCommand>
+		<buildCommand>
+			<name>org.eclipse.pde.ds.core.builder</name>
+			<arguments>
+			</arguments>
+		</buildCommand>
+	</buildSpec>
+	<natures>
+		<nature>org.eclipse.pde.PluginNature</nature>
+		<nature>org.eclipse.jdt.core.javanature</nature>
+	</natures>
+</projectDescription>
diff --git a/bundles/org.simantics.acorn/.settings/org.eclipse.jdt.core.prefs b/bundles/org.simantics.acorn/.settings/org.eclipse.jdt.core.prefs
new file mode 100644
index 000000000..0c68a61dc
--- /dev/null
+++ b/bundles/org.simantics.acorn/.settings/org.eclipse.jdt.core.prefs
@@ -0,0 +1,7 @@
+eclipse.preferences.version=1
+org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled
+org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.8
+org.eclipse.jdt.core.compiler.compliance=1.8
+org.eclipse.jdt.core.compiler.problem.assertIdentifier=error
+org.eclipse.jdt.core.compiler.problem.enumIdentifier=error
+org.eclipse.jdt.core.compiler.source=1.8
diff --git a/bundles/org.simantics.acorn/.svn/wc.db b/bundles/org.simantics.acorn/.svn/wc.db
new file mode 100644
index 0000000000000000000000000000000000000000..9defa905814a3c7e6879fac4e711bc1cec154c27
GIT binary patch
literal 152576
zJUQ(4OkF}dmaSSvr^W(qRVh?cTn*Q%pLefioGZaNR^3{D?M9n+AKbZfT{o*l^j-X6 zcaO}sFWbH+z;4<`4j()^IeB7YV{fnPo!T%leEj4I z&mFclIAh0qCr*v_o){my-*YA>B+0W(-7?neitVmfOx0R%I*z(t^8`V2Jlhckk?^ww3oisd~(F-ZQtDY+8LbR3g z-yryzf6a4Xo&(PW4qV3{rsa+Z(ft1w;lK4vX!v{^<~cCW0dQa~e~^|r{?G6K^TTDH z1J5)Lw6On&^1ojY{uj?QCd{{Io&)n7$l(A_c|yYb|NQ*V;nVzA^Bj04a-fZeu77_1 zKNGulzD@HSDC9se|Cw!0JO6$5@L<(82OqqH+*U}T=7GV5?ZbqlhgVM=wZzrB;^?+u zsE+7E%jkKcW{HYz+PZE#f@GPptLu_zc;`PtS08uP?$x&EyN2ZWg5+zuWNU_GtGel0 zwyn8>A)12=(&QCeaum&UTwgR@$MRjxl`KaQTwT=!&oD*D@>Iz&7r!_nFW8>tn7Sug zo?=O+uL+`LTcV{1hTvoA-OUdE6b)Ms;a6xh9^3*FFJ#jGbbzRk$2HVo)9nDn@ z!!}$&6Lr@RT@ZB@$8j{!s=zu!(|mi7 zPm?!P)5l0qea}`bA56k15F}5~kV$te%kdPH=sIm&%zH=hTrf|PB#bYx&l5evax~Ax zppZOckV})7EZH+K%p^hg86BIIW%;`9nVM=Eitc(k_^;3>BJ#3lnqVgCiSeXMf}nsg zlBStH#(`*igEURvHxvgQFB_NursP{53UGbJQ4GuW6jL)bL9tcI>)<2unuSSv=M8w< zDdBC)TD)mj;%)7vc)NBXhj*S2%>3@*zreo|$*<61j^mGzN4cXsB|3cruaNtx#(jzZ zA|4K35NzV_Gtn1zj{f%{E{vVf(rz zsu*_2D9W~dQ8kN;32*3{VvD9K2%hMhvhV1Y=@^cOY3JICDxiPu3#(Z)&qX)dl8SZC zRI$G4E|}$6u4m%|℞RwGGc&RL$Z@o@}U+4PG0Tjup-Vmo?SW9n}|A0oh$yv}N;x zY8KSSbX^QsR+1w)s%K&C&<%_(QP5>gR}D#bZ1enTma=R1!fKYX>+phVma=Q>dDSds z*UOG-ma=PLdo@ehb*)m(lDk%6rrcJ2)YE)d#0F=2p6%+sBWXIuB&G^m=~NkArO1w; zxyz7Eu4c1bP4-0HH#D||VgRZp#-?POJ~mg=u^d%UF!j_fWRt4dRP1i9VmgYA;i3zo z;L8%mjOR-dTYx+RLldi%r!x89LNY|~^S|jf`fU0H*TFrTdpo~?|1w#Pe|V`Ev@aXD zI?G(KZ_9y$FmT)aUI zKJ9Ia&$?Ch)RqMu%T}zQ)1iTFY|b73URYyN8-EHDVdtpl9Gf_Kyxzz+WoW&w8I)S4pi@|0XcyhR7QSM!3*5Lj!W`}DPIT+KPgvvL=;Tn% z*X*az#O#*Yn;9*I9^ z6j2WI;)p+Tj#{HfypiFF0I<+KnB8>WVZy$!qfmA0i zj(R_KC#-pnc?71OP3ltji=taXc9&hLHEq>|E1Tu`3L7Y@QmZY z{QdtK*QN6fn&&`i4$SZWrCBxq;5-MOaU6*EfBN_7f8-~@ziY_hu)+JzZzEG{*ZRXF z-umIuiAif@qk6WN)0vCb;Z=>;YaM+I6PUJCtp+MDin&+1 zM%ys>21fm{22!_7UDslTA_ne=b<><-LWA#MIKgC^Ax_%MSW>qAQPIb3h#X#J% z-i-Kk2&=v*&wj=trv_@7LeV8MsObbSv2rAv-@mgtQN~RPF zjy!j0#5?7UKr&gcN$%d-zpd}!&I8@+#X4zGjU+Es2E;|{FKJ)aw>*mxtU=da)V^%f z(yS*KFNQ?+e#wK2Ltrj5I62{0@zMNUg0>mbdTST&T%CY+secmEU z|1-Y-^Z$?k@8`Da^QFymV4eeK%z@2KCV6kT09@PbD-?JU=Kp^D?x2)1|DP?~qs$H9 z)q(%d{Xpj(<0lzHFaLf1Z}@NXU*o?7&w$VHpXNWof0%y*|9<{m{M-1q@UMlpz)ScS z@H6~x^QU=_zmLD0zk|P>`zL-IFY_Dt9)2~ym|w~LlJA16@lyVLzMcCS_dmG5}7UHu38 z)(`C7Mra~+m>kDlj!7jLDq<*=*x=g1vA(#E3Tus7H$5d*93>6t#lb8@f5p@$yuWBl#ry9~ZNU4B zrK9HiGy3OGF&~%bOBPpCZ?FN{BKXOt?wbWQ9f$|Z_jxM zZ@+a4Z{x@C_S{Ll{mu!zO&-VF14r;S=HTu4eRw;158ep5f9eq4y(u=C>^t%Hs;S%Y ze%DsK9qz;1p_}n`2lKw!$Hw*53-GoY>>&qOZT1*=+oj=chl00TnS;)D0dJdGd-~Vm z?FQDSt=Hl07RG@auf^N8HF(=}72XU6pRKb7E6k-wyaI0mYh3T;cw5J?ey?XNxP}4# zu33z?{TJfxAZz(m?RZ*y8q74!=7 zJ#I1gDsCS=OeX2K*u!V?k}A}~($jcuNed7oAyhDT$z^7Rj+M&N=P+U50nDv1=hBT8 z`ar3i_3|5XDg8d#xxF1o-jpj**(iS~l-vV@AZrqHZv*{4IQ;yZD)e+v3;c0&SS4_9 zLuTce5OuUFMBO~hL~$Uu%Q_LXtMVYSufM)R_cv`@m_3k~?FpaXjps*8aj}WCh#36C zq{7DXOlFPi`>v6l$bS0f3S3Nzx;|5OOxW-r+JY_tje;&oh(s0H5(v~n88KmV>Gm5d z^uabji_uMJ`#?rUQpP*Ef^d{HMuR4iSG{`03xyN0Ct~{lnzptY^u~vZQ*UtgP zm_rD|Zf|1pb$sj;1b0Wh@v3{sy6*79$+6g^5B7rf$nr2zY%oO(TN>Gk)%uOV1LgGb zk>OF;o>}8Zyp&;KXl&?0xY$tb_^VoLv-nd)MmRba8>&r?TcZ;r7?ti&jjIbx*sO`b zaB*l73w6R%#(o@IhTzq96rYAhitM~J1Utt-n-urhy6)joum`ai+#%NF#PBjL3v6nU zxUM_yeMmD~5bOTd(vD?4%ji^pWRDn(%%RX6F-Uhs1Gg@wL;5*pju?&TNW@xeh*)Pl znX2lYm$fhJSwT-ng`<^W)QMeN)i}L8DnB-aj6Wa3GpykF@9wo}2cjMhvyt*|W;6al z{CYWA$3M>R;C{}%f?G}BPB*hJQZIF=o$d;EvU=@b?UvQJxz~Q*8?TC6VWnzY6Vb{w zg0il&JLtd^7Q-ehd`JXN)yGbr$SNZIHYs3p@Gw+wr-otf8ZMASD5eLi_olb}lr+8n(lHEu&W8Vz7E9ifM3X^3h<@N_V=;5j5O>3N{67PKZWyMd` zZX#e)s7&3AE{QhFt5Ej6N89P)=bojaLp@yBP)&4)mWq2=fkb^;$c0D{9sY|1BCt0)H1Q+I=CAv^Mk#_jc}U{Bz0AfeC(^K1sez9;bWBZf+lcfcp>nOUkYv87~Gr z7{hr0=0ox{86L<$PGDFSzUBe1zybV$ZMv6LAJqj@@H|8&Fm>BAT|oz?1U`^`;9$6l 
zW-YBgYMY7zOa*vKo0=zEn&Bv_;p(<)xj-Q>>`SYUT9&K=DZtS121F3}Rx73jcm&`{ zIDi?EmsB5BeL#u$nxks2;h2W)>89amnx+C8#FlmIlIo+rFBt;L#BTsW0Exj=06`$y z7O+YfjL5~+M}fy+$+GM@hKf2`w(t7DJaI)AF58+W+l#A@!i5{o_<$t=BWx2W8WsRX zeA~A*<|}S^7o{ErJOUOVAVbKmrF($90C0#X7=Rx!fmLE&SbY@m2EY;kAd2RCvaSOk z!xDgQA^Eau>Vju3sy^yT3J^(Tz*GQP#1`OTZpsS4RcuK!M4)6`P<>PZOoa^K6kSnu zA4nQN$B;!+R6Ny@Y~QlZuRdA?>9Vl;Xc0Wig6gA1P%GzEA1#7S>8L(h1cB0CeN-rb zGpSS`ErKR#t3C>hhUPFMm}(oVj}=nH<7V}74U;dJe*zF#Fluer*JQ&04^04C7$#-s ze_R0OU-KN8=fH0u2V(u-Z(!5sn>^2f-*^tN{l7w>bP%`&+zs64c_tdodClChtYg{A zmGt?~4g`0JR4++fQ6s=hK86slyJdIWz=jQxFS7)BaJ_9JtxHV3m^`Uba2NYlrfusQ zBzVlJH&Z;J-X4N&A)=xptZ5|wUD;eZmV6&) zV3Mlj!|udkw}G{FqOzX*x;mD!D4&Z?4SNrUGPlir`}?aY5>?=if&N3G5N?P)(A~Gc zdw>7V{w)W(uj}5nZ_lpo(J?3}yARz8fAmCD*nMNSFm+&g$I@%BrPIUJBDNgd$J~*_ zQldYa6cl}vRTio++F0P)s9L-F?p#|HS*EJUvPn_CQE5Zy>V9MQXq4*Py%pbYs2z~( z>zYsI{eN@Ty7{l>Iq*#9K)C-VF>=RTw0K8%`?57_=-t5?Ar4Lq{iK*q z-&}K2NPn|-(jHki>^`&(t~ZlC-pY<;*FyEuS)+Q{wdaoht-;EBck{g^T$O{z8Wk_G zGA*TJnL2Pq$8vQUo%SYyb^*tlU?&)YyA!ZIhSHzKOZl_ie-02YuE4 z{(V7w>ttVn4ygaohC~xLus=DkbZ_q8KCrv{;NGn)IQWTicpeRpdf1;1;20AD!rfR+ zUJo~_aGy$u-B?3=Q+NOFty2e9G;5aYG25pS&8mGcYSvb4nW!Q*>r}J$?A$sR&062A zS!voqv!?s6>{z~i8GTqy_sr1Az$Gk!FDcQ!j2uxzqrTX)t%~zk+vsL}nXc^I_i5ST zQI3yn{_|u8mL2pU_XYZrWcE)#(A2z(Yo9ywF6Ji)JF%%fv=e4zgbp%0vL%aQAixDy zcYDcVd|KAHk3|gYL>Q<>si}# z?sym4I5xMjYr){KEUa`VsjS?)d*iCVI5j*nVteC9I(Sks-t+psU;c3pU(HOuN@{7g zR{czywzg+lX=c00wg1}KZqg@0R(|apIjo#>E6#BKKM!V-FtPnI|7bG*lf=}Kw$8`| zw|08kupCGXYD-Vk-Xu$PzMg|3QcJ?4SQ75;-tqFgN-PPwmSr^y^-1}$8@IVC0y}SB$4{Yn-e_+F|Z6wayI)`h>w6GeKg~$hNTpcIdxNr>r z^*eLMuwl$8Pt&dJncmQz8mvnr3vRIYKCpKKOof;oW%)g&H_6fh`*K0SQvnRNkjgc$ z|Cg8d(s&&5YXa`=``^hNWj9&{v7(vLe{S(pN6za6Mb?DcWsHrC zxH?5`n7Xs06C@TS~;=wqAopi!k-#v3s7af zG6${99P+KUlGUeY`YepDW(}{NSk9aU#+Mme6d**WuO^YrZ(bfHSy>C~#)i8u|Phj=z;lg=+Zui%ON zj;?QgeaBC4I=>6GRw2x&tM>&(^=?|{IjU-XH3Mg;mab0ONBr4L(dIqZ^XuB{t+Nv$ zv*v1HXG>mle)eK|hV!2c5N;XX&g3<-V{sQf&en;W>(&YQHWg3FI>4ExqrH0GM$PAF z=rbnNm;KT5o_eq^bS;Du(gocSbra!?SnMDLfwTlfMUo^{H5FzcM?Oj_8#g%PKX@uW z>@7XcKC%Dkb^nrc*b~L<`-dS{`BCJy7j4Da$t$)htJbO{4+)Odp}Pbi{kIwZ{JK{t&5{Z{PRJKmPZ;{*|)# z^||fe3|>U@e>340k>vV1hNNv$A3=7|JG{hQI zbWwvPu4W?wlcs8t=bErMmPJjI6idN^&^Rp4-^Yf<{vU4+oq(JB>WbpI8WuGi=$Scj zG45Oy)|ZFs`f_KfzT5)O9l)5!gYD#r6Jz6%rFr2JG?xo{PCZ(<$d?UnMqWTF!t<10 zymmvpxG#QjaFgG^{5`poKU)wYYje4{XR{-i{|gc76aM_0xxeQgCbw~G=;!DWdLDU4 zYwN*mgG6$n!$6T$^=!k7lGq>N?1UGMYk!u)i$=9CvU#6vc+rS_k)8Z(!;41DDYC7f zkN?Y%duJ)D|y zU7`jTrOB6#zDgg7YG33=HF!aqyi+n#>*^OqwJ&nL8azKuzI0UAu84e*yB4$m?2Geh9(is@p5k@@g$2(JWIhYe$`Is+TN^x=#hds!O1^yo9$dl*LMhOpwbPXc z@ki3=B*A1k8$4%Z_yqHpb|*J5P$RU$^C!sWHSKhTifl1s;@@fg7^$G@D@;5UGb{ex z%ixe0gwW)MhmMbsn{UE%9=bLDKK>n4A>q-6z#n@|*A2dAB@Bt*nWMOaccoU3|W{)FE*FJcxkFmMd0KFZQ zpYd^T*gcV-eFQ%exc^qjSJ?c&08<}7p99v-QKpx#k=&u4rbHl@yqxkA2rFsRh*sAa&iRLIwMfU47aQ={8& z4Wn-bXw9Y^te&j)2KVzA!e^EjVSq`@-m9|pR*)TRy{)y*^B}Zsmd#{@QYa61&ayuB0P>Zc zd(g0BtvBr8s5{m~tWw#Fg?dx47qzw~gdiD}d)fJ)Bl`$<4c_MDHFNu=U4Yzz3~5W9 z3<+>&P3C(FX(15CN3Be7&(Ez$u%Yx8q=JazuGmlnxH6y#bolf;mLRI|?3Zk)0UXr= zP=KviO{9XWf#het{q_s4Jdr0a60_w+XGK_?bV1L|p(S1PBH(G$r4Q6bR~4L(kSg+`1L>uaPj`n=Jl&2 zWGegSxL+IC5Xt{12-iy(3gWqV&6t;U0RRw#Nv<1AYfB9#gr*2KiE}%Il2Th3IGShy z`@=V(AuudM7d=~MD}W~(o?+XvX}SPqFlDuI;0RA);Cw*o`1T!n14lep11D~L&&=+n zT|i<)-}Tk?9bha=ECluNjE%>Gy6s>twF32px1b6rD>Z0T1<*%ARlw$N<@RKziL^Mk zs(?Bk1pa@^$t}Pah5r8@a-2Muyqo+H`ETT(;1P5S{cZY3^qcfwxGruRcbt1Q_ZIFC zxxWO$?=}3L{7d=20*H78)^O}Xe@O+%xwZHC#T6L3rmyoCS7_aBeqDvuTy=AAxDmbh zVoTbts6a$fz46xGbfc%^n2eCuz#YK5jRymdF#;DXiUq8ouo#--Hd%DiMHN_WCO2wwUS|cCp~>IXJ2_dfvVzdnO}>dQ=YTHFxjg6IlEn%n zVVT#Cx>0xTXcC8Sa)UDvGI~vP`&!gU;+KcOP^kWg+z9RGB0=WylQ^8 
z>y0mQ=$2>a44ViLF%SbaK|^dOU&@Bm4f`je5DRgd5UC7p4Pbe*CrL4lcJgJc5Ib@h z<%DRw6}y0;(R1;Tk5PLoo>75~T6Ra&$^%jQauf9-{kJoV69YE2qd9 z#jtXJ0pM@^VC_N`Oqh1^^#X>ZGN(}xD-VVD|D6aCM_}S{X0Msum!Hw$a7);`@nQ4a zp6}}m|8NUNfVSW$xF5=Z!~?paDBHfuQ28ufv`yI+q20v=QI^Svaj1Xn-%tL|iP%o$ z%D1skp8SJfRAzZNG{iZz6RGWho|*fWbzx<}X@6JUX&<&?B~JShPwTKu8~4Psc;;_j zg%(c!Wwo7+&QKTL9-jP1JMubLk~1&=t$3IfGBsWcYda&l|NoGXA99bK+3bHh=bqWv z*@e?R`~~!6AS1E|oSD}Z4&>U+KYvl0tJ`W_nJo<0vZe(dsjU0OJAZJ=Rk0`UB}c;1 z`gb#Vqg9kLN2@SrOHpm>d#3N|>cTA%op&wkysCrvG~!(ptU}?TkR9Qo&Mt7OYtAiPJ$GSY@I~r!oQCHBLBr?kC*si8pW#8 z9NTbY4G7clcQH*>Q(Pcf%NhWuE$B&PSF&{7HjHW(0YK4;s2H*Y!z~S1+@dLfimdvE zE4!v8`HrG#p0hfYMb-hCtr&`7tE%sMs%)8(=|ebbiLku)JW2PUfpI%iSrpF|;d|o< zkh*J@%h1II!v-F>rvk#;_5rD*T7q?XDvM)#5Y5XT>Z<`J93a?ehb!x+RTpK?1|49VdlKA^M8mr-l?AE? z$penM<~RT?mo-mK^u7Jb3=1r%c{sVuDzn6mNExN0e~?byETTfX3_ieZ4;vZ@1AUiHBjS&~HwnB{<% zzp9$0tgp|_sVtHM13N)hB&Y&0Fwp0&ZHcl8w;>H=S1vcNRg1=zlA2mnEs z9mE8TM=shTdCs%>M1L2v?Sl|36DFwB10wGGc&RL!Ep zxY@=Sa6}*S1<%8JCMq_T7Z^SZl7&T9$Jj8ug{dqE2OzufK|}}vUlXy=YNlp+nrXQ% z1{uN(2(BS|h|Q2J<%VpCvH??IUG%ZQ=#~HivZ`p3#+D^XksKW{6pX7=SyU4-0I=?P zFl_KW%y~-`ui{d7|q{wyt4yhLFWGH7HLlYz7E` zV4=qpS!Y2B#8j4&G8r?SMb;GseI*NsrC=$F?J#Cwf5Pf4Nt&-)qUjs*(o_~tP%T7> zkUiDJzGoOBLP@x)tQZz%E9SW)Xo@X*-PJ6nWnp3Pb@-1t7?og;=VMVqK2>Bpg(9dX zhLd}JHA~q$-NmUahUbDMh`3>R*nfRh!HR?e5G+FwbQdcVOyn)qxF(eUy`PZxb4{>+ zlKuW^lbE@GMc|@_yGYeVExgL#R(N8~*Bj)YR_E%NT2O};j;LjQAP$krb$aC^|MX-m zb>#j}crj5vmUlvxGF8$>3qUjZ_mX^O^i1D(Wf$~D=q$Fw_ zG$}RIAR>_E1xAdUmPO5e>&sS|1{viKBz9m~9{`oB9tw}b+)B;AyL@GuBBk5F5q zAS_8Ztv@`|@g5J{=6XZ6|A+p6E^r8d@MFN-5Ggtxvs0hP*b*WH5F8cNdnlstm0asANbs$WDVsP+^H2Jbu6aM0e zJnpRUFvMj<6cC`xaYY+f7#vb@AeJDaa0i#C$uq0NGFSf&Ga`>0X>er+Qyo)58JdnO zEPxGk0Gi;?JJ^*bk2{nMmrLJ(MXUzdj-#4DJCST$^?@AdSdfn>07Ot1{$)fS*8wmS zy^U*vBj7fm8n`q;Kk6wsvitgAXPUfFQq*(do`^hHVrq~mxrQt|GTvl}kz`v$TMWo- zyur(B$d?ogRrW>X9SB1hH-`nb((^ai=luvv764|vNU-Jf?UzS{oP}s+kD(W zb&Lg5ux-N?U?A=axOEDeyZTh*pa<6DmL>yGLN#zTgR!XMDJp~(m@$rF`GZT-l@zS5`Zct;IGPS->W4J=Xe zPuodA^SQcAC3{<>=FhBF3)iZ$Q;fWURJ6a}yZ1RS8}W`#GS^A@itt{w^|^WXDmmMW zL!C3!kFt!3^nXhTcOF?14gZ<(RTwBd8z=|s21@Z2yT0K4AqGqoO1AO%lh#tjdC%79 zsI5y220l_rk#(bi&zk>a20j=6{-ytzHzcyJ+Tz?9_{7|er*I87dS*^`2XYm3dR49x z>UfKxe-hHMrl}^V^RH@6XkM8XR)Vr;*hbZF=c;fe*m*Fw+jCZeIaB@S_f2sBzl`iA z_&H4;N37qU(=~K2jK1DRpQOJ;EZ}|Ib67lJ?mze*{s8}6{+);locXE-9u@G?prf)) zLlJZf!FnYVB5%a#MO0hY&~HvXswj|pL!k%6EKSr^R}xH5Gh_v_UKxtA)#*pe2g)rU zrL~3Nvqe}Fsqni+$I{MfvqVKvU@?f(E4+fBQBrO2!4!N+HX)?7WuyA2gy6xlX|eM; zw0TgA2#_C}jtPOTtwV3IF7+tGBoZavhTcZ6Iy4Sm>to@txJUl&b>ZGRY6!}#Gh77Xn%ay5>x?qGGN1a z5WyRkoDLNX-J9p(8Pp7May96uMQDm##F*9;TwGUHsTQFnrJ@R}IL1Sxq?@=zAeyuc z6)aRc3iNf*0sxRoNKws-|BJh%D|rT=dbE7JYE6c+la;L?u(XFOFVrVMq{OY%#NAXB zH!s9oUW%z-e)gA}|F$cNQ0Ge`v|tAGDU#)RFpia=K=XB5^>3**2YNaa9V0^%?wYt% zIf{jG4NW~1^^Rt{J=I5bfejxWJS~{wD5yDFAx z1IWn5IEFz&aD|3G&Cw8s+?GUByQ%sp>;)i(09OnPak$a1vZUa03D8l*As3+7Pd$n` zYS@UDjw>ssr=>DQp{d~-Du{@!ZtAH=J!q7Dof#IuVuA&V))h_mT}6i!2insctB*qK z=0oD(D?aQrAZl@3v`e>K#1sc6B5Wp7j}}NRu1Gy27Gd5wt5Y2_*{r^XV{)k)}Z|gJnT-`;V!_NPE zlJh@QY=le~n|&w8jm^U>)B*q3{Y7zMbhBfW*4$Eli`eSfCy(qM8yR*^ z|4Qe4Lp@rU=wvh#bN*d*1PiNyqpd1e$LkUX#Z#T{|m`Uf}eMiAJVJo z5Pg*XHMa`3{~zG~o!9wO{73jd=l@J$*aF#hJ1mMu0xbeZ<{X4TD1kuCE8!B|8~Q2+ zt4QV4rV5=Xl_@k0N#{LTSffdV`?W@5(Ri3Q#jQok41 z$mA>IXOg@9bUz&OD=l!W?(iJ8-~r(0O?AwkO2oty10b_#_GC6&4Dr+gpx@YSpmfoh zQHm-xYBEHSY&3wRcj}kW%JBk#iX>-K9Ykg1<_bNXGn?|Wx07#_=4J|6qG|UYWB30B z+z{cv!v6t34ZFWC?kVn1xEJH2`PV!LsvJ1lLHA@V1Zf>EHiGnGcm0C4mGag7cOn-0 z?!KM-2Zoa5(ANHK1H1dTih|J1{>$B>bY9!a{*}oSQABv_(bPp+%9U44B|-RU_isw9lQaCO*Gott{%8L^>jmBQr`_he4S*znr~Zn?X^L)`hlRF{X;>}`qU%c z;hTU)GcnHZOd*9sWsFf$eu16&$Ic@k*f%~id$Um>~w18*jg3}3a7=^ 
z0{|uqS#(-VZOXqYt#Ff?v~V#fYj@b_7CO2!JQ=>+$U7P4#DkhUx6qtk>6tm+gUbPB z-Mv2%0fl$K;&_kE35UDw)1tqlscq~DYttb|AnlfEx-3fOZs0yKTfvUt5 zRjc79xZuZOf4}C6+4c8XxCzz_{g4%b{{MXPTLeGf2J+YM(7yuGzsG%mCpZhdBE1d( zeR})hriYf)`k!9=M<<7!iJFL*@%U&X9{{rZ_Pg<)fyC!#%Kfr8b-szt&cP{>$oB-a zn_XB+*EWN88+jLwpxYU2VQ$@Wzl?m}Nf$82LF$a^89D{Dk#{p%ht8~8PI!4D>vsmU z&Zu4ytv2#rMyxsfTj|b-hYq=N?#JPtGvgVKJmdo4gKyt+X8i~{ZRCm34tg--rj%k< zG=9pAvEKL&(!~9rED!hpj}h+8ojw+#1CHBex;~;0$*){S3r&?c@(y zSpx=FrQH;4U4Z+A2J;s|)nIVMEcxIpp~1{Wfg3XL4iFgE*8GmH*hR#JJ(6y~=>aa* zGMvca%>gScSYW~*QWy2XwQ2I6XTY!x4ioUT5I_ge7I2KWeNoYUNm5{lBICCzT=b2| z$)d<^XzR|Mn*$@+cU-M9gRpG09+S1DALhD#6-GhN+Mxn}gS+$-^bPlyCMW zLc|;&SQWD1fgU`=0kQ!H64eH10Ah~fEUKu3YtrO#P_-ohs)(+tS(*#mO3_zsIL089 ztqs3^--7cBkV97fZA>2ac&vMLxQ8pEZyEr)hgBU+nhYCugYLbnK_2GdA>K<_@D}nd zHKGsH{9OCad#_H@FTJ4s?1vG3*Mn7~3=9SQQUDYHI1EGZ5Q`aBRxrKO2Cqu9rz|jt z`FKQLDlnrQ>`s$6N+JSWFcFb2G7}qInI>QMIWxezivY z;gK-z^6}y0-ulVYC%o=S@1e=w6C()s(-SUmWoPiyrC(z(^`JG#P7ep7#LC+8qQ&b>omEgypUPq`j~pZV832eLUZb6^b%>xQME8rCgb_KPF` zBul~5LH`rPzzUb578ZfBmj`CNZCvsdbZ9UtVm{dKDbmY;luTJzXK`QvG> zmWrlfjmSsJ#|ZHxI0?e@7J(!cVCUE-&~FR_$i*7m7)1w;zM|}FnnOMUz<|%Sf9DmC z#I6GaSF=yJE8q0myg?+InK0spGz)1#(?Qgf3q3O%#aRbes15vdZu+vW%odh{vXa}z z;0i*p5Zw89xoiD739eAv`au7G9{D;UU#A9r6sX@vf%vTg?fX^bX1>=>53>+nL)@i4Q3hfGdqZKi~;EZLH^twSA1^V$b zDx;0Oj}_89wsu+-Je!6jTRaiaPblPUv?M8{jl7?g^6a{PYBUg;O;b|p+YNqD%Gqd3 zwUjpUhv*j!`U3lpM2MX~!r}=~H?+z1=Vtye`~St{E2I$nKZpR3tO-@5;j$asrc@RL zP6n*BZ0MQ_Cwj;CEm&A7fL8+|H6XKXABrnkF*jDTl#Rl%q?!dd>wpOGEkJ9_whkwD zxI!yB5Y~a*2FGw0fCCPL?LIG+rKH+&RVqtKoWsk~S;{Hs)}*qyE}#TUr0QUm~ zou~lV9JqZlyy2mA1o!~}L!_RXEG|&HMPSJRz#Ac~RR9qJ_yOK)h*w~n0=(=+!8R+^ zEC4-(GS$=pS-^C507Hi+*7GIR1bR5&u<<;Q|157^DhqUNE`lNe?+;%a2r8fe>yE+v z0g(|P>aOnyf-NYiesLtkKL%(AavIRs33y6@Ces3rpf9Qda=M6;ESndkN&%*}VK4+e zs8}&Z04c7xiezbk`~XNJw3n!}<%(^oEV=`9a0Q2V!0`d?M2AM$mIVPZyfvVQ7=RuG z0=y@zPh}|?^5c?f7C?_9_=Ts62($nIb}$d>XrRnPm5L65kum~P03X8YO=STV2?jn; z!2x<9KsRdwHC_kk1L#;XgKJQsHka%RQ&~!)QLdo%OQ{Zvik&DH*Y$5io4=oeVJS5~J-ad8hrHph0s77?#0@o|)Se z7UmIGxYlEU$F^u&tHE;5&7r?p!7ZHp?UD;{qnKyWmxH7K;f`E8;B5Kd9Ey3G@F2MV zFC;qx`ycM_$qw$}1poJ)EiMCt>(Z7AWjB+>-;3=m3XrYtkw6}X>7kYu;J2m1TXfRZ z$6d91brEjPpqQ?_Q8M7f`9Fxr`!)cJ0N^75%t-S^oG?_FJ|Sp@rm+Rx7!=avvG(+>31XH2s*P=$h%^JB9M`p-!3}Bh2m?|o z=*^n`h&({Ta>0ED*Qd!lC3o%{yCUnWB1E#m>(k`R#@}drR#f{Uys^RS(&Q11s#K(= z#YW6~1PlTKFd|uD3?XC)j-ZMF1Yy~M%@<6V27A)v%dSITqhCfW9^a0j_) zLGF>@T!f9t>yDloN$rCBIQm(v6YXA8*w5Qwm@}}agn__Z9rDrzw$O`Zr#E>csp$9o z@XtDq#bVsWM%atz=SBk-vcMqnC2G(wERbrT-Tn zJ&*eWe<8epX80%gj}y5s-jszjke0eNm4y?D{^@L))JEQviHndfxK@BmM3sRl6g99mQyY0} zf6yOAoXqbJ0!TQrFznUL;PvUMH|1FydCL}52EAIK{>fW>h_*GLQN)Kl8g1lpGYb$P z-HL=p>JFon>h&f$+sN-{qC7M!v_6dP6+j}07oIv%U>BSt`!WrrxaJkCs>F!KS)Y&`xeg=C;`q*&&DG?s}e2j z^<{6cjVHFn-wXHp%bv{ZWhrN`pELOH0)7Pgzt-@7Yp4IL|Egk?76n+jJoP9-;OZ(6 zVO{$?P zBMK-k;sQ|96-`M!T5w&wF8yeJ$fGOMk0Np@!YfO_sz&@v8Fyk$048*V$W6Kr@8r|Az>9h`V9dd-*If&h(jGm{mAvZmK(I3WW#F`l)tqPL>U& zws4dzD>5gqM@*?N{_9tlyePgftmzMrl6^1FJ4%X~enQyC=jy^x&w!qpzKyf)!}>+# z-1K2XsV($jX{|~F_%qDw7%1%F0e_Z#I4gh8rWTy_42b-HmNESA`PV!LVh+ss7cwW$ zJ+lFQV_2|}a3OR#O&xFYPl?DmSMvS5O0+QPeF4Tzz-y3v1-CO#6k(zR1P0v#LV~A4 zO^WbK0t~tv_5N-52a|q3*_}7(&k5dJ4lknlzmRb3A~65@4d%ei#G*j zCERy^gNOcn)1R>%m|n543%HiOxQerTW)(kd>RB20%*LC@nVm_RBZ9pN_5qK4;-zD^ z#TUJtmBKiCaYBRn&(RS=N4Sr2Z%Ky1^sSpZ>3X_|KDYLEJ~a-m*(Zlb+zltj$FRmv z4to=%JcAmtwy++TwVG=@i*f%JtjBj%{yC4!Id|w5+%W4~2wVkvbI8hhrcU&(nm9Gu zd&(Q{ZON{+fvMBmZm1uh0T{s)oLK3|*b(La*^SSJtSyYsvPPo~5CP}^ARL|x@`6Jd znK+ECAOzI6FPQ&4xs8z9=m)qrB=dh3iD~b~E_yqbjH#=GHDfV>xo-Ok+CF_5TWMo) z#Cq3W?~IL(A~@jK`1<44=&NYgkZ_+*cw-2lz*uAXUQ1uR3XJuQPW1#Mr1eI=SJY<0B-_ 
zU4BoBbJP(^k02~s8*?DT5cXJvBajVEFTA+n%O-hxgx6QFvkD#F?72rnzyB0|;rf~}e+T^HLy7wEtTZ%C6bYk_m4 z9g{E6#t&{vlP~K&HE?xA9;lhQir&GEHRMY=KnjmVChqX`%Qa>gG}Gh_TW}N+d{nNN!h>WckXsMYS)A={sno$xE68r#FNX*MSnsU_vzoVY5wH z(*@f?cxH64rR%n_=079yMd8f{^)&gizEg|ujL1W=og3$PP)n2dRfLd+_bGxki@t0k z7P}bx7za8v-ADYgorqtM^RbEzIo^n^p~E4kBkV$ed)aVT?#N`wgxgh{*zO#0ufaMeYpu zw3fh=p?pAye##l?=KT&REqv%???4!>OJz*=MX)4g*PjqVxaf33q~9PCid=NzLi9 z$^XJE``Oa5hLEM{Oa=C$`Nrk#xvC-?zCi0;Z^WDQTpzw8)33}O>7*nG zX?gMb6-(BmK253-M&HY?K&7z)K{oMzgrQ?IwRdv-1jHXdm9O)tw{1q7d0ic zN2aWK!A!|HfO@{|00qEMG+k9aQMVmWfm1m!0^ri8NHD7dc7Uxbh7LRcU}9stgKwEC zs&XuT(p6JbU_l83N$gtqTGaM$xtwmIz2G1doCU`IQSWAPw5_4VVH4q$-Ko+$h_;%YLe&djy)W@l;c0eI`)bG=zi`}R2Jwt^^;`71zse@tgX3ToG{{q9w)Rop~eX%PRMaWiW6d-5aL8{oY)X2*2jr;apL+oab29~ zi4$w%#I4;r(H$pN#)&KA#ELj^MVwe3C%WQ9XPme^PArQP zm&J*tapKZAu_R7h5+^Q>6N}@-MRDT7II$>BTo5PDj}r@7f{?y&Rv_iVA#Sj#xnDT4 z`>;`sroa~9ZiHOU0yZ%?+D87Esr}EVUnZEFN4eJn`-}1l$6>a=j((Z{BEFn|{W@`= ziU8v~a4B$gMHe*(z%(`xdNfs&JlE6=Ll%L7qgaX~>bIn_2rl3WHH80A6xTF8%f)RT z!3H3$a3o7GbrWI(TUBnXW+@wwVr42z$>1$(Q&~zLZ!S$|X&GaxY;4CXtEH4R4{A?k zDJg%@Qd!^`0@HoCyCATODfNw0I^3L*@IZq5yip2Gr@(7#@Rl_BvJ2Pw`y%o{_!51?MF=QBssaX0vOEuRPsva{U$<3%@a8mm z1!0gNj7Hof+XK#=1mqLla)8n08@}zsJqwXNeaF_iKNOKiq+UUTGn8!_a3r%l#Qg-u zmkN=ikH}MoK6q1_yeh*h34C_}w5I8bWjGc<(G0kJLE5c2ss-m8Te0-jyCU+yK!czV z&@u4+5cIjUzXj%GqwE{TXQM8zD+n_#i>_4o4teL3Au2 z;Xy3{P&~H(pGQs+{Ct=0q#wr#{muNP{0sRv^6aFL{bInI&&CJ+@@zVON*}1ku>$G!?(1jeP7Fs~+D<^;#+F5U5tKPAzcH&3UNq)>JXGq+jQa7K zNygeG=cb)unQi2gtH$!?B+GVa}^Zz1Zw~_xrejhjztGPeJE&S_T8@HL8 z;{TX$<8S7logf3j?dxPATLO#2Ai}_YZ*pMN-&}zqdD%G;_e?5KL>u`?<~=&ST0}lg zF@>t%xj;)foYm!zq2d9#|qzV#x*y0;Lgl8|n}6T=;YqhR;plX33Sk z?D4W3M=a)2-h9d2sfA!B*Oy{$Ms5H&-%_FV_{}*Jck}T5FdPZc1x|z`_-Y;$*hbVK zd#nkFmDxRj^96Rt1BLyYv>-J!$r?b#JWyD6v#f38!+mUG6b-9t-q2+)UHV!A63;1L zd;{~^$cNbZpC>yB*-4+zeV#-y02{ac>6>otgbWP=_P#{GzGVH1WoZHXw7#vA4zg!9 zrJq@vekQevhZ2f<@!8oHyVsZ3nxMU`BOl!EAWlbEN?Fha2;31OK9;nbsv=fJ?XqT= zqEv>v4V>H31Zb&o5(HeHE1gA?B1J=PK~YiGZUa;i%vEF-45k7o0Gz>N{z3nT5DLJ_ zgISHjAo2u3Hm0?m&4ymS{-@p@8Be(lc>>V2!c20+T;kEIltQ>)IPxaOp1F3Ov z)2YmLd+L=rVWYZ_vra3fz zURSZB88U+$PR;;|h)Ct^J4FY)J^=jT?GqY(lin8K;ykrCS)KY{~Q z>f{Lv4^bA`{(mR=8X^Bco+AGb>(BZ0QW&pY1CJ?#_R-rglkb6CU=${8ze8V0UrAq2 zAE)o3AEBS7e@>sI|Be1V{R#ayAO>}Ci@8qjD(-qt;cn!%aeKHsIg7iWo8W#6NTM(0 zUdz3idk6Oc?vvbSxG!?wU$AQXffDA5BspOHvYEs=KpQvZwY?>k^GeWlD5;uvP$p!bRL=+)8dO zCvqFPE!dhVU_fkskxt2zo_ZSgP%@Pf0Cr0NK${A zr2Z^PeI`kLHc5RhNqs&^{dto5LX!H6B=wg`>aUX27o!1FiS2qS+u~1djT1ZL#O^q; zH%{CcC+><9_r!^NbkQXMY-%V04N>VRQQZGqTFHKS}OHwaSQm;r-uS`QeTUz+Qj+?5g#MOc zwj9WEHl%HOU>(kpZ)JiHha$ExLlX-*ARe>xKSy1{eFAU$@b8RY^XFLvvqBIkR})>q zg71ehy>DuNCp0HGnjS8HG!1$!0~PLUA5`PZaYpg2om|V}=pyS!&AMV+`Tefd4rEesX(@3&LKY9+sD5#M-Vpp-7^jY$p%z0>LHw z+wX3tyP9ih!bL7y{8RgVZ-W-3^58+x_L@qwYV$TqU24DYO=)K;r|tpOi8-O#p!%tt z90c)`b51;zlU63@y6P#LjLO8lsCx70nYG+3=61-JNB|Pmef+GFZ$IsK&|Q#5HWTH} zuKIODKadAj9yr2^={S4EMBF;T&j06=4-xVq%F|u+we%l2og3%g%eV7G%m*NQ_HSyZ z>h=tQ<;3{@_TBAtOY_JZA)jC;kk!M?gdh;pjrwB&KibKE1vKlH2+8&AQLZzoC{@v& z`woVEMqZSv=)OTeePnY^EuXYWBNxO0S)~VPnww%_f0sH}+~u6GYC> zA2&`N<5F4M$Quu})9qtTCq?SH-16fMC)|t21O5=>&VX)#TBv2Zi0A^oZ!Zj3lTSD$ zuO=#XSI|47il*ox)=hZ+e;%RFgY^GW{KISd;DJtn4yE>tK1;hK(_k$YaiU#$6(_lJho6SuNMYOsW#Q$Zn6+ku6@|!vU2A}{) ziT_0d!H}ab6+=}d37(D9@;$A~|7+tSoo(*bme$(&-vSCLTCI6V0AImJs}UhWMEGT0 z5Ofxo0$!dLR8Kkbe>i|t&;PO_BO(Rc{}+%?6Y^g-lLdz;JO4QhFT zKwb`_^<dtI=t8%98j9jg*PDE|J%q)!aYWRLOy~-?Q3eu)S=6v`(u&b))24Oy+1=r znGa^c#kuU13xE6>n-wWojNq(WFYol3yT3Gb2b2A*s8gDU_Fk<}OR7`eL@tATezRL! 
z?8~BS1h#N6HIU)0qnQ%KVp42sz+&AewqA;)KqovUPaV7*mdLw;&YY<2%*Kfo-ZWyl zN^dfLqPZ0uotvOE)MiWfa$ymDfwyX!e$d&6r*Iyfpu5uG4zNAVw<}HR-AJ^s(&fiO zW0O*gZeo(%oOi7a2_QEaqQ}DbATYDyyQX58hTy=j$UrzN#1s;6M6yILNB*ZNGEPki z$b*i6+kyW70=fstH1uKm8Tw0T@5i{`=l+^s%=g3d9|wcn7uQYRfO#0Ae;h6vQEA@P z+CntxlGMXx22m}&VBGu)PSptc<1{^vgtLMPHu@X~0rCzE9}itHD&*Cf?g7zUfgYcqqNHnb`fgl;(s;FyRdFPnYlcJjJR--m{h zLos758hVAil%4<2qavXqeJTA{Aa%SKTKPl#yYTT0{=WqOAC7*%48LC*rC*NUznL$H z#9vvApr`@SGdjT66Gfc1Ik@T7?mnb5bw!=+O|ObzZCkt$1A4;I0yITr9wfEECx4S*=fxMDF74oUS~MYcT0Qzc7=r;?V# zQFz=0u3!YFk+&&CIct6@<;qS^U)k~3d4f!{|mX@gxk$c z!9MT%IQMVmAL3ule~|wU_a;~Z5Pk!1^RI!`?FUI+gurV6`&YCWLuoUAT%4Hn#^W>Q zulI4W;IJxHXmjmU^*Fi>^DcF^e{ri>pbE~6=~*o$=5~d=sD-DYcm+~if!?(jv}={C zE$4v(^@<9jlF+8Joh)3zzHENVX(NxWVbiX8)+E#;IltuO4E2BCB=noyySR4|FTEDc zS$cYp+X>~uj*F&tq<58k0ge;i17n44;?JMCmqZ!nYBR|yzi|rzpI&*6&qeXcCE5_u z2bkb!vd`|MJJ`{rr~J_*YH${R=a{cmHkLk|>~lKl7#l^q(xWI}OIp{9Ral0sX2y|p z=eW7?Fs@dh%78LQ5d+dcoINZ!_xdoXL;ynBbpd0pNt$O$js(Ya-9%LRoTG>)OAz=a z@;^9FBR+ko|N9srAA`;RbLro4w_qLkC4VRXZvOG~{C~PxRRPy$*VIK#m%(rsh~>YH zSIpY)q&kO%zn+%u{Ypbhj zmWJpLp6IK#BO8F4v$0zHmZi8d;-N6)issrntlP|KIJzZD0HHI5>D?Z4490#};~3o1 zeix${{fc(GxcJugIzh61xc3^e%5fX7Ll)7eF#k zF%-_k1}v{GrH7(x_l{Ps-O(xH%xb~WMOj+iktXJGe&yn0P5ntR zJpX@`(2sI2VgFj1^XKy67eH}+=FCcN5wqhK8vH`o?wxr(s>i_DRb*=a{hct^#sUBO zC2?5GLfK<{B+6P0=IY2Bmy^u;x)Xk;o6q!#V-PJh_UHs}KS7-9Y7km{thFt5sjq+omq>q{DQ}MN`|$cS@iHZaBm= z8SKe%Xs$+I^O6&osxrJTi?W)7QYGnnV7{trrm5?ITEd1jeZy2|Cj?~R&YXdhb*hGXw(vZf}i$TOv=P$#Ks*Ue&&z zD^6w<-x5#D;uh!d6xaP^OyYC|SUOP;_y4_w?&aRg{jxgiXOo(~dxQny3GK96UcH;% zFtfI*#I$OzR*huMZd#41NCXQO->a%36HofOK69Hm`K?E3$u);7;N!K=rU(We~ zpuPs>#}#-ABzD#DP0LfT?c0bUh&Y0dA}FrtX|`kPz9vZ;P{t)dPXZuGP&5@1K0yb3zYYAOTUjh4{JGf8tJwV)8M0WAR{D%rKz)78%!yMSgq>fB! zE4McCu3xR$%25FLm4{kw99U^3ZZaSnT#*6DYN4R!e$LHElq@LamrT~!K?%)df^$;> zplunq%46G>ZA99pFpvq7wPzDZKE*U(P(#ppcnWgPrYgHc@0RRKvuRT(ZzeqfW<6~x zm^_mnO0zF4s*QY&^k{*%nx%yzAhn zF_6LP@U{?IFEp?ic}HepA%<)x?tr*o&+_2?sOED8jCn;VhX#6OSu|3MrHls$?29Cw)h4bZ+;gdegm zp5Cu%FlSHm*^zF4Q)N!w1GS(7eC^z9Imb?(fYtMRqouUn$&BWgn6;t%1oVt_vW>*k1ebaHIe8x;}$4Qkay{ttXLRb}=+ zi-=6{^APz2`FCJ`z5wxmR&k@;r+~~m$iEj+fIf9QFu0b-%iUJ|QT}#Pdp1t)9euM2 zep0jMZ=OwDp%euK2V*TM9~&%gNe$iBUV2CRz}6bZYS3SSZ?t+Djm_Su4eHV`Xs=0T8KQCQMF_`l9h7{ zU57IeBUnCyMotAI{#{iw4`<82>Y9(h6ruj_03iqHr@6P+YW|v@1xkGQ8+cqO+%W+M zDtn^t8wjtfA>^Z@npln{+l1e?8bo~*6olwySoc_>n@4X!IHA41QTx-drvrv8wqyIUZ~20Uqmtq1u>RFqjAI|sSY^N=F>FuAg?Cbx z^dIWfH2#kyh>{-c{|m_P5&S#_T&gFyZti#B1=P>Km4A!}EMUVc#G3&;;htjDnch;D z?s_{)Xc2JZF513Ik*>QD|Nr)`E=Z253g7OY*(8v#gaDZpA|O8@K-ih-pP3GjkSrvG zBm_tivIZ#nZ{il&4V6EFV5RP86%UI1JRk)YibjKFw0tnd6ZnIGDCmP_`An4-K4@8@ zg_b_xcl%EF_WyLx^loC2NhZl=ckVrX?z!ilbM86csgR$~CE;!}P<{q4{TyddDNhk) z(Cj=F@&*#KwyGXH8zbdf?4BM82Vu!_Tz6oG za-JX1h+MWD!!etRj(i^E zLf7>UOA%;y078Sd+h~uNn#$V*yiuTRrJ)1MsNUUpnBhM`pe8(lA_bx|t|mHFQGwaX zWAc*4O4fC~>C_!$L|dIEs*Y{BX(;IWV`>UoNa8qGaruv?;pqkdOva!AsR0&>@BiII z*e(8OjyC@%ZrJX&BP?;hd-o$_=ne}R-9CVf#v;%2ui#=N6p~kTp44PhX`^;tRXet$ z%7)(5I|%$4=%l7OPP65hD)6jQM-{}&b#VWDUP_1_^c zonG5j9KjZv1Wg5tB&KG%laIQt@hH6~holty>E6|UW>7CRxzzjAoOzgsTnB;2jIiPy zn>SD1a}!lIZA;eVR!eQ7eH?&yZ39ArP7$V|X*!zCA#?%2x%AF_daH?(oOkBKnS_kf zrb0K%tC6@0+y8=ioZ$DF*BK6+t{g~kAdA@hLBE~r#WdC8=TeoWV?1r|1zWkh z*i+7$l3zJH2JHN{X17posu(Z>Y~9wKjtm%ZqiLZHXs3gOel_FHwTXP-;Q0^KShxO1 zKq?o0pLw0(z-hsO?$pD`0|2nVD0PCS8%K9eJ2#^nE%Daylx_P&YJB4(k8!4HhSk(y zQdve@wN<-iG@Aylm2BIYPI2O`S<^A^iEc}{eLkLE(Z3;2}zb@ z^1^8$1caVXCBO|X8_fC#;?|6@cYFfR@^y@I(IBJHb@c2p;IzTilM9=A+0mu)jXZwu z5H;^$dM|cM4h4Esu}271|D|r5lAj|qY0Oqd{oj(I>i_sA?BDO3-CLD){xcgqlnpjJ zeWC5&sjDd8ad6Hf4ajjC4MR3t4QK&0NJmL9(y!di6p{#dld_oSBeqI4&@MfwhUZR1x&8}RECevLN8F1e5ji5rS!(6qu=P+o3q 
zg}e*Y9i;V^a_?d>Vg9`bXUPVQ1jzOc13&;g)*e7#L68$97 zF!HUtdv>OFJ|Ii;t$X$;NTC3zX|4>4UVB$u{QEcPWZ6yZ!+DtC=Oy?bM*0I&fI;Ky9MN~4Th_mv zAvG?}KWYZr0x`F!`yjo8bt4=IlG-f@ODL)6Lql1%Uw>x!CZNEc^;F?eKSsCJfX}); z-$Dm9GZU>a9&V+tH4J4$|2jr45q>B5Tfg@YdOsa?T`qy%Rm^|vQ5ZD`=gX5!nLg%I((QC9 z%Oru1KQYh)`r3S+pUj^({z%*CxnpQUghyIG&?9+1$UHxhH*GxkcNaZ}9Ra}onGD7Q zvum*er(Tk`ko9n7>Hj_?_{u+z?l*1Q2GVasQ5}H(0-;I;{zu)CkyRlZimBHvQ*G2u z+yJl`A5t$+D1_@*9sl9Agkv|eA^*SLbn$;AnEuHAIV$XoZvQ#|n(m;$at64VKsObX z)mS!Z){2&Sqw3pG>07Y7lb+9@kRY-57L?djb2sbpN)^~t3JS?9AW;EIr(1TXq1%?x zL7=*cB$c|2B=ow|)*F_A{uvfhdRTHF3La!6r#F>4l72jSRRW2Bh*u%&MQ?St((@Pu z5I8#v0o1n?HLwx=)pMLbjsf-OMQJOr0Pt(h_kVbTvb)p+ zMq=WgMCXhe>u9QlE$uSVYE z@xO-%d59b*@8cHm0pVrg&thHNFP;?tDqST_N>55}k^4z%laqugxI;$XZCBw{`m1+C z6{&o=A|+f|`o}BE6mkctdUn6R+O^z4s>mlRg$lvTs6uWBOrLrVqCdm z=U#{?$=D@JHo3zT;C|9D2z(di2_I(GLnY@F4Y#g6BmegNJC(IH45?o1LD)$c~ib&6n{NnYNJGr8#z|4O>>#T1{OwO|4~HK)*0-$Had)mfDhS zv+W@3BHfIw`&m~E3>bKfa(4s0(2Y6->sN|cvA%9hjbwt~rFsPsom>dJR+VkV1W2_F zZ&hn4M%yw_t2|Ttg+5jXr~Lu$pM_ot~TBYAX;@@DPo^ z6^zqF@c@tiHwkG9Q*P%UmTRB+>dN#YH!Eh8%E9fQUAfoOTHKzFo4CHwU8c;=@>$NG zW~s3{t9A#a6lU81Ub+K>8wjT(x1F|W%DQfwHh@#r3|VcUe+RaF3;P~@nPf##R88xy zr5C$-!|+iJL17`GjO+reE-kVPuI{&Pt7}^ zkMbEG88BOs!Rh7nk{SXPyve8451AooMfWVxplg>nqfbw8>a@L;FenQ}L0W ze-jWew2YJCBkS?T3 z4S0qF)e!Zlh!w&ON=uc>HNbTYLrb`XY(PTx(q>JV?8#=1t>jCZ&*tojgNuNTMn`C) zR&w8zYyvzu(I6qNG*oH5R&FPm0ST$laV84Gx03Pt{|rV4KmShc1p@C9CRDvr`yYDn1|l!(~1b8!l=cP6s-PG3Pty`e2m3yc3R6z(R%-NG^9BXN`X zy!dzNcIi0&`&qodg!dD8KZ*BKz4tTto4vn`_gAhYg^q-6#?;JgFgPIqn00=^rMhB3 zm&!l>_fH+}!73M=Jfy97xFTUtXd7TOPB@A89Da}^uE6|nd5X{9W>xY`$s>-C=Z0*h z8@Vc}lBdfZaD;pfEe|FR49&%;%C3^9=4Rj#@&al>CX#W+{Kd_&?nwt;m3*}vs-ssa zRq`Z1|5p*o5#JHlBKQA1@(NjnogIBWPB#p3c11$)#r4!4y1s^#c5Xct@>pRHWKS2g z7MQcToZbxWJT{b<-Y=IwE(rm}=R4U!%3M2U30>zN9K!?zJ^jj%kk#pzb8PTo+p`5JolVQ>jYEr6I&H(OAou@I66ro< z0G<>_1=g7ObbnEut^_|9^wfbLk#ycN*(yoJ!jqT3!J^e!3`M~Q?BRNvnDht zqr01UjIJ8cp$=P5!;=~w&n0dJy}E{cOs;3|FXE-L!LVia>^G)!2xJ~kMn`lByqaFc z;%1ycrwD+GOA1KrS!^Kd$kZVJz8&>wO5BS+I|3}F4 zL^>e;QM^}pTd3oU)8K{b{~NHyhphe&7K{4ooMW$Kvol?lFQaQf*V3Vmyku3LLmJb` zT1<72C6z%|2#9-7HZ7uU7I7KIFz9iK+wem%=WN;nbAℜn9cTL}(G!O5zQtW*FY^ z{J#$f`9N49>=vFDUK9Q)UI83{=g!bV{UVuQ!XW2{`5zfd`u z<)ry`71umLgL8!Z{_3i*CBwtB_~xyUAEp*L8tkYY?^W5kb0pRJk>9SYs#S!rld|9V z5bkFq8oWZjn_Al$_*BYCUm?epDvEH@=j@4h&j8_TNWO7hRoId7p^br1F!`od74~HO zaAGXFLS7!P;_feZpW zLqgxH)Koyev;qIErw)4y^!=4pVRHsExG+}8iJ^ry_x2cS6D|>#q5fATu>0Q-4~QQV zRk(!wi~J_(m-rvx0h}Y%BzhLTz9!t;Gs3p?PsNzMKlXNY+Fo;qSfApCt@ z4X{ewgx_35dV0HS=*Q;cFxeTUt6>@}5$?f(mZQi$Af(>8*R+$sz$+AQ#+BrbyrHS@(<& zcs~j8e^>XHm4WEVMPN!l^Eg+vKPo;gl*Jjg+G!#uQP)ts3G_FV)yam8o-Yo1?X~oJ zGXwESR*|UaBfjA)AU>jk?oL)g2kxO6Lox34EV!V}GH+~5n7bH1b;EF!dIt&Ewgvn= z1#JjA=$_M2P!-qKWKBU@v8^@jhRRU#+bU8H+wFQ?L!UgaBC&yQ1o^4ztrlHFx4Yg2 z$H{oC-}%E048HfrJt0X@9;u0*akUZGhoMPa$xoPpX&bixMX5uillbGz>kJ3Za9|EN zu$4~MguCEBCm9)DmCtJ1Ty5=}nQ^Rx`}VJ%HfE+e&Vhs1Pj`f + + + + + + diff --git a/bundles/org.simantics.acorn/OSGI-INF/org.simantics.acorn.AcornDriver.xml b/bundles/org.simantics.acorn/OSGI-INF/org.simantics.acorn.AcornDriver.xml new file mode 100644 index 000000000..f1a97d175 --- /dev/null +++ b/bundles/org.simantics.acorn/OSGI-INF/org.simantics.acorn.AcornDriver.xml @@ -0,0 +1,7 @@ + + + + + + + diff --git a/bundles/org.simantics.acorn/build.properties b/bundles/org.simantics.acorn/build.properties new file mode 100644 index 000000000..40374cc74 --- /dev/null +++ b/bundles/org.simantics.acorn/build.properties @@ -0,0 +1,17 @@ +############################################################################### +# Copyright (c) 2007, 2010 Association for Decentralized Information 
Management +# in Industry THTH ry. +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Eclipse Public License v1.0 +# which accompanies this distribution, and is available at +# http://www.eclipse.org/legal/epl-v10.html +# +# Contributors: +# VTT Technical Research Centre of Finland - initial API and implementation +############################################################################### +output.. = bin/ +bin.includes = META-INF/,\ + .,\ + log4j.properties,\ + OSGI-INF/ +source.. = src/ diff --git a/bundles/org.simantics.acorn/log4j.properties b/bundles/org.simantics.acorn/log4j.properties new file mode 100644 index 000000000..6fecb6d25 --- /dev/null +++ b/bundles/org.simantics.acorn/log4j.properties @@ -0,0 +1,63 @@ +############################################################################### +# Copyright (c) 2007, 2010 Association for Decentralized Information Management +# in Industry THTH ry. +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Eclipse Public License v1.0 +# which accompanies this distribution, and is available at +# http://www.eclipse.org/legal/epl-v10.html +# +# Contributors: +# VTT Technical Research Centre of Finland - initial API and implementation +############################################################################### +# For the general syntax of property based configuration files see the +# documentation of org.apache.log4j.PropertyConfigurator. + +# The root category uses the appender called rolling. If no priority is +# specified, the root category assumes the default priority for root +# which is DEBUG in log4j. The root category is the only category that +# has a default priority. All other categories need not be assigned a +# priority in which case they inherit their priority from the +# hierarchy. + +# This will provide console output on log4j configuration loading +#log4j.debug=true + +log4j.rootCategory=warn, stdout +#log4j.rootCategory=warn + +# BEGIN APPENDER: CONSOLE APPENDER (stdout) +# first: type of appender (fully qualified class name) +log4j.appender.stdout=org.apache.log4j.ConsoleAppender + +# second: Any configuration information needed for that appender. +# Many appenders require a layout. +log4j.appender.stdout.layout=org.apache.log4j.TTCCLayout +# log4j.appender.stdout.layout=org.apache.log4j.SimpleLayout + +# Possible information overload? +# log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +# additionally, some layouts can take additional information -- +# like the ConversionPattern for the PatternLayout. 
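+# (Reference note, not in the original file: in the pattern below, %d prints
+# the date, %-5p the priority left-padded to 5 characters, %c{2} the last two
+# components of the category name, %F:%L the source file and line, %x the
+# NDC, and %m%n the message followed by a newline. %r, used by the appenders
+# further down, prints elapsed milliseconds and %t the thread name.)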
+# log4j.appender.stdout.layout.ConversionPattern=%d %-5p %-17c{2} (%30F:%L) %3x - %m%n
+# END APPENDER: CONSOLE APPENDER (stdout)
+
+# BEGIN APPENDER: ROLLING FILE APPENDER (rolling)
+#log4j.appender.rolling=com.tools.logging.PluginFileAppender
+#log4j.appender.rolling=org.apache.log4j.FileAppender
+log4j.appender.rolling=org.apache.log4j.RollingFileAppender
+log4j.appender.rolling.File=procore.log
+log4j.appender.rolling.append=true
+log4j.appender.rolling.MaxFileSize=8192KB
+# Keep one backup file
+log4j.appender.rolling.MaxBackupIndex=1
+log4j.appender.rolling.layout=org.apache.log4j.PatternLayout
+#log4j.appender.rolling.layout.ConversionPattern=%p %t %c - %m%n
+log4j.appender.rolling.layout.ConversionPattern=%-6r [%15.15t] %-5p %30.30c - %m%n
+# END APPENDER: ROLLING FILE APPENDER (rolling)
+
+# BEGIN APPENDER: PLUG-IN LOG APPENDER (plugin)
+log4j.appender.plugin=com.tools.logging.PluginLogAppender
+log4j.appender.plugin.layout=org.apache.log4j.PatternLayout
+#log4j.appender.plugin.layout.ConversionPattern=%p %t %c - %m%n
+log4j.appender.plugin.layout.ConversionPattern=%-6r [%15.15t] %-5p %30.30c - %m%n
+# END APPENDER: PLUG-IN LOG APPENDER (plugin)
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornDatabaseManager.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornDatabaseManager.java
new file mode 100644
index 000000000..db2c16763
--- /dev/null
+++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornDatabaseManager.java
@@ -0,0 +1,40 @@
+package org.simantics.acorn;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.simantics.acorn.internal.AcornDatabase;
+import org.simantics.db.Database;
+import org.simantics.db.server.ProCoreException;
+
+/**
+ * @author Tuukka Lehtonen
+ */
+public class AcornDatabaseManager {
+
+	private static Map<String, Database> dbs = new HashMap<String, Database>();
+
+	public static synchronized Database getDatabase(Path folder) throws ProCoreException {
+		Path canonical;
+		try {
+			if (!Files.exists(folder))
+				Files.createDirectories(folder);
+			canonical = folder.toRealPath();
+		} catch (IOException e) {
+			throw new ProCoreException("Could not get canonical path.", e);
+		}
+
+		String canonicalPath = canonical.toString();
+		Database db = dbs.get(canonicalPath);
+		if (null != db)
+			return db;
+
+		db = new AcornDatabase(canonical);
+		dbs.put(canonicalPath, db);
+		return db;
+	}
+
+}
\ No newline at end of file
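(Illustration, not part of the patch: a minimal sketch of using the manager above; the folder name is made up. Because getDatabase() canonicalizes the folder with toRealPath() before using it as a map key, repeated lookups of the same directory return the same cached Database instance.)

import java.nio.file.Paths;
import org.simantics.db.Database;

public class AcornDatabaseManagerExample {
    public static void main(String[] args) throws Exception {
        // First call creates the folder and caches the Database instance.
        Database db = AcornDatabaseManager.getDatabase(Paths.get("db"));
        // "./db" resolves to the same canonical path, so the cached instance comes back.
        Database same = AcornDatabaseManager.getDatabase(Paths.get("./db"));
        System.out.println(db == same); // true
    }
}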
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornDriver.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornDriver.java
new file mode 100644
index 000000000..0e6d52b9a
--- /dev/null
+++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornDriver.java
@@ -0,0 +1,108 @@
+package org.simantics.acorn;
+
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Properties;
+
+import org.simantics.db.DatabaseUserAgent;
+import org.simantics.db.Driver;
+import org.simantics.db.ServerI;
+import org.simantics.db.ServerReference;
+import org.simantics.db.Session;
+import org.simantics.db.SessionReference;
+import org.simantics.db.exception.DatabaseException;
+
+public class AcornDriver implements Driver {
+
+	public static final String AcornDriverName = "acorn";
+
+	@Override
+	public String getName() {
+		return AcornDriverName;
+	}
+
+	@Override
+	public DatabaseUserAgent getDatabaseUserAgent(String address) throws DatabaseException {
+		Path dbFolder = Paths.get(address);
+		return AcornDatabaseManager.getDatabase(dbFolder).getUserAgent();
+	}
+
+	@Override
+	public void setDatabaseUserAgent(String address, DatabaseUserAgent dbUserAgent) throws DatabaseException {
+		Path dbFolder = Paths.get(address);
+		AcornDatabaseManager.getDatabase(dbFolder).setUserAgent(dbUserAgent);
+	}
+
+	@Override
+	public Session getSession(String address, Properties properties) throws DatabaseException {
+		Path dbFolder = Paths.get(address);
+		Session session = AcornSessionManagerImpl.getInstance().createSession(new SessionReference() {
+
+			@Override
+			public ServerReference getServerReference() {
+				return new ServerReference() {
+
+					@Override
+					public Path getDBFolder() {
+						return dbFolder;
+					}
+				};
+			}
+
+			@Override
+			public long getSessionId() {
+				return 0L;
+			}
+		}, null);
+		if (!properties.containsKey("clientId"))
+			properties.put("clientId", dbFolder.toFile().getAbsolutePath());
+		session.registerService(Properties.class, properties);
+		Session s = session.peekService(Session.class);
+		if (null == s)
+			session.registerService(Session.class, session);
+		return session;
+	}
+
+	@Override
+	public ServerI getServer(String address, Properties properties) throws DatabaseException {
+		return new ServerI() {
+
+			@Override
+			public void stop() throws DatabaseException {
+				// nop
+			}
+
+			@Override
+			public void start() throws DatabaseException {
+				// nop
+			}
+
+			@Override
+			public boolean isActive() throws DatabaseException {
+				return true;
+			}
+
+			@Override
+			public String getAddress() throws DatabaseException {
+				return address;
+			}
+
+			@Override
+			public String executeAndDisconnect(String command) throws DatabaseException {
+				return "";
+			}
+
+			@Override
+			public String execute(String command) throws DatabaseException {
+				return "";
+			}
+		};
+	}
+
+	@Override
+	public Management getManagement(String address, Properties properties) throws DatabaseException {
+		Path dbFolder = Paths.get(address);
+		return new AcornManagement(dbFolder, properties);
+	}
+
+}
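(Illustration, not part of the patch: how a client might open a session through this driver. The address is made up, and in an OSGi runtime the driver would presumably be obtained from the service registry via the OSGI-INF declarations rather than instantiated directly. getSession() treats the address as a database folder path and fills in a default "clientId" property.)

import java.util.Properties;
import org.simantics.db.Session;

public class AcornDriverExample {
    public static void main(String[] args) throws Exception {
        Properties properties = new Properties();
        Session session = new AcornDriver().getSession("acorn-db", properties);
        // getSession defaulted "clientId" to the absolute path of the database folder.
        System.out.println(properties.get("clientId"));
    }
}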
folder=" + db.getFolder()); + } + + @Override + public void purge() throws DatabaseException { + db.purgeDatabase(); + } + + @Override + public void shutdown() throws DatabaseException { + db.tryToStop(); + db.disconnect(); + } + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornSessionManagerImpl.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornSessionManagerImpl.java new file mode 100644 index 000000000..1a1e16024 --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornSessionManagerImpl.java @@ -0,0 +1,125 @@ +package org.simantics.acorn; + +import java.nio.file.Path; +import java.util.concurrent.ConcurrentHashMap; + +import org.simantics.db.Database; +import org.simantics.db.Session; +import org.simantics.db.SessionErrorHandler; +import org.simantics.db.SessionManager; +import org.simantics.db.SessionReference; +import org.simantics.db.authentication.UserAuthenticationAgent; +import org.simantics.db.common.utils.Logger; +import org.simantics.db.event.SessionEvent; +import org.simantics.db.event.SessionListener; +import org.simantics.db.exception.DatabaseException; +import org.simantics.db.service.LifecycleSupport; +import org.simantics.utils.datastructures.ListenerList; + +import fi.vtt.simantics.procore.internal.SessionImplDb; +import fi.vtt.simantics.procore.internal.SessionImplSocket; + +public class AcornSessionManagerImpl implements SessionManager { + + private static AcornSessionManagerImpl INSTANCE; + + private ConcurrentHashMap sessionMap = new ConcurrentHashMap(); + private ListenerList sessionListeners = new ListenerList(SessionListener.class); + private SessionErrorHandler errorHandler; + + private Database database; + + private AcornSessionManagerImpl() {} + + void finish() { + sessionMap = null; + sessionListeners = null; + } + + @Override + public void addSessionListener(SessionListener listener) { + sessionListeners.add(listener); + } + + @Override + public Session createSession(SessionReference sessionReference, UserAuthenticationAgent authAgent) + throws DatabaseException { + SessionImplDb sessionImpl = new SessionImplDb(this, authAgent); + boolean ok = false; + try { + Path dbFolder = sessionReference.getServerReference().getDBFolder(); + database = AcornDatabaseManager.getDatabase(dbFolder); + Database.Session dbSession = database.newSession(sessionImpl); + sessionImpl.connect(sessionReference, dbSession); + sessionMap.put(sessionImpl, sessionImpl); + fireSessionOpened(sessionImpl); + ok = true; + } catch (Throwable e) { + Logger.defaultLogError("Connection failed. 
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornSessionManagerImpl.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornSessionManagerImpl.java
new file mode 100644
index 000000000..1a1e16024
--- /dev/null
+++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/AcornSessionManagerImpl.java
@@ -0,0 +1,125 @@
+package org.simantics.acorn;
+
+import java.nio.file.Path;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.simantics.db.Database;
+import org.simantics.db.Session;
+import org.simantics.db.SessionErrorHandler;
+import org.simantics.db.SessionManager;
+import org.simantics.db.SessionReference;
+import org.simantics.db.authentication.UserAuthenticationAgent;
+import org.simantics.db.common.utils.Logger;
+import org.simantics.db.event.SessionEvent;
+import org.simantics.db.event.SessionListener;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.service.LifecycleSupport;
+import org.simantics.utils.datastructures.ListenerList;
+
+import fi.vtt.simantics.procore.internal.SessionImplDb;
+import fi.vtt.simantics.procore.internal.SessionImplSocket;
+
+public class AcornSessionManagerImpl implements SessionManager {
+
+	private static AcornSessionManagerImpl INSTANCE;
+
+	private ConcurrentHashMap<Session, SessionImplSocket> sessionMap = new ConcurrentHashMap<Session, SessionImplSocket>();
+	private ListenerList<SessionListener> sessionListeners = new ListenerList<SessionListener>(SessionListener.class);
+	private SessionErrorHandler errorHandler;
+
+	private Database database;
+
+	private AcornSessionManagerImpl() {}
+
+	void finish() {
+		sessionMap = null;
+		sessionListeners = null;
+	}
+
+	@Override
+	public void addSessionListener(SessionListener listener) {
+		sessionListeners.add(listener);
+	}
+
+	@Override
+	public Session createSession(SessionReference sessionReference, UserAuthenticationAgent authAgent)
+			throws DatabaseException {
+		SessionImplDb sessionImpl = new SessionImplDb(this, authAgent);
+		boolean ok = false;
+		try {
+			Path dbFolder = sessionReference.getServerReference().getDBFolder();
+			database = AcornDatabaseManager.getDatabase(dbFolder);
+			Database.Session dbSession = database.newSession(sessionImpl);
+			sessionImpl.connect(sessionReference, dbSession);
+			sessionMap.put(sessionImpl, sessionImpl);
+			fireSessionOpened(sessionImpl);
+			ok = true;
+		} catch (Throwable e) {
+			Logger.defaultLogError("Connection failed. See exception for details.", e);
+			try {
+				fireSessionClosed(sessionImpl, e);
+				sessionMap.remove(sessionImpl);
+				sessionImpl = null;
+			} catch (Throwable t) {
+			}
+			throw new DatabaseException(e);
+		} finally {
+			if (!ok && null != sessionImpl)
+				sessionImpl.getService(LifecycleSupport.class).close();
+		}
+		return sessionImpl;
+	}
+
+	@Override
+	public void removeSessionListener(SessionListener listener) {
+		sessionListeners.remove(listener);
+	}
+
+	private void fireSessionOpened(SessionImplSocket session) {
+		SessionEvent se = new SessionEvent(session, null);
+		for (SessionListener listener : sessionListeners.getListeners()) {
+			listener.sessionOpened(se);
+		}
+	}
+
+	private void fireSessionClosed(SessionImplSocket session, Throwable cause) {
+		SessionEvent se = new SessionEvent(session, cause);
+		for (SessionListener listener : sessionListeners.getListeners()) {
+			listener.sessionClosed(se);
+		}
+	}
+
+	@Override
+	public void shutdown(Session s, Throwable cause) {
+		SessionImplSocket sis = sessionMap.get(s);
+		if (null == sis)
+			return;
+		try {
+			fireSessionClosed(sis, cause);
+		} finally {
+			sessionMap.remove(s);
+		}
+	}
+
+	@Override
+	public SessionErrorHandler getErrorHandler() {
+		return errorHandler;
+	}
+
+	@Override
+	public void setErrorHandler(SessionErrorHandler errorHandler) {
+		this.errorHandler = errorHandler;
+	}
+
+	public synchronized static AcornSessionManagerImpl getInstance() {
+		if (INSTANCE == null)
+			INSTANCE = new AcornSessionManagerImpl();
+		return INSTANCE;
+	}
+
+	@Override
+	public Database getDatabase() {
+		return database;
+	}
+
+}
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/ClusterManager.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/ClusterManager.java
new file mode 100644
index 000000000..5b8e5abb8
--- /dev/null
+++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/ClusterManager.java
@@ -0,0 +1,584 @@
+package org.simantics.acorn;
+
+import java.io.IOException;
+import java.math.BigInteger;
+import java.nio.file.DirectoryStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.simantics.acorn.cluster.ClusterImpl;
+import org.simantics.acorn.internal.ClusterSupport2;
+import org.simantics.acorn.lru.ChangeSetInfo;
+import org.simantics.acorn.lru.ClusterInfo;
+import org.simantics.acorn.lru.ClusterLRU;
+import org.simantics.acorn.lru.ClusterStreamChunk;
+import org.simantics.acorn.lru.FileInfo;
+import org.simantics.acorn.lru.LRU;
+import org.simantics.db.ClusterCreator;
+import org.simantics.db.ServiceLocator;
+import org.simantics.db.Database.Session.ClusterIds;
+import org.simantics.db.Database.Session.ResourceSegment;
+import org.simantics.db.exception.DatabaseException;
+import org.simantics.db.impl.ClusterBase;
+import org.simantics.db.impl.ClusterI;
+import org.simantics.db.impl.ClusterSupport;
+import org.simantics.db.procore.cluster.ClusterTraits;
+import org.simantics.db.server.ProCoreException;
+import org.simantics.db.service.ClusterSetsSupport;
+import org.simantics.db.service.ClusterUID;
+import org.simantics.utils.threads.logger.ITask;
+import org.simantics.utils.threads.logger.ThreadLogger;
+
+public class ClusterManager {
+
+	private ArrayList<String> currentChanges = new ArrayList<String>();
+
+	public final Path dbFolder;
+	public Path lastSessionDirectory;
+	public Path workingDirectory;
+
+	public LRU<String, ClusterStreamChunk> streamLRU;
+	public LRU<Long, ChangeSetInfo> csLRU;
+	public ClusterLRU clusterLRU;
+	public LRU<String, FileInfo> fileLRU;
+
+	public MainState mainState;
+	public HeadState state;
+
+	private long lastSnapshot = System.nanoTime();
+
+	final public ClusterSupport2 support = new ClusterSupport2(this);
+
+	/*
+	 * Public interface
+	 *
+	 */
+
+	public ClusterManager(Path dbFolder) {
+		this.dbFolder = dbFolder;
+	}
+
+	public ArrayList<String> getChanges(long changeSetId) {
+		ChangeSetInfo info = csLRU.getWithoutMutex(changeSetId);
+		info.acquireMutex();
+		try {
+			info.makeResident();
+			return info.getCSSIds();
+		} catch (Throwable t) {
+			throw new IllegalStateException(t);
+		} finally {
+			info.releaseMutex();
+		}
+	}
+
+	public ClusterBase getClusterByClusterKey(int clusterKey) throws DatabaseException {
+		return clusterLRU.getClusterByClusterKey(clusterKey);
+	}
+
+	public ClusterBase getClusterByClusterUIDOrMake(ClusterUID clusterUID) throws DatabaseException {
+		return clusterLRU.getClusterByClusterUIDOrMake(clusterUID);
+	}
+
+	public ClusterImpl getClusterByClusterUIDOrMakeProxy(ClusterUID clusterUID) throws DatabaseException {
+		return clusterLRU.getClusterByClusterUIDOrMakeProxy(clusterUID);
+	}
+
+	public int getClusterKeyByClusterUIDOrMake(ClusterUID clusterUID) {
+		return clusterLRU.getClusterKeyByClusterUIDOrMake(clusterUID);
+	}
+
+	public int getClusterKeyByClusterUIDOrMakeWithoutMutex(ClusterUID clusterUID) {
+		return clusterLRU.getClusterKeyByClusterUIDOrMakeWithoutMutex(clusterUID);
+	}
+
+	public int getClusterKeyByUID(long id1, long id2) throws DatabaseException {
+		return clusterLRU.getClusterKeyByUIDWithoutMutex(id1, id2);
+	}
+
+	public <T extends ClusterBase> T getClusterProxyByResourceKey(int resourceKey) throws DatabaseException {
+		return clusterLRU.getClusterProxyByResourceKey(resourceKey);
+	}
+
+	public ClusterUID getClusterUIDByResourceKey(int resourceKey) throws DatabaseException {
+		return clusterLRU.getClusterUIDByResourceKey(resourceKey);
+	}
+
+	public ClusterUID getClusterUIDByResourceKeyWithoutMutex(int resourceKey) throws DatabaseException {
+		return clusterLRU.getClusterUIDByResourceKeyWithoutMutex(resourceKey);
+	}
+
+	/*
+	 * Private implementation
+	 *
+	 */
+
+	private static long countFiles(Path directory) throws IOException {
+		try (DirectoryStream<Path> ds = Files.newDirectoryStream(directory)) {
+			int count = 0;
+			for (@SuppressWarnings("unused") Path p : ds)
+				++count;
+			return count;
+		}
+	}
+
+	public synchronized boolean makeSnapshot(ServiceLocator locator, boolean force) throws IOException {
+
+		// Throttle automatic snapshots: skip if the previous snapshot is
+		// less than 10 seconds old (the comparison below is in nanoseconds)
+		if(!force && System.nanoTime() - lastSnapshot < 10*1000000000L) {
+//			System.err.println("lastSnapshot too early");
+			return false;
+		}
+
+		// Cluster files are always there
+		// Nothing has been written => no need to do anything
+		long amountOfFiles = countFiles(workingDirectory);
+		if(!force && amountOfFiles < 3) {
+//			System.err.println("amountOfFiles < 3");
+			return false;
+		}
+
+		System.err.println("makeSnapshot");
+
+		// Schedule writing of all data to disk
+		refreshHeadState();
+
+		// Wait for all files to be written
+		clusterLRU.shutdown();
+		fileLRU.shutdown();
+		streamLRU.shutdown();
+		csLRU.shutdown();
+
+		persistHeadState();
+
+		mainState.save(dbFolder);
+
+		ClusterSetsSupport cssi = locator.getService(ClusterSetsSupport.class);
+		cssi.save();
+
+		amountOfFiles = countFiles(workingDirectory);
+
+		System.err.println(" -finished: amount of files is " + amountOfFiles);
+
+		workingDirectory = dbFolder.resolve(Integer.toString(mainState.headDir));
+		if (!Files.exists(workingDirectory)) {
+			Files.createDirectories(workingDirectory);
+		}
+
+
cssi.updateReadAndWriteDirectories(lastSessionDirectory, workingDirectory); + + clusterLRU.setWriteDir(workingDirectory); + fileLRU.setWriteDir(workingDirectory); + streamLRU.setWriteDir(workingDirectory); + csLRU.setWriteDir(workingDirectory); + + clusterLRU.resume(); + fileLRU.resume(); + streamLRU.resume(); + csLRU.resume(); + + lastSnapshot = System.nanoTime(); + + return true; + + } + + public void refreshHeadState() throws IOException { + + state.clusters.clear(); + state.files.clear(); + state.stream.clear(); + state.cs.clear(); + + clusterLRU.persist(state.clusters); + fileLRU.persist(state.files); + streamLRU.persist(state.stream); + csLRU.persist(state.cs); + + } + + public void persistHeadState() throws IOException { + + // Sync current working directory + Files.walk(workingDirectory, 1).filter(Files::isRegularFile).forEach(FileIO::uncheckedSyncPath); + state.save(workingDirectory); + mainState.headDir++; + } + + +// public void save() throws IOException { +// +// refreshHeadState(); +// +// clusterLRU.shutdown(); +// fileLRU.shutdown(); +// streamLRU.shutdown(); +// csLRU.shutdown(); +// +// persistHeadState(); +// +// mainState.save(getBaseDirectory()); + +// try { +// ThreadLogVisualizer visualizer = new ThreadLogVisualizer(); +// visualizer.read(new DataInputStream(new FileInputStream( +// ThreadLogger.LOG_FILE))); +// visualizer.visualize3(new PrintStream(ThreadLogger.LOG_FILE +// + ".svg")); +// } catch (FileNotFoundException e) { +// // TODO Auto-generated catch block +// e.printStackTrace(); +// } + + // System.err.println("-- load statistics --"); + // for(Pair entry : + // CollectionUtils.valueSortedEntries(histogram)) { + // System.err.println(" " + entry.second + " " + entry.first); + // } + +// } + + private void acquireAll() { + clusterLRU.acquireMutex(); + fileLRU.acquireMutex(); + streamLRU.acquireMutex(); + csLRU.acquireMutex(); + } + + private void releaseAll() { + csLRU.releaseMutex(); + streamLRU.releaseMutex(); + fileLRU.releaseMutex(); + clusterLRU.releaseMutex(); + } + + public void load() throws IOException { + + // Main state + mainState = MainState.load(dbFolder); + + lastSessionDirectory = dbFolder.resolve(Integer.toString(mainState.headDir - 1)); + + // Head State + try { + state = HeadState.load(lastSessionDirectory); + } catch (InvalidHeadStateException e) { + // For backwards compatibility only! 
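+			// The fallback below reads the pre-Acorn head.state format
+			// (org.simantics.db.javacore.HeadState) and copies its fields into
+			// the current HeadState representation, so that databases written
+			// by older versions can still be opened.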
+ Throwable cause = e.getCause(); + if (cause != null) { + try { + org.simantics.db.javacore.HeadState oldState = org.simantics.db.javacore.HeadState.load(lastSessionDirectory); + + HeadState newState = new HeadState(); + newState.clusters = oldState.clusters; + newState.cs = oldState.cs; + newState.files = oldState.files; + newState.stream = oldState.stream; + newState.headChangeSetId = oldState.headChangeSetId; + newState.reservedIds = oldState.reservedIds; + newState.transactionId = oldState.transactionId; + state = newState; + } catch (InvalidHeadStateException e1) { + throw new IOException("Could not load HeadState due to corruption", e1); + } + } else { + // This should never happen as MainState.load() checks the integrity + // of head.state files and rolls back in cases of corruption until a + // consistent state is found (could be case 0 - initial db state) + // IF this does happen something is completely wrong + throw new IOException("Could not load HeadState due to corruption", e); + } + } + + workingDirectory = dbFolder.resolve(Integer.toString(mainState.headDir)); + Files.createDirectories(workingDirectory); + + csLRU = new LRU<Long, ChangeSetInfo>("Change Set", workingDirectory); + streamLRU = new LRU<String, ClusterStreamChunk>("Cluster Stream", workingDirectory); + clusterLRU = new ClusterLRU(this, "Cluster", workingDirectory); + fileLRU = new LRU<String, FileInfo>("External Value", workingDirectory); + + acquireAll(); + + // Clusters; persisted keys have the form uid#readDir#offset#length + for (String clusterKey : state.clusters) { + String[] parts1 = clusterKey.split("#"); + String[] parts = parts1[0].split("\\."); + long first = new BigInteger(parts[0], 16).longValue(); + long second = new BigInteger(parts[1], 16).longValue(); + ClusterUID uuid = ClusterUID.make(first, second); + Path readDir = dbFolder.resolve(parts1[1]); + int offset = Integer.parseInt(parts1[2]); + int length = Integer.parseInt(parts1[3]); + clusterLRU.map(new ClusterInfo(this, clusterLRU, readDir, uuid, offset, length)); + } + // Files + for (String fileKey : state.files) { +// System.err.println("loadFile: " + fileKey); + String[] parts = fileKey.split("#"); + Path readDir = dbFolder.resolve(parts[1]); + int offset = Integer.parseInt(parts[2]); + int length = Integer.parseInt(parts[3]); + FileInfo info = new FileInfo(fileLRU, readDir, parts[0], offset, length); + fileLRU.map(info); + } + // Update chunks + for (String fileKey : state.stream) { +// System.err.println("loadStream: " + fileKey); + String[] parts = fileKey.split("#"); + Path readDir = dbFolder.resolve(parts[1]); + int offset = Integer.parseInt(parts[2]); + int length = Integer.parseInt(parts[3]); + ClusterStreamChunk info = new ClusterStreamChunk(this, + streamLRU, readDir, parts[0], offset, length); + streamLRU.map(info); + } + // Change sets + for (String fileKey : state.cs) { + String[] parts = fileKey.split("#"); + Path readDir = dbFolder.resolve(parts[1]); + Long revisionId = Long.parseLong(parts[0]); + int offset = Integer.parseInt(parts[2]); + int length = Integer.parseInt(parts[3]); + ChangeSetInfo info = new ChangeSetInfo(csLRU, readDir, revisionId, offset, length); + csLRU.map(info); + } + + releaseAll(); + + } + + public <T> T clone(ClusterUID uid, ClusterCreator creator) + throws DatabaseException { + + clusterLRU.ensureUpdates(uid); + + ClusterInfo info = clusterLRU.getWithoutMutex(uid); + return info.clone(uid, creator); + + } + + //private int loadCounter = 0; + + public static void startLog(String msg) { + tasks.put(msg, ThreadLogger.getInstance().begin(msg)); + } + + public static void endLog(String msg) { + ITask task = tasks.get(msg); + if (task != null) + task.finish(); + } + + static Map<String, ITask> tasks = new HashMap<>(); + + public void update(ClusterUID uid, ClusterImpl clu) { + + ClusterInfo info = clusterLRU.getWithoutMutex(uid); + info.acquireMutex(); + try { + info.update(clu); + } catch (Throwable t) { + throw new IllegalStateException(t); + } finally { + info.releaseMutex(); + } + + } + + public long getClusterIdOrCreate(ClusterUID clusterUID) { + return 1; + } + + public int getResourceKey(ClusterUID uid, int index) { + return clusterLRU.getResourceKey(uid, index); + } + + public int getResourceKeyWitoutMutex(ClusterUID uid, int index) { + return clusterLRU.getResourceKeyWithoutMutex(uid, index); + } + + public ClusterIds getClusterIds() throws ProCoreException { + + clusterLRU.acquireMutex(); + + try { + + Collection<ClusterInfo> infos = clusterLRU.values(); + final int status = infos.size(); + final long[] firsts = new long[status]; + final long[] seconds = new long[status]; + + int index = 0; + for (ClusterInfo info : infos) { + firsts[index] = 0; + seconds[index] = info.getKey().second; + index++; + } + + return new ClusterIds() { + + @Override + public int getStatus() { + return status; + } + + @Override + public long[] getFirst() { + return firsts; + } + + @Override + public long[] getSecond() { + return seconds; + } + + }; + + } catch (Throwable t) { + throw new IllegalStateException(t); + } finally { + clusterLRU.releaseMutex(); + } + + } + + public void addIntoCurrentChangeSet(String ccs) { + + csLRU.acquireMutex(); + + try { + + currentChanges.add(ccs); + + } catch (Throwable t) { + throw new IllegalStateException(t); + } finally { + + csLRU.releaseMutex(); + + } + + } + + public void commitChangeSet(long changeSetId, byte[] data) { + csLRU.acquireMutex(); + try { + ArrayList<String> csids = new ArrayList<>(currentChanges); + currentChanges = new ArrayList<>(); + new ChangeSetInfo(csLRU, changeSetId, data, csids); + } catch (Throwable t) { + throw new IllegalStateException(t); + } finally { + csLRU.releaseMutex(); + } + } + + public byte[] getMetadata(long changeSetId) { + + ChangeSetInfo info = csLRU.getWithoutMutex(changeSetId); + if (info == null) return null; + info.acquireMutex(); + try { + return info.getMetadataBytes(); + } catch (Throwable t) { + throw new IllegalStateException(t); + } finally { + info.releaseMutex(); + } + + } + + public byte[] getResourceFile(final byte[] clusterUID, + final int resourceIndex) throws ProCoreException { + + ClusterUID uid = ClusterUID.make(clusterUID, 0); + String key = uid.toString() + "_" + resourceIndex; + FileInfo info = fileLRU.getWithoutMutex(key); + if(info == null) return null; + info.acquireMutex(); + try { + return info.getResourceFile(); + } catch (Throwable t) { + throw new IllegalStateException(t); + } finally { + info.releaseMutex(); + } + + } + + public ResourceSegment getResourceSegment(final byte[] clusterUID, + final int resourceIndex, final long segmentOffset, short segmentSize) + throws ProCoreException { + + ClusterUID uid = ClusterUID.make(clusterUID, 0); + + String key = uid.toString() + "_" + resourceIndex; + FileInfo info = fileLRU.getWithoutMutex(key); + if(info == null) return null; + info.acquireMutex(); + try { + return info.getResourceSegment(clusterUID, resourceIndex, segmentOffset, segmentSize); + } catch (Throwable t) { + throw new IllegalStateException(t); + } finally { + info.releaseMutex(); + } + + } + + public void modiFileEx(ClusterUID uid, int resourceKey, long offset, + long size, byte[] bytes, long pos, ClusterSupport support) { + + try
{ + + String key = uid.toString() + + "_" + + ClusterTraits + .getResourceIndexFromResourceKey(resourceKey); + + FileInfo info = null; + + fileLRU.acquireMutex(); + + try { + + info = fileLRU.get(key); + if (info == null) + info = new FileInfo(fileLRU, key, (int) (offset + size)); + + + } catch (Throwable t) { + throw new IllegalStateException(t); + } finally { + + fileLRU.releaseMutex(); + + } + + info.acquireMutex(); + try { + info.updateData(bytes, offset, pos, size); + } catch (Throwable t) { + throw new IllegalStateException(t); + } finally { + info.releaseMutex(); + } + + } catch (DatabaseException e) { + e.printStackTrace(); + } + + } + + public void shutdown() { + clusterLRU.shutdown(); + fileLRU.shutdown(); + streamLRU.shutdown(); + csLRU.shutdown(); + } + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/ExternalizableExample.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/ExternalizableExample.java new file mode 100644 index 000000000..8d0bac29f --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/ExternalizableExample.java @@ -0,0 +1,43 @@ +package org.simantics.acorn; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.io.ObjectOutputStream; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.nio.file.StandardOpenOption; + +public class ExternalizableExample implements Externalizable { + + public int first; + private long second; + + public ExternalizableExample(int first, long second) { + this.first = first; + this.second = second; + } + + @Override + public void writeExternal(ObjectOutput out) throws IOException { + out.writeInt(first); + out.writeLong(second); + } + + @Override + public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { + + } + + + public static void main(String[] args) { + Externalizable test = new ExternalizableExample(123, 3456); + + try (ObjectOutputStream stream = new ObjectOutputStream(Files.newOutputStream(Paths.get("C:/Users/Jani Simomaa/Desktop/test"), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING))) { + stream.writeObject(test); + } catch (IOException e) { + e.printStackTrace(); + } + } +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/FileIO.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/FileIO.java new file mode 100644 index 000000000..aa7173285 --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/FileIO.java @@ -0,0 +1,142 @@ +package org.simantics.acorn; + +import java.io.BufferedOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.io.RandomAccessFile; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.nio.channels.SeekableByteChannel; +import java.nio.file.Files; +import java.nio.file.OpenOption; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardOpenOption; +import java.nio.file.attribute.FileAttribute; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import org.simantics.databoard.file.RuntimeIOException; + +public class FileIO { + + private static final FileAttribute<?>[] NO_ATTRIBUTES = new FileAttribute[0]; + + private static final Set<OpenOption> CREATE_OPTIONS = new HashSet<>(2); + private static final Set<OpenOption> APPEND_OPTIONS = new HashSet<>(1); + + static { + CREATE_OPTIONS.add(StandardOpenOption.WRITE); + CREATE_OPTIONS.add(StandardOpenOption.CREATE); + +
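+ // The first write to a path opens it with WRITE+CREATE, every later write with APPEND; + // saveBytes() picks the option set from writePosition. Usage sketch (path hypothetical): + // + // FileIO io = FileIO.get(Paths.get("db/7/clusters.file")); + // int offset = io.saveBytes(data, data.length, false); // appends, returns start offset + // byte[] back = io.readBytes(offset, data.length);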
APPEND_OPTIONS.add(StandardOpenOption.APPEND); + } + + private Path path; + private int writePosition = 0; + + private FileIO(Path path) { + this.path = path; + } + + private static Map<Path, FileIO> map = new HashMap<>(); + + public static FileIO get(Path path) { + synchronized(map) { + FileIO existing = map.get(path); + if(existing == null) { + existing = new FileIO(path); + map.put(path, existing); + } + return existing; + } + } + + //private static final boolean TRACE_SWAP = false; + private static final boolean TRACE_PERF = false; + + public synchronized int saveBytes(byte[] bytes, int length, boolean overwrite) throws IOException { + if(overwrite) writePosition = 0; + int result = writePosition; + long start = System.nanoTime(); + Set<OpenOption> options = writePosition == 0 ? CREATE_OPTIONS : APPEND_OPTIONS; + + ByteBuffer bb = ByteBuffer.wrap(bytes, 0, length); + try (FileChannel fc = FileChannel.open(path, options, NO_ATTRIBUTES)) { + fc.write(bb); + } + + writePosition += length; + if(TRACE_PERF) { + long duration = System.nanoTime()-start; + double ds = 1e-9*duration; + System.err.println("Wrote " + bytes.length + " bytes @ " + 1e-6*bytes.length / ds + "MB/s"); + } + return result; + } + + public synchronized byte[] readBytes(int offset, int length) throws IOException { + long start = System.nanoTime(); + try (SeekableByteChannel channel = Files.newByteChannel(path)) { + channel.position(offset); + ByteBuffer buf = ByteBuffer.allocate(length); + int read = 0; + while (read < length) { + int n = channel.read(buf); + if (n < 0) + throw new IOException("Unexpected end of file in " + path + " at offset " + (offset + read)); + read += n; + } + byte[] result = buf.array(); + if (result.length != length) + System.err.println("readBytes: buffer size " + result.length + " does not match requested length " + length); + if (TRACE_PERF) { + long duration = System.nanoTime() - start; + double ds = 1e-9 * duration; + System.err.println("Read " + result.length + " bytes @ " + 1e-6 * result.length / ds + "MB/s"); + } + return result; + } + } + + public static void syncPath(Path f) throws IOException { + // Does not seem to need 's' according to unit test in Windows + try (RandomAccessFile raf = new RandomAccessFile(f.toFile(), "rw")) { + raf.getFD().sync(); + } + } + + static void uncheckedSyncPath(Path f) { + try { + syncPath(f); + } catch (IOException e) { + throw new RuntimeIOException(e); + } + } + + public static void main(String[] args) throws Exception { + + byte[] buf = new byte[1024*1024]; + + long s = System.nanoTime(); + + Path test = Paths.get("e:/work/test.dat"); + OutputStream fs = Files.newOutputStream(test); + OutputStream os = new BufferedOutputStream(fs, 128*1024); + + for(int i=0;i<40;i++) { + os.write(buf); + } + + os.flush(); + //fs.getFD().sync(); + os.close(); + + syncPath(test); + + long duration = System.nanoTime()-s; + System.err.println("Took " + 1e-6*duration + "ms."); + + + } + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/GraphClientImpl2.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/GraphClientImpl2.java new file mode 100644 index 000000000..2796e3e02 --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/GraphClientImpl2.java @@ -0,0 +1,708 @@ +/******************************************************************************* + * Copyright (c) 2007, 2010 Association for Decentralized Information Management + * in Industry THTH ry. + * All rights reserved.
This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * VTT Technical Research Centre of Finland - initial API and implementation + *******************************************************************************/ +package org.simantics.acorn; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Semaphore; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; + +import org.simantics.acorn.internal.ClusterChange; +import org.simantics.acorn.internal.ClusterUpdateProcessorBase; +import org.simantics.acorn.internal.UndoClusterUpdateProcessor; +import org.simantics.acorn.lru.ClusterInfo; +import org.simantics.acorn.lru.ClusterStreamChunk; +import org.simantics.acorn.lru.ClusterUpdateOperation; +import org.simantics.acorn.lru.ClusterChangeSet.Entry; +import org.simantics.db.ClusterCreator; +import org.simantics.db.Database; +import org.simantics.db.ServiceLocator; +import org.simantics.db.common.utils.Logger; +import org.simantics.db.exception.DatabaseException; +import org.simantics.db.exception.SDBException; +import org.simantics.db.server.ProCoreException; +import org.simantics.db.service.ClusterSetsSupport; +import org.simantics.db.service.ClusterUID; +import org.simantics.utils.datastructures.Pair; +import org.simantics.utils.logging.TimeLogger; + +import gnu.trove.map.hash.TLongObjectHashMap; + +public class GraphClientImpl2 implements Database.Session { + + public static final boolean DEBUG = false; + + public final ClusterManager clusters; + + private TransactionManager transactionManager = new TransactionManager(); + private ExecutorService executor = Executors.newSingleThreadExecutor(new ClientThreadFactory("Core Main Program", false)); + private ExecutorService saver = Executors.newSingleThreadExecutor(new ClientThreadFactory("Core Snapshot Saver", true)); + + private static GraphClientImpl2 INSTANCE; + private Path dbFolder; + private final Database database; + private ServiceLocator locator; + private MainProgram mainProgram; + + static class ClientThreadFactory implements ThreadFactory { + + final String name; + final boolean daemon; + + public ClientThreadFactory(String name, boolean daemon) { + this.name = name; + this.daemon = daemon; + } + + @Override + public Thread newThread(Runnable r) { + Thread thread = new Thread(r, name); + thread.setDaemon(daemon); + return thread; + } + } + + public GraphClientImpl2(Database database, Path dbFolder, ServiceLocator locator) throws IOException { + this.database = database; + this.dbFolder = dbFolder; + this.locator = locator; + this.clusters = new ClusterManager(dbFolder); + load(); + ClusterSetsSupport cssi = locator.getService(ClusterSetsSupport.class); + cssi.updateReadAndWriteDirectories(clusters.lastSessionDirectory, clusters.workingDirectory); + mainProgram = new MainProgram(this, clusters); + executor.execute(mainProgram); + INSTANCE = this; + } + + public Path getDbFolder() { + return dbFolder; + } + + public void tryMakeSnapshot() throws IOException { + + if (isClosing) + return; + + saver.execute(new Runnable() { + + @Override + public void run() { + Transaction tr = null; + try { + // First take a write transaction + tr = 
askWriteTransaction(-1); + // Then make sure that MainProgram is idling + mainProgram.mutex.acquire(); + try { + synchronized(mainProgram) { + if(mainProgram.operations.isEmpty()) { + makeSnapshot(false); + } else { + // MainProgram is becoming busy again - delay snapshotting + return; + } + } + } finally { + mainProgram.mutex.release(); + } + } catch (IOException e) { + Logger.defaultLogError(e); + } catch (ProCoreException e) { + Logger.defaultLogError(e); + } catch (InterruptedException e) { + Logger.defaultLogError(e); + } finally { + try { + if(tr != null) + endTransaction(tr.getTransactionId()); + } catch (ProCoreException e) { + Logger.defaultLogError(e); + } + } + } + + }); + } + + public void makeSnapshot(boolean force) throws IOException { + clusters.makeSnapshot(locator, force); + } + + public <T> T clone(ClusterUID uid, ClusterCreator creator) throws DatabaseException { + return clusters.clone(uid, creator); + } + +// private void save() throws IOException { +// clusters.save(); +// } + + public void load() throws IOException { + clusters.load(); + } + +// public void modiFileEx(ClusterUID uid, int resourceKey, long offset, long size, byte[] bytes, long pos, ClusterSupport support) { +// clusters.modiFileEx(uid, resourceKey, offset, size, bytes, pos, support); +// } + + @Override + public Database getDatabase() { + return database; + } + + private boolean closed = false; + private boolean isClosing = false; + + @Override + public void close() throws ProCoreException { + System.err.println("Closing " + this + " and mainProgram " + mainProgram); + if(!closed && !isClosing) { + isClosing = true; + try { + makeSnapshot(true); + + mainProgram.close(); + clusters.shutdown(); + executor.shutdown(); + saver.shutdown(); + boolean executorTerminated = executor.awaitTermination(500, TimeUnit.MILLISECONDS); + boolean saverTerminated = saver.awaitTermination(500, TimeUnit.MILLISECONDS); + + System.err.println("executorTerminated=" + executorTerminated + ", saverTerminated=" + saverTerminated); + + INSTANCE = null; + mainProgram = null; + executor = null; + saver = null; + + } catch (IOException | InterruptedException e) { + throw new ProCoreException(e); + } + } + closed = true; + //impl.close(); + } + + @Override + public void open() throws ProCoreException { + throw new UnsupportedOperationException(); + } + + @Override + public boolean isClosed() throws ProCoreException { + return closed; + } + + @Override + public void acceptCommit(long transactionId, long changeSetId, byte[] metadata) throws ProCoreException { + + clusters.state.headChangeSetId++; + + long committedChangeSetId = changeSetId + 1; + + clusters.commitChangeSet(committedChangeSetId, metadata); + + clusters.state.transactionId = transactionId; + + mainProgram.committed(); + + TimeLogger.log("Accepted commit"); + + } + + @Override + public long cancelCommit(long transactionId, long changeSetId, + byte[] metadata, OnChangeSetUpdate onChangeSetUpdate) + throws ProCoreException { + System.err.println("GraphClientImpl2.cancelCommit() called!!
this is experimental and might cause havoc!"); + try { + undo(new long[] {changeSetId}, onChangeSetUpdate); + } catch (SDBException e) { + e.printStackTrace(); + throw new ProCoreException(e); + } + clusters.state.headChangeSetId++; + return clusters.state.headChangeSetId; + } + + @Override + public Transaction askReadTransaction() throws ProCoreException { + return transactionManager.askReadTransaction(); + } + + enum TransactionState { + IDLE,WRITE,READ + } + + class TransactionRequest { + public TransactionState state; + public Semaphore semaphore; + public TransactionRequest(TransactionState state, Semaphore semaphore) { + this.state = state; + this.semaphore = semaphore; + } + } + + class TransactionManager { + + private TransactionState currentTransactionState = TransactionState.IDLE; + + private int reads = 0; + + LinkedList<TransactionRequest> requests = new LinkedList<>(); + + TLongObjectHashMap<TransactionRequest> requestMap = new TLongObjectHashMap<>(); + + private synchronized Transaction makeTransaction(TransactionRequest req) { + + final int csId = clusters.state.headChangeSetId; + final long trId = clusters.state.transactionId+1; + requestMap.put(trId, req); + return new Transaction() { + + @Override + public long getTransactionId() { + return trId; + } + + @Override + public long getHeadChangeSetId() { + return csId; + } + }; + } + + /* + * This method cannot be synchronized since it waits and must support multiple entries + * by query thread(s) and internal transactions such as snapshot saver + */ + public Transaction askReadTransaction() throws ProCoreException { + + Semaphore semaphore = new Semaphore(0); + + TransactionRequest req = queue(TransactionState.READ, semaphore); + + try { + semaphore.acquire(); + } catch (InterruptedException e) { + throw new ProCoreException(e); + } + + return makeTransaction(req); + + } + + private synchronized void dispatch() { + TransactionRequest r = requests.removeFirst(); + if(r.state == TransactionState.READ) reads++; + r.semaphore.release(); + } + + private synchronized void processRequests() { + + while(true) { + + if(requests.isEmpty()) return; + TransactionRequest req = requests.peek(); + + if(currentTransactionState == TransactionState.IDLE) { + + // Accept anything while IDLE + currentTransactionState = req.state; + dispatch(); + + } else if (currentTransactionState == TransactionState.READ) { + + if(req.state == currentTransactionState) { + + // Allow other reads + dispatch(); + + } else { + + // Wait + return; + + } + + } else if (currentTransactionState == TransactionState.WRITE) { + + // Wait + return; + + } + + } + + } + + private synchronized TransactionRequest queue(TransactionState state, Semaphore semaphore) { + TransactionRequest req = new TransactionRequest(state, semaphore); + requests.addLast(req); + processRequests(); + return req; + } + + /* + * This method cannot be synchronized since it waits and must support multiple entries + * by query thread(s) and internal transactions such as snapshot saver + */ + public Transaction askWriteTransaction() + throws ProCoreException { + + Semaphore semaphore = new Semaphore(0); + + TransactionRequest req = queue(TransactionState.WRITE, semaphore); + + try { + semaphore.acquire(); + } catch (InterruptedException e) { + throw new ProCoreException(e); + } + + mainProgram.startTransaction(clusters.state.headChangeSetId+1); + + return makeTransaction(req); + + } + + public synchronized long endTransaction(long transactionId) throws ProCoreException { + + TransactionRequest req = requestMap.remove(transactionId); +
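+ // Worked example of the admission rules implemented by processRequests() (hypothetical + // arrival order): requests R1,R2,W1,R3 dispatch as R1 and R2 together since reads run + // concurrently, then W1 alone once reads drop to zero, then R3; ending a transaction + // below re-runs processRequests() so waiting requests are re-evaluated.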
if(req.state == TransactionState.WRITE) { + currentTransactionState = TransactionState.IDLE; + processRequests(); + } else { + reads--; + if(reads == 0) { + currentTransactionState = TransactionState.IDLE; + processRequests(); + } + } + return clusters.state.transactionId; + } + + } + + @Override + public Transaction askWriteTransaction(final long transactionId) + throws ProCoreException { + return transactionManager.askWriteTransaction(); + } + + @Override + public long endTransaction(long transactionId) throws ProCoreException { + return transactionManager.endTransaction(transactionId); + } + + @Override + public String execute(String command) throws ProCoreException { + // This is called only by WriteGraphImpl.commitAccessorChanges + // We can ignore this in Acorn + return ""; + } + + @Override + public byte[] getChangeSetMetadata(long changeSetId) + throws ProCoreException { + return clusters.getMetadata(changeSetId); + } + + @Override + public ChangeSetData getChangeSetData(long minChangeSetId, + long maxChangeSetId, OnChangeSetUpdate onChangeSetUpdate) + throws ProCoreException { + + new Exception("GetChangeSetDataFunction " + minChangeSetId + " " + maxChangeSetId).printStackTrace(); + return null; + + } + + @Override + public ChangeSetIds getChangeSetIds() throws ProCoreException { + throw new UnsupportedOperationException(); + } + + @Override + public Cluster getCluster(byte[] clusterId) throws ProCoreException { + throw new UnsupportedOperationException(); + } + + @Override + public ClusterChanges getClusterChanges(long changeSetId, byte[] clusterId) + throws ProCoreException { + throw new UnsupportedOperationException(); + } + + @Override + public ClusterIds getClusterIds() throws ProCoreException { + return clusters.getClusterIds(); + } + + @Override + public Information getInformation() throws ProCoreException { + return new Information() { + + @Override + public String getServerId() { + return "server"; + } + + @Override + public String getProtocolId() { + return ""; + } + + @Override + public String getDatabaseId() { + return "database"; + } + + @Override + public long getFirstChangeSetId() { + return 0; + } + + }; + } + + @Override + public Refresh getRefresh(long changeSetId) throws ProCoreException { + + final ClusterIds ids = getClusterIds(); + + return new Refresh() { + + @Override + public long getHeadChangeSetId() { + return clusters.state.headChangeSetId; + } + + @Override + public long[] getFirst() { + return ids.getFirst(); + } + + @Override + public long[] getSecond() { + return ids.getSecond(); + } + + }; + + } + + public byte[] getResourceFile(final byte[] clusterUID, final int resourceIndex) throws ProCoreException { + return clusters.getResourceFile(clusterUID, resourceIndex); + } + + @Override + public ResourceSegment getResourceSegment(final byte[] clusterUID, + final int resourceIndex, final long segmentOffset, short segmentSize) throws ProCoreException { + + return clusters.getResourceSegment(clusterUID, resourceIndex, segmentOffset, segmentSize); + + } + + @Override + public long reserveIds(int count) throws ProCoreException { + return clusters.state.reservedIds++; + } + + @Override + public void updateCluster(byte[] operations) throws ProCoreException { + + ClusterUpdateOperation operation = new ClusterUpdateOperation(clusters, operations); + ClusterInfo info = clusters.clusterLRU.getOrCreate(operation.uid, true); + if(info == null) throw new IllegalStateException(); + info.acquireMutex(); + try { + info.scheduleUpdate(); +
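+ // schedule() appends this operation to the current ClusterStreamChunk and stamps it + // with an id of the form changeSetId-chunkId.offset, e.g. "2-0.5" for change set 2, + // chunk 0, offset 5 (worked example); getUndoCSS() later splits that id on '.' to + // locate the chunk when changes are undone.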
mainProgram.schedule(operation); + } catch (Throwable t) { + throw new IllegalStateException(t); + } finally { + info.releaseMutex(); + } + + } + + private UndoClusterUpdateProcessor getUndoCSS(String ccsId) throws DatabaseException { + + String[] ss = ccsId.split("\\."); + String chunkKey = ss[0]; + int chunkOffset = Integer.parseInt(ss[1]); + ClusterStreamChunk chunk = clusters.streamLRU.getWithoutMutex(chunkKey); + if(chunk == null) throw new IllegalStateException("Cluster Stream Chunk " + chunkKey + " was not found."); + chunk.acquireMutex(); + try { + return chunk.getUndoProcessor(clusters, chunkOffset, ccsId); + } catch (Throwable t) { + throw new IllegalStateException(t); + } finally { + chunk.releaseMutex(); + } + + } + + private void performUndo(String ccsId, ArrayList<Pair<ClusterUID, byte[]>> clusterChanges, UndoClusterSupport support) throws ProCoreException, DatabaseException { + + UndoClusterUpdateProcessor proc = getUndoCSS(ccsId); + + int clusterKey = clusters.getClusterKeyByClusterUIDOrMakeWithoutMutex(proc.getClusterUID()); + + clusters.clusterLRU.acquireMutex(); + try { + + ClusterChange cs = new ClusterChange(clusterChanges, proc.getClusterUID()); + // Replay the recorded entries in reverse order to take the cluster back in time + for(int i=0;i<proc.entries.size();i++) { + Entry e = proc.entries.get(proc.entries.size() - 1 - i); + e.process(clusters, cs, clusterKey); + } + cs.flush(); + + } finally { + clusters.clusterLRU.releaseMutex(); + } + + } + + @Override + public boolean undo(long[] changeSetIds, OnChangeSetUpdate onChangeSetUpdate) throws SDBException { + + final ArrayList<Pair<ClusterUID, byte[]>> clusterChanges = new ArrayList<Pair<ClusterUID, byte[]>>(); + + UndoClusterSupport support = new UndoClusterSupport(clusters); + + final int changeSetId = clusters.state.headChangeSetId; + + if(ClusterUpdateProcessorBase.DEBUG) + System.err.println(" === BEGIN UNDO ==="); + + for(int i=0;i<changeSetIds.length;i++) { + final long id = changeSetIds[changeSetIds.length-1-i]; + ArrayList<String> ccss = clusters.getChanges(id); + for(int j=0;j<ccss.size();j++) { + try { + performUndo(ccss.get(ccss.size()-j-1), clusterChanges, support); + } catch (DatabaseException e) { + e.printStackTrace(); + } + } + } + + if(ClusterUpdateProcessorBase.DEBUG) + System.err.println(" === END UNDO ==="); + + for(int i=0;i<clusterChanges.size();i++) { + + final int changeSetIndex = i; + + final Pair<ClusterUID, byte[]> pair = clusterChanges.get(i); + + final ClusterUID cuid = pair.first; + final byte[] data = pair.second; + + onChangeSetUpdate.onChangeSetUpdate(new ChangeSetUpdate() { + + @Override + public long getChangeSetId() { + return changeSetId; + } + + @Override + public int getChangeSetIndex() { + return 0; + } + + @Override + public int getNumberOfClusterChangeSets() { + return clusterChanges.size(); + } + + @Override + public int getIndexOfClusterChangeSet() { + return changeSetIndex; + } + + @Override + public byte[] getClusterId() { + return cuid.asBytes(); + } + + @Override + public boolean getNewCluster() { + return false; + } + + @Override + public byte[] getData() { + return data; + } + + }); + + } + + + return false; + + } + + public static GraphClientImpl2 getInstance() { + return INSTANCE; + } + + public ServiceLocator getServiceLocator() { + return locator; + } + + @Override + public boolean refreshEnabled() { + return false; + } + + + + + + + + + + + + //////////////////////// + + + + + + + + + + + + +} + diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/HeadState.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/HeadState.java new file mode 100644 index 000000000..56ef481a7 --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/HeadState.java @@ -0,0 +1,105 @@ +package org.simantics.acorn; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.ArrayList; +import java.util.Arrays; + +import org.simantics.databoard.Bindings; +import org.simantics.databoard.binding.mutable.MutableVariant; +import org.simantics.databoard.serialization.Serializer; +import org.simantics.databoard.util.binary.BinaryMemory; + +public class HeadState { + + public int headChangeSetId = 0; + public long transactionId = 1; + public long reservedIds = 3; + + public ArrayList<String> clusters = new ArrayList<>(); + public ArrayList<String> files = new ArrayList<>(); + public ArrayList<String> stream = new ArrayList<>(); + public ArrayList<String> cs = new ArrayList<>(); +// public ArrayList<String> ccs = new ArrayList<String>(); + + public static HeadState load(Path directory) throws InvalidHeadStateException { + Path f = directory.resolve("head.state"); + + try { + byte[] bytes = Files.readAllBytes(f); + MessageDigest sha1 = MessageDigest.getInstance("SHA-1"); + int digestLength = sha1.getDigestLength(); + + sha1.update(bytes, digestLength, bytes.length - digestLength); + byte[] newChecksum = sha1.digest(); + if (!Arrays.equals(newChecksum, Arrays.copyOfRange(bytes, 0, digestLength))) { + throw new InvalidHeadStateException( + "Checksum " + Arrays.toString(newChecksum) + " does not match expected " + + Arrays.toString(Arrays.copyOfRange(bytes, 0, digestLength)) + " for " + f.toAbsolutePath()); + } + + HeadState object = (HeadState) org.simantics.databoard.Files.readFile(new ByteArrayInputStream(bytes, digestLength, bytes.length - digestLength), Bindings.getBindingUnchecked(HeadState.class)); + return object; + + } catch (IOException i) { + return new HeadState(); +// throw new InvalidHeadStateException(i); + } catch (NoSuchAlgorithmException e) { + throw new Error("SHA-1 Algorithm not found", e); + } catch (Throwable t) { + throw new InvalidHeadStateException(t); + } + } + + public void save(Path directory) throws IOException { + Path f = directory.resolve("head.state"); + try { + BinaryMemory rf = new BinaryMemory(4096); + try { + MutableVariant v = new MutableVariant(Bindings.getBindingUnchecked(HeadState.class), this); + Serializer s = Bindings.getSerializerUnchecked( Bindings.VARIANT ); + // Serialize into the in-memory buffer; its bytes are checksummed and written below + s.serialize(rf, v); + } finally { + rf.close(); + } + + byte[] bytes = rf.toByteBuffer().array(); + + MessageDigest sha1 = MessageDigest.getInstance("SHA-1"); + sha1.update(bytes); + byte[] checksum = sha1.digest(); + + try (OutputStream out = Files.newOutputStream(f)) { + out.write(checksum); + out.write(bytes); + } + FileIO.syncPath(f); + } catch (NoSuchAlgorithmException e) { + throw new Error("SHA-1 digest not found, should not happen", e); + } + } + + public static void validateHeadStateIntegrity(Path headState) throws InvalidHeadStateException, IOException { + try { + byte[] bytes = Files.readAllBytes(headState); + MessageDigest sha1 = MessageDigest.getInstance("SHA-1"); + int digestLength = sha1.getDigestLength(); + sha1.update(bytes, digestLength, bytes.length - digestLength); + byte[] newChecksum = sha1.digest(); + if (!Arrays.equals(newChecksum, Arrays.copyOfRange(bytes, 0, digestLength))) { + throw new InvalidHeadStateException( + "Checksum " + Arrays.toString(newChecksum) + " does not match expected " + + Arrays.toString(Arrays.copyOfRange(bytes, 0, digestLength)) + " for " + headState.toAbsolutePath()); + } + } catch (NoSuchAlgorithmException e) { + throw new Error("SHA-1 digest not found, should not happen", e); + } + } +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/InvalidHeadStateException.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/InvalidHeadStateException.java new file mode 100644 index 000000000..2c342b71f --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/InvalidHeadStateException.java @@ -0,0 +1,27 @@ +package org.simantics.acorn; + +public class InvalidHeadStateException extends Exception { + + private static final long serialVersionUID = -7291859180968235955L; + + public InvalidHeadStateException() { + super(); + } + + 
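+ // The head.state layout guarded by HeadState.load()/save() above, as a worked example: + // bytes [0,20) hold a SHA-1 digest of bytes [20,end), and bytes [20,end) hold the + // databoard-serialized HeadState variant; a digest mismatch surfaces as this exception + // and makes MainState.load() roll back to an older head directory.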
public InvalidHeadStateException(String message, Throwable cause, boolean enableSuppression, + boolean writableStackTrace) { + super(message, cause, enableSuppression, writableStackTrace); + } + + public InvalidHeadStateException(String message, Throwable cause) { + super(message, cause); + } + + public InvalidHeadStateException(String message) { + super(message); + } + + public InvalidHeadStateException(Throwable cause) { + super(cause); + } +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/MainProgram.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/MainProgram.java new file mode 100644 index 000000000..f39a4987d --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/MainProgram.java @@ -0,0 +1,342 @@ +package org.simantics.acorn; + +import java.io.Closeable; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Semaphore; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; + +import org.simantics.acorn.lru.ClusterStreamChunk; +import org.simantics.acorn.lru.ClusterUpdateOperation; +import org.simantics.db.service.ClusterUID; +import org.simantics.utils.logging.TimeLogger; + +public class MainProgram implements Runnable, Closeable { + + private static final int CLUSTER_THREADS = 4; + private static final int CHUNK_CACHE_SIZE = 100; + + private final GraphClientImpl2 client; + private final ClusterManager clusters; + private final ExecutorService[] clusterUpdateThreads; + private final List<ClusterUpdateOperation>[] updateSchedules; + + private int residentOperationBytes = 0; + private long currentChangeSetId = -1; + private int nextChunkId = 0; + private boolean alive = true; + private Semaphore deathBarrier = new Semaphore(0); + + final Semaphore mutex = new Semaphore(1); + final LinkedList<ClusterStreamChunk> operations = new LinkedList<>(); + + static class ClusterThreadFactory implements ThreadFactory { + + final String name; + final boolean daemon; + + public ClusterThreadFactory(String name, boolean daemon) { + this.name = name; + this.daemon = daemon; + } + + @Override + public Thread newThread(Runnable r) { + Thread thread = new Thread(r, name); + thread.setDaemon(daemon); + return thread; + } + } + + public MainProgram(GraphClientImpl2 client, ClusterManager clusters) { + + this.client = client; + this.clusters = clusters; + this.clusterUpdateThreads = new ExecutorService[CLUSTER_THREADS]; + this.updateSchedules = new ArrayList[CLUSTER_THREADS]; + for(int i=0;i<CLUSTER_THREADS;i++) { + clusterUpdateThreads[i] = Executors.newSingleThreadExecutor(new ClusterThreadFactory("Cluster Updater " + (i+1), false)); + updateSchedules[i] = new ArrayList<ClusterUpdateOperation>(); + } + } + + public void startTransaction(long id) { + currentChangeSetId = id; + nextChunkId = 0; + } + + private static Comparator<ClusterUID> clusterComparator = new Comparator<ClusterUID>() { + + @Override + public int compare(ClusterUID o1, ClusterUID o2) { + return Long.compare(o1.second, o2.second); + } + }; + + @Override + public void run() { + try { + + mutex.acquire(); + main: + while(alive) { + + TreeMap<ClusterUID, List<ClusterUpdateOperation>> updates = new TreeMap<ClusterUID, List<ClusterUpdateOperation>>(clusterComparator); + + synchronized(MainProgram.this) { + + while(!operations.isEmpty() && updates.size() < 100) { + + ClusterStreamChunk chunk = operations.pollFirst(); + + for(int i=chunk.nextToProcess;i<chunk.operations.size();i++) { + ClusterUpdateOperation o = chunk.operations.get(i); + ClusterUID uid = o.uid; + List<ClusterUpdateOperation> ops = updates.get(uid); + if(ops == null) { + ops = new ArrayList<ClusterUpdateOperation>(); + updates.put(uid, ops); + } + ops.add(o); + } + + chunk.nextToProcess = chunk.operations.size(); + + if(!chunk.isCommitted()) { + assert(operations.isEmpty()); + operations.add(chunk); + break; + } + + } + + if(updates.isEmpty()) { + try { + long start = System.nanoTime(); + mutex.release(); + MainProgram.this.wait(5000); + mutex.acquire(); + if (!alive) + break main; + long duration = System.nanoTime()-start; + if(duration > 4000000000L) { + + // Was this a time-out or a new stream request? + if(operations.isEmpty()) { + + /* + * We are idling here. + * Flush all caches gradually + */ + + // Write pending cs to disk + boolean written = clusters.csLRU.swapForced(); + while(written) { + if(!updates.isEmpty()) break; + written = clusters.csLRU.swapForced(); + } + // Write pending chunks to disk + written = clusters.streamLRU.swapForced(); + while(written) { + if(!updates.isEmpty()) break; + written = clusters.streamLRU.swapForced(); + } + // Write pending files to disk + written = clusters.fileLRU.swapForced(); + while(written) { + if(!updates.isEmpty()) break; + written = clusters.fileLRU.swapForced(); + } + // Write pending clusters to disk + written = clusters.clusterLRU.swapForced(); + while(written) { + if(!updates.isEmpty()) break; + written = clusters.clusterLRU.swapForced(); + } + + client.tryMakeSnapshot(); + } + } + } catch (InterruptedException e) { + e.printStackTrace(); + } + + } + + } + +// long sss = System.nanoTime(); + + for(int i=0;i<CLUSTER_THREADS;i++) + updateSchedules[i].clear(); + + final Semaphore s = new Semaphore(0); + + for(Map.Entry<ClusterUID, List<ClusterUpdateOperation>> entry : updates.entrySet()) { + ClusterUID key = entry.getKey(); + int hash = key.hashCode() & (clusterUpdateThreads.length-1); + updateSchedules[hash].addAll(entry.getValue()); + } + + // final AtomicLong elapsed = new AtomicLong(0); + int acquireAmount = 0; + for(int i=0;i<CLUSTER_THREADS;i++) { + final List<ClusterUpdateOperation> ops = updateSchedules[i]; + if (!ops.isEmpty()) { + acquireAmount++; + clusterUpdateThreads[i].execute(() -> { + + //long st = System.nanoTime(); + for(ClusterUpdateOperation op : ops) { + op.run(); + } + s.release(); + // long duration = System.nanoTime()-st; + // elapsed.addAndGet(duration); + // double dur = 1e-9*duration; + // if(dur > 0.05) + // System.err.println("duration=" + dur + "s. " + ops.size()); + }); + } + } + + s.acquire(acquireAmount); + + /* + * Here we are actively processing updates from client. + * Maintain necessary caching here. + */ + + clusters.streamLRU.acquireMutex(); + try { + swapChunks(); + } catch (Throwable t) { + throw new IllegalStateException(t); + } finally { + clusters.streamLRU.releaseMutex(); + } + clusters.csLRU.acquireMutex(); + try { + swapCS(); + } catch (Throwable t) { + throw new IllegalStateException(t); + } finally { + clusters.csLRU.releaseMutex(); + } + + TimeLogger.log("Performed updates"); + + } + + } catch (Throwable t) { + t.printStackTrace(); + } finally { + deathBarrier.release(); + } + + } + + /* + * Mutex for streamLRU is assumed here + * + */ + private void swapChunks() { + + // Cache chunks during update operations + boolean written = clusters.streamLRU.swap(Integer.MAX_VALUE, CHUNK_CACHE_SIZE); + while(written) { + written = clusters.streamLRU.swap(Integer.MAX_VALUE, CHUNK_CACHE_SIZE); + } + + } + + private void swapCS() { + + // Cache chunks during update operations + boolean written = clusters.csLRU.swap(Integer.MAX_VALUE, CHUNK_CACHE_SIZE); + while(written) { + written = clusters.csLRU.swap(Integer.MAX_VALUE, CHUNK_CACHE_SIZE); + } + + } + + public synchronized void committed() { + + ClusterStreamChunk last = operations.isEmpty() ? null : operations.getLast(); + if (!alive) { + System.err.println("Trying to commit operation after MainProgram is closed!
Operation is " + last); +// return; + } + if(last != null) last.commit(); + + } + + public synchronized void schedule(ClusterUpdateOperation operation) { + if (!alive) { + System.err.println("Trying to schedule operation after MainProgram is closed! Operation is " + operation); +// return; + } + clusters.streamLRU.acquireMutex(); + + try { + + ClusterStreamChunk last = operations.isEmpty() ? null : operations.getLast(); + if(last == null || last.isCommitted()) { + String id = "" + currentChangeSetId + "-" + nextChunkId++; + last = new ClusterStreamChunk(clusters, clusters.streamLRU, id); + operations.add(last); + } + + String chunkId = last.getKey(); + int chunkOffset = last.operations.size(); + operation.scheduled(chunkId + "." + chunkOffset); + + last.addOperation(operation); + + swapChunks(); + + notifyAll(); + + } catch (Throwable t) { + throw new IllegalStateException(t); + } finally { + + clusters.streamLRU.releaseMutex(); + + } + + } + + @Override + public void close() { + alive = false; + synchronized (this) { + notifyAll(); + } + try { + deathBarrier.acquire(); + } catch (InterruptedException e) { + } + + for (ExecutorService executor : clusterUpdateThreads) + executor.shutdown(); + + for (int i = 0; i < clusterUpdateThreads.length; i++) { + try { + ExecutorService executor = clusterUpdateThreads[i]; + executor.awaitTermination(500, TimeUnit.MILLISECONDS); + clusterUpdateThreads[i] = null; + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + } + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/MainState.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/MainState.java new file mode 100644 index 000000000..77335289d --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/MainState.java @@ -0,0 +1,135 @@ +package org.simantics.acorn; + +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.io.Serializable; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import org.simantics.databoard.file.RuntimeIOException; +import org.simantics.utils.FileUtils; + +public class MainState implements Serializable { + + private static final long serialVersionUID = 6237383147637270225L; + + public int headDir = 0; + + public MainState() { + } + + public MainState(int headDir) { + this.headDir = headDir; + } + + public static MainState load(Path directory) throws IOException { + Files.createDirectories(directory); + Path f = directory.resolve("main.state"); + try { + MainState state = null; + try (ObjectInputStream in = new ObjectInputStream(new BufferedInputStream(Files.newInputStream(f)))) { + state = (MainState) in.readObject(); + } + while (true) { + Path last = directory.resolve(Integer.toString(state.headDir - 1)); + try { + Path headState = last.resolve("head.state"); + HeadState.validateHeadStateIntegrity(headState); + break; + } catch (InvalidHeadStateException e) { + e.printStackTrace(); + state.headDir--; + uncheckedDeleteAll(last); + } + } + return state; + } catch(IOException i) { + return new MainState( findNewHeadState(directory) ); + } catch(ClassNotFoundException c) { + throw new Error("MainState class not found", c); + } finally { + if (Files.exists(f)) { + Files.delete(f); + } + } + } + + public void save(Path directory) throws IOException { + Path f = directory.resolve("main.state"); + try 
(ObjectOutputStream out = new ObjectOutputStream(new BufferedOutputStream(Files.newOutputStream(f)))) { + out.writeObject(this); + } + FileIO.syncPath(f); + } + + private static boolean isInteger(Path p) { + try { + Integer.parseInt(p.getFileName().toString()); + return true; + } catch (NumberFormatException e) { + return false; + } + } + + /** + * TODO: shouldn't do two things in the same function, this does both head.state search and directory cleanup + * + * e.g. with intact numbered directories 3 and 7 on disk this returns 8, deleting any + * corrupt or incomplete directories it encounters on the way down + * + * @param directory + * @return + * @throws IOException + */ + private static int findNewHeadState(Path directory) throws IOException { + try (Stream<Path> s = Files.walk(directory, 1)) { + List<Path> reverseSortedPaths = s + .filter(p -> !p.equals(directory) && isInteger(p) && Files.isDirectory(p)) + .sorted((p1, p2) -> { + int p1Name = Integer.parseInt(p1.getFileName().toString()); + int p2Name = Integer.parseInt(p2.getFileName().toString()); + return Integer.compare(p2Name, p1Name); + }).collect(Collectors.toList()); + + int largest = -1; + for (Path last : reverseSortedPaths) { + Path headState = last.resolve("head.state"); + if (Files.exists(headState)) { + try { + HeadState.validateHeadStateIntegrity(headState); + largest = safeParseInt(-1, last.getFileName().toString()); + break; + } catch (IOException | InvalidHeadStateException e) { + e.printStackTrace(); + uncheckedDeleteAll(last); + } + } else { + uncheckedDeleteAll(last); + } + } + // +1 because we want to return the next head version to use, + // not the latest existing version. + return largest + 1; + } + } + + private static int safeParseInt(int defaultValue, String s) { + try { + return Integer.parseInt(s); + } catch (NumberFormatException e) { + return defaultValue; + } + } + + private static void uncheckedDeleteAll(Path path) { + try { + FileUtils.deleteAll(path.toFile()); + } catch (IOException e) { + throw new RuntimeIOException(e); + } + } + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/Persistable.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/Persistable.java new file mode 100644 index 000000000..0d209b2b8 --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/Persistable.java @@ -0,0 +1,11 @@ +package org.simantics.acorn; + +import java.io.IOException; +import java.nio.file.Path; + +public interface Persistable { + + void toFile(Path path) throws IOException; + void fromFile(byte[] data); + +} \ No newline at end of file diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/UndoClusterSupport.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/UndoClusterSupport.java new file mode 100644 index 000000000..1e7352c3e --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/UndoClusterSupport.java @@ -0,0 +1,170 @@ +package org.simantics.acorn; + +import java.io.InputStream; + +import org.simantics.db.Session; +import org.simantics.db.exception.DatabaseException; +import org.simantics.db.impl.ClusterBase; +import org.simantics.db.impl.ClusterSupport; +import org.simantics.db.impl.IClusterTable; +import org.simantics.db.service.ClusterUID; + +public class UndoClusterSupport implements ClusterSupport { + + final ClusterManager impl; + + public UndoClusterSupport(ClusterManager impl) { + this.impl = impl; + } + + @Override + public int createClusterKeyByClusterUID(ClusterUID clusterUID, + long clusterId) { + throw new UnsupportedOperationException(); + } + + @Override + public ClusterBase getClusterByClusterUIDOrMake(ClusterUID clusterUID) { + throw new UnsupportedOperationException(); + } + + 
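+ // Usage sketch, mirroring GraphClientImpl2.performUndo(): undo replays the entries of + // an UndoClusterUpdateProcessor in reverse against a ClusterChange while this class + // supplies the ClusterSupport callbacks; only the callbacks that replay actually hits + // are implemented below, the rest intentionally throw UnsupportedOperationException.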
@Override + public ClusterBase getClusterByClusterId(long clusterId) { + throw new UnsupportedOperationException(); + } + + @Override + public ClusterBase getClusterByClusterKey(int clusterKey) { + try { + return impl.getClusterByClusterKey(clusterKey); + } catch (DatabaseException e) { + e.printStackTrace(); + return null; + } + } + + @Override + public int getClusterKeyByClusterUIDOrMake(ClusterUID clusterUID) { + throw new UnsupportedOperationException(); + } + + @Override + public ClusterBase getClusterByResourceKey(int resourceKey) { + throw new UnsupportedOperationException(); + } + + @Override + public long getClusterIdOrCreate(ClusterUID clusterUID) { + throw new UnsupportedOperationException(); + } + + @Override + public void addStatement(Object cluster) { + } + + @Override + public void cancelStatement(Object cluster) { + throw new UnsupportedOperationException(); + } + + @Override + public void removeStatement(Object cluster) { + } + + @Override + public void cancelValue(Object cluster) { + throw new UnsupportedOperationException(); + } + + @Override + public void removeValue(Object cluster) { + throw new UnsupportedOperationException(); + } + + @Override + public void setValue(Object cluster, long clusterId, byte[] bytes, + int length) { + } + + @Override + public void modiValue(Object cluster, long clusterId, long voffset, + int length, byte[] bytes, int offset) { + throw new UnsupportedOperationException(); + } + + @Override + public void setImmutable(Object cluster, boolean immutable) { + throw new UnsupportedOperationException(); + } + + @Override + public void setDeleted(Object cluster, boolean deleted) { + throw new UnsupportedOperationException(); + } + + @Override + public void createResource(Object cluster, short resourceIndex, + long clusterId) { + throw new UnsupportedOperationException(); + } + + @Override + public void addStatementIndex(Object cluster, int resourceKey, + ClusterUID clusterUID, byte op) { + } + + @Override + public void setStreamOff(boolean setOff) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean getStreamOff() { + throw new UnsupportedOperationException(); + } + + @Override + public InputStream getValueStreamEx(int resourceIndex, long clusterId) + throws DatabaseException { + throw new UnsupportedOperationException(); + } + + @Override + public byte[] getValueEx(int resourceIndex, long clusterId) + throws DatabaseException { + throw new UnsupportedOperationException(); + } + + @Override + public byte[] getValueEx(int resourceIndex, long clusterId, long voffset, + int length) throws DatabaseException { + throw new UnsupportedOperationException(); + } + + @Override + public long getValueSizeEx(int resourceIndex, long clusterId) + throws DatabaseException { + throw new UnsupportedOperationException(); + } + + @Override + public int wait4RequestsLess(int limit) throws DatabaseException { + throw new UnsupportedOperationException(); + } + + @Override + public Session getSession() { + throw new UnsupportedOperationException(); + } + + @Override + public IClusterTable getClusterTable() { + throw new UnsupportedOperationException(); + } + + @Override + public int getClusterKeyByClusterUIDOrMake(long id1, long id2) { + throw new UnsupportedOperationException(); + } + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/backup/AcornBackupProvider.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/backup/AcornBackupProvider.java new file mode 100644 index 000000000..5ea0799d8 --- /dev/null +++ 
b/bundles/org.simantics.acorn/src/org/simantics/acorn/backup/AcornBackupProvider.java @@ -0,0 +1,316 @@ +package org.simantics.acorn.backup; + +import java.io.BufferedReader; +import java.io.IOException; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.LinkOption; +import java.nio.file.Path; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.StandardCopyOption; +import java.nio.file.StandardOpenOption; +import java.nio.file.attribute.BasicFileAttributes; +import java.util.Arrays; +import java.util.concurrent.Future; +import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import org.simantics.acorn.GraphClientImpl2; +import org.simantics.backup.BackupException; +import org.simantics.backup.IBackupProvider; +import org.simantics.db.server.ProCoreException; +import org.simantics.utils.FileUtils; + +/** + * @author Jani + * + * TODO: get rid of {@link GraphClientImpl2#getInstance()} invocations somehow in a cleaner way + */ +public class AcornBackupProvider implements IBackupProvider { + + private static final String IDENTIFIER = "AcornBackupProvider"; + private long trId = -1; + private final Semaphore lock = new Semaphore(1); + + private static Path getAcornMetadataFile(Path dbFolder) { + return dbFolder.getParent().resolve(IDENTIFIER); + } + + @Override + public void lock() throws BackupException { + try { + if (trId != -1) + throw new IllegalStateException(this + " backup provider is already locked"); + trId = GraphClientImpl2.getInstance().askWriteTransaction(-1) + .getTransactionId(); + } catch (ProCoreException e) { + e.printStackTrace(); + } + } + + @Override + public Future<BackupException> backup(Path targetPath, int revision) throws BackupException { + boolean releaseLock = true; + try { + lock.acquire(); + + GraphClientImpl2 client = GraphClientImpl2.getInstance(); + client.makeSnapshot(true); + + Path dbDir = client.getDbFolder(); + int newestFolder = client.clusters.mainState.headDir - 1; + int latestFolder = -2; + Path AcornMetadataFile = getAcornMetadataFile(dbDir); + if (Files.exists(AcornMetadataFile)) { + try (BufferedReader br = Files.newBufferedReader(AcornMetadataFile)) { + latestFolder = Integer.parseInt( br.readLine() ); + } + } + + AcornBackupRunnable r = new AcornBackupRunnable( + lock, targetPath, revision, dbDir, latestFolder, newestFolder); + new Thread(r, "Acorn backup thread").start(); + + releaseLock = false; + return r; + } catch (InterruptedException e) { + releaseLock = false; + throw new BackupException("Failed to lock Acorn for backup.", e); + } catch (NumberFormatException e) { + throw new BackupException("Failed to read Acorn head state file.", e); + } catch (IOException e) { + throw new BackupException("I/O problem during Acorn backup.", e); + } finally { + if (releaseLock) + lock.release(); + } + } + + @Override + public void unlock() throws BackupException { + try { + if (trId == -1) + throw new BackupException(this + " backup provider is not locked"); + GraphClientImpl2.getInstance().endTransaction(trId); + trId = -1; + } catch (ProCoreException e) { + throw new BackupException(e); + } + } + + @Override + public void restore(Path fromPath, int revision) { + try { + // 1. Resolve initial backup restore target. + // This can be DB directory directly or a temporary directory that + // will replace the DB directory.
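+ // On-disk backup layout, as a worked example with hypothetical numbers: backup() + // writes to targetPath/<revision>/AcornBackupProvider/<headDir folders>, and only + // folders in the range (latestFolder, newestFolder] are copied, so with a previous + // backup at 5 and headDir 10 the increment contains folders 6..9; restore() walks + // that tree back into the database directory.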
+ Path dbRoot = GraphClientImpl2.getInstance().getDbFolder(); + Path restorePath = dbRoot; + if (!Files.exists(dbRoot, LinkOption.NOFOLLOW_LINKS)) { + Files.createDirectories(dbRoot); + } else { + Path dbRootParent = dbRoot.getParent(); + restorePath = dbRootParent == null ? Files.createTempDirectory("restore") + : Files.createTempDirectory(dbRootParent, "restore"); + } + + // 2. Restore the backup. + Files.walkFileTree(fromPath, new RestoreCopyVisitor(restorePath, revision)); + + // 3. Override existing DB root with restored temporary copy if necessary. + if (dbRoot != restorePath) { + FileUtils.deleteAll(dbRoot.toFile()); + Files.move(restorePath, dbRoot); + } + } catch (IOException e) { + e.printStackTrace(); + } + } + + private class RestoreCopyVisitor extends SimpleFileVisitor<Path> { + + private final Path toPath; + private final int revision; + private Path currentSubFolder; + + public RestoreCopyVisitor(Path toPath, int revision) { + this.toPath = toPath; + this.revision = revision; + } + + @Override + public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException { + Path dirName = dir.getFileName(); + if (dirName.toString().equals(IDENTIFIER)) { + currentSubFolder = dir; + return FileVisitResult.CONTINUE; + } else if (dir.getParent().getFileName().toString().equals(IDENTIFIER)) { + Path targetPath = toPath.resolve(dirName); + if (!Files.exists(targetPath)) { + Files.createDirectory(targetPath); + } + return FileVisitResult.CONTINUE; + } else if (dirName.toString().length() == 1 && Character.isDigit(dirName.toString().charAt(0))) { + // Note: only single-digit revision folder names are recognized here + int dirNameInt = Integer.parseInt(dirName.toString()); + if (dirNameInt <= revision) { + return FileVisitResult.CONTINUE; + } else { + return FileVisitResult.SKIP_SUBTREE; + } + } else { + return FileVisitResult.CONTINUE; + } + } + + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + if (file.getFileName().toString().endsWith(".tar.gz")) + return FileVisitResult.CONTINUE; + System.out.println("Restore " + file + " to " + toPath.resolve(currentSubFolder.relativize(file))); + Files.copy(file, toPath.resolve(currentSubFolder.relativize(file)), StandardCopyOption.REPLACE_EXISTING); + return FileVisitResult.CONTINUE; + } + } + + private static class AcornBackupRunnable implements Runnable, Future<BackupException> { + + private final Semaphore lock; + private final Path targetPath; + private final int revision; + private final Path baseDir; + private final int latestFolder; + private final int newestFolder; + + private boolean done = false; + private final Semaphore completion = new Semaphore(0); + private BackupException exception = null; + + public AcornBackupRunnable(Semaphore lock, Path targetPath, int revision, + Path baseDir, int latestFolder, int newestFolder) { + this.lock = lock; + this.targetPath = targetPath; + this.revision = revision; + this.baseDir = baseDir; + this.latestFolder = latestFolder; + this.newestFolder = newestFolder; + } + + @Override + public void run() { + try { + doBackup(); + writeHeadstateFile(); + } catch (IOException e) { + exception = new BackupException("Acorn backup failed", e); + rollback(); + } finally { + done = true; + lock.release(); + completion.release(); + } + } + + private void doBackup() throws IOException { + Path target = targetPath.resolve(String.valueOf(revision)).resolve(IDENTIFIER); + if (!Files.exists(target)) + Files.createDirectories(target); + Files.walkFileTree(baseDir, + new BackupCopyVisitor(baseDir, target)); + } + + private void
writeHeadstateFile() throws IOException { + Path AcornMetadataFile = getAcornMetadataFile(baseDir); + if (!Files.exists(AcornMetadataFile)) { + Files.createFile(AcornMetadataFile); + } + Files.write(AcornMetadataFile, + Arrays.asList(Integer.toString(newestFolder)), + StandardOpenOption.WRITE, + StandardOpenOption.TRUNCATE_EXISTING, + StandardOpenOption.CREATE); + } + + private void rollback() { + // TODO + } + + private class BackupCopyVisitor extends SimpleFileVisitor<Path> { + + private Path fromPath; + private Path toPath; + + public BackupCopyVisitor(Path fromPath, Path toPath) { + this.fromPath = fromPath; + this.toPath = toPath; + } + + @Override + public FileVisitResult preVisitDirectory(Path dir, + BasicFileAttributes attrs) throws IOException { + Path dirName = dir.getFileName(); + if (dirName.equals(fromPath)) { + Path targetPath = toPath.resolve(fromPath.relativize(dir)); + if (!Files.exists(targetPath)) { + Files.createDirectory(targetPath); + } + return FileVisitResult.CONTINUE; + } else { + int dirNameInt = Integer.parseInt(dirName.toString()); + if (latestFolder < dirNameInt && dirNameInt <= newestFolder) { + Path targetPath = toPath.resolve(fromPath + .relativize(dir)); + if (!Files.exists(targetPath)) { + Files.createDirectory(targetPath); + } + return FileVisitResult.CONTINUE; + } + return FileVisitResult.SKIP_SUBTREE; + } + } + + @Override + public FileVisitResult visitFile(Path file, + BasicFileAttributes attrs) throws IOException { + System.out.println("Backup " + file + " to " + + toPath.resolve(fromPath.relativize(file))); + Files.copy(file, toPath.resolve(fromPath.relativize(file)), + StandardCopyOption.REPLACE_EXISTING); + return FileVisitResult.CONTINUE; + } + } + + @Override + public boolean cancel(boolean mayInterruptIfRunning) { + return false; + } + + @Override + public boolean isCancelled() { + return false; + } + + @Override + public boolean isDone() { + return done; + } + + @Override + public BackupException get() throws InterruptedException { + completion.acquire(); + completion.release(); + return exception; + } + + @Override + public BackupException get(long timeout, TimeUnit unit) throws InterruptedException, TimeoutException { + if (completion.tryAcquire(timeout, unit)) + completion.release(); + else + throw new TimeoutException("Acorn backup completion waiting timed out."); + return exception; + } + + } + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterBig.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterBig.java new file mode 100644 index 000000000..f623d587f --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterBig.java @@ -0,0 +1,1104 @@ +/******************************************************************************* + * Copyright (c) 2007, 2010 Association for Decentralized Information Management + * in Industry THTH ry. + * All rights reserved.
This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * VTT Technical Research Centre of Finland - initial API and implementation + *******************************************************************************/ +package org.simantics.acorn.cluster; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.Arrays; + +import org.simantics.acorn.internal.ClusterChange; +import org.simantics.acorn.internal.ClusterStream; +import org.simantics.acorn.internal.ClusterSupport2; +import org.simantics.acorn.internal.DebugPolicy; +import org.simantics.db.Resource; +import org.simantics.db.exception.DatabaseException; +import org.simantics.db.exception.ExternalValueException; +import org.simantics.db.exception.ValidationException; +import org.simantics.db.impl.ClusterI; +import org.simantics.db.impl.ClusterI.PredicateProcedure; +import org.simantics.db.impl.ClusterSupport; +import org.simantics.db.impl.ClusterTraitsBase; +import org.simantics.db.impl.ForEachObjectContextProcedure; +import org.simantics.db.impl.ForEachObjectProcedure; +import org.simantics.db.impl.ForPossibleRelatedValueContextProcedure; +import org.simantics.db.impl.ForPossibleRelatedValueProcedure; +import org.simantics.db.impl.IClusterTable; +import org.simantics.db.impl.Table; +import org.simantics.db.impl.TableHeader; +import org.simantics.db.impl.graph.ReadGraphImpl; +import org.simantics.db.impl.query.QueryProcessor; +import org.simantics.db.procedure.AsyncContextMultiProcedure; +import org.simantics.db.procedure.AsyncMultiProcedure; +import org.simantics.db.procore.cluster.ClusterMap; +import org.simantics.db.procore.cluster.ClusterPrintDebugInfo; +import org.simantics.db.procore.cluster.ClusterTraits; +import org.simantics.db.procore.cluster.CompleteTable; +import org.simantics.db.procore.cluster.FlatTable; +import org.simantics.db.procore.cluster.ForeignTable; +import org.simantics.db.procore.cluster.ObjectTable; +import org.simantics.db.procore.cluster.PredicateTable; +import org.simantics.db.procore.cluster.ResourceTable; +import org.simantics.db.procore.cluster.ValueTable; +import org.simantics.db.service.ClusterUID; +import org.simantics.utils.datastructures.Callback; + +final public class ClusterBig extends ClusterImpl { + private static final int TABLE_HEADER_SIZE = TableHeader.HEADER_SIZE + TableHeader.EXTRA_SIZE; + private static final int RESOURCE_TABLE_OFFSET = 0; + private static final int PREDICATE_TABLE_OFFSET = RESOURCE_TABLE_OFFSET + TABLE_HEADER_SIZE; + private static final int OBJECT_TABLE_OFFSET = PREDICATE_TABLE_OFFSET + TABLE_HEADER_SIZE; + private static final int VALUE_TABLE_OFFSET = OBJECT_TABLE_OFFSET + TABLE_HEADER_SIZE; + private static final int FLAT_TABLE_OFFSET = VALUE_TABLE_OFFSET + TABLE_HEADER_SIZE; + private static final int COMPLETE_TABLE_OFFSET = FLAT_TABLE_OFFSET + TABLE_HEADER_SIZE; + private static final int FOREIGN_TABLE_OFFSET = COMPLETE_TABLE_OFFSET + TABLE_HEADER_SIZE; + private static final int INT_HEADER_SIZE = FOREIGN_TABLE_OFFSET + TABLE_HEADER_SIZE; + private final int clusterBits; + final private ResourceTable resourceTable; + //final private ResourceTable movedResourceTable; + final private PredicateTable predicateTable; + final private ObjectTable objectTable; + final private ValueTable valueTable; + final private FlatTable 
flatTable; + final private ForeignTable foreignTable; + final private CompleteTable completeTable; + final private ClusterMap clusterMap; + final private int[] headerTable; + final private ClusterSupport2 clusterSupport; + + public ClusterBig(IClusterTable clusterTable, ClusterUID clusterUID, int clusterKey, ClusterSupport2 support) { + super(clusterTable, clusterUID, clusterKey, support); + if(DebugPolicy.REPORT_CLUSTER_EVENTS) + new Exception(getClusterUID().toString()).printStackTrace(); + this.headerTable = new int[INT_HEADER_SIZE]; + this.resourceTable = new ResourceTable(this, headerTable, RESOURCE_TABLE_OFFSET); + this.foreignTable = new ForeignTable(this, headerTable, FOREIGN_TABLE_OFFSET); + this.predicateTable = new PredicateTable(this, headerTable, PREDICATE_TABLE_OFFSET); + this.objectTable = new ObjectTable(this, headerTable, OBJECT_TABLE_OFFSET); + this.valueTable = new ValueTable(this, headerTable, VALUE_TABLE_OFFSET); + this.completeTable = new CompleteTable(this, headerTable, COMPLETE_TABLE_OFFSET); + this.flatTable = null; + this.clusterMap = new ClusterMap(foreignTable, flatTable); + this.clusterSupport = support; + this.clusterBits = ClusterTraitsBase.getClusterBits(clusterKey); + this.importance = 0; +// clusterTable.setDirtySizeInBytes(true); + } + protected ClusterBig(IClusterTable clusterTable, long[] longs, int[] ints, byte[] bytes, ClusterSupport2 support, int clusterKey) + throws DatabaseException { + super(clusterTable, checkValidity(0, longs, ints, bytes), clusterKey, support); + if(DebugPolicy.REPORT_CLUSTER_EVENTS) + new Exception(getClusterUID().toString()).printStackTrace(); + if (ints.length < INT_HEADER_SIZE) + throw new IllegalArgumentException("Too small integer table for cluster."); + this.headerTable = ints; + this.resourceTable = new ResourceTable(this, ints, RESOURCE_TABLE_OFFSET, longs); + this.foreignTable = new ForeignTable(this, headerTable, FOREIGN_TABLE_OFFSET, longs); + this.predicateTable = new PredicateTable(this, ints, PREDICATE_TABLE_OFFSET, ints); + this.objectTable = new ObjectTable(this, ints, OBJECT_TABLE_OFFSET, ints); + this.valueTable = new ValueTable(this, ints, VALUE_TABLE_OFFSET, bytes); + this.flatTable = null; + this.completeTable = new CompleteTable(this, headerTable, COMPLETE_TABLE_OFFSET, ints); + this.clusterMap = new ClusterMap(foreignTable, flatTable); + this.clusterSupport = support; + this.clusterBits = ClusterTraitsBase.getClusterBits(clusterKey); + } + void analyse() { + System.out.println("Cluster " + clusterId); + System.out.println("-size:" + getUsedSpace()); + System.out.println(" -rt:" + (resourceTable.getTableCapacity() * 8 + 8)); + System.out.println(" -ft:" + foreignTable.getTableCapacity() * 8); + System.out.println(" -pt:" + predicateTable.getTableCapacity() * 4); + System.out.println(" -ot:" + objectTable.getTableCapacity() * 4); + System.out.println(" -ct:" + completeTable.getTableCapacity() * 4); + System.out.println(" -vt:" + valueTable.getTableCapacity()); + + System.out.println("-resourceTable:"); + System.out.println(" -resourceCount=" + resourceTable.getResourceCount()); + System.out.println(" -size=" + resourceTable.getTableSize()); + System.out.println(" -capacity=" + resourceTable.getTableCapacity()); + System.out.println(" -count=" + resourceTable.getTableCount()); + System.out.println(" -size=" + resourceTable.getTableSize()); + //resourceTable.analyse(); + } + public void checkDirectReference(int dr) + throws DatabaseException { + if (!ClusterTraits.statementIndexIsDirect(dr)) + throw new 
ValidationException("Reference is not direct. Reference=" + dr); + if (ClusterTraits.isFlat(dr)) + throw new ValidationException("Reference is flat. Reference=" + dr); + if (ClusterTraits.isLocal(dr)) { + if (dr < 1 || dr > resourceTable.getUsedSize()) + throw new ValidationException("Illegal local reference. Reference=" + dr); + } else { + int fi = ClusterTraits.getForeignIndexFromReference(dr); + int ri = ClusterTraits.getResourceIndexFromForeignReference(dr); + if (fi < 1 || fi > foreignTable.getUsedSize()) + throw new ValidationException("Illegal foreign reference. Reference=" + dr + " foreign index=" + fi); + if (ri < 1 || ri > ClusterTraits.getMaxNumberOfResources()) + throw new ValidationException("Illegal foreign reference. Reference=" + dr + " resource index=" + ri); + } + } + public void checkPredicateIndex(int pi) + throws DatabaseException { + predicateTable.checkPredicateSetIndex(this, pi); + } + public void checkObjectSetReference(int or) + throws DatabaseException { + if (ClusterTraits.statementIndexIsDirect(or)) + throw new ValidationException("Illegal object set reference. Reference=" + or); + int oi = ClusterTraits.statementIndexGet(or); + this.objectTable.checkObjectSetIndex(this, oi); + } + + public void checkValueInit() + throws DatabaseException { + valueTable.checkValueInit(); + } + public void checkValue(int capacity, int index) + throws DatabaseException { + valueTable.checkValue(capacity, index); + } + public void checkValueFini() + throws DatabaseException { + valueTable.checkValueFini(); + } + public void checkForeingIndex(int fi) + throws DatabaseException { + if (fi<1 || fi > foreignTable.getUsedSize()) + throw new ValidationException("Illegal foreign index=" + fi); + } + public void checkCompleteSetReference(int cr) + throws DatabaseException { + if (!ClusterTraits.completeReferenceIsMultiple(cr)) + throw new ValidationException("Illegal complete set reference. Reference=" + cr); + int ci = cr; + this.completeTable.checkCompleteSetIndex(this, ci); + } + public void check() + throws DatabaseException { + this.completeTable.check(this); + this.objectTable.check(this); + // Must be after object table check. 
+ this.predicateTable.check(this); + this.resourceTable.check(this); + } + @Override + public CompleteTypeEnum getCompleteType(int resourceKey, ClusterSupport support) + throws DatabaseException { + final int resourceRef = getLocalReference(resourceKey); + int completeRef = resourceTable.getCompleteObjectRef(resourceRef); + CompleteTypeEnum ct = ClusterTraits.completeReferenceGetType(completeRef); + if (DEBUG) + System.out.println("Cluster.getCompleteType rk=" + resourceKey + " ct=" + ct); + int i = ct.getValue(); + switch (i) { + case 0: return CompleteTypeEnum.NotComplete; + case 1: return CompleteTypeEnum.InstanceOf; + case 2: return CompleteTypeEnum.Inherits; + case 3: return CompleteTypeEnum.SubrelationOf; + default: throw new DatabaseException("Illegal complete type enumeration."); + } + } + + @Override + public int getCompleteObjectKey(int resourceKey, ClusterSupport support) + throws DatabaseException { + final int resourceRef = getLocalReference(resourceKey); + int completeRef = resourceTable.getCompleteObjectRef(resourceRef); + int clusterIndex; + int resourceIndex = ClusterTraits.completeReferenceGetResourceIndex(completeRef); + + ClusterI.CompleteTypeEnum completeType = ClusterTraits.completeReferenceGetType(completeRef); + if (completeType == ClusterI.CompleteTypeEnum.NotComplete) + throw new DatabaseException("Resource has multiple complete objects. Resource key=" + resourceKey + "."); + + if (ClusterTraits.completeReferenceIsLocal(completeRef)) { + clusterIndex = clusterKey; + } else { + int foreignIndex = ClusterTraits.completeReferenceGetForeignIndex(completeRef); +// System.err.println("completeRef=" + completeRef + " foreignIndex=" + foreignIndex ); + ClusterUID clusterUID = foreignTable.getResourceUID(foreignIndex).asCID(); + ClusterI c = support.getClusterByClusterUIDOrMake(clusterUID); + clusterIndex = c.getClusterKey(); + } + int key = ClusterTraits.createResourceKey(clusterIndex, resourceIndex); + if (DEBUG) + System.out.println("Cluster.complete object rk=" + resourceKey + " ck=" + key); + return key; + } + + @Override + public boolean isComplete(int resourceKey, ClusterSupport support) + throws DatabaseException { + final int resourceRef = getLocalReference(resourceKey); + int completeRef = resourceTable.getCompleteObjectRef(resourceRef); + ClusterI.CompleteTypeEnum completeType = ClusterTraits.completeReferenceGetType(completeRef); + boolean complete = completeType != ClusterI.CompleteTypeEnum.NotComplete; + if (DEBUG) + System.out.println("Cluster.key=" + resourceKey + " isComplete=" + complete); + return complete; + } + + public int getSingleObject(int resourceKey, int predicateKey, int objectIndex, ClusterSupport support) throws DatabaseException { + if (DEBUG) + System.out.println("Cluster.getSingleObject: rk=" + resourceKey + " pk=" + predicateKey); + if (0 == objectIndex) { + final int resourceIndex = getLocalReference(resourceKey); + final int pRef = getInternalReferenceOrZero(predicateKey, support); + final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey); + return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this); + } + return objectTable.getSingleObject(objectIndex, support, this); + } + + public void forObjects(int resourceKey, int predicateKey, int objectIndex, QueryProcessor processor, ReadGraphImpl graph, AsyncMultiProcedure procedure, + ClusterSupport support) throws DatabaseException { + if (DEBUG) + System.out.println("Cluster.forObjects1: rk=" + 
resourceKey + " pk=" + predicateKey); + if (0 == objectIndex) { + final int resourceIndex = getLocalReference(resourceKey); + final int pRef = getInternalReferenceOrZero(predicateKey, support); + final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey); + resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this); + return; + } + objectTable.foreachObject(graph, objectIndex, procedure, this); + } + public void forObjects(int resourceKey, int predicateKey, int objectIndex, QueryProcessor processor, ReadGraphImpl graph, C context, AsyncContextMultiProcedure procedure, + ClusterSupport support) throws DatabaseException { + if (DEBUG) + System.out.println("Cluster.forObjects1: rk=" + resourceKey + " pk=" + predicateKey); + if (0 == objectIndex) { + final int resourceIndex = getLocalReference(resourceKey); + final int pRef = getInternalReferenceOrZero(predicateKey, support); + final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey); + resourceTable.foreachObject(resourceIndex, graph, context, procedure, support, pRef, pCompleteType, completeTable, this); + return; + } + objectTable.foreachObject(graph, objectIndex, context, procedure, this); + } + @Override + public boolean forObjects(int resourceKey, int predicateKey, int objectIndex, ObjectProcedure procedure, + Context context, ClusterSupport support) throws DatabaseException { + if (DEBUG) + System.out.println("Cluster.forObjects2: rk=" + resourceKey + " pk=" + predicateKey); + if (0 == objectIndex) { + final int resourceIndex = getLocalReference(resourceKey); + final int pRef = getInternalReferenceOrZero(predicateKey, support); + final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey); + return resourceTable.foreachObject(resourceIndex, procedure, context, support, this, pRef, pCompleteType, completeTable); + } + return objectTable.foreachObject(objectIndex, procedure, context, support, this); + } + + @Override + public int getSingleObject(int resourceKey, int predicateKey, ClusterSupport support) throws DatabaseException { + if (DEBUG) + System.out.println("Cluster.getSingleObject2: rk=" + resourceKey + " pk=" + predicateKey); + final int resourceIndex = getLocalReference(resourceKey); + final int pRef = getInternalReferenceOrZero(predicateKey, support); + final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey); + if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) + return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this); + final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex); + if (0 == predicateIndex) + return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this); + int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef); + return getSingleObject(resourceKey, predicateKey, objectIndex, support); + } + + @Override + public int getSingleObject(int resourceKey, ForPossibleRelatedValueProcedure procedure, ClusterSupport support) throws DatabaseException { + final int predicateKey = procedure.predicateKey; + if (DEBUG) + System.out.println("Cluster.getSingleObject2: rk=" + resourceKey + " pk=" + predicateKey); + final int resourceIndex = getLocalReference(resourceKey); + final int pRef = getInternalReferenceOrZero(predicateKey, support); + 
final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey); + if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) + return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this); + final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex); + if (0 == predicateIndex) + return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this); + int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef); + return getSingleObject(resourceKey, predicateKey, objectIndex, support); + } + + @Override + public int getSingleObject(int resourceKey, ForPossibleRelatedValueContextProcedure procedure, ClusterSupport support) throws DatabaseException { + final int predicateKey = procedure.predicateKey; + if (DEBUG) + System.out.println("Cluster.getSingleObject2: rk=" + resourceKey + " pk=" + predicateKey); + final int resourceIndex = getLocalReference(resourceKey); + final int pRef = getInternalReferenceOrZero(predicateKey, support); + final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey); + if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) + return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this); + final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex); + if (0 == predicateIndex) + return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this); + int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef); + return getSingleObject(resourceKey, predicateKey, objectIndex, support); + } + + @Override + public void forObjects(ReadGraphImpl graph, int resourceKey, + int predicateKey, AsyncMultiProcedure procedure) + throws DatabaseException { + + throw new UnsupportedOperationException(); + +// SessionImplSocket session = (SessionImplSocket)graph.getSession(); +// ClusterSupport support = session.clusterTranslator; +// +// if (DEBUG) +// System.out.println("Cluster.forObjects3: rk=" + resourceKey + " pk=" + predicateKey); +// final int resourceIndex = getLocalReference(resourceKey); +// final int pRef = getInternalReferenceOrZero(predicateKey, support); +// final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey); +// if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) { +// resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this); +// return; +// } +// final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex); +// if (0 == predicateIndex) { +// resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this); +// return; +// } +// int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef); +// forObjects(resourceKey, predicateKey, objectIndex, graph.processor, graph, procedure, support); + + } + + @Override + public void forObjects(ReadGraphImpl graph, int resourceKey, ForEachObjectProcedure procedure) throws DatabaseException { + + throw new UnsupportedOperationException(); + +// SessionImplSocket session = (SessionImplSocket)graph.getSession(); +// ClusterSupport support = session.clusterTranslator; +// final int predicateKey = procedure.predicateKey; +// if (DEBUG) +// System.out.println("Cluster.forObjects3: rk=" + resourceKey + " pk=" + predicateKey); +// final int resourceIndex 
= getLocalReference(resourceKey); +// final int pRef = getInternalReferenceOrZero(predicateKey, support); +// final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey); +// if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) { +// resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this); +// return; +// } +// final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex); +// if (0 == predicateIndex) { +// resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this); +// return; +// } +// int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef); +// forObjects(resourceKey, predicateKey, objectIndex, graph.processor, graph, procedure, support); + + } + @Override + public void forObjects(ReadGraphImpl graph, int resourceKey, C context, + ForEachObjectContextProcedure procedure) throws DatabaseException { + + throw new UnsupportedOperationException(); + +// SessionImplSocket session = (SessionImplSocket)graph.getSession(); +// ClusterSupport support = session.clusterTranslator; +// +// final int predicateKey = procedure.predicateKey; +// +// if (DEBUG) +// System.out.println("Cluster.forObjects3: rk=" + resourceKey + " pk=" + predicateKey); +// final int resourceIndex = getLocalReference(resourceKey); +// final int pRef = getInternalReferenceOrZero(predicateKey, support); +// final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey); +// if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) { +// resourceTable.foreachObject(resourceIndex, graph, context, procedure, support, pRef, pCompleteType, completeTable, this); +// return; +// } +// final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex); +// if (0 == predicateIndex) { +// resourceTable.foreachObject(resourceIndex, graph, context, procedure, support, pRef, pCompleteType, completeTable, this); +// return; +// } +// int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef); +// forObjects(resourceKey, predicateKey, objectIndex, graph.processor, graph, context, procedure, support); + + } + + @Override + public boolean forObjects(int resourceKey, int predicateKey, + ObjectProcedure procedure, Context context, ClusterSupport support) + throws DatabaseException { + if (DEBUG) + System.out.println("Cluster.forObjects4: rk=" + resourceKey + " pk=" + predicateKey); + final int resourceIndex = getLocalReference(resourceKey); + final int pRef = getInternalReferenceOrZero(predicateKey, support); + final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey); + if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) + return resourceTable.foreachObject(resourceIndex, procedure, context, support, this, pRef, pCompleteType, completeTable); + final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex); + if (0 == predicateIndex) + return resourceTable.foreachObject(resourceIndex, procedure, context, support, this, pRef, pCompleteType, completeTable); + int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef); + return forObjects(resourceKey, predicateKey, objectIndex, procedure, context, support); + } + @Override + public boolean forPredicates(int resourceKey, + PredicateProcedure procedure, Context context, ClusterSupport support) + throws DatabaseException { + if (DEBUG) + 
System.out.println("Cluster.forPredicates: rk=" + resourceKey); + final int resourceIndex = getLocalReference(resourceKey); + final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex); + if (0 == predicateIndex) + return resourceTable.foreachPredicate(resourceIndex, + procedure, context, support, this, completeTable); + else { + boolean broken = resourceTable.foreachPredicate(resourceIndex, + procedure, context, support, this, completeTable); + if (broken) + return true; + } + return predicateTable.foreachPredicate(predicateIndex, procedure, context, support, this); + } + @Override + public ClusterI addRelation(int sResourceKey, ClusterUID puid, int pResourceKey, ClusterUID ouid, int oResourceKey, ClusterSupport support) + throws DatabaseException { + if (DEBUG) + System.out.println("add rk=" + sResourceKey + " pk=" + pResourceKey + " ok=" + oResourceKey); + int sri = getLocalReferenceAnd(sResourceKey, support, ClusterChange.ADD_OPERATION); + int pri = getReferenceOrCreateIfForeign(pResourceKey, puid, support, ClusterStream.NULL_OPERATION); + int ori = getReferenceOrCreateIfForeign(oResourceKey, ouid, support, ClusterStream.NULL_OPERATION); + ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey); + boolean ret = addRelationInternal(sri, pri, ori, completeType); +// check(); + if (ret) { + support.addStatement(this); + return this; + } else { + support.cancelStatement(this); + return null; + } + } + @Override + public ClusterI addRelation(int sResourceKey, int pResourceKey, int oResourceKey, ClusterSupport support) + throws DatabaseException { + if (DEBUG) + System.out.println("add rk=" + sResourceKey + " pk=" + pResourceKey + " ok=" + oResourceKey); + int sri = getLocalReferenceAnd(sResourceKey, support, ClusterChange.ADD_OPERATION); + int pri = getReferenceOrCreateIfForeign(pResourceKey, support, ClusterStream.NULL_OPERATION); + int ori = getReferenceOrCreateIfForeign(oResourceKey, support, ClusterStream.NULL_OPERATION); + ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey); + boolean ret = addRelationInternal(sri, pri, ori, completeType); +// check(); + if (ret) { + support.addStatement(this); + return this; + } else { + support.cancelStatement(this); + return null; + } + } + @Override + public boolean removeRelation(int sResourceKey, int pResourceKey, int oResourceKey, ClusterSupport support) + throws DatabaseException { +// check(); + int sri = getLocalReferenceAnd(sResourceKey, support, ClusterChange.REMOVE_OPERATION); + int pri = getInternalReferenceOrZeroAnd(pResourceKey, support, ClusterStream.NULL_OPERATION); + int ori = getInternalReferenceOrZeroAnd(oResourceKey, support, ClusterStream.NULL_OPERATION); + boolean ret = false; + if (0 != pri && 0 != ori) { + ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey); + ret = removeRelationInternal(sri, pri, ori, completeType, support); + } + if (ret) + support.removeStatement(this); + else + support.cancelStatement(this); +// check(); + return ret; + } + @Override + public void denyRelation(int sResourceKey, int pResourceKey, int oResourceKey, ClusterSupport support) + throws DatabaseException { + int sri = checkResourceKeyIsOursAndGetResourceIndexIf(sResourceKey, support); + ResourceIndexAndId p = checkResourceKeyAndGetResourceIndexIf(pResourceKey, support); + ResourceIndexAndId o = checkResourceKeyAndGetResourceIndexIf(oResourceKey, support); + if (0 == sri || 0 == 
p.index || 0 == o.index) + return; +// check(); + ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey); + boolean ret = removeRelationInternal(sri, p.reference, o.reference, completeType, support); + if (ret) { + support.addStatementIndex(this, sResourceKey, getClusterUID(), ClusterChange.REMOVE_OPERATION); + support.addStatementIndex(this, pResourceKey, p.clusterUID, ClusterStream.NULL_OPERATION); + support.addStatementIndex(this, oResourceKey, o.clusterUID, ClusterStream.NULL_OPERATION); + support.removeStatement(this); + } +// check(); + return; + } + @Override + public InputStream getValueStream(int rResourceId, ClusterSupport support) throws DatabaseException { + if (DEBUG) + System.out.println("ClusterBig.getValue " + rResourceId); + int resourceIndex = getLocalReference(rResourceId); + try { + byte[] buffer = resourceTable.getValue(valueTable, resourceIndex); + if(buffer == null) return null; + return new ByteArrayInputStream(buffer); + } catch (ExternalValueException e) { + return support.getValueStreamEx(resourceIndex, clusterUID.second); + } + } + @Override + public byte[] getValue(int rResourceId, ClusterSupport support) + throws DatabaseException { + if (DEBUG) + System.out.println("ClusterBig.getValue " + rResourceId); + int resourceIndex = getLocalReference(rResourceId); + try { + return resourceTable.getValue(valueTable, resourceIndex); + } catch (ExternalValueException e) { + return clusterSupport.impl.getResourceFile(clusterUID.asBytes(), resourceIndex); +// return support.getValueEx(resourceIndex, clusterUID.second); + } + } + @Override + public boolean hasValue(int rResourceId, ClusterSupport support) + throws DatabaseException { + int resourceIndex = getLocalReference(rResourceId); + return resourceTable.hasValue(resourceIndex); + } + @Override + public boolean removeValue(int rResourceId, ClusterSupport support) + throws DatabaseException { + int resourceIndex = getLocalReferenceAnd(rResourceId, support, ClusterChange.DELETE_OPERATION); + support.removeValue(this); + return resourceTable.removeValue(valueTable, resourceIndex); + } + + @Override + public ClusterI setValue(int rResourceId, byte[] value, int length, ClusterSupport support) + throws DatabaseException { + int resourceIndex = getLocalReferenceAnd(rResourceId, support, ClusterStream.SET_OPERATION); + support.setValue(this, getClusterId(), value, length); + resourceTable.setValue(valueTable, resourceIndex, value, length); + return this; + } + @Override + public ClusterI modiValueEx(int rResourceId, long voffset, int length, byte[] value, int offset, ClusterSupport support) + throws DatabaseException { + int resourceIndex = getLocalReferenceAnd(rResourceId, support, ClusterStream.MODI_OPERATION); + support.modiValue(this, getClusterId(), voffset, length, value, offset); + resourceTable.setValueEx(valueTable, resourceIndex); + return this; + } + @Override + public byte[] readValueEx(int rResourceId, long voffset, int length, ClusterSupport support) + throws DatabaseException { + int resourceIndex = getLocalReference(rResourceId); + boolean isExternal = resourceTable.isValueEx(valueTable, resourceIndex); + if (!isExternal) + throw new DatabaseException("ClusterI.readValue supported only for external value. 
Resource key=" + rResourceId); + return support.getValueEx(resourceIndex, getClusterId(), voffset, length); + } + @Override + public long getValueSizeEx(int resourceKey, ClusterSupport support) + throws DatabaseException, ExternalValueException { + int resourceIndex = getLocalReference(resourceKey); + boolean isExternal = resourceTable.isValueEx(valueTable, resourceIndex); + if (!isExternal) + throw new ExternalValueException("ClusterI.getSize supported only for external value. Resource key=" + resourceKey); + return support.getValueSizeEx(resourceIndex, getClusterId()); + } + public boolean isValueEx(int resourceKey) + throws DatabaseException { + int resourceIndex = getLocalReference(resourceKey); + return resourceTable.isValueEx(valueTable, resourceIndex); + } + @Override + public void setValueEx(int resourceKey) + throws DatabaseException { + int resourceIndex = getLocalReference(resourceKey); + resourceTable.setValueEx(valueTable, resourceIndex); + } + @Override + public int createResource(ClusterSupport support) + throws DatabaseException { + short resourceIndex = resourceTable.createResource(); + + if(DebugPolicy.REPORT_RESOURCE_ID_ALLOCATION) + System.out.println("[RID_ALLOCATION]: ClusterBig[" + clusterId + "] allocates " + resourceIndex); + + support.createResource(this, resourceIndex, clusterId); + return ClusterTraits.createResourceKey(clusterKey, resourceIndex); + } + @Override + public boolean hasResource(int resourceKey, ClusterSupport support) { + int clusterKey = ClusterTraitsBase.getClusterKeyFromResourceKeyNoThrow(resourceKey); + if (this.clusterKey != clusterKey) // foreign resource + return false; + int resourceIndex; + try { + resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey); + } catch (DatabaseException e) { + return false; + } + if (resourceIndex > 0 & resourceIndex <= resourceTable.getTableCount()) + return true; + else + return false; + } + @Override + public int getNumberOfResources(ClusterSupport support) { + return resourceTable.getUsedSize(); + } + @Override + public long getUsedSpace() { + long rt = resourceTable.getTableCapacity() * 8 + 8; // (8 = cluster id) + long ft = foreignTable.getTableCapacity() * 8; + long pt = predicateTable.getTableCapacity() * 4; + long ot = objectTable.getTableCapacity() * 4; + long ct = completeTable.getTableCapacity() * 4; + long vt = valueTable.getTableCapacity() * 1; + long cm = clusterMap.getUsedSpace(); + + return rt + ft + pt + ot + ct + vt + cm; +// System.out.println("resource table " + rt); +// System.out.println("foreign table (non flat cluster table) " + ft); +// System.out.println("predicate table " + pt); +// long pt2 = getRealSizeOfPredicateTable() * 4; +// System.out.println("predicate table real size " + pt2); +// System.out.println("object table " + ot); +// long ot2 = getRealSizeOfObjectTable() * 4; +// System.out.println("object table real size " + ot2); +// System.out.println("value table " + vt); + } + int getRealSizeOfPredicateTable() throws DatabaseException { + SizeOfPredicateTable proc = new SizeOfPredicateTable(resourceTable, predicateTable); + resourceTable.foreachResource(proc, 0, null, null); + return proc.getSize(); + } + int getRealSizeOfObjectTable() throws DatabaseException { + SizeOfObjectTable proc = new SizeOfObjectTable(resourceTable, predicateTable, objectTable); + resourceTable.foreachResource(proc, 0, null, null); + return proc.getSize(); + } + @Override + public boolean isEmpty() { + return resourceTable.getTableCount() == 0; + } + @Override + public void 
printDebugInfo(String message, ClusterSupport support) + throws DatabaseException { + predicateTable.printDebugInfo(); + objectTable.printDebugInfo(); + ClusterPrintDebugInfo proc = new ClusterPrintDebugInfo(this + , resourceTable, predicateTable, support, objectTable); + resourceTable.foreachResource(proc, 0, null, null); + } + private int getInternalReferenceOrZero(int resourceKey, ClusterSupport support) + throws DatabaseException { + int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey); + int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey); + if (this.clusterKey != clusterKey) { // foreign resource + ClusterI foreignCluster = support.getClusterByClusterKey(clusterKey); + ClusterUID clusterUID = foreignCluster.getClusterUID(); + int foreignResourceIndex = clusterMap.getForeignReferenceOrZero(resourceIndex, clusterUID); + return foreignResourceIndex; + } + return resourceIndex; + } + private int getInternalReferenceOrZeroAnd(int resourceKey, ClusterSupport support, byte op) + throws DatabaseException { + int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey); + int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey); + if (this.clusterKey != clusterKey) { // foreign resource + ClusterUID clusterUID = clusterSupport.getClusterUIDByResourceKey(resourceKey); + int foreignResourceIndex = clusterMap.getForeignReferenceOrZero(resourceIndex, clusterUID); + support.addStatementIndex(this, resourceKey, clusterUID, op); + return foreignResourceIndex; + } + support.addStatementIndex(this, resourceKey, getClusterUID(), op); + return resourceIndex; + } + private short getLocalReference(int resourceKey) throws DatabaseException { + return ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(resourceKey); + } + private int getLocalReferenceAnd(int resourceKey, ClusterSupport support, byte op) + throws DatabaseException { + int resourceIndex = getLocalReference(resourceKey); + support.addStatementIndex(this, resourceKey, getClusterUID(), op); + return resourceIndex; + } + private int checkResourceKeyIsOursAndGetResourceIndexIf(int resourceKey, ClusterSupport support) + throws DatabaseException { + int clusterShortId = ClusterTraits.getClusterKeyFromResourceKey(resourceKey); + if (this.clusterKey != clusterShortId) + return 0; + int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey); + return resourceIndex; + } + private int getReferenceOrCreateIfForeign(int resourceKey, ClusterUID clusterUID, ClusterSupport support, byte op) + throws DatabaseException { + int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey); + int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey); + if (this.clusterKey != clusterKey) { + support.addStatementIndex(this, resourceKey, clusterUID, op); + return clusterMap.getForeignReferenceOrCreateByResourceKey(resourceKey, clusterUID); + } + support.addStatementIndex(this, resourceKey, getClusterUID(), op); + return resourceIndex; + } + private int getReferenceOrCreateIfForeign(int resourceKey, ClusterSupport support, byte op) + throws DatabaseException { + int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey); + int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey); + if (this.clusterKey != clusterKey) { + ClusterUID clusterUID = clusterSupport.getClusterUIDByResourceKey(resourceKey); + support.addStatementIndex(this, resourceKey, clusterUID, op); + return 
clusterMap.getForeignReferenceOrCreateByResourceKey(resourceKey, clusterUID); + } + support.addStatementIndex(this, resourceKey, getClusterUID(), op); + return resourceIndex; + } + private class ResourceIndexAndId { + ResourceIndexAndId(int reference, int index, ClusterUID clusterUID) { + this.reference = reference; + this.index = index; + this.clusterUID = clusterUID; + } + public final int reference; + public final int index; + public final ClusterUID clusterUID; + } + private ResourceIndexAndId checkResourceKeyAndGetResourceIndexIf(int resourceKey, ClusterSupport support) + throws DatabaseException { + int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey); + int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey); + if (this.clusterKey != clusterKey) { // foreign resource + ClusterI foreignCluster = support.getClusterByClusterKey(clusterKey); + ClusterUID clusterUID = foreignCluster.getClusterUID(); + int ref = clusterMap.getForeignReferenceOrCreateByResourceIndex(resourceIndex, clusterUID); + return new ResourceIndexAndId(ref, resourceIndex, clusterUID); + } + return new ResourceIndexAndId(resourceIndex, resourceIndex, getClusterUID()); + } + + @Override + final public int execute(int resourceIndex) throws DatabaseException { + int key; + if(resourceIndex > 0) { + key = clusterBits | resourceIndex; + } else { + ClusterUID clusterUID = clusterMap.getResourceUID(resourceIndex).asCID(); + ClusterI cluster = clusterSupport.getClusterByClusterUIDOrMake(clusterUID); + int foreingResourceIndex = clusterMap.getForeignResourceIndex(resourceIndex); + key = ClusterTraits.createResourceKey(cluster.getClusterKey(), foreingResourceIndex); + } + if (DEBUG) + System.out.println("Cluster.execute key=" + key); + return key; + } + + private boolean addRelationInternal(int sReference, int pReference, int oReference, ClusterI.CompleteTypeEnum completeType) + throws DatabaseException { + int predicateIndex = resourceTable.addStatement(sReference, pReference, + oReference, predicateTable, objectTable, completeType, completeTable); + if (0 == predicateIndex) + return true; // added to resourceTable + else if (0 > predicateIndex) + return false; // old complete statemenent + int newPredicateIndex = predicateTable.addPredicate(predicateIndex, + pReference, oReference, objectTable); + if (0 == newPredicateIndex) + return false; + if (predicateIndex != newPredicateIndex) + resourceTable.setPredicateIndex(sReference, newPredicateIndex); + return true; + } + private boolean removeRelationInternal(int sResourceIndex, int pResourceIndex, + int oResourceIndex, ClusterI.CompleteTypeEnum completeType, ClusterSupport support) + throws DatabaseException { + int predicateIndex = resourceTable.getPredicateIndex(sResourceIndex); + if (0 == predicateIndex || ClusterI.CompleteTypeEnum.NotComplete != completeType) + return resourceTable.removeStatementFromCache(sResourceIndex, + pResourceIndex, oResourceIndex, completeType, completeTable); + PredicateTable.Status ret = predicateTable.removePredicate(predicateIndex, pResourceIndex, oResourceIndex, objectTable); + switch (ret) { + case NothingRemoved: + return false; + case PredicateRemoved: { + if (0 == predicateTable.getPredicateSetSize(predicateIndex)) + resourceTable.setPredicateIndex(sResourceIndex, 0); + // intentionally dropping to next case + } default: + break; + } + resourceTable.removeStatement(sResourceIndex, + pResourceIndex, oResourceIndex, + completeType, completeTable, + predicateTable, objectTable, this, support); + 
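+ // Reaching this point means a statement really was removed: the switch above
+ // deliberately falls through from PredicateRemoved to the default case, so
+ // that resourceTable.removeStatement runs for every successful removal;
+ // PredicateRemoved only adds the extra step of clearing the resource's
+ // predicate index once its predicate set becomes empty. NothingRemoved is
+ // the sole early return.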
return true; + } + @Override + public void load() { + throw new Error("Not supported."); + } + + @Override + public void load(Callback r) { + throw new Error("Not supported."); + } + + public int makeResourceKey(int resourceIndex) throws DatabaseException { + int key = 0; + if (resourceIndex > 0) // local resource + key = ClusterTraits.createResourceKey(clusterKey, resourceIndex); + else { + ClusterUID clusterUID = clusterMap.getResourceUID(resourceIndex).asCID(); + int clusterKey = clusterSupport.getClusterKeyByClusterUIDOrMake(clusterUID); + int foreingResourceIndex = clusterMap.getForeignResourceIndex(resourceIndex); + key = ClusterTraits.createResourceKey(clusterKey, foreingResourceIndex); + } + if (0 == key) + throw new DatabaseException("Failed to make resource key from " + resourceIndex); + return key; + } + @Override + public ClusterBig toBig(ClusterSupport support) throws DatabaseException { + throw new Error("Not implemented"); + } + @Override + public void load(ClusterSupport session, Runnable callback) { + throw new Error("Not implemented"); + } + @Override + public ClusterI getClusterByResourceKey(int resourceKey, + ClusterSupport support) { + throw new Error("Not implemented"); + } + @Override + public void increaseReferenceCount(int amount) { + throw new Error("Not implemented"); + } + @Override + + public void decreaseReferenceCount(int amount) { + throw new Error("Not implemented"); + } + @Override + public int getReferenceCount() { + throw new Error("Not implemented"); + } + @Override + public void releaseMemory() { + } + @Override + public void compact() { + clusterMap.compact(); + } + public boolean contains(int resourceKey) { + return ClusterTraitsBase.isCluster(clusterBits, resourceKey); + } + @Override + public ClusterTypeEnum getType() { + return ClusterTypeEnum.BIG; + } + @Override + public boolean getImmutable() { + int status = resourceTable.getClusterStatus(); + return (status & ClusterStatus.ImmutableMaskSet) == 1; + } + @Override + public void setImmutable(boolean immutable, ClusterSupport support) { + int status = resourceTable.getClusterStatus(); + if (immutable) + status |= ClusterStatus.ImmutableMaskSet; + else + status &= ClusterStatus.ImmutableMaskClear; + resourceTable.setClusterStatus(status); + support.setImmutable(this, immutable); + } + + @Override + public ClusterTables store() throws IOException { + + ClusterTables result = new ClusterTables(); + + int[] currentHeader = Arrays.copyOf(headerTable, INT_HEADER_SIZE); + + int byteSize = valueTable.getTableSize(); + byte[] byteBytes = new byte[byteSize]; + valueTable.store(byteBytes, 0); + + //FileUtils.writeFile(bytes, valueTable.table); + + result.bytes = byteBytes; + + int longSize = LONG_HEADER_SIZE + resourceTable.getTableSize() + foreignTable.getTableSize(); + long[] longBytes = new long[longSize]; + + longBytes[0] = 0; + longBytes[1] = LONG_HEADER_VERSION; + longBytes[2] = 0; + longBytes[3] = clusterUID.second; + +// Bytes.writeLE8(longBytes, 0, 0); +// Bytes.writeLE8(longBytes, 8, LONG_HEADER_VERSION); +// Bytes.writeLE8(longBytes, 16, 0); +// Bytes.writeLE8(longBytes, 24, clusterUID.second); + + int longPos = resourceTable.store(longBytes, LONG_HEADER_SIZE); + foreignTable.store(longBytes, longPos); + + result.longs = longBytes; + +// FileUtils.writeFile(longs, longBytes); + + int intSize = INT_HEADER_SIZE + predicateTable.getTableSize() + objectTable.getTableSize() + completeTable.getTableSize(); + int[] intBytes = new int[intSize]; + int intPos = INT_HEADER_SIZE; + intPos = 
predicateTable.store(intBytes, intPos); + intPos = objectTable.store(intBytes, intPos); + intPos = completeTable.store(intBytes, intPos); + // write header + for(int i=0;i<INT_HEADER_SIZE;i++) + intBytes[i] = currentHeader[i]; + result.ints = intBytes; + return result; + } + @Override + public Table<?> getPredicateTable() { + return predicateTable; + } + @Override + public Table<?> getForeignTable() { + return foreignTable; + } + @Override + public Table<?> getCompleteTable() { + return completeTable; + } + @Override + public Table<?> getValueTable() { + return valueTable; + } + @Override + public Table<?> getObjectTable() { + return objectTable; + } +} + +class SizeOfPredicateTable implements ClusterI.ObjectProcedure<Integer> { + private final ResourceTable mrResourceTable; + private final PredicateTable mrPredicateTable; + private int size = 0; + SizeOfPredicateTable(ResourceTable resourceTable + , PredicateTable predicateTable) { + mrResourceTable = resourceTable; + mrPredicateTable = predicateTable; + } + @Override + public boolean execute(Integer i, int resourceRef) { + int predicateIndex = mrResourceTable.getPredicateIndex(resourceRef); + if (0 == predicateIndex) + return false; // continue loop + size += mrPredicateTable.getPredicateSetSize(predicateIndex); + return false; // continue loop + } + + public int getSize() { + return size; + } + +} + +class SizeOfObjectTable implements ClusterI.ObjectProcedure<Integer> { + private final ResourceTable mrResourceTable; + private final PredicateTable mrPredicateTable; + private final ObjectTable mrObjectTable; + private int size = 0; + SizeOfObjectTable(ResourceTable resourceTable + , PredicateTable predicateTable, ObjectTable objectTable) { + mrResourceTable = resourceTable; + mrPredicateTable = predicateTable; + mrObjectTable = objectTable; + } + + @Override + public boolean execute(Integer i, int resourceRef) { + int predicateIndex = mrResourceTable.getPredicateIndex(resourceRef); + if (0 == predicateIndex) + return false; // continue loop + ClusterI.PredicateProcedure<Object> procedure = new PredicateProcedure<Object>() { + @Override + public boolean execute(Object context, int pRef, int oIndex) { + if (ClusterTraits.statementIndexIsDirect(oIndex)) + return false; // no table space reserved, continue looping + int objectIndex; + try { + objectIndex = ClusterTraits.statementIndexGet(oIndex); + size += mrObjectTable.getObjectSetSize(objectIndex); + } catch (DatabaseException e) { + e.printStackTrace(); + } + return false; // continue looping + } + }; + try { + mrPredicateTable.foreachPredicate(predicateIndex, procedure, null, null, null); + } catch (DatabaseException e) { + e.printStackTrace(); + } + return false; // continue loop + } + + public int getSize() { + return size; + } + +} \ No newline at end of file diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterImpl.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterImpl.java new file mode 100644 index 000000000..353d9382a --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterImpl.java @@ -0,0 +1,226 @@ +/******************************************************************************* + * Copyright (c) 2007, 2010 Association for Decentralized Information Management + * in Industry THTH ry. + * All rights reserved.
This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * VTT Technical Research Centre of Finland - initial API and implementation + *******************************************************************************/ +package org.simantics.acorn.cluster; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.UUID; + +import org.simantics.acorn.internal.Change; +import org.simantics.acorn.internal.ClusterChange; +import org.simantics.acorn.internal.ClusterSupport2; +import org.simantics.db.common.utils.Logger; +import org.simantics.db.exception.DatabaseException; +import org.simantics.db.exception.InvalidClusterException; +import org.simantics.db.impl.ClusterBase; +import org.simantics.db.impl.ClusterI; +import org.simantics.db.impl.ClusterSupport; +import org.simantics.db.impl.IClusterTable; +import org.simantics.db.impl.Modifier; +import org.simantics.db.service.ClusterCollectorPolicy.CollectorCluster; +import org.simantics.db.service.ClusterUID; +import org.simantics.db.service.ClusteringSupport.Id; +import org.simantics.utils.strings.AlphanumComparator; + +public abstract class ClusterImpl extends ClusterBase implements Modifier, CollectorCluster { + protected static final int LONG_HEADER_SIZE = 7; + protected static final long LONG_HEADER_VERSION = 1; + protected static ClusterUID checkValidity(long type, long[] longs, int[] ints, byte[] bytes) + throws InvalidClusterException { + if (longs.length < LONG_HEADER_SIZE) + throw new InvalidClusterException("Header size mismatch. Expected=" + ClusterImpl.LONG_HEADER_SIZE + ", got=" + longs.length); + if (longs[0] != type) + throw new InvalidClusterException("Type mismatch. Expected=" + type + ", got=" + longs[0] + " " + ClusterUID.make(longs[2], longs[3])); + if (longs[1] != ClusterImpl.LONG_HEADER_VERSION) + throw new InvalidClusterException("Header size mismatch. 
Expected=" + ClusterImpl.LONG_HEADER_VERSION + ", got=" + longs[1]); + return ClusterUID.make(longs[2], longs[3]); + } + protected static Id getUniqueId(long[] longs) { + return new IdImpl(new UUID(longs[3], longs[4])); + } + static final boolean DEBUG = false; + final public IClusterTable clusterTable; + // This can be null iff the cluster has been converted to big + public Change change = new Change(); + public ClusterChange cc; + public byte[] foreignLookup; + + private boolean dirtySizeInBytes = true; + private long sizeInBytes = 0; + + protected ClusterImpl() { + clusterTable = null; + } + + public ClusterImpl(IClusterTable clusterTable, ClusterUID clusterUID, int clusterKey, ClusterSupport support) { + super(support, clusterUID, clusterKey); +// SessionImplSocket session = (SessionImplSocket)support.getSession(); +// if(session != null) + this.clusterTable = clusterTable; +// else + } + + public static ClusterImpl dummy() { + return new ClusterSmall(); + } + + public static ClusterImpl make(IClusterTable clusterTable, ClusterUID clusterUID, int clusterKey, ClusterSupport2 support) { + return new ClusterSmall(clusterUID, clusterKey, support, clusterTable); + } + public static ClusterSmall proxy(IClusterTable clusterTable, ClusterUID clusterUID, int clusterKey, long clusterId, ClusterSupport2 support) { + if (DEBUG) + new Exception("Cluster proxy for " + clusterUID).printStackTrace(); + return new ClusterSmall(null, clusterUID, clusterKey, support); + } + public static ClusterImpl make(IClusterTable clusterTable, long[] longs, int[] ints, byte[] bytes, ClusterSupport2 support, int clusterKey) + throws DatabaseException { + if (longs[0] == 0) + return new ClusterBig(clusterTable, longs, ints, bytes, support, clusterKey); + else + return new ClusterSmall(clusterTable, longs, ints, bytes, support, clusterKey); + } + +// public boolean virtual = false; + + @Override + public boolean hasVirtual() { + return false; +// return clusterTable.hasVirtual(clusterKey); + } + + @Override + public void markVirtual() { +// clusterTable.markVirtual(clusterKey); +// virtual = true; + } + + @Override + public boolean isWriteOnly() { + return false; + } + @Override + public boolean isLoaded() { + return true; + } + + @Override + public void resized() { + dirtySizeInBytes = true; +// if(clusterTable != null) +// clusterTable.setDirtySizeInBytes(true); + } + + public long getCachedSize() { + if(dirtySizeInBytes) { + try { + sizeInBytes = getUsedSpace(); + //System.err.println("recomputed size of cluster " + getClusterId() + " => " + sizeInBytes); + } catch (DatabaseException e) { + Logger.defaultLogError(e); + } + dirtySizeInBytes = false; + } + return sizeInBytes; + } + + protected void calculateModifiedId() { +// setModifiedId(new IdImpl(UUID.randomUUID())); + } + + public static class ClusterTables { + public byte[] bytes; + public int[] ints; + public long[] longs; + } + + public byte[] storeBytes() throws IOException { + throw new UnsupportedOperationException(); + } + + public ClusterTables store() throws IOException { + throw new UnsupportedOperationException(); + } + + abstract protected int getResourceTableCount(); + + public String dump(final ClusterSupport support) { + + StringBuilder sb = new StringBuilder(); + for(int i=1;i stms = new ArrayList(); + try { + + byte[] value = getValue(i, support); + if(value != null) + sb.append(" bytes: " + Arrays.toString(value) + "\n"); + + forPredicates(i, new PredicateProcedure() { + + @Override + public boolean execute(Integer c, final int predicateKey, int 
objectIndex) { + + try { + + forObjects(resourceKey, predicateKey, objectIndex, new ObjectProcedure() { + + @Override + public boolean execute(Integer context, int objectKey) throws DatabaseException { + + ClusterUID puid = support.getClusterByResourceKey(predicateKey).getClusterUID(); + ClusterUID ouid = support.getClusterByResourceKey(objectKey).getClusterUID(); + + stms.add(" " + puid + " " + (predicateKey&0xFFF) + " " + ouid + " " + (objectKey&0xFFF)); + + return false; + + } + + }, 0, support); + } catch (DatabaseException e) { + e.printStackTrace(); + } + + return false; + + } + + },0,support); + + Collections.sort(stms, AlphanumComparator.COMPARATOR); + + for(String s : stms) { + sb.append(s); + sb.append("\n"); + } + + } catch (DatabaseException e) { + e.printStackTrace(); + } + } + + return sb.toString(); + + } + + abstract public boolean isValueEx(int resourceIndex) throws DatabaseException; + + abstract public ClusterI addRelation(int resourceKey, ClusterUID puid, int predicateKey, ClusterUID ouid, int objectKey, ClusterSupport support) throws DatabaseException; + + @Override + public IClusterTable getClusterTable() { + return clusterTable; + } +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterSmall.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterSmall.java new file mode 100644 index 000000000..726071dbe --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/cluster/ClusterSmall.java @@ -0,0 +1,1304 @@ +/******************************************************************************* + * Copyright (c) 2007, 2010 Association for Decentralized Information Management + * in Industry THTH ry. + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * VTT Technical Research Centre of Finland - initial API and implementation + *******************************************************************************/ +package org.simantics.acorn.cluster; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.Arrays; + +import org.simantics.acorn.internal.ClusterChange; +import org.simantics.acorn.internal.ClusterStream; +import org.simantics.acorn.internal.ClusterSupport2; +import org.simantics.acorn.internal.DebugPolicy; +import org.simantics.db.Resource; +import org.simantics.db.exception.DatabaseException; +import org.simantics.db.exception.ExternalValueException; +import org.simantics.db.exception.ValidationException; +import org.simantics.db.impl.ClusterBase; +import org.simantics.db.impl.ClusterI; +import org.simantics.db.impl.ClusterSupport; +import org.simantics.db.impl.ClusterTraitsBase; +import org.simantics.db.impl.ForEachObjectContextProcedure; +import org.simantics.db.impl.ForEachObjectProcedure; +import org.simantics.db.impl.ForPossibleRelatedValueContextProcedure; +import org.simantics.db.impl.ForPossibleRelatedValueProcedure; +import org.simantics.db.impl.IClusterTable; +import org.simantics.db.impl.Table; +import org.simantics.db.impl.TableHeader; +import org.simantics.db.impl.graph.ReadGraphImpl; +import org.simantics.db.procedure.AsyncContextMultiProcedure; +import org.simantics.db.procedure.AsyncMultiProcedure; +import org.simantics.db.procore.cluster.ClusterMapSmall; +import org.simantics.db.procore.cluster.ClusterTraits; 
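+ // Note on the persisted format shared by ClusterBig and ClusterSmall: slot 0
+ // of the long table is a type tag (0 = big, -1 = small, cf. checkValidity(0, ...)
+ // in ClusterBig and checkValidity(-1, ...) below), slot 1 is LONG_HEADER_VERSION,
+ // and slots 2-3 rebuild the ClusterUID via ClusterUID.make(longs[2], longs[3]).
+ // ClusterImpl.make dispatches on the tag when reviving a cluster from disk:
+ //
+ //   if (longs[0] == 0)
+ //       return new ClusterBig(clusterTable, longs, ints, bytes, support, clusterKey);
+ //   else
+ //       return new ClusterSmall(clusterTable, longs, ints, bytes, support, clusterKey);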
+import org.simantics.db.procore.cluster.ClusterTraitsSmall; +import org.simantics.db.procore.cluster.CompleteTableSmall; +import org.simantics.db.procore.cluster.ForeignTableSmall; +import org.simantics.db.procore.cluster.ObjectTable; +import org.simantics.db.procore.cluster.OutOfSpaceException; +import org.simantics.db.procore.cluster.PredicateTable; +import org.simantics.db.procore.cluster.ResourceTableSmall; +import org.simantics.db.procore.cluster.ValueTableSmall; +import org.simantics.db.service.Bytes; +import org.simantics.db.service.ClusterUID; +import org.simantics.db.service.ResourceUID; +import org.simantics.utils.datastructures.Callback; + +import gnu.trove.map.hash.TIntShortHashMap; +import gnu.trove.procedure.TIntProcedure; +import gnu.trove.set.hash.TIntHashSet; + +final public class ClusterSmall extends ClusterImpl { + private static final int TABLE_HEADER_SIZE = TableHeader.HEADER_SIZE + TableHeader.EXTRA_SIZE; + private static final int RESOURCE_TABLE_OFFSET = 0; + private static final int PREDICATE_TABLE_OFFSET = RESOURCE_TABLE_OFFSET + TABLE_HEADER_SIZE; + private static final int OBJECT_TABLE_OFFSET = PREDICATE_TABLE_OFFSET + TABLE_HEADER_SIZE; + private static final int VALUE_TABLE_OFFSET = OBJECT_TABLE_OFFSET + TABLE_HEADER_SIZE; + private static final int FLAT_TABLE_OFFSET = VALUE_TABLE_OFFSET + TABLE_HEADER_SIZE; + private static final int COMPLETE_TABLE_OFFSET = FLAT_TABLE_OFFSET + TABLE_HEADER_SIZE; + private static final int FOREIGN_TABLE_OFFSET = COMPLETE_TABLE_OFFSET + TABLE_HEADER_SIZE; + private static final int INT_HEADER_SIZE = FOREIGN_TABLE_OFFSET + TABLE_HEADER_SIZE; + private final int clusterBits; + private final ResourceTableSmall resourceTable; + private final PredicateTable predicateTable; + private final ObjectTable objectTable; + private final ValueTableSmall valueTable; + private final ForeignTableSmall foreignTable; + private final CompleteTableSmall completeTable; + private final ClusterMapSmall clusterMap; + private final int[] headerTable; + public final ClusterSupport2 clusterSupport; + private boolean proxy; + private boolean deleted = false; + + protected ClusterSmall() { + this.proxy = true; + this.headerTable = null; + this.resourceTable = null; + this.foreignTable = null; + this.predicateTable = null; + this.objectTable = null; + this.valueTable = null; + this.completeTable = null; + this.clusterMap = null; + this.clusterSupport = null; + this.clusterBits = 0; + this.importance = 0; + } + + public ClusterSmall(IClusterTable clusterTable, ClusterUID clusterUID, int clusterKey, ClusterSupport2 support) { + super(clusterTable, clusterUID, clusterKey, support); + if(DebugPolicy.REPORT_CLUSTER_EVENTS) + new Exception(clusterUID.toString()).printStackTrace(); + this.proxy = true; + this.headerTable = null; + this.resourceTable = null; + this.foreignTable = null; + this.predicateTable = null; + this.objectTable = null; + this.valueTable = null; + this.completeTable = null; + this.clusterMap = null; + this.clusterSupport = support; + this.clusterBits = 0; + this.importance = 0; +// new Exception("ClusterSmall " + clusterKey).printStackTrace(); + } + ClusterSmall(ClusterUID clusterUID, int clusterKey, ClusterSupport2 support, IClusterTable clusterTable) { + super(clusterTable, clusterUID, clusterKey, support); + if(DebugPolicy.REPORT_CLUSTER_EVENTS) + new Exception(clusterUID.toString()).printStackTrace(); + this.proxy = false; + this.clusterSupport = support; + this.headerTable = new int[INT_HEADER_SIZE]; + this.resourceTable = new 
ResourceTableSmall(this, headerTable, RESOURCE_TABLE_OFFSET); + this.foreignTable = new ForeignTableSmall(this, headerTable, FOREIGN_TABLE_OFFSET); + this.predicateTable = new PredicateTable(this, headerTable, PREDICATE_TABLE_OFFSET); + this.objectTable = new ObjectTable(this, headerTable, OBJECT_TABLE_OFFSET); + this.valueTable = new ValueTableSmall(this, headerTable, VALUE_TABLE_OFFSET); + this.completeTable = new CompleteTableSmall(this, headerTable, COMPLETE_TABLE_OFFSET); + this.clusterMap = new ClusterMapSmall(this, foreignTable); + this.clusterBits = ClusterTraitsBase.getClusterBits(clusterKey); +// if(clusterTable != null) +// this.importance = -clusterTable.timeCounter(); +// else + this.importance = 0; +// new Exception("ClusterSmall " + clusterKey).printStackTrace(); + } + protected ClusterSmall(IClusterTable clusterTable, long[] longs, int[] ints, byte[] bytes, ClusterSupport2 support, int clusterKey) + throws DatabaseException { + super(clusterTable, checkValidity(-1, longs, ints, bytes), clusterKey, support); + this.proxy = false; + this.clusterSupport = support; + if (ints.length < INT_HEADER_SIZE) + throw new IllegalArgumentException("Too small integer table for cluster."); + this.headerTable = ints; + if(DebugPolicy.REPORT_CLUSTER_EVENTS) new Exception(Long.toString(clusterId)).printStackTrace(); + this.resourceTable = new ResourceTableSmall(this, ints, RESOURCE_TABLE_OFFSET, longs); + this.foreignTable = new ForeignTableSmall(this, headerTable, FOREIGN_TABLE_OFFSET, longs); + this.predicateTable = new PredicateTable(this, ints, PREDICATE_TABLE_OFFSET, ints); + this.objectTable = new ObjectTable(this, ints, OBJECT_TABLE_OFFSET, ints); + this.valueTable = new ValueTableSmall(this, ints, VALUE_TABLE_OFFSET, bytes); + this.completeTable = new CompleteTableSmall(this, headerTable, COMPLETE_TABLE_OFFSET, ints); + this.clusterMap = new ClusterMapSmall(this, foreignTable); + this.clusterBits = ClusterTraitsBase.getClusterBits(clusterKey); +// if(clusterTable != null) { +// this.importance = clusterTable.timeCounter(); +// clusterTable.markImmutable(this, getImmutable()); +// } +// new Exception("ClusterSmall " + clusterKey).printStackTrace(); + } + void analyse() { + System.out.println("Cluster " + clusterId); + System.out.println("-size:" + getUsedSpace()); + System.out.println(" -rt:" + (resourceTable.getTableCapacity() * 8 + 8)); + System.out.println(" -ft:" + foreignTable.getTableCapacity() * 8); + System.out.println(" -pt:" + predicateTable.getTableCapacity() * 4); + System.out.println(" -ot:" + objectTable.getTableCapacity() * 4); + System.out.println(" -ct:" + completeTable.getTableCapacity() * 4); + System.out.println(" -vt:" + valueTable.getTableCapacity()); + + System.out.println("-resourceTable:"); + System.out.println(" -resourceCount=" + resourceTable.getResourceCount()); + System.out.println(" -size=" + resourceTable.getTableSize()); + System.out.println(" -capacity=" + resourceTable.getTableCapacity()); + System.out.println(" -count=" + resourceTable.getTableCount()); + System.out.println(" -size=" + resourceTable.getTableSize()); + //resourceTable.analyse(); + } + public void checkDirectReference(int dr) + throws DatabaseException { + if (!ClusterTraits.statementIndexIsDirect(dr)) + throw new ValidationException("Reference is not direct. Reference=" + dr); + if (ClusterTraits.isFlat(dr)) + throw new ValidationException("Reference is flat. 
Reference=" + dr); + if (ClusterTraits.isLocal(dr)) { + if (dr < 1 || dr > resourceTable.getUsedSize()) + throw new ValidationException("Illegal local reference. Reference=" + dr); + } else { + int fi = ClusterTraits.getForeignIndexFromReference(dr); + int ri = ClusterTraits.getResourceIndexFromForeignReference(dr); + if (fi < 1 || fi > foreignTable.getUsedSize()) + throw new ValidationException("Illegal foreign reference. Reference=" + dr + " foreign index=" + fi); + if (ri < 1 || ri > ClusterTraits.getMaxNumberOfResources()) + throw new ValidationException("Illegal foreign reference. Reference=" + dr + " resource index=" + ri); + } + } + public void checkPredicateIndex(int pi) + throws DatabaseException { + // predicateTable.checkPredicateSetIndex(this, pi); + } + public void checkObjectSetReference(int or) + throws DatabaseException { + if (ClusterTraits.statementIndexIsDirect(or)) + throw new ValidationException("Illegal object set reference. Reference=" + or); + int oi = ClusterTraits.statementIndexGet(or); + this.objectTable.checkObjectSetIndex(this, oi); + } + + public void checkValueInit() + throws DatabaseException { + valueTable.checkValueInit(); + } + public void checkValue(int capacity, int index) + throws DatabaseException { + valueTable.checkValue(capacity, index); + } + public void checkValueFini() + throws DatabaseException { + valueTable.checkValueFini(); + } + public void checkForeingIndex(int fi) + throws DatabaseException { + if (fi<1 || fi > foreignTable.getUsedSize()) + throw new ValidationException("Illegal foreign index=" + fi); + } + public void checkCompleteSetReference(int cr) + throws DatabaseException { + if (!ClusterTraits.completeReferenceIsMultiple(cr)) + throw new ValidationException("Illegal complete set reference. Reference=" + cr); + int ci = cr; + this.completeTable.checkCompleteSetIndex(this, ci); + } + public void check() + throws DatabaseException { +// this.completeTable.check(this); +// this.objectTable.check(this); +// // Must be after object table check. +// this.predicateTable.check(this); +// this.resourceTable.check(this); + } + @Override + public CompleteTypeEnum getCompleteType(int resourceKey, ClusterSupport support) + throws DatabaseException { + final int resourceRef = getLocalReference(resourceKey); + CompleteTypeEnum ct = resourceTable.getCompleteType(resourceRef); + if (DEBUG) + System.out.println("ClusterSmall.getCompleteType rk=" + resourceKey + " ct=" + ct); + return ct; + } + + @Override + public int getCompleteObjectKey(int resourceKey, ClusterSupport support) + throws DatabaseException { + final int resourceIndexOld = getLocalReference(resourceKey); + short completeRef = resourceTable.getCompleteObjectRef(resourceIndexOld); + int clusterIndex; + int resourceIndex; + if (0 == completeRef) + throw new DatabaseException("Resource's complete object refernce is null. Resource key=" + resourceKey + "."); + ClusterI.CompleteTypeEnum completeType = resourceTable.getCompleteType(resourceIndexOld); + if (completeType == ClusterI.CompleteTypeEnum.NotComplete) + throw new DatabaseException("Resource has multiple complete objects. Resource key=" + resourceKey + "."); + if (ClusterTraitsSmall.resourceRefIsLocal(completeRef)) { + clusterIndex = clusterKey; + resourceIndex = completeRef; + } else { // Resource has one complete statement. 
 + + @Override + public boolean isComplete(int resourceKey, ClusterSupport support) + throws DatabaseException { + final int resourceRef = getLocalReference(resourceKey); + final ClusterI.CompleteTypeEnum completeType = resourceTable.getCompleteType(resourceRef); + boolean complete = completeType != ClusterI.CompleteTypeEnum.NotComplete; + if (DEBUG) + System.out.println("ClusterSmall.key=" + resourceKey + " isComplete=" + complete); + return complete; + } + public int getSingleObject(int resourceKey, int predicateKey, int objectIndex, ClusterSupport support) throws DatabaseException { + if (DEBUG) + System.out.println("ClusterSmall.getSingleObject: rk=" + resourceKey + " pk=" + predicateKey); + if (0 == objectIndex) { + final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(resourceKey); + final short pRef = getInternalReferenceOrZero2(predicateKey, support); + final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey); + return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this); + } + return objectTable.getSingleObject(objectIndex, support, this); + } + + public void forObjects(ReadGraphImpl graph, int resourceKey, int predicateKey, int objectIndex, AsyncMultiProcedure<Resource> procedure, + ClusterSupport support) throws DatabaseException { + if (DEBUG) + System.out.println("ClusterSmall.forObjects1: rk=" + resourceKey + " pk=" + predicateKey); + if (0 == objectIndex) { + final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey); + final int pRef = getInternalReferenceOrZero2(predicateKey, support); + final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey); + resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this); + return; + } + objectTable.foreachObject(graph, objectIndex, procedure, this); + } + + public <C> void forObjects(ReadGraphImpl graph, int resourceKey, int predicateKey, int objectIndex, C context, AsyncContextMultiProcedure<C, Resource> procedure, + ClusterSupport support) throws DatabaseException { + if (DEBUG) + System.out.println("ClusterSmall.forObjects1: rk=" + resourceKey + " pk=" + predicateKey); + if (0 == objectIndex) { + final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey); + final int pRef = getInternalReferenceOrZero2(predicateKey, support); + final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey); + resourceTable.foreachObject(resourceIndex, graph, context, procedure, support, pRef, pCompleteType, completeTable, this); + return; + } + objectTable.foreachObject(graph, objectIndex, context, procedure, this); + }
 + + @Override + public <Context> boolean forObjects(int resourceKey, int predicateKey, int objectIndex, ObjectProcedure<Context> procedure, + Context context, ClusterSupport support) throws DatabaseException { + if (DEBUG) + System.out.println("ClusterSmall.forObjects2: rk=" + resourceKey + " pk=" + predicateKey); + if (0 == objectIndex) { + final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey); + final short pRef = getInternalReferenceOrZero2(predicateKey, support); + final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey); + return resourceTable.foreachObject(resourceIndex, procedure, context, support, this, pRef, pCompleteType, completeTable); + } + return objectTable.foreachObject(objectIndex, procedure, context, support, this); + } + + @Override + public int getSingleObject(int resourceKey, int predicateKey, ClusterSupport support) throws DatabaseException { + if (DEBUG) + System.out.println("ClusterSmall.getSingleObject2: rk=" + resourceKey + " pk=" + predicateKey); + final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey); + final short pRef = getInternalReferenceOrZero2(predicateKey, support); + final int completeType = ClusterTraitsBase.getCompleteTypeIntFromResourceKey(predicateKey); + final ClusterI.CompleteTypeEnum pCompleteType = CompleteTypeEnum.make(completeType); + if (completeType > 0) + return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this); + final int predicateIndex = (int)resourceTable.table[(resourceIndex<<1) - 1 + resourceTable.offset] & 0xFFFFFF; + if (0 == predicateIndex) // All relevant data is in resource table. + return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this); + int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef & 0xFFFF); + return getSingleObject(resourceKey, predicateKey, objectIndex, support); + } + + @Override + public <T> int getSingleObject(int resourceKey, ForPossibleRelatedValueProcedure<T> procedure, ClusterSupport support) throws DatabaseException { + final short resourceIndex = (short)ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey); + final int predicateKey = procedure.predicateKey; + int clusterKey = ClusterTraitsBase.getClusterMaskFromResourceKey(resourceKey); + short pRef = 0; + if(procedure.clusterKey[0] == clusterKey) { + pRef = (short)procedure.predicateReference[0]; + } else { + pRef = getInternalReferenceOrZero2(predicateKey, support); + procedure.clusterKey[0] = clusterKey; + procedure.predicateReference[0] = pRef; + } + + final ClusterI.CompleteTypeEnum pCompleteType = procedure.completeType; + if (CompleteTypeEnum.NotComplete != pCompleteType) + return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this); + final int predicateIndex = (int)resourceTable.table[(resourceIndex<<1) - 1 + resourceTable.offset] & 0xFFFFFF; + if (0 == predicateIndex) // All relevant data is in resource table. + return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this); + int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef & 0xFFFF); + return getSingleObject(resourceKey, predicateKey, objectIndex, support); + }
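Both procedure-based lookups above memoize the resolved predicate reference inside the procedure object (procedure.clusterKey[0] / procedure.predicateReference[0]), so repeated calls against the same cluster skip the getInternalReferenceOrZero2 resolution. The same one-slot cache pattern in isolation — all names here are hypothetical; only the caching idea comes from the source:

    // One-slot memo mirroring procedure.clusterKey[0]/predicateReference[0].
    final class OneSlotMemo {
        private int cachedClusterKey = -1;
        private short cachedReference;

        short resolve(int clusterKey, java.util.function.IntSupplier slowResolve) {
            if (cachedClusterKey != clusterKey) {      // miss: recompute and remember
                cachedReference = (short) slowResolve.getAsInt();
                cachedClusterKey = clusterKey;
            }
            return cachedReference;                    // hit: skip the slow path
        }
    }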
 + + @Override + public <C, T> int getSingleObject(int resourceKey, ForPossibleRelatedValueContextProcedure<C, T> procedure, ClusterSupport support) throws DatabaseException { + final short resourceIndex = (short)ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey); + final int predicateKey = procedure.predicateKey; + int clusterKey = ClusterTraitsBase.getClusterMaskFromResourceKey(resourceKey); + short pRef = 0; + if(procedure.clusterKey[0] == clusterKey) { + pRef = (short)procedure.predicateReference[0]; + } else { + pRef = getInternalReferenceOrZero2(predicateKey, support); + procedure.clusterKey[0] = clusterKey; + procedure.predicateReference[0] = pRef; + } + final ClusterI.CompleteTypeEnum pCompleteType = procedure.completeType; + if (CompleteTypeEnum.NotComplete != pCompleteType) + return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this); + final int predicateIndex = (int)resourceTable.table[(resourceIndex<<1) - 1 + resourceTable.offset] & 0xFFFFFF; + if (0 == predicateIndex) // All relevant data is in resource table. + return resourceTable.getSingleObject(resourceIndex, support, pRef, pCompleteType, completeTable, this); + int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef & 0xFFFF); + return getSingleObject(resourceKey, predicateKey, objectIndex, support); + } + + @Override + public void forObjects(ReadGraphImpl graph, int resourceKey, + int predicateKey, AsyncMultiProcedure<Resource> procedure) throws DatabaseException { + + throw new UnsupportedOperationException(); + +// SessionImplSocket session = (SessionImplSocket)graph.getSession(); +// ClusterSupport support = session.clusterTranslator; +// if (DEBUG) +// System.out.println("ClusterSmall.forObjects3: rk=" + resourceKey + " pk=" + predicateKey); +// final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey); +// final int pRef = getInternalReferenceOrZero2(predicateKey, support); +// final int completeType = ClusterTraitsBase.getCompleteTypeIntFromResourceKey(predicateKey); +// final ClusterI.CompleteTypeEnum pCompleteType = CompleteTypeEnum.make(completeType); +// if (completeType > 0) { +// resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this); +// return; +// } +// final int predicateIndex = (int)resourceTable.table[(resourceIndex<<1) - 1 + resourceTable.offset] & 0xFFFFFF; +// if (0 == predicateIndex) { +// resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this); +// return; +// } +// int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef & 0xFFFF); +// forObjects(graph, resourceKey, predicateKey, objectIndex, procedure, support); + } + + public void forObjects(ReadGraphImpl graph, int resourceKey, ForEachObjectProcedure procedure) throws DatabaseException { + + throw new UnsupportedOperationException(); + +// final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey); +// final int predicateKey = procedure.predicateKey; +// int clusterKey = ClusterTraitsBase.getClusterMaskFromResourceKey(resourceKey); +// int pRef = 0; +// if(procedure.clusterKey[0] == clusterKey) { +// pRef = procedure.predicateReference[0]; +// } else {
 +// SessionImplSocket session = (SessionImplSocket)graph.getSession(); +// ClusterSupport support = session.clusterTranslator; +// pRef = getInternalReferenceOrZero2(predicateKey, support); +// procedure.clusterKey[0] = clusterKey; +// procedure.predicateReference[0] = pRef; +// } +// final ClusterI.CompleteTypeEnum pCompleteType = procedure.completeType; +// if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) { +// SessionImplSocket session = (SessionImplSocket)graph.getSession(); +// ClusterSupport support = session.clusterTranslator; +// resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this); +// return; +// } +// final int predicateIndex = (int)resourceTable.table[(resourceIndex<<1) - 1 + resourceTable.offset] & 0xFFFFFF; +// if (0 == predicateIndex) { +// SessionImplSocket session = (SessionImplSocket)graph.getSession(); +// ClusterSupport support = session.clusterTranslator; +// resourceTable.foreachObject(resourceIndex, graph, procedure, support, pRef, pCompleteType, completeTable, this); +// return; +// } +// int hashBase = predicateIndex + predicateTable.offset; +// if (predicateTable.table[hashBase-1] < 0) { +// int objectIndex = TableIntArraySet2.get(predicateTable.table, hashBase, pRef & 0xFFFF); +// //int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef & 0xFFFF); +// SessionImplSocket session = (SessionImplSocket)graph.getSession(); +// ClusterSupport support = session.clusterTranslator; +// forObjects(graph, resourceKey, predicateKey, objectIndex, procedure, support); +// } else { +// procedure.finished(graph); +//// graph.dec(); +// } + } + + public <C> void forObjects(ReadGraphImpl graph, int resourceKey, C context, ForEachObjectContextProcedure<C> procedure) throws DatabaseException { + + throw new UnsupportedOperationException(); + +// final int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey); +// final int predicateKey = procedure.predicateKey; +// int clusterKey = ClusterTraitsBase.getClusterMaskFromResourceKey(resourceKey); +// int pRef = 0; +// if(procedure.clusterKey[0] == clusterKey) { +// pRef = procedure.predicateReference[0]; +// } else { +// SessionImplSocket session = (SessionImplSocket)graph.getSession(); +// ClusterSupport support = session.clusterTranslator; +// pRef = getInternalReferenceOrZero2(predicateKey, support); +// procedure.clusterKey[0] = clusterKey; +// procedure.predicateReference[0] = pRef; +// } +// +// final ClusterI.CompleteTypeEnum pCompleteType = procedure.completeType; +// if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) { +// SessionImplSocket session = (SessionImplSocket)graph.getSession(); +// ClusterSupport support = session.clusterTranslator; +// resourceTable.foreachObject(resourceIndex, graph, context, procedure, support, pRef, pCompleteType, completeTable, this); +// return; +// } +// final int predicateIndex = (int)resourceTable.table[(resourceIndex<<1) - 1 + resourceTable.offset] & 0xFFFFFF; +// if (0 == predicateIndex) { +// SessionImplSocket session = (SessionImplSocket)graph.getSession(); +// ClusterSupport support = session.clusterTranslator; +// resourceTable.foreachObject(resourceIndex, graph, context, procedure, support, pRef, pCompleteType, completeTable, this); +// return; +// } +// int hashBase = predicateIndex + predicateTable.offset; +// if(predicateTable.table[hashBase-1] < 0) { +// int objectIndex = TableIntArraySet2.get(predicateTable.table, hashBase, pRef & 0xFFFF); +// SessionImplSocket session = (SessionImplSocket)graph.getSession();
 +// ClusterSupport support = session.clusterTranslator; +// forObjects(graph, resourceKey, predicateKey, objectIndex, context, procedure, support); +// } else { +// int objectIndex = TableIntSet2.get(predicateTable.table, hashBase, pRef & 0xFFFF); +// SessionImplSocket session = (SessionImplSocket)graph.getSession(); +// ClusterSupport support = session.clusterTranslator; +// forObjects(graph, resourceKey, predicateKey, objectIndex, context, procedure, support); +// } + } + @Override + public <Context> boolean forObjects(int resourceKey, int predicateKey, + ObjectProcedure<Context> procedure, Context context, ClusterSupport support) + throws DatabaseException { + if (DEBUG) + System.out.println("ClusterSmall.forObjects4: rk=" + resourceKey + " pk=" + predicateKey); + final short resourceIndex = (short)ClusterTraitsBase.getResourceIndexFromResourceKey(resourceKey); + final short pRef = getInternalReferenceOrZero2(predicateKey, support); + final ClusterI.CompleteTypeEnum pCompleteType = ClusterTraitsBase.getCompleteTypeFromResourceKey(predicateKey); + // PredicateType is complete i.e. all relevant data is in resource table. + if (ClusterI.CompleteTypeEnum.NotComplete != pCompleteType) { + if (DEBUG) + System.out.println("ClusterSmall.forObjects: complete type was " + pCompleteType + " cluster=" + getClusterUID()); + return resourceTable.foreachObject(resourceIndex, procedure, context, support, this, pRef, pCompleteType, completeTable); + } + final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex); + if (0 == predicateIndex) { // All relevant data is in resource table. + if (DEBUG) + System.out.println("ClusterSmall.forObjects: no predicate table " + pCompleteType); + return resourceTable.foreachObject(resourceIndex, procedure, context, support, this, pRef, pCompleteType, completeTable); + } + int objectIndex = predicateTable.getObjectIndex(predicateIndex, pRef & 0xFFFF); + return forObjects(resourceKey, predicateKey, objectIndex, procedure, context, support); + } + @Override + public <Context> boolean forPredicates(int resourceKey, + PredicateProcedure<Context> procedure, Context context, ClusterSupport support) + throws DatabaseException { + if (DEBUG) + System.out.println("ClusterSmall.forPredicates: rk=" + resourceKey); + final int resourceIndex = getLocalReference(resourceKey); + final int predicateIndex = resourceTable.getPredicateIndex(resourceIndex); + if (0 == predicateIndex) + return resourceTable.foreachPredicate(resourceIndex, + procedure, context, support, this, completeTable); + else { + boolean broken = resourceTable.foreachPredicate(resourceIndex, + procedure, context, support, this, completeTable); + if (broken) + return true; + } + return predicateTable.foreachPredicate(predicateIndex, + procedure, context, support, this); + } + + @Override + public ClusterI addRelation(int sResourceKey, ClusterUID puid, int pResourceKey, ClusterUID ouid, int oResourceKey, ClusterSupport support) throws DatabaseException { + + if(proxy) { + throw new UnsupportedOperationException(); +// ClusterImpl cluster = clusterTable.load2(clusterId, clusterKey); +// return cluster.addRelation(sResourceKey, pResourceKey, oResourceKey, support); + } + + // check(); + boolean ret; + try { + short sri = getLocalReferenceAnd(sResourceKey, support, ClusterChange.ADD_OPERATION); + short pri = getReferenceOrCreateIfForeign(pResourceKey, puid, support, ClusterStream.NULL_OPERATION); + short ori = getReferenceOrCreateIfForeign(oResourceKey, ouid, support, ClusterStream.NULL_OPERATION); + 
ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey); + ret = addRelationInternal(sri, pri, ori, completeType); + calculateModifiedId(); + } catch (OutOfSpaceException e) { + boolean streamOff = support.getStreamOff(); + if (!streamOff) { + support.cancelStatement(this); + support.setStreamOff(true); + } + ClusterI cluster = toBig(clusterSupport); + if (!streamOff) + support.setStreamOff(false); + ClusterI cluster2 = cluster.addRelation(sResourceKey, pResourceKey, oResourceKey, support); + if (cluster != cluster2) + throw new DatabaseException("Internal error. Contact application support."); + return cluster; + } +// check(); + if (ret) { + support.addStatement(this); + return this; + } else { + support.cancelStatement(this); + return null; + } + + } + + @Override + public ClusterI addRelation(int sResourceKey, int pResourceKey, int oResourceKey, ClusterSupport support) throws DatabaseException { + + if (DEBUG) + System.out.println("add rk=" + sResourceKey + " pk=" + pResourceKey + " ok=" + oResourceKey); + + if(proxy) { + throw new UnsupportedOperationException(); +// ClusterImpl cluster = clusterTable.load2(clusterId, clusterKey); +// return cluster.addRelation(sResourceKey, pResourceKey, oResourceKey, support); + } + + // check(); + boolean ret; + try { + short sri = getLocalReferenceAnd(sResourceKey, support, ClusterChange.ADD_OPERATION); + short pri = getReferenceOrCreateIfForeign(pResourceKey, support, ClusterStream.NULL_OPERATION); + short ori = getReferenceOrCreateIfForeign(oResourceKey, support, ClusterStream.NULL_OPERATION); + ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey); + ret = addRelationInternal(sri, pri, ori, completeType); + calculateModifiedId(); + } catch (OutOfSpaceException e) { + boolean streamOff = support.getStreamOff(); + if (!streamOff) { + support.cancelStatement(this); + support.setStreamOff(true); + } + ClusterI cluster = toBig(clusterSupport); + if (!streamOff) + support.setStreamOff(false); + ClusterI cluster2 = cluster.addRelation(sResourceKey, pResourceKey, oResourceKey, support); + if (cluster != cluster2) + throw new DatabaseException("Internal error. 
Contact application support."); + return cluster; + } +// check(); + if (ret) { + support.addStatement(this); + return this; + } else { + support.cancelStatement(this); + return null; + } + } + @Override + public boolean removeRelation(int sResourceKey, int pResourceKey, int oResourceKey, ClusterSupport support) + throws DatabaseException { + // check(); + short sri = getLocalReferenceAnd(sResourceKey, support, ClusterChange.REMOVE_OPERATION); + short pri = getInternalReferenceOrZeroAnd(pResourceKey, support, ClusterStream.NULL_OPERATION); + short ori = getInternalReferenceOrZeroAnd(oResourceKey, support, ClusterStream.NULL_OPERATION); + boolean ret = false; + if (0 != pri && 0 != ori) { + ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey); + ret = removeRelationInternal(sri, pri, ori, completeType, support); + calculateModifiedId(); + } + if (ret) + support.removeStatement(this); + else + support.cancelStatement(this); + // check(); + return ret; + } + @Override + public void denyRelation(int sResourceKey, int pResourceKey, int oResourceKey, ClusterSupport support) + throws DatabaseException { + short s = checkResourceKeyIsOursAndGetResourceIndexIf(sResourceKey, support); + ResourceReferenceAndCluster p = checkResourceKeyAndGetResourceIndexIf(pResourceKey, support); + ResourceReferenceAndCluster o = checkResourceKeyAndGetResourceIndexIf(oResourceKey, support); + if (0 == s || 0 == p.reference || 0 == o.reference) + return; + // check(); + ClusterI.CompleteTypeEnum completeType = ClusterTraitsBase.getCompleteTypeFromResourceKey(pResourceKey); + boolean ret = removeRelationInternal(s, p.reference, o.reference, completeType, support); + if (ret) { + support.addStatementIndex(this, sResourceKey, getClusterUID(), ClusterChange.REMOVE_OPERATION); + support.addStatementIndex(this, pResourceKey, p.clusterUID, ClusterStream.NULL_OPERATION); + support.addStatementIndex(this, oResourceKey, o.clusterUID, ClusterStream.NULL_OPERATION); + support.removeStatement(this); + } + calculateModifiedId(); + // check(); + return; + } + @Override + public InputStream getValueStream(int resourceKey, ClusterSupport support) throws DatabaseException { + if (DEBUG) + System.out.println("ClusterSmall.getValue " + resourceKey); + int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(resourceKey); + try { + byte[] buffer = resourceTable.getValue(valueTable, resourceIndex); + if(buffer == null) return null; + return new ByteArrayInputStream(buffer); + } catch (ExternalValueException e) { + return support.getValueStreamEx(resourceIndex, clusterUID.second); + } + } + @Override + public byte[] getValue(int resourceKey, ClusterSupport support) + throws DatabaseException { + if (DEBUG) + System.out.println("ClusterSmall.getValue " + resourceKey); + int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(resourceKey); + try { + return resourceTable.getValue(valueTable, resourceIndex); + } catch (ExternalValueException e) { + return clusterSupport.impl.getResourceFile(clusterUID.asBytes(), resourceIndex); + //return support.getValueEx(resourceIndex, clusterUID.second); + } + } + @Override + public boolean hasValue(int resourceKey, ClusterSupport support) + throws DatabaseException { + int resourceIndex = getLocalReference(resourceKey); + return resourceTable.hasValue(resourceIndex); + } + @Override + public boolean removeValue(int resourceKey, ClusterSupport support) + throws DatabaseException { + int resourceIndex = 
getLocalReferenceAnd(resourceKey, support, ClusterChange.DELETE_OPERATION); + support.removeValue(this); + calculateModifiedId(); + return resourceTable.removeValue(valueTable, resourceIndex); + } + @Override + public ClusterI setValue(int rResourceId, byte[] value, int length, ClusterSupport support) + throws DatabaseException { + int resourceIndex = getLocalReferenceAnd(rResourceId, support, ClusterStream.SET_OPERATION); + support.setValue(this, getClusterId(), value, length); + try { + resourceTable.setValue(valueTable, resourceIndex, value, length); + calculateModifiedId(); + return this; + } catch (OutOfSpaceException e) { + boolean streamOff = support.getStreamOff(); + if (!streamOff) + support.setStreamOff(true); + ClusterI cluster = toBig(support); + cluster.setValue(rResourceId, value, length, support); + if (!streamOff) + support.setStreamOff(false); + return cluster; + } + } + @Override + public ClusterI modiValueEx(int rResourceId, long voffset, int length, byte[] value, int offset, ClusterSupport support) + throws DatabaseException { + int resourceIndex = getLocalReferenceAnd(rResourceId, support, ClusterStream.MODI_OPERATION); + support.modiValue(this, getClusterId(), voffset, length, value, offset); + resourceTable.setValueEx(valueTable, resourceIndex); + calculateModifiedId(); + return this; + } + @Override + public byte[] readValueEx(int rResourceId, long voffset, int length, ClusterSupport support) + throws DatabaseException { + int resourceIndex = getLocalReference(rResourceId); + boolean isExternal = resourceTable.isValueEx(valueTable, resourceIndex); + if (!isExternal) + throw new DatabaseException("ClusterI.readValue supported only for external value. Resource key=" + rResourceId); + return support.getValueEx(resourceIndex, getClusterId(), voffset, length); + } + @Override + public boolean isValueEx(int resourceKey) throws DatabaseException { + int resourceIndex = getLocalReference(resourceKey); + return resourceTable.isValueEx(valueTable, resourceIndex); + } + @Override + public long getValueSizeEx(int rResourceId, ClusterSupport support) + throws DatabaseException, ExternalValueException { + int resourceIndex = getLocalReference(rResourceId); + boolean isExternal = resourceTable.isValueEx(valueTable, resourceIndex); + if (!isExternal) + throw new ExternalValueException("ClusterI.getValueSizeEx supported only for external value. 
Resource key=" + rResourceId); + return support.getValueSizeEx(resourceIndex, getClusterId()); + } + @Override + public void setValueEx(int rResourceId) + throws DatabaseException { + int resourceIndex = getLocalReference(rResourceId); + resourceTable.setValueEx(valueTable, resourceIndex); + } + @Override + public int createResource(ClusterSupport support) + throws DatabaseException { + + if(proxy) { + throw new UnsupportedOperationException(); +// ClusterImpl cluster = clusterTable.load2(clusterId, clusterKey); +// return cluster.createResource(support); + } + + short resourceIndex = resourceTable.createResource(); + calculateModifiedId(); + if(DebugPolicy.REPORT_RESOURCE_ID_ALLOCATION) + System.out.println("[RID_ALLOCATION]: ClusterSmall[" + clusterId + "] allocates " + resourceIndex); + support.createResource(this, resourceIndex, getClusterId()); + return ClusterTraits.createResourceKey(clusterKey, resourceIndex); + } + @Override + public boolean hasResource(int resourceKey, ClusterSupport support) { + int clusterKey = ClusterTraitsBase.getClusterKeyFromResourceKeyNoThrow(resourceKey); + if (this.clusterKey != clusterKey) // foreign resource + return false; + int resourceIndex; + try { + resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey); + } catch (DatabaseException e) { + return false; + } + if (resourceIndex > 0 & resourceIndex <= resourceTable.getTableCount()) + return true; + else + return false; + } + @Override + public int getNumberOfResources(ClusterSupport support) + throws DatabaseException { + + if(proxy) { + throw new UnsupportedOperationException(); +// ClusterImpl cluster = clusterTable.load2(clusterId, clusterKey); +// return cluster.getNumberOfResources(support); + } + + return resourceTable.getUsedSize(); + } + + public int getNumberOfResources() { + + if(proxy) throw new IllegalStateException(); + + return resourceTable.getUsedSize(); + + } + + @Override + public long getUsedSpace() { + if(isEmpty()) return 0; + long rt = resourceTable.getTableCapacity() * 8 + 8; // (8 = cluster id) + long ft = foreignTable.getTableCapacity() * 8; + long pt = predicateTable.getTableCapacity() * 4; + long ot = objectTable.getTableCapacity() * 4; + long ct = completeTable.getTableCapacity() * 4; + long vt = valueTable.getTableCapacity() * 1; + long cm = clusterMap.getUsedSpace(); + return rt + ft + pt + ot + ct + vt + cm; + } + @Override + public boolean isEmpty() { + if(resourceTable == null) return true; + return resourceTable.getTableCount() == 0; + } + @Override + public void printDebugInfo(String message, ClusterSupport support) + throws DatabaseException { + throw new DatabaseException("Not implemented!"); + } + private short getInternalReferenceOrZero2(int resourceKey, ClusterSupport support) throws DatabaseException { + int resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(resourceKey); + if (!ClusterTraitsBase.isCluster(clusterBits, resourceKey)) { + return clusterMap.getForeignReferenceOrZero(resourceKey); + } else { + return (short)resourceIndex; + } + } + private short getInternalReferenceOrZeroAnd(int resourceKey, ClusterSupport support, byte op) + throws DatabaseException { + int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey); + int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey); + if (this.clusterKey != clusterKey) { // foreign resource + ClusterUID clusterUID = clusterSupport.getClusterUIDByResourceKey(resourceKey); + short foreignRef = 
clusterMap.getForeignReferenceOrZero(resourceKey); + support.addStatementIndex(this, resourceKey, clusterUID, op); + return foreignRef; + } + support.addStatementIndex(this, resourceKey, getClusterUID(), op); + return (short)resourceIndex; + } + private final short getLocalReference(int resourceKey) throws DatabaseException { + return ClusterTraits.getResourceIndexFromResourceKeyNoThrow(resourceKey); + } + private final short getLocalReferenceAnd(int resourceKey, ClusterSupport support, byte op) + throws DatabaseException { + short resourceIndex = getLocalReference(resourceKey); + support.addStatementIndex(this, resourceKey, getClusterUID(), op); + return resourceIndex; + } + private short checkResourceKeyIsOursAndGetResourceIndexIf(int resourceKey, ClusterSupport support) + throws DatabaseException { + int clusterShortId = ClusterTraits.getClusterKeyFromResourceKey(resourceKey); + if (this.clusterKey != clusterShortId) + return 0; + int resourceIndex = ClusterTraits.getResourceIndexFromResourceKey(resourceKey); + return (short)resourceIndex; + } + private short getReferenceOrCreateIfForeign(int resourceKey, ClusterUID clusterUID, ClusterSupport support, byte op) + throws DatabaseException { + int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey); + short resourceIndex = (short)ClusterTraits.getResourceIndexFromResourceKey(resourceKey); + if (this.clusterKey != clusterKey) { + support.addStatementIndex(this, resourceKey, clusterUID, op); + short ref = clusterMap.getForeignReferenceOrCreateByResourceKey(resourceKey, clusterUID); + return ref; + } + support.addStatementIndex(this, resourceKey, getClusterUID(), op); + return resourceIndex; + } + private short getReferenceOrCreateIfForeign(int resourceKey, ClusterSupport support, byte op) + throws DatabaseException { + int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey); + short resourceIndex = (short)ClusterTraits.getResourceIndexFromResourceKey(resourceKey); + if (this.clusterKey != clusterKey) { + ClusterUID clusterUID = clusterSupport.getClusterUIDByResourceKey(resourceKey); + support.addStatementIndex(this, resourceKey, clusterUID, op); + short ref = clusterMap.getForeignReferenceOrCreateByResourceKey(resourceKey, clusterUID); + return ref; + } + support.addStatementIndex(this, resourceKey, getClusterUID(), op); + return resourceIndex; + } + private class ResourceReferenceAndCluster { + ResourceReferenceAndCluster(short reference, ClusterUID clusterUID) { + this.reference = reference; + this.clusterUID = clusterUID; + } + public final short reference; + public final ClusterUID clusterUID; + } + private ResourceReferenceAndCluster checkResourceKeyAndGetResourceIndexIf(int resourceKey, ClusterSupport support) + throws DatabaseException { + int clusterKey = ClusterTraits.getClusterKeyFromResourceKey(resourceKey); + short resourceIndex = (short)ClusterTraits.getResourceIndexFromResourceKey(resourceKey); + if (this.clusterKey != clusterKey) { // foreign resource + ClusterI foreignCluster = support.getClusterByClusterKey(clusterKey); + ClusterUID clusterUID = foreignCluster.getClusterUID(); + short ref = clusterMap.getForeignReferenceOrZero(resourceKey); + return new ResourceReferenceAndCluster(ref, clusterUID); + } + return new ResourceReferenceAndCluster(resourceIndex, getClusterUID()); + } + + static long fTime = 0; + + @Override + final public int execute(int resourceReference) throws DatabaseException { + short resourceRef = (short)resourceReference; + int key; + if 
(ClusterTraitsSmall.resourceRefIsLocal(resourceRef)) { + key = clusterBits | resourceRef; + } else { + short foreignIndex = ClusterTraitsSmall.resourceRefGetForeignIndex((short)resourceRef); + //long start = System.nanoTime(); + ResourceUID resourceUID = foreignTable.getResourceUID(foreignIndex); + int clusterKey = clusterSupport.getClusterKeyByClusterUIDOrMake(resourceUID.asCID()); +// ClusterBase cluster = clusterSupport.getClusterByClusterUIDOrMake(resourceUID.asCID()); + key = ClusterTraitsBase.createResourceKey(clusterKey, resourceUID.getIndex()); + //fTime += System.nanoTime() - start; + //System.err.println("fTime: " + 1e-9*fTime); + } + if (DEBUG) + System.out.println("ClusterSmall.execute key=" + key); + return key; + } + + private boolean addRelationInternal(short sReference, short pReference, short oReference, ClusterI.CompleteTypeEnum completeType) + throws DatabaseException { + int predicateIndex = resourceTable.addStatement(sReference, pReference, oReference, predicateTable, objectTable, completeType, completeTable); + if (0 == predicateIndex) + return true; // added to resourceTable + else if (0 > predicateIndex) + return false; // old complete statement + int newPredicateIndex = predicateTable.addPredicate(predicateIndex, 0xFFFF & pReference, 0xFFFF & oReference, objectTable); + if (0 == newPredicateIndex) + return false; + if (predicateIndex != newPredicateIndex) + resourceTable.setPredicateIndex(sReference, newPredicateIndex); + return true; + } + private boolean removeRelationInternal(int sResourceIndex, short pResourceIndex, + short oResourceIndex, ClusterI.CompleteTypeEnum completeType, ClusterSupport support) + throws DatabaseException { + int predicateIndex = resourceTable.getPredicateIndex(sResourceIndex); + if (0 == predicateIndex || ClusterI.CompleteTypeEnum.NotComplete != completeType) + return resourceTable.removeStatementFromCache(sResourceIndex, + pResourceIndex, oResourceIndex, completeType, completeTable); + PredicateTable.Status ret = predicateTable.removePredicate(predicateIndex, 0xFFFF & pResourceIndex, 0xFFFF & oResourceIndex, objectTable); + switch (ret) { + case NothingRemoved: + return false; + case PredicateRemoved: { + if (0 == predicateTable.getPredicateSetSize(predicateIndex)) + resourceTable.setPredicateIndex(sResourceIndex, 0); + // intentionally dropping to next case + } default: + break; + } + resourceTable.removeStatement(sResourceIndex, + pResourceIndex, oResourceIndex, + completeType, completeTable, + predicateTable, objectTable, support); + return true; + } + @Override + public void load() { + throw new Error("Not supported."); + } + + @Override + public void load(Callback<DatabaseException> r) { + throw new Error("Not supported."); + } + + public boolean contains(int resourceKey) { + return ClusterTraitsBase.isCluster(clusterBits, resourceKey); + } + @Override + public void load(final ClusterSupport support, final Runnable callback) { + + throw new UnsupportedOperationException(); + +// try { +// clusterTable.load2(clusterId, clusterKey); +// callback.run(); +// } catch (DatabaseException e) { +// e.printStackTrace(); +// } + + } + @Override + public ClusterI getClusterByResourceKey(int resourceKey, + ClusterSupport support) { + throw new Error(); + } + @Override + public void increaseReferenceCount(int amount) { + throw new Error(); + } + @Override + public void decreaseReferenceCount(int amount) { + throw new Error(); + } + @Override + public int getReferenceCount() { + throw new Error(); + } + @Override + public void releaseMemory() { + } + @Override + 
public void compact() { + clusterMap.compact(); + } + @Override + public boolean isLoaded() { + return !proxy; + } + +// public ClusterImpl tryLoad(SessionImplSocket sessionImpl) { +// +// throw new UnsupportedOperationException(); +// assert(Constants.ReservedClusterId != clusterId); +// +// return clusterTable.tryLoad(clusterId, clusterKey); +// +// } + + + @Override + public ClusterBig toBig(ClusterSupport support) + throws DatabaseException { + if (DEBUG) { + System.out.println("DEBUG: toBig cluster=" + clusterId); + new Exception().printStackTrace(); + } + ClusterBig big = new ClusterBig(clusterSupport, getClusterUID(), clusterKey, (ClusterSupport2)support); + big.cc = this.cc; +// if(big.cc != null) +// big.cc.clusterImpl = this; + resourceTable.toBig(big, support, this); + big.foreignLookup = this.foreignLookup; + big.change = this.change; + this.cc = null; + this.foreignLookup = null; + this.change = null; + return big; + } + + @Override + public ClusterTypeEnum getType() { + return ClusterTypeEnum.SMALL; + } + @Override + public boolean getImmutable() { + int status = resourceTable.getClusterStatus(); + return (status & ClusterStatus.ImmutableMaskSet) == 1; + } + @Override + public void setImmutable(boolean immutable, ClusterSupport support) { + if(resourceTable != null) { + int status = resourceTable.getClusterStatus(); + if (immutable) + status |= ClusterStatus.ImmutableMaskSet; + else + status &= ClusterStatus.ImmutableMaskClear; + resourceTable.setClusterStatus(status); + } + support.setImmutable(this, immutable); + } + + @Override + public String toString() { + try { + final TIntHashSet set = new TIntHashSet(); + TIntShortHashMap map = foreignTable.getResourceHashMap(); + map.forEachKey(new TIntProcedure() { + @Override + public boolean execute(int value) { + set.add(value & 0xfffff000); + return true; + } + }); + return "ClusterSmall[" + getClusterUID() + " - " + getClusterId() + " - " + getNumberOfResources() + " - " + foreignTable.getResourceHashMap().size() + " - " + set.size() + "]"; + } catch (DatabaseException e) { + return "ClusterSmall[" + getNumberOfResources() + "]"; + } + } + + // Memory map + // bytes (b) | headers(i) | predicateTable (i) | objectTable (i) | completeTable (i) | resourceTable (l) | foreignTable (l) + + @Override + public byte[] storeBytes() throws IOException { + + int byteSize = valueTable.getTableSize(); + int longSize = LONG_HEADER_SIZE + resourceTable.getTableSize() + foreignTable.getTableSize(); + int intSize = INT_HEADER_SIZE + predicateTable.getTableSize() + objectTable.getTableSize() + completeTable.getTableSize(); + + byte[] raw = new byte[12 + byteSize + 8*longSize + 4*intSize]; + + int[] currentHeader = Arrays.copyOf(headerTable, INT_HEADER_SIZE); + + Bytes.writeLE(raw, 0, byteSize); + Bytes.writeLE(raw, 4, intSize); + Bytes.writeLE(raw, 8, longSize); + + int rawPos = valueTable.storeBytes(raw, 0, 12); + + int intBase = rawPos; + + rawPos += 4*INT_HEADER_SIZE; + rawPos = predicateTable.storeBytes(raw, (rawPos-intBase)>>2, rawPos); + rawPos = objectTable.storeBytes(raw, (rawPos-intBase)>>2, rawPos); + rawPos = completeTable.storeBytes(raw, (rawPos-intBase)>>2, rawPos); + + int longBase = rawPos; + + rawPos += 8*LONG_HEADER_SIZE; + rawPos = resourceTable.storeBytes(raw, (rawPos-longBase)>>3, rawPos); + rawPos = foreignTable.storeBytes(raw, (rawPos-longBase)>>3, rawPos); + + Bytes.writeLE8(raw, longBase, -1); + Bytes.writeLE8(raw, longBase+8, LONG_HEADER_VERSION); + Bytes.writeLE8(raw, longBase+16, 0); + Bytes.writeLE8(raw, longBase+24, 
clusterUID.second); + + // write header + for(int i=0;i<INT_HEADER_SIZE;i++) + Bytes.writeLE(raw, intBase + (i<<2), currentHeader[i]); // (loop body reconstructed; the original patch line was garbled in extraction) + + return raw; + + } + + @Override + public Table<?> getPredicateTable() { + return predicateTable; + } + + @Override + public Table<?> getForeignTable() { + return foreignTable; + } + + @Override + public int makeResourceKey(int pRef) throws DatabaseException { + throw new UnsupportedOperationException(); + } + + @Override + public Table<?> getCompleteTable() { + return completeTable; + } + + @Override + public Table<?> getValueTable() { + return valueTable; + } + + @Override + public Table<?> getObjectTable() { + return objectTable; + } + +} + +class ClusterStatus { + public static final int ImmutableMaskClear = 0xFFFFFFFE; + public static final int ImmutableMaskSet = 0x00000001; + public static final int DeletedMaskClear = 0xFFFFFFFD; + public static final int DeletedMaskSet = 0x00000002; +}
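storeBytes() above serializes a small cluster as a 12-byte little-endian size prefix (value-byte count, int-word count, long-word count) followed by the sections listed in the memory-map comment. A hedged sketch of reading that prefix back with plain JDK classes — the section payloads themselves are decoded by the table classes and are not reproduced here:

    // Decodes only the 12-byte prefix written by Bytes.writeLE(raw, 0/4/8, ...).
    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;

    final class ClusterPrefix {
        final int byteSize, intSize, longSize;

        ClusterPrefix(byte[] raw) {
            ByteBuffer b = ByteBuffer.wrap(raw, 0, 12).order(ByteOrder.LITTLE_ENDIAN);
            byteSize = b.getInt();   // value table bytes
            intSize  = b.getInt();   // int words: header + predicate/object/complete tables
            longSize = b.getInt();   // long words: header + resource/foreign tables
        }

        int expectedLength() {       // total size storeBytes() allocates
            return 12 + byteSize + 4 * intSize + 8 * longSize;
        }
    }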
 diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/AcornDatabase.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/AcornDatabase.java new file mode 100644 index 000000000..63c73dd63 --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/AcornDatabase.java @@ -0,0 +1,229 @@ +package org.simantics.acorn.internal; + +import java.io.File; +import java.io.IOException; +import java.nio.file.DirectoryStream; +import java.nio.file.FileVisitOption; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; +import java.util.EnumSet; +import java.util.Properties; + +import org.simantics.acorn.GraphClientImpl2; +import org.simantics.db.Database; +import org.simantics.db.DatabaseUserAgent; +import org.simantics.db.ServiceLocator; +import org.simantics.db.common.utils.Logger; +import org.simantics.db.server.ProCoreException; + +/** + * @author Tuukka Lehtonen + */ +public class AcornDatabase implements Database { + + private final Path folder; + + private DatabaseUserAgent userAgent; + + public AcornDatabase(Path folder) { + this.folder = folder; + } + + @Override + public DatabaseUserAgent getUserAgent() { + return userAgent; + } + + @Override + public void setUserAgent(DatabaseUserAgent dbUserAgent) { + userAgent = dbUserAgent; + } + + @Override + public Status getStatus() { + return Status.Local; + } + + @Override + public File getFolder() { + return folder.toFile(); + } + + @Override + public boolean isFolderOk() { + return isFolderOk(folder.toFile()); + } + + @Override + public boolean isFolderOk(File aFolder) { + return aFolder.isDirectory(); + } + + @Override + public boolean isFolderEmpty() { + return isFolderEmpty(folder.toFile()); + } + + @Override + public boolean isFolderEmpty(File aFolder) { + Path path = aFolder.toPath(); + if (!Files.isDirectory(path)) + return false; + try (DirectoryStream<Path> folderStream = Files.newDirectoryStream(path)) { + return !folderStream.iterator().hasNext(); + } catch (IOException e) { + Logger.defaultLogError("Failed to open folder stream. folder=" + path, e); + return false; + } + } + + @Override + public void initFolder(Properties properties) throws ProCoreException { + try { + Files.createDirectories(folder); + } catch (IOException e) { + throw new ProCoreException(e); + } + } + + @Override + public void deleteFiles() throws ProCoreException { + // TODO: somehow check that the acorn client is not active. + deleteTree(folder); + } + + @Override + public void start() throws ProCoreException { + } + + @Override + public boolean isRunning() throws ProCoreException { + return true; + } + + @Override + public boolean tryToStop() throws ProCoreException { + return true; +// throw new UnsupportedOperationException(); + } + + @Override + public void connect() throws ProCoreException { + } + + @Override + public boolean isConnected() throws ProCoreException { + return true; + } + + @Override + public String execute(String command) throws ProCoreException { + throw new UnsupportedOperationException("execute(" + command + ")"); + } + + @Override + public void disconnect() throws ProCoreException { + } + + @Override + public void clone(File to, int revision, boolean saveHistory) throws ProCoreException { + // TODO: implement + throw new UnsupportedOperationException(); + } + + @Override + public Path createFromChangeSets(int revision) throws ProCoreException { + // TODO: implement + throw new UnsupportedOperationException(); + } + + @Override + public void deleteGuard() throws ProCoreException { + // TODO: implement + throw new UnsupportedOperationException(); + } + + @Override + public Path dumpChangeSets() throws ProCoreException { + // TODO: implement + throw new UnsupportedOperationException(); + } + + @Override + public void purgeDatabase() throws ProCoreException { + // TODO: implement + throw new UnsupportedOperationException(); + } + + @Override + public long serverGetTailChangeSetId() throws ProCoreException { + // "We have it all" + // But after purging we don't so beware. + // TODO: beware for purge + return 1; + } + + @Override + public Session newSession(ServiceLocator locator) throws ProCoreException { + try { + return new GraphClientImpl2(this, folder, locator); + } catch (IOException e) { + throw new ProCoreException(e); + } + } + + @Override + public Journal getJournal() throws ProCoreException { + // TODO: implement + throw new UnsupportedOperationException(); + } + + private static void deleteTree(Path path) throws ProCoreException { + if (!Files.exists(path)) + return; + + class Visitor extends SimpleFileVisitor<Path> { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + try { + Files.delete(file); + } catch (IOException ioe) { + ioe.printStackTrace(); + throw ioe; + } + return FileVisitResult.CONTINUE; + } + @Override + public FileVisitResult postVisitDirectory(Path dir, IOException e) throws IOException { + if (e == null) { + try { + Files.delete(dir); + } catch (IOException ioe) { + ioe.printStackTrace(); + throw ioe; + } + return FileVisitResult.CONTINUE; + } + throw e; + } + } + try { + Visitor v = new Visitor(); + EnumSet<FileVisitOption> opts = EnumSet.noneOf(FileVisitOption.class); + Files.walkFileTree(path, opts, Integer.MAX_VALUE, v); + } catch (IOException e) { + throw new ProCoreException("Could not delete " + path, e); + } + } + + @Override + public String getCompression() { + return "LZ4"; + } + +}
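As a usage sketch of the class above: AcornDatabase is constructed on a database folder and hands out sessions backed by GraphClientImpl2. How a ServiceLocator is obtained is outside this patch, so it stays a parameter here; Session refers to the nested session type of org.simantics.db.Database:

    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.util.Properties;

    import org.simantics.acorn.internal.AcornDatabase;
    import org.simantics.db.Database;
    import org.simantics.db.ServiceLocator;
    import org.simantics.db.server.ProCoreException;

    final class AcornUsageSketch {
        static Database.Session open(ServiceLocator locator) throws ProCoreException {
            Path folder = Paths.get("db");        // hypothetical database folder
            AcornDatabase db = new AcornDatabase(folder);
            if (!db.isFolderOk())
                db.initFolder(new Properties()); // create the folder structure
            return db.newSession(locator);       // backed by GraphClientImpl2
        }
    }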
 diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/Activator.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/Activator.java new file mode 100644 index 000000000..b6cb59b40 --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/Activator.java @@ -0,0 +1,62 @@ +/******************************************************************************* + * Copyright (c) 2007, 2010 Association for Decentralized Information Management + * in Industry THTH ry. + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * VTT Technical Research Centre of Finland - initial API and implementation + *******************************************************************************/ +package org.simantics.acorn.internal; + +import org.eclipse.core.runtime.Plugin; +import org.osgi.framework.BundleContext; + +/** + * @author Antti Villberg + */ +public class Activator extends Plugin { + + // The plug-in ID + public static final String BUNDLE_ID = "org.simantics.acorn"; //$NON-NLS-1$ + // The shared instance + private static Activator plugin; + + /** + * The constructor + */ + public Activator() { + } + + /* + * (non-Javadoc) + * @see org.eclipse.core.runtime.Plugin#start(org.osgi.framework.BundleContext) + */ + @Override + public void start(BundleContext context) throws Exception { + super.start(context); + plugin = this; + } + + /* + * (non-Javadoc) + * @see org.eclipse.core.runtime.Plugin#stop(org.osgi.framework.BundleContext) + */ + @Override + public void stop(BundleContext context) throws Exception { + plugin = null; + super.stop(context); + } + + /** + * Returns the shared instance + * + * @return the shared instance + */ + public static Activator getDefault() { + return plugin; + } + +}
 diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/BijectionMap.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/BijectionMap.java new file mode 100644 index 000000000..3de77d2aa --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/BijectionMap.java @@ -0,0 +1,119 @@ +/******************************************************************************* + * Copyright (c) 2007, 2010 Association for Decentralized Information Management + * in Industry THTH ry. + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * VTT Technical Research Centre of Finland - initial API and implementation + *******************************************************************************/ +/* + * Created on Jan 21, 2005 + * + * Copyright Toni Kalajainen + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.simantics.acorn.internal; + +import java.util.HashMap; +import java.util.Map; +import java.util.Set; + +/** + * A bijection map is a Map with no key/value asymmetry: it only stores 1:1 + * mappings between values. The two sides of a mapping are called the left + * and right values. + * + * Each value can occur at most once on its side. + * + * @author Toni Kalajainen + */ +public class BijectionMap<L, R> { + + /** The keys of tableLeft are left-side-values and + * values are right-side-values */ + private final Map<L, R> tableLeft = new HashMap<L, R>(); + /** The keys of tableRight are right-side-values and + * values on it are left-side-values */ + private final Map<R, L> tableRight = new HashMap<R, L>(); + + public boolean containsLeft(L leftValue) + { + return tableLeft.containsKey(leftValue); + } + + public boolean containsRight(R rightValue) + { + return tableRight.containsKey(rightValue); + } + + public void map(L leftValue, R rightValue) + { + // Remove possible old mapping + R oldRight = tableLeft.remove(leftValue); + if (oldRight != null) { + tableRight.remove(oldRight); + } else { + L oldLeft = tableRight.remove(rightValue); + if (oldLeft != null) { + tableLeft.remove(oldLeft); + } + } + + tableLeft.put(leftValue, rightValue); + tableRight.put(rightValue, leftValue); + } + + public int size() + { + return tableLeft.size(); + } + + public L getLeft(R rightValue) { + return tableRight.get(rightValue); + } + + public R getRight(L leftValue) { + return tableLeft.get(leftValue); + } + + public R removeWithLeft(L leftValue) { + R rightValue = tableLeft.remove(leftValue); + if (rightValue!=null) + tableRight.remove(rightValue); + return rightValue; + } + + public L removeWithRight(R rightValue) { + L leftValue = tableRight.remove(rightValue); + if (leftValue!=null) + tableLeft.remove(leftValue); + return leftValue; + } + + public Set<L> getLeftSet() { + return tableLeft.keySet(); + } + + public Set<R> getRightSet() { + return tableRight.keySet(); + } + + public void clear() { + tableLeft.clear(); + tableRight.clear(); + } +}
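A short usage sketch of BijectionMap (same package assumed): both directions are plain HashMaps, so lookups are O(1) from either side, and map() evicts any stale pair before inserting so the relation stays 1:1:

    final class BijectionMapDemo {
        public static void main(String[] args) {
            BijectionMap<Integer, String> keys = new BijectionMap<Integer, String>();
            keys.map(1, "cluster-a");
            keys.map(2, "cluster-b");
            System.out.println(keys.getRight(1));          // cluster-a
            System.out.println(keys.getLeft("cluster-b")); // 2
            // remapping a left value evicts its old partner, keeping the relation 1:1
            keys.map(1, "cluster-c");
            System.out.println(keys.containsRight("cluster-a")); // false
            System.out.println(keys.removeWithLeft(2));    // cluster-b
        }
    }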
addStatementIndex2(key, clusterUID, (byte)0, null); + + lastArg++; + + } +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterChange.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterChange.java new file mode 100644 index 000000000..b1fbb5d9c --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterChange.java @@ -0,0 +1,735 @@ +package org.simantics.acorn.internal; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import org.simantics.acorn.internal.ClusterStream.ClusterEnum; +import org.simantics.acorn.internal.ClusterStream.Data; +import org.simantics.acorn.internal.ClusterStream.DebugInfo; +import org.simantics.acorn.internal.ClusterStream.OpEnum; +import org.simantics.acorn.internal.ClusterStream.StmEnum; +import org.simantics.compressions.Compressions; +import org.simantics.db.exception.RuntimeDatabaseException; +import org.simantics.db.impl.ClusterTraitsBase; +import org.simantics.db.procore.cluster.ClusterTraits; +import org.simantics.db.procore.cluster.ClusterTraitsSmall; +import org.simantics.db.service.Bytes; +import org.simantics.db.service.ClusterUID; +import org.simantics.utils.datastructures.Pair; + +import gnu.trove.map.hash.TIntByteHashMap; +import gnu.trove.map.hash.TLongIntHashMap; + + +public final class ClusterChange { + + public static final int VERSION = 1; + public static final byte ADD_OPERATION = 2; + public static final byte REMOVE_OPERATION = 3; + public static final byte DELETE_OPERATION = 5; + + public static final boolean DEBUG = false; + public static final boolean DEBUG_STAT = false; + public static final boolean DEBUG_CCS = false; + + private static DebugInfo sum = new DebugInfo(); + + public final TIntByteHashMap foreignTable = new TIntByteHashMap(); + private final DebugInfo info; +// private final GraphSession graphSession; + public final ClusterUID clusterUID; + private final int SIZE_OFFSET; +// private final int HEADER_SIZE; + // How much buffer is used before stream is flushed to server. The bigger the better. + public static final int MAX_FIXED_BYTES = (1<<15) + (1<<14); + private static final int MAX_FIXED_OPERATION_SIZE = 17 + 16; + private static final int MAX_FIXED_OPERATION_SIZE_AND_ROOM_FOR_ERROR = MAX_FIXED_OPERATION_SIZE + 36; + private int nextSize = MAX_FIXED_BYTES; + int byteIndex = 0; + private byte[] bytes = null; // Operation data. +// private final byte[] header; + private boolean flushed = false; + private ArrayList> stream; + +// public ClusterImpl clusterImpl; + + public ClusterChange( ArrayList> stream, ClusterUID clusterUID) { + this.clusterUID = clusterUID; + long[] longs = new long[ClusterUID.getLongLength()]; + clusterUID.toLong(longs, 0); + this.stream = stream; +// this.graphSession = clusterStream.graphSession; + info = new DebugInfo(); +// HEADER_SIZE = 8 + longs.length * 8; +// header = new byte[HEADER_SIZE]; + SIZE_OFFSET = 0; +// Bytes.writeLE(header, SIZE_OFFSET + 0, 0); // Correct byte vector size is set with setHeaderVectorSize() later. 
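+// [Sketch] Role of this class, in terms of the members declared above:
+// ClusterChange encodes operations (statements, values) for one cluster
+// into 'bytes' and, on flush(), wraps the buffer in a small header,
+// LZ4-compresses it when that is smaller than the raw payload, and
+// appends the result to 'stream' as a (ClusterUID, byte[]) pair.
+// A rough usage sketch, with hypothetical 'change' and 'clusterUID':
+//
+//   ArrayList<Pair<ClusterUID, byte[]>> stream = new ArrayList<>();
+//   ClusterChange cc = new ClusterChange(stream, clusterUID);
+//   cc.addChange(change); // encode one add/remove/delete operation
+//   cc.flush();           // frame, compress and emit the pending chunk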
+// Bytes.writeLE(header, SIZE_OFFSET + 4, VERSION); +// for (int i=0, offset=8; i>>8); + } + void flushCollect(Change c) { + throw new UnsupportedOperationException(); +// flushInternal(graphSession, clusterUID); +// if (DEBUG) +// printlnd("Cluster change data was flushed."); +// if (null != c) { +// if (DEBUG) +// printlnd("Clearing lookup for " + c.toString()); +// c.lookup1 = null; +// c.lookup2 = null; +// } +// if (null != clusterImpl) { +// clusterImpl.foreignLookup = null; +// } + } + + private final boolean checkBufferSpace(Change c) { +// clusterStream.changes.checkFlush(); + if(bytes == null) initBuffer(); + if (MAX_FIXED_BYTES - byteIndex > MAX_FIXED_OPERATION_SIZE_AND_ROOM_FOR_ERROR) { + return false; + } + flush(); +// initBuffer(); + return true; + } + + private final void checkBufferSpace(int size) { + if(bytes == null) initBuffer(); + if (bytes.length - byteIndex >= size) + return; + nextSize = Math.max(MAX_FIXED_BYTES, size); + flush(); + initBuffer(); + } + + public final void addChange(Change c) { + checkInitialization(); + checkBufferSpace(c); + byte operation = c.op0; + if(operation == ADD_OPERATION) + addStm(c, StmEnum.Add); + else if (operation == REMOVE_OPERATION) + addStm(c, StmEnum.Remove); + else if (operation == DELETE_OPERATION) { + if (DEBUG) + printlnd("Delete value offset=" + byteIndex + " " + c); + addByte(OpEnum.Delete.getOrMask()); + addShort(ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(c.key0)); + } + c.lastArg = 0; + } + + private final void addForeignLong(short index, ClusterUID clusterUID) { + byteIndex = clusterUID.toByte(bytes, byteIndex); + bytes[byteIndex++] = (byte)(index & 0xFF); + bytes[byteIndex++] = (byte)(index >>> 8); + } + + private final ClusterEnum addIndexAndCluster(int key, ClusterUID clusterUID, byte lookIndex, byte[] lookup) { + assert(!clusterUID.equals(ClusterUID.Null)); + short resourceIndex = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(key); + if (clusterUID.equals(this.clusterUID)) { + bytes[byteIndex++] = (byte)(resourceIndex & 0xFF); + bytes[byteIndex++] = (byte)(resourceIndex >>> 8); + return ClusterEnum.Local; + } + + byte foreign = 0; + if(lookIndex > 0) { + if(lookup != null) + foreign = lookup[lookIndex]; + } else { + foreign = foreignTable.get(key); + } + if (0 != foreign) { + if (foreign > 256) + throw new RuntimeDatabaseException("Internal error, contact application support." + + "Too big foreing index=" + foreign + " max=256"); + --foreign; + bytes[byteIndex++] = foreign; + return ClusterEnum.ForeignShort; + } else { + byte position = (byte) (foreignTable.size() + 1); + if(lookup != null) + lookup[lookIndex] = position; + foreignTable.put(key, position); + if (DEBUG_STAT) + info.sForeign = foreignTable.size(); + if (clusterUID.equals(ClusterUID.Null)) + throw new RuntimeDatabaseException("Internal error, contact application support." 
+ + "Cluster unique id not defined for foreing cluster."); + addForeignLong(resourceIndex, clusterUID); + return ClusterEnum.ForeignLong; + } + } + + private final void addByte(byte b) { + bytes[byteIndex++] = b; + } + + private final void addShort(short s) { + bytes[byteIndex++] = (byte)(s & 0xFF); + bytes[byteIndex++] = (byte)(s >>> 8); + } + +// private final void addShort(int s) { +// bytes[byteIndex++] = (byte) (s & 0xFF); +// bytes[byteIndex++] = (byte) ((s >>> 8) & 0xFF); +// } + + private final void addInt(int i) { +// System.err.println("addInt " + i + " " + i); + bytes[byteIndex++] = (byte) (i & 0xFF); + bytes[byteIndex++] = (byte) ((i >>> 8) & 0xFF); + bytes[byteIndex++] = (byte) ((i >>> 16) & 0xFF); + bytes[byteIndex++] = (byte) ((i >>> 24) & 0xFF); + // buffer.asIntBuffer().put(i); + // buffer.position(buffer.position()+4); + } + +// private void addLong6(long l) { +//// System.err.println("addLong " + l); +// bytes[byteIndex++] = (byte) (l & 0xFF); +// bytes[byteIndex++] = (byte) ((l >>> 8) & 0xFF); +// bytes[byteIndex++] = (byte) ((l >>> 16) & 0xFF); +// bytes[byteIndex++] = (byte) ((l >>> 24) & 0xFF); +// bytes[byteIndex++] = (byte) ((l >>> 32) & 0xFF); +// bytes[byteIndex++] = (byte) ((l >>> 40) & 0xFF); +// // buffer.asLongBuffer().put(l); +// // buffer.position(buffer.position() + 6); +// } + + private void addLong7(long l) { + bytes[byteIndex++] = (byte) (l & 0xFF); + bytes[byteIndex++] = (byte) ((l >>> 8) & 0xFF); + bytes[byteIndex++] = (byte) ((l >>> 16) & 0xFF); + bytes[byteIndex++] = (byte) ((l >>> 24) & 0xFF); + bytes[byteIndex++] = (byte) ((l >>> 32) & 0xFF); + bytes[byteIndex++] = (byte) ((l >>> 40) & 0xFF); + bytes[byteIndex++] = (byte) ((l >>> 48) & 0xFF); + // buffer.asLongBuffer().put(l); + // buffer.position(buffer.position() + 7); + } + +// private void addLong(long l) { +// bytes[byteIndex++] = (byte) (l & 0xFF); +// bytes[byteIndex++] = (byte) ((l >>> 8) & 0xFF); +// bytes[byteIndex++] = (byte) ((l >>> 16) & 0xFF); +// bytes[byteIndex++] = (byte) ((l >>> 24) & 0xFF); +// bytes[byteIndex++] = (byte) ((l >>> 32) & 0xFF); +// bytes[byteIndex++] = (byte) ((l >>> 40) & 0xFF); +// bytes[byteIndex++] = (byte) ((l >>> 48) & 0xFF); +// bytes[byteIndex++] = (byte) ((l >>> 56) & 0xFF); +// } + private final byte bufferPop() { + return bytes[--byteIndex]; + } + + final class DebugStm { + StmEnum e; + int r; + int p; + int o; + ClusterUID pc; + ClusterUID oc; + + DebugStm(StmEnum e, int r, int p, ClusterUID pc, int o, ClusterUID oc) { + this.e = e; + this.r = r; + this.p = p; + this.o = o; + this.pc = pc; + this.oc = oc; + } + + @Override + public String toString() { + short ri = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(r); + short pi = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(p); + short oi = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(o); + return "" + e + " rk=" + r + " ri=" + ri + " rc=" + clusterUID + + " pk=" + p + " pi=" + pi + " pc=" + pc + + " ok=" + o + " oi=" + oi + " oc=" + oc; + } + + public String toString2() { + return "" + e + " r=" + r + " rc=" + clusterUID + " p=" + p + + " pc=" + pc + " o=" + o + " oc=" + oc; + } + + public String toString3() { + short ri = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(r); + short pi = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(p); + short oi = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(o); + return "" + e + " ri=" + ri + + " pi=" + pi + " pc=" + pc + + " oi=" + oi + " oc=" + oc; + } + } + + private List debugStms = new ArrayList(); 
+ + @SuppressWarnings("unused") + private final void addStm(Change c, StmEnum stmEnum) { + + if (DEBUG_STAT) + ++info.nStms; + if (DEBUG || DEBUG_CCS) { + DebugStm d = new DebugStm(stmEnum, c.key0, c.key1, c.clusterUID1, c.key2, c.clusterUID2); + if (DEBUG_CCS) + debugStms.add(d); + if (DEBUG) { + printlnd(d.toString3() + " offset=" + byteIndex); + } + } + // int opPos = buffer.position(); + int opPos = byteIndex++; + // buffer.put((byte)0); // operation code + // addByte((byte)0); + + boolean done = true; + + ClusterEnum a = addIndexAndCluster(c.key1, c.clusterUID1, c.lookIndex1, c.lookup1); + byte ab = 0; + + // ForeignShort = byte + // Local = short + // ForeignLong = 8 byte + if (a != ClusterEnum.ForeignShort) { + ab = bufferPop(); + done = false; + } + + ClusterEnum b = addIndexAndCluster(c.key2, c.clusterUID2, c.lookIndex2, c.lookup2); + byte bb = 0; + if (b != ClusterEnum.ForeignShort) { + bb = bufferPop(); + done = false; + } + + int ri = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(c.key0); + if (ClusterTraitsSmall.isIllegalResourceIndex(ri)) + throw new RuntimeDatabaseException("Assertion error. Illegal resource index=" + ri); + bytes[byteIndex++] = (byte)ri; // index low byte + if(!done) { + Data data = ClusterEnum.getData(stmEnum, a, b); + int left = 6 - data.bits; + int op = ri >>> (8 + left); + ri >>>= 8; + ri &= (1 << left) - 1; + if (a != ClusterEnum.ForeignShort) { + ri |= ab << left; + left += 6; + } + if (b != ClusterEnum.ForeignShort) { + ri |= bb << left; + left += 6; + } + switch (data.bytes) { + default: + throw new RuntimeDatabaseException("Assertion error. Illegal number of bytes=" + data.bytes); + case 2: + bytes[byteIndex++] = (byte)(ri & 0xFF); + bytes[byteIndex++] = (byte)((ri >>> 8) & 0xFF); + break; + case 1: + bytes[byteIndex++] = (byte)(ri & 0xFF); + break; + case 0: + break; + } + op |= data.mask; + this.bytes[opPos] = (byte)op; + } else { + if (stmEnum == StmEnum.Add) + bytes[opPos] = (byte)((ri >>> 8) + 64); + else + bytes[opPos] = (byte)((ri >>> 8) + 128); + } + if (DEBUG_STAT) { + if (a == ClusterEnum.Local && b == ClusterEnum.Local) { + ++info.nLocal; + } else if (a == ClusterEnum.Local || b == ClusterEnum.Local) { + ++info.nPartly; + } else { + ++info.nForeign; + } + } + if (foreignTable.size() > 252) + flush(); +// throw new UnsupportedOperationException(); + //flushInternal(graphSession, clusterUID); + } + + private final int modiValue(int ri, long value_offset, byte[] bytes, int offset, int size) { + if (DEBUG) + printlnd("Modify value ri=" + ri + " vo=" + value_offset + " size=" + size + " total=" + bytes.length); + if (ClusterTraitsBase.isIllegalResourceIndex(ri)) + throw new RuntimeDatabaseException("Assertion error. 
Illegal resource index=" + ri); + if (value_offset > (1L << 58 - 1)) + throw new RuntimeDatabaseException("Illegal value offset=" + + value_offset); + if (size < 0 || size > MAX_FIXED_BYTES - 1) + throw new RuntimeDatabaseException("Illegal value size=" + size); + if (offset + size > bytes.length) + throw new RuntimeDatabaseException("Illegal value size=" + size); + checkBufferSpace(12 + size); + addByte(OpEnum.Modify.getOrMask()); + ri |= (value_offset >>> 56) << 14; // add top two bits + addShort((short) ri); + value_offset &= (1L << 56) - 1; + addLong7(value_offset); + addShort((short) size); + if (DEBUG) + System.out.println("Modify value fixed part end offset=" + byteIndex); + int copied = Math.min(size, this.bytes.length - byteIndex); + System.arraycopy(bytes, offset, this.bytes, byteIndex, copied); + byteIndex += size; + return copied; + } + +// private final void modiValueBig(int ri, long voffset, int left, byte[] bytes, int offset) { +// checkBufferSpace(0); +// int current = Math.min(this.bytes.length - byteIndex - 12, left); +// if(current >= 0) { +// int written = modiValue(ri, voffset, bytes, offset, current); +// voffset += written; +// offset += written; +// left -= written; +// } +//// flushInternal(graphSession, clusterUID); +// while (left > 0) { +// int length = Math.min(left, (1 << 16) - 1); +// if (DEBUG) +// printlnd("Modify big value ri=" + ri + " vo=" + voffset + " len=" + length); +// int psize = length + 12; +//// setHeaderVectorSize(psize); +// byte[] message = new byte[psize/*+HEADER_SIZE*/]; +//// System.arraycopy(header, 0, message, 0, HEADER_SIZE); +// int to = 0; +// Bytes.write(message, to++, OpEnum.Modify.getOrMask()); +// short index = (short)(ri | (voffset >>> 56)<<14); // add top two bits +// Bytes.writeLE(message, to, index); to += 2; +// Bytes.writeLE7(message, to, voffset & ((1L << 56) - 1)); to += 7; +// Bytes.writeLE(message, to, (short)length); to += 2; +// System.arraycopy(bytes, offset, message, to, length); +//// graphSession.updateCluster(new UpdateClusterFunction(message)); +// voffset += length; +// offset += length; +// left -= length; +// } +// } + + private final int setValueBig(int ri, byte[] bytes, int length_) { + checkBufferSpace(12); + int sum = 0; + int voffset = 0; + int offset = 0; + int left = length_; + while (left > 0) { + int length = Math.min(left, MAX_FIXED_BYTES - 12 - byteIndex); + if (DEBUG) + printlnd("Set big value ri=" + ri + " vo=" + voffset + " len=" + length); + int written = modiValue(ri, voffset, bytes, offset, length); + sum += written; + voffset += written; + offset += written; + left -= written; + checkBufferSpace(12); + } + return sum; + } + + private final int setValueSmall(int ri, byte[] bytes, int length) { + checkBufferSpace(5 + length); + int pos = byteIndex; + int i = length << 14 | ri; + if (length < 32) { + byte op = (byte) (OpEnum.SetShort.getOrMask() | length >>> 2); + addByte(op); + short s = (short) i; + addShort(s); + } else { + addByte(OpEnum.Set.getOrMask()); + addInt(i); + } + System.arraycopy(bytes, 0, this.bytes, byteIndex, length); + byteIndex += length; + int len = byteIndex - pos; + return len; + } + + final void setValue(short index, byte[] bytes) { + setValue(index, bytes, bytes.length); + } + + final public void setValue(short index, byte[] bytes, int length) { + checkInitialization(); + if (ClusterTraitsBase.isIllegalResourceIndex(index)) + throw new RuntimeDatabaseException("Assertion error. 
Illegal resource index=" + index); + if (DEBUG) + printlnd("Set value ri=" + index + + " len=" + length + + " bytes=" + Arrays.toString(Arrays.copyOfRange(bytes, 0, Math.min(10, length)))); + int len; + /* + * The limit for the cluster stream is (1<18)-1 but this avoids the + * conversion to big cluster. + */ + if (length > ClusterTraitsSmall.VALUE_SIZE_MAX) + len = setValueBig(index, bytes, length); + else + len = setValueSmall(index, bytes, length); + if (DEBUG_STAT) { + ++info.nValues; + info.sValues += len + length; + } + } + +// final void setValue(Change c, byte[] bytes, int length) { +// short ri = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(c.key0); +// setValue(ri, bytes, length); +// c.initValue(); +// } + +// final void modiValue(Change c, long voffset, int length, byte[] bytes, int offset) { +// checkInitialization(); +// int ri = ClusterTraitsBase.getResourceIndexFromResourceKeyNoThrow(c.key0); +// if (DEBUG) +// printlnd("Modify value ri=" + ri +// + " voff=" + voffset +// + " vlen=" + length +// + " blen=" + bytes.length +// + " boff=" + offset +// + " bytes=" + Arrays.toString(Arrays.copyOfRange(bytes, 0, Math.min(10, bytes.length)))); +// modiValueBig(ri, voffset, length, bytes, offset); +// c.init(); +// if (DEBUG_STAT) { +// ++info.nValues; +// info.sValues += length; +// } +// } + final void setImmutable(boolean immutable) { + checkInitialization(); +// clusterChange2.setImmutable(immutable); + } + final void undoValueEx(int resourceIndex) { + checkInitialization(); +// clusterChange2.undoValueEx(resourceIndex); + } + final void setDeleted(boolean deleted) { + checkInitialization(); +// clusterChange2.setDeleted(deleted); + } + final void corrupt() { + checkInitialization(); + addByte((byte)0); + } + + public byte[] getBytes() { + byte[] copy = new byte[byteIndex]; + System.arraycopy(bytes, 0, copy, 0, byteIndex); + return copy; + } + + /** + * @param graphSession + * @param clusterId + * @return true if actually flushed something + */ + final boolean flush(/*GraphSession graphSession,*/ ClusterUID clusterUID) { + throw new UnsupportedOperationException(); +// if (byteIndex > 0) { +// if(DebugPolicy.REPORT_CLUSTER_STREAM) +// System.err.println("Flush cluster change set stream " + this); +// setHeaderVectorSize(byteIndex); +// byte[] copy = new byte[byteIndex + HEADER_SIZE]; +// System.arraycopy(header, 0, copy, 0, HEADER_SIZE); +// System.arraycopy(bytes, 0, copy, HEADER_SIZE, byteIndex); +// UpdateClusterFunction updateClusterFunction = new UpdateClusterFunction(copy); +// if (DEBUG_CCS) { +// for (DebugStm stm : debugStms) +// printlnd(stm.toString2()); +// debugStms.clear(); +// } +// if (DEBUG_STAT) { +// info.tot = updateClusterFunction.operation.length; +// printlnd("ReallyFlush: " + info.toString()); +// sum.add(info); +// printlnd("ReallyFlush sum: " + sum.toString()); +// } +// // long start = System.nanoTime(); +// graphSession.updateCluster(updateClusterFunction); +// // long duration = System.nanoTime() - start; +// // duration2 += duration; +// // System.err.println("updateCluster " + 1e-9*duration); +// // System.err.println("updateCluster total " + 1e-9*duration2); +// clear(); +// clusterChange2.flush(graphSession); +// return true; +// } else if (clusterChange2.isDirty()) { +// clusterChange2.flush(graphSession); +// clear(); +// return true; +// } else if (flushed) { +// flushed = false; +// return true; +// } else { +// return true; +// } + } + + final void flushInternal(ClusterUID clusterUID) { + throw new 
UnsupportedOperationException(); +// flush(graphSession, clusterUID); +// flushed = true; + } + + final class ForeignTable { + private final TLongIntHashMap table = new TLongIntHashMap(); + + private long createKey(short index, long cluster) { + assert (cluster <= (1L << 48) - 1); + return (cluster << 14) | index; + } + + public int get(short index, long cluster) { + int value = table.get(createKey(index, cluster)); + if (DEBUG) + printlnd("ForeignTable get c=" + clusterUID + " i=" + + (value - 1) + " r=" + index + " rc=" + cluster); + return value; + } + + public int put(short index, long cluster, int value) { + if (DEBUG) + printlnd("ForeignTable put c=" + clusterUID + " i=" + + (value - 1) + " r=" + index + " rc=" + cluster); + return table.put(createKey(index, cluster), value); + } + + public int size() { + return table.size(); + } + + public void clear() { + table.clear(); + } + } + + @Override + public int hashCode() { + return 31*clusterUID.hashCode(); + } + + @Override + public boolean equals(Object object) { + if (this == object) + return true; + else if (object == null) + return false; + else if (!(object instanceof ClusterChange)) + return false; + ClusterChange r = (ClusterChange)object; + return r.clusterUID.equals(clusterUID); + } + + public void flush() { + + if(byteIndex > 0) { + + final ClusterUID cuid = clusterUID; + + byte[] block = getBytes(); + byte[] raw = new byte[block.length + 28]; + Bytes.writeLE(raw, 0, 1); + System.arraycopy(cuid.asBytes(), 0, raw, 4, 16); + Bytes.writeLE(raw, 20, block.length); + System.arraycopy(block, 0, raw, 24, block.length); + Bytes.writeLE(raw, 24+block.length, 0); + + ByteBuffer rawBB = ByteBuffer.wrap(raw); + ByteBuffer outputBB = ByteBuffer.allocate(raw.length + raw.length/8); + //outputBB.order(ByteOrder.LITTLE_ENDIAN); + int compressedSize = Compressions.get(Compressions.LZ4).compressBuffer(rawBB, 0, raw.length, outputBB, 0); + + byte[] data_ = null; + if(compressedSize < raw.length) { + data_ = new byte[compressedSize]; + outputBB.get(data_,0,compressedSize); + } else { + data_ = raw; + } + + byte[] data = new byte[data_.length+24]; + Bytes.writeLE(data, 0, 0); + Bytes.writeLE(data, 4, 0); + Bytes.writeLE(data, 8, raw.length); + Bytes.writeLE(data, 12, raw.length); + Bytes.writeLE(data, 16, data_.length); + System.arraycopy(data_, 0, data, 20, data_.length); + Bytes.writeLE(data, 20+data_.length, 0); + + stream.add(Pair.make(clusterUID, data)); + clear(); + initBuffer(); + + } + } + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterChange2.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterChange2.java new file mode 100644 index 000000000..472b4d7b7 --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterChange2.java @@ -0,0 +1,70 @@ +package org.simantics.acorn.internal; + + +public class ClusterChange2 { + public static final int VERSION = 2; + public static final byte SET_IMMUTABLE_OPERATION = 1; // + public static final byte UNDO_VALUE_OPERATION = 2; // + private static final int INCREMENT = 1<<10; +// private boolean dirty = false; +// private byte[] bytes; +// private int byteIndex; +// private ClusterUID clusterUID; +// private ClusterImpl cluster; +// ClusterChange2(ClusterUID clusterUID, ClusterImpl cluster) { +// this.clusterUID = clusterUID; +// this.cluster = cluster; +// init(); +// } +// void init() { +//// System.err.println("clusterChange2 dirty " + cluster.clusterId); +// dirty = false; +// bytes = new byte[INCREMENT]; +// 
byteIndex = 0; +// addInt(0); // Size of byte vector. Set by flush. +// addInt(VERSION); +// byteIndex = clusterUID.toByte(bytes, 8); +// } +// boolean isDirty() { +// return dirty; +// } +// void flush(GraphSession graphSession) { +//// System.err.println("flush2 clusterChange2 " + dirty + this); +// if (!dirty) +// return; +// Bytes.writeLE(bytes, 0, byteIndex - 4); +// byte[] ops = Arrays.copyOf(bytes, byteIndex); +//// System.err.println("flush2 clusterChange2 " + cluster.clusterId + " " + ops.length + " bytes."); +// graphSession.updateCluster(new UpdateClusterFunction(ops)); +// init(); +// } +// void setImmutable(boolean immutable) { +// dirty = true; +// addByte(SET_IMMUTABLE_OPERATION); +// addByte((byte)(immutable ? -1 : 0)); +// } +// void undoValueEx(int resourceIndex) { +// dirty = true; +// addByte(UNDO_VALUE_OPERATION); +// addInt(resourceIndex); +// } +// private final void checkSpace(int len) { +// if (bytes.length - byteIndex > len) +// return; +// bytes = Arrays.copyOf(bytes, bytes.length + len + INCREMENT); +// } +// private final void addByte(byte value) { +// checkSpace(1); +// bytes[byteIndex++] = value; +// } +// private final void addInt(int value) { +// checkSpace(4); +// Bytes.writeLE(bytes, byteIndex, value); +// byteIndex += 4; +// } +//// private void addLong(long value) { +//// checkSpace(8); +//// Bytes.writeLE(bytes, byteIndex, value); +//// byteIndex += 8; +//// } +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterStream.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterStream.java new file mode 100644 index 000000000..2b1ae1979 --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterStream.java @@ -0,0 +1,437 @@ +/******************************************************************************* + * Copyright (c) 2007, 2010 Association for Decentralized Information Management + * in Industry THTH ry. + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * VTT Technical Research Centre of Finland - initial API and implementation + *******************************************************************************/ +package org.simantics.acorn.internal; + +import java.util.ArrayList; + +import org.simantics.db.exception.DatabaseException; +import org.simantics.db.service.ClusterUID; + +final public class ClusterStream { + +// // public static long duration2 = 0; +// + public static final boolean DEBUG = false; + public static final byte NULL_OPERATION = 0; + public static final byte CREATE_OPERATION = 1; + public static final byte SET_OPERATION = 4; + public static final byte MODI_OPERATION = 6; + public static final byte KILL_OPERATION = 7; +// boolean off = false; +// public GraphSession graphSession; +// final SessionImplSocket session; +//// private int flushCount = 0; +// final private boolean alwaysOff; +// private int stamp; +// private int acceptedStamp; +// private boolean dirty = false; +//// final private ArrayList clusterChanges = new ArrayList(); +// +// final ClusterChangeManager changes = new ClusterChangeManager(); +// +//// final TLongObjectHashMap clusterChanges = new TLongObjectHashMap(); +// +// // private final Change lastChange = new Change(); +// ClusterStream(SessionImplSocket session, GraphSession graphSession, +// boolean alwaysOff) { +// this.session = session; +// this.graphSession = graphSession; +// this.alwaysOff = alwaysOff; +// } +// +// +// boolean isDirty() { +// return dirty; +// } +// +// void markDirty() { +// dirty = true; +// } +// +// void setOff(boolean value) { +// if (alwaysOff) { +// off = true; +// } else { +// off = value; +// } +// } +// +// boolean getOff() { +// return off; +// } +// +// void createResource(ClusterChange cc, short operationIndex, ClusterUID clusterUID) { +// if (off) +// return; +// assert (null != cc); +// assert (0 != operationIndex); +// assert (!ClusterUID.Null.equals(clusterUID)); +// if (DEBUG) +// System.out.println("DEBUG: Created resource index=" + operationIndex + " cluster=" + clusterUID); +// cc.createResource(operationIndex); +// } +// +// final void addStatementIndex(Change change, int key, ClusterUID clusterUID, byte op) { +// if (off) +// return; +// assert (key > 0); +// assert (null != change); +// assert (!ClusterUID.Null.equals(clusterUID)); +// change.addStatementIndex(key, clusterUID, op); +// } +// +// void addStatement(ClusterChange cc, Change change) { +// if (off) +// return; +// assert (null != cc); +// assert (null != change); +// cc.addChange(change); +// } +// +// void cancelStatement(Change change) { +// if (off) +// return; +// assert (null != change); +// change.init(); +// } +// +// void removeStatement(ClusterChange cc, Change change, long clusterId) { +// if (off) +// return; +// assert (null != cc); +// assert (null != change); +// cc.addChange(change); +// } +// +// void cancelValue(Change change) { +// if (off) +// return; +// assert (null != change); +// change.init(); +// } +// +// void removeValue(ClusterChange cc, Change change, long clusterId) { +// if (off) +// return; +// // ClusterChange cc = getClusterChange(clusterId); +// assert (null != cc); +// assert (null != change); +// cc.addChange(change); +// } +// +// void setValue(ClusterChange cc, Change change, long clusterId, byte[] bytes, 
int length) { +// if (off) +// return; +// assert (null != cc); +// assert (null != change); +// // ClusterChange cc = getClusterChange(clusterId); +// cc.setValue(change, bytes, length); +// } +// +// void modiValue(ClusterChange cc, Change change, long clusterId, +// long voffset, int length, byte[] bytes, int offset) { +// assert (null != cc); +// assert (null != change); +// cc.modiValue(change, voffset, length, bytes, offset); +// } +// +// void undoValueEx(ClusterChange cc, Change change, int resourceIndex) { +// cc.undoValueEx(resourceIndex); +// } +// void setImmutable(ClusterChange cc, Change change, long clusterId, boolean immutable) { +// if (off) +// return; +// cc.setImmutable(immutable); +// } +// public void corruptCluster(ClusterChange cc, long clusterId) +// throws DatabaseException { +// if (off) +// return; +// if (DEBUG) +// System.out.println("ClusterStream.corrupt cid=" + clusterId + "."); +// assert (null != cc); +// cc.corrupt(); +// } +// +// int getStamp() { +// return stamp; +// } +// +// void flush() { +// if (off) +// return; +//// flushCount++; +// return; +// } +// +// void flush(long clusterId) { +// if (off) +// return; +// ClusterUID clusterUID = session.clusterTable.clusterIds.getClusterUID(clusterId); +// ArrayList ccs = new ArrayList(); +// for(ClusterChange cc : changes.get()) { +// if(cc.clusterUID.equals(clusterUID)) { +// if (cc.flush(graphSession, cc.clusterUID)) { +// ccs.add(cc); +// if (stamp == acceptedStamp) +// ++stamp; +// } else { +//// System.err.println("kasdi"); +// } +// } +// } +// changes.remove(ccs); +// } +// +// /** +// * @return true if the stream has accepted all changes +// */ +// public boolean reallyFlush() { +// // Last possibility to mark clusters immutable before write only clusters are gone +// session.handleCreatedClusters(); +// // These shall be requested from server +// session.clusterTable.removeWriteOnlyClusters(); +// if (!off && changes.size() > 0) { +// for(ClusterChange cc : changes.get()) { +// if (cc.flush(graphSession, cc.clusterUID)) +// if (stamp == acceptedStamp) +// ++stamp; +// } +// changes.clear(); +// } +// dirty = false; +// return hasAcceptedAllChanges(); +// } +// +// /** +// * Clear all changes and set stream status to empty. 
+// */ +// public void clear() { +// changes.clear(); +// acceptedStamp = stamp; +// dirty = false; +// } +// +// private boolean hasAcceptedAllChanges() { +// return stamp == acceptedStamp; +// } +// +// void accept() { +// acceptedStamp = stamp; +// } +// +// + + static class DebugInfo { + long nStms; + long nLocal; + long nPartly; + long nForeign; + long nValues; + long sValues; + long sForeign; + long tot; + + void clear() { + nStms = 0; + nLocal = 0; + nPartly = 0; + nForeign = 0; + sForeign = 0; + nValues = 0; + sValues = 0; + tot = 0; + } + + void add(DebugInfo di) { + nStms += di.nStms; + nLocal += di.nLocal; + nPartly += di.nPartly; + nForeign += di.nForeign; + sForeign += di.sForeign; + nValues += di.nValues; + sValues += di.sValues; + tot += di.tot; + } + + @Override + public String toString() { + return "val=" + nValues + " stm=" + nStms + " loc=" + nLocal + + " par=" + nPartly + " ful=" + nForeign + " for=" + + sForeign + " vat=" + sValues + " tot=" + tot; + } + } + + enum StmEnum { + Add(0, (byte) 0), Remove(1, (byte) 0x20); + StmEnum(int ordinal, byte mask) { + this.ordinal = ordinal; + this.mask = mask; + } + + public int ordinal; + private byte mask; + + byte getOrMask() { + return mask; + } + } + + final static class Data { + + final byte mask; // or mask for operation code (don't care bits are zero) + final short bits; // how many bits are reserved for resource index (0,2,4,6) + final int bytes; + + Data(int mask, int bits, ClusterEnum a, ClusterEnum b) { + this.mask = (byte) (mask << bits); + this.bits = (short) bits; + this.bytes = bytes(bits, a, b); + } + + private static int bytes(int bits, ClusterEnum a, ClusterEnum b) { + int left = 6 - bits; + if (a != ClusterEnum.ForeignShort) { + left += 6; + } + if (b != ClusterEnum.ForeignShort) { + left += 6; + } + int bytes = left >>> 3; + if ((left & 7) != 0) + bytes++; + return bytes; + } + + } + + enum ClusterEnum { + Local(0), ForeignShort(1), ForeignLong(2); + public int ordinal; + + ClusterEnum(int ordinal) { + this.ordinal = ordinal; + } + + static Data[][][] maps = new Data[2][3][3]; + static { + // mask: 00000000 + // op: 000000|r12-13 + // p1 + // o1 + // r0-7 + // o2 | p2 | r8-11 + maps[StmEnum.Add.ordinal][Local.ordinal][Local.ordinal] = new Data( + 0, 2, Local, Local); + // mask: 11000000 + // op: 1100 | r10-13 + // p1 + // o for index + // r0-7 + // p2 | ri 8-9 + maps[StmEnum.Add.ordinal][Local.ordinal][ForeignShort.ordinal] = new Data( + 12, 4, Local, ForeignShort); + // mask: 00001000 + // op: 000010 | r12-13 + maps[StmEnum.Add.ordinal][Local.ordinal][ForeignLong.ordinal] = new Data( + 2, 2, Local, ForeignLong); + // mask: 11010000 + // op: 1101 | r10-13 + maps[StmEnum.Add.ordinal][ForeignShort.ordinal][Local.ordinal] = new Data( + 13, 4, ForeignShort, Local); + + // mask: 01000000 + // op: 01 | r8-13 + // p for index + // o for index + // r0-7 + maps[StmEnum.Add.ordinal][ForeignShort.ordinal][ForeignShort.ordinal] = new Data( + 1, 6, ForeignShort, ForeignShort); + // mask: 11100000 + // op: 1110 | r10-13 + maps[StmEnum.Add.ordinal][ForeignShort.ordinal][ForeignLong.ordinal] = new Data( + 14, 4, ForeignShort, ForeignLong); + // mask: 00010000 + // op: 000100 | r12-13 + maps[StmEnum.Add.ordinal][ForeignLong.ordinal][Local.ordinal] = new Data( + 4, 2, ForeignLong, Local); + // mask: 11110000 + // op: 1111 | r10-13 + maps[StmEnum.Add.ordinal][ForeignLong.ordinal][ForeignShort.ordinal] = new Data( + 15, 4, ForeignLong, ForeignShort); + // mask: 00011000 + // op: 000110 | r12-13 + 
maps[StmEnum.Add.ordinal][ForeignLong.ordinal][ForeignLong.ordinal] = new Data( + 6, 2, ForeignLong, ForeignLong); + + // mask: 00000100 + // op: 000001 | r12-13 + maps[StmEnum.Remove.ordinal][Local.ordinal][Local.ordinal] = new Data( + 1, 2, Local, Local); + // mask: 01100001 + // op: 01100001 + // p1 + // o for index + // r0-7 + // p2 | ri 8-13 + maps[StmEnum.Remove.ordinal][Local.ordinal][ForeignShort.ordinal] = new Data( + 49, 0, Local, ForeignShort); + // mask: 00001100 + // op: 000011 | r12-13 + maps[StmEnum.Remove.ordinal][Local.ordinal][ForeignLong.ordinal] = new Data( + 3, 2, Local, ForeignLong); + // mask: 00100000 + // op: 0010 | r10-13 + maps[StmEnum.Remove.ordinal][ForeignShort.ordinal][Local.ordinal] = new Data( + 2, 4, ForeignShort, Local); + // mask: 10000000 + // op: 10 | r8-13 + maps[StmEnum.Remove.ordinal][ForeignShort.ordinal][ForeignShort.ordinal] = new Data( + 2, 6, ForeignShort, ForeignShort); + // mask: 00110010 + // op: 00110010 + maps[StmEnum.Remove.ordinal][ForeignShort.ordinal][ForeignLong.ordinal] = new Data( + 50, 0, ForeignShort, ForeignLong); + // mask: 00010100 + // op: 000101 | r12-13 + maps[StmEnum.Remove.ordinal][ForeignLong.ordinal][Local.ordinal] = new Data( + 5, 2, ForeignLong, Local); + // mask: 00110011 + // op: 00110011 + maps[StmEnum.Remove.ordinal][ForeignLong.ordinal][ForeignShort.ordinal] = new Data( + 51, 0, ForeignLong, ForeignShort); + // mask: 00011100 + // op: 000111 | r12-13 + maps[StmEnum.Remove.ordinal][ForeignLong.ordinal][ForeignLong.ordinal] = new Data( + 7, 2, ForeignLong, ForeignLong); + } + + static Data getData(StmEnum s, ClusterEnum a, ClusterEnum b) { + return maps[s.ordinal][a.ordinal][b.ordinal]; + // return maps.get(s).get(a).get(b); + } + } + + enum OpEnum { + Create((byte) 52), Set((byte) 53), SetShort((byte) 56), Delete( + (byte) 54), Modify((byte) 55); + OpEnum(byte mask) { + this.mask = mask; + } + + public byte getOrMask() { + return mask; + } + + private byte mask; + } +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterSupport2.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterSupport2.java new file mode 100644 index 000000000..7cd007ace --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterSupport2.java @@ -0,0 +1,340 @@ +package org.simantics.acorn.internal; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock; +import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock; + +import org.simantics.acorn.ClusterManager; +import org.simantics.db.Session; +import org.simantics.db.exception.DatabaseException; +import org.simantics.db.impl.ClusterBase; +import org.simantics.db.impl.ClusterI; +import org.simantics.db.impl.ClusterSupport; +import org.simantics.db.impl.IClusterTable; +import org.simantics.db.service.ClusterUID; + +import gnu.trove.map.hash.TIntObjectHashMap; + +public class ClusterSupport2 implements ClusterSupport, IClusterTable { + + final private static boolean DEBUG = false; + + public ClusterManager impl; + + public TIntObjectHashMap uidCache = new TIntObjectHashMap(); + + public ClusterSupport2(ClusterManager impl) { + this.impl = impl; + } + + @Override + public int createClusterKeyByClusterUID(ClusterUID clusterUID, long clusterId) { + throw new UnsupportedOperationException(); + } + + @Override + public ClusterBase 
getClusterByClusterUIDOrMake(ClusterUID clusterUID) { + try { + return impl.getClusterByClusterUIDOrMake(clusterUID); + } catch (DatabaseException e) { + e.printStackTrace(); + return null; + } + } + + @Override + public ClusterBase getClusterByClusterId(long clusterId) { + throw new UnsupportedOperationException(); + } + + @Override + public ClusterBase getClusterByClusterKey(int clusterKey) { + throw new UnsupportedOperationException(); + } + + ReentrantReadWriteLock uidLock = new ReentrantReadWriteLock(); + ReadLock uidRead = uidLock.readLock(); + WriteLock uidWrite = uidLock.writeLock(); + + @Override + public ClusterUID getClusterUIDByResourceKey(int resourceKey) throws DatabaseException { + + ClusterUID cuid; + + uidRead.lock(); + cuid = uidCache.get(resourceKey >> 12); + uidRead.unlock(); + if(cuid != null) return cuid; + uidWrite.lock(); + cuid = uidCache.get(resourceKey >> 12); + if(cuid == null) { + cuid = impl.getClusterUIDByResourceKeyWithoutMutex(resourceKey); + uidCache.put(resourceKey >> 12, cuid); + } + uidWrite.unlock(); + + return cuid; + + } + + @Override + public int getClusterKeyByClusterUIDOrMake(ClusterUID clusterUID) { + return impl.getClusterKeyByClusterUIDOrMakeWithoutMutex(clusterUID); + } + + @Override + public int getClusterKeyByClusterUIDOrMake(long id1, long id2) { + throw new UnsupportedOperationException(); + } + + @Override + public ClusterBase getClusterByResourceKey(int resourceKey) { + throw new UnsupportedOperationException(); +// return impl.getClusterByResourceKey(resourceKey); + } + + @Override + public long getClusterIdOrCreate(ClusterUID clusterUID) { + return impl.getClusterIdOrCreate(clusterUID); + } + + @Override + public void addStatement(Object cluster) { + // nop + } + + @Override + public void cancelStatement(Object cluster) { + // nop + } + + @Override + public void removeStatement(Object cluster) { + // nop + } + + @Override + public void removeValue(Object cluster) { + // nop + } + + @Override + public void setImmutable(Object cluster, boolean immutable) { + // nop + } + + @Override + public void setDeleted(Object cluster, boolean deleted) { + // TODO Auto-generated method stub + + } + + + + @Override + public void cancelValue(Object cluster) { + throw new UnsupportedOperationException(); + } + + @Override + public void setValue(Object cluster, long clusterId, byte[] bytes, + int length) { + // nop + } + + @Override + public void modiValue(Object _cluster, long clusterId, long voffset, + int length, byte[] bytes, int offset) { + // nop + } + + @Override + public void createResource(Object cluster, short resourceIndex, + long clusterId) { + // No op + } + + @Override + public void addStatementIndex(Object cluster, int resourceKey, + ClusterUID clusterUID, byte op) { + // No op + } + + @Override + public void setStreamOff(boolean setOff) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean getStreamOff() { + return true; + } + + + private static class ResourceSegment { + public long valueSize; + + public byte[] bytes; + + ResourceSegment(long valueSize, byte[] bytes) { + this.valueSize = valueSize; + this.bytes = bytes; + } + } + + public ResourceSegment getResourceSegment(int resourceIndex, ClusterUID clusterUID, long offset, short size) + throws DatabaseException { + if (DEBUG) + System.out.println("DEBUG: getResourceSegment ri=" + resourceIndex + " cid=" + clusterUID + " offset=" + offset + " size=" + size); + + org.simantics.db.Database.Session.ResourceSegment t = 
impl.getResourceSegment(clusterUID.asBytes(), resourceIndex, offset, size); + return new ResourceSegment(t.getValueSize(), t.getSegment()); + + } + + protected byte[] getValueBig(ClusterBase cluster, int resourceIndex, int offset, int length) throws DatabaseException { + + assert(offset == 0); + assert(length == 0); + + ClusterUID clusterUID = cluster.clusterUID; + + return impl.getResourceFile(clusterUID.asBytes(), resourceIndex); + + } + + protected InputStream getValueStreamBig(ClusterBase cluster, final int resourceIndex, int offset, int length) throws DatabaseException { + + final ClusterUID clusterUID = cluster.clusterUID; + + if (DEBUG) + System.out.println("DEBUG: getResourceFile ri=" + resourceIndex + " cid=" + clusterUID + " off=" + offset + " len=" + length); + final int IMAX = 0xFFFF; + short slen = (short)Math.min(length != 0 ? length : IMAX, IMAX); + final ResourceSegment s = getResourceSegment(resourceIndex, clusterUID, offset, slen); + if (s.valueSize < 0) + throw new DatabaseException("Failed to get value for resource index=" + resourceIndex + + " cluster=" + clusterUID + " off=" + offset + " len=" + length + " (1)."); + int ilen = (int)slen & 0xFFFF; + assert(s.bytes.length <= ilen); + if (0 == length) { + if (s.valueSize > Integer.MAX_VALUE) + throw new DatabaseException("Failed to get value for resource index=" + resourceIndex + + " cluster=" + clusterUID + " off=" + offset + " len=" + length + + ". Value size=" + s.valueSize + " (2)."); + length = (int)s.valueSize; + } + long rSize = s.valueSize - offset; + if (rSize < length) + throw new DatabaseException("Failed to get value for resource index=" + resourceIndex + + " cluster=" + clusterUID + " off=" + offset + " len=" + length + + ". Value size=" + s.valueSize + " (3)."); + else if (length <= IMAX) + return new ByteArrayInputStream(s.bytes); + + final int finalLength = length; + + return new InputStream() { + + int left = finalLength; + long valueOffset = 0; + int offset = 0; + ResourceSegment _s = s; + + @Override + public int read() throws IOException { + + if(left <= 0) throw new IllegalStateException(); + + if(offset == _s.bytes.length) { + short slen = (short)Math.min(left, IMAX); + valueOffset += _s.bytes.length; + try { + _s = getResourceSegment(resourceIndex, clusterUID, valueOffset, slen); + } catch (DatabaseException e) { + throw new IOException(e); + } + offset = 0; + } + + left--; + int result = _s.bytes[offset++]; + if(result < 0) result += 256; + return result; + + } + + }; + + } + + @Override + public InputStream getValueStreamEx(int resourceIndex, long clusterId) + throws DatabaseException { + ClusterBase cluster = impl.getClusterByClusterUIDOrMakeProxy(ClusterUID.make(0, clusterId)); + return getValueStreamBig(cluster, resourceIndex, 0, 0); + } + + @Override + public byte[] getValueEx(int resourceIndex, long clusterId) + throws DatabaseException { + ClusterBase cluster = impl.getClusterByClusterUIDOrMakeProxy(ClusterUID.make(0, clusterId)); + return getValueBig(cluster, resourceIndex, 0, 0); + } + + @Override + public byte[] getValueEx(int resourceIndex, long clusterId, long voffset, + int length) throws DatabaseException { + throw new UnsupportedOperationException(); + } + + @Override + public long getValueSizeEx(int resourceIndex, long clusterId) + throws DatabaseException { + throw new UnsupportedOperationException(); + } + + @Override + public int wait4RequestsLess(int limit) throws DatabaseException { + throw new UnsupportedOperationException(); + } + + @Override + public Session getSession() { 
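+        // Session lookup is intentionally unsupported here; callers are
+        // assumed to tolerate a null session from this support class.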
+ return null; + } + + @Override + public IClusterTable getClusterTable() { + return this; + } + + @Override + public T getClusterByClusterUIDOrMakeProxy(ClusterUID clusterUID) { + try { + return (T)impl.getClusterByClusterUIDOrMakeProxy(clusterUID); + } catch (DatabaseException e) { + e.printStackTrace(); + return null; + } + } + + @Override + public T getClusterProxyByResourceKey(int resourceKey) { + try { + return impl.getClusterProxyByResourceKey(resourceKey); + } catch (DatabaseException e) { + e.printStackTrace(); + return null; + } + } + + @Override + public int getClusterKeyByUID(long id1, long id2) throws DatabaseException { + return impl.getClusterKeyByUID(id1, id2); + } + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessor.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessor.java new file mode 100644 index 000000000..0044d72d8 --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessor.java @@ -0,0 +1,86 @@ +package org.simantics.acorn.internal; + +import org.simantics.acorn.ClusterManager; +import org.simantics.acorn.cluster.ClusterImpl; +import org.simantics.acorn.lru.CachingClusterSupport; +import org.simantics.acorn.lru.ClusterUpdateOperation; +import org.simantics.db.exception.DatabaseException; +import org.simantics.db.impl.ClusterSupport; +import org.simantics.db.service.ClusterUID; + +public class ClusterUpdateProcessor extends ClusterUpdateProcessorBase { + + final ClusterSupport support; + final ClusterUpdateOperation info; + private ClusterImpl cluster; + + public ClusterUpdateProcessor(ClusterManager client, ClusterSupport support, byte[] operations, ClusterUpdateOperation info) throws DatabaseException { + super(client, operations); + this.support = support; + this.info = info; + } + + @Override + void create() throws DatabaseException { + cluster.createResource(support); + } + + @Override + void delete(int ri) throws DatabaseException { + + boolean oldValueEx = cluster.isValueEx(ri); + byte[] old = cluster.getValue(ri, support); + if(old != null) cluster.removeValue(ri, support); + info.ccs.oldValueEx.add(oldValueEx ? (byte)1 : 0); + info.ccs.oldValues.add(old); + + } + + @Override + void modify(int resourceKey, long offset, int size, byte[] bytes, int pos) + throws DatabaseException { + + cluster = (ClusterImpl)cluster.modiValueEx(resourceKey, offset, size, bytes, pos, support); + manager.modiFileEx(cluster.getClusterUID(), resourceKey, offset, size, bytes, pos, support); + + } + + @Override + void set(int resourceKey, byte[] bytes, int length) + throws DatabaseException { + + byte[] old = cluster.getValue(resourceKey, support); + boolean oldValueEx = cluster.isValueEx(resourceKey); + cluster = (ClusterImpl)cluster.setValue(resourceKey, valueBuffer, length, support); + info.ccs.oldValueEx.add(oldValueEx ? (byte)1 : 0); + info.ccs.oldValues.add(old); + + } + + @Override + void claim(int resourceKey, int predicateKey, int objectKey, ClusterUID puid, ClusterUID ouid) + throws DatabaseException { + + ClusterImpl c = (ClusterImpl)cluster.addRelation(resourceKey, puid, predicateKey, ouid, objectKey, support); + if(c != null) cluster = c; + info.ccs.statementMask.add(c != null ? 
(byte)1 : 0); + + } + + @Override + void deny(int resourceKey, int predicateKey, int objectKey, ClusterUID puid, ClusterUID ouid) + throws DatabaseException { + + boolean modified = cluster.removeRelation(resourceKey, predicateKey, objectKey, support); + info.ccs.statementMask.add(modified ? (byte)1 : 0); + + } + + public ClusterImpl process(ClusterImpl cluster) { + this.cluster = cluster; + process(); + info.finish(); + return this.cluster; + } + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessor2.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessor2.java new file mode 100644 index 000000000..7ce8673cb --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessor2.java @@ -0,0 +1,30 @@ +package org.simantics.acorn.internal; + +import org.simantics.acorn.cluster.ClusterImpl; +import org.simantics.acorn.lru.ClusterUpdateOperation; +import org.simantics.db.impl.ClusterSupport; + +public class ClusterUpdateProcessor2 extends ClusterUpdateProcessorBase2 { + + final ClusterSupport support; + final ClusterUpdateOperation info; + private ClusterImpl cluster; + + public ClusterUpdateProcessor2(ClusterSupport support, byte[] operations, ClusterUpdateOperation info) { + super(operations); + this.support = support; + this.info = info; + } + + public void process(ClusterImpl cluster) { + this.cluster = cluster; + process(); + info.finish(); + } + + @Override + void setImmutable(boolean value) { + cluster.setImmutable(value, support); + } + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessorBase.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessorBase.java new file mode 100644 index 000000000..e0e733c1e --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessorBase.java @@ -0,0 +1,475 @@ +package org.simantics.acorn.internal; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; + +import org.simantics.acorn.ClusterManager; +import org.simantics.acorn.internal.ClusterStream.ClusterEnum; +import org.simantics.acorn.internal.ClusterStream.Data; +import org.simantics.acorn.internal.ClusterStream.StmEnum; +import org.simantics.db.exception.DatabaseException; +import org.simantics.db.service.Bytes; +import org.simantics.db.service.ClusterUID; + +abstract public class ClusterUpdateProcessorBase { + + public final static boolean DEBUG = false; + + final protected ClusterManager manager; + final public byte[] bytes; + private int pos = 0; + final private int len; + final private ClusterUID uid; + final private int clusterKey; + final public int version; + + final Map clusterKeyCache = new HashMap(); + + public int getResourceKey(ClusterUID uid, int index) { + Integer match = clusterKeyCache.get(uid); + if(match != null) return match+index; + int key = manager.getResourceKeyWitoutMutex(uid, 0); + clusterKeyCache.put(uid, key); + return key+index; + } + + + public ClusterUpdateProcessorBase(ClusterManager client, byte[] operations) throws DatabaseException { + this.manager = client; + this.bytes = operations; + this.len = Bytes.readLE4(bytes, 0)+4; // whatta? 
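+        // Header layout, as read below: bytes 0..3 = operation-data
+        // length (little-endian, excluding the length field itself),
+        // bytes 4..7 = format version, bytes 8..23 = ClusterUID as two
+        // LE longs; the operation stream itself starts at offset 24.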
+ version = Bytes.readLE4(bytes, 4); + long cuid1 = Bytes.readLE8(bytes, 8); + long cuid2 = Bytes.readLE8(bytes, 16); + uid = ClusterUID.make(cuid1, cuid2); + pos = 24; + client.clusterLRU.acquireMutex(); + try { + clusterKey = client.clusterLRU.getClusterKeyByUID(cuid1, cuid2) << 12; + } catch (Throwable t) { + throw new IllegalStateException(t); + } finally { + client.clusterLRU.releaseMutex(); + } + } + + public ClusterUID getClusterUID() { + return uid; + } + + private void processCreate() { + int r = Bytes.readLE2(bytes, pos); + pos+=2; + if(DEBUG) System.err.println("DEBUG: New ri=" + r + " offset=" + (pos-3-24)); + try { + create(); + } catch (DatabaseException e) { + e.printStackTrace(); + } + + } + + private void processDelete() { + + int ri = Bytes.readLE2(bytes, pos); + pos += 2; + + if(DEBUG) System.err.println("DEBUG: Delete " + ri); + + try { + delete(ri); + } catch (DatabaseException e) { + e.printStackTrace(); + } + + } + + private void processModify(int op) { + + int ri = Bytes.readLE2(bytes, pos); + pos+=2; + long offset = Bytes.readLE7(bytes, pos); + pos+=7; + int size = Bytes.readLE2(bytes, pos); + pos+=2; + + offset += (ri>>14) << 56; + ri = ri & 0x3FFF; + + if(size < 0) + throw new IllegalStateException(); + if(ri < 1) + throw new IllegalStateException(); + if(ri > 4095) + throw new IllegalStateException(); + + if(DEBUG) System.err.println("DEBUG: Modify " + ri + " " + offset + " " + size + " offset=" + (pos-1-24) + " " + Arrays.toString(Arrays.copyOf(valueBuffer,size))); + + try { + modify(clusterKey + ri, offset, size, bytes, pos); + } catch (DatabaseException e) { + e.printStackTrace(); + } + + pos += size; + + } + + private void processSet(int op) { + + int s = Bytes.readLE4(bytes, pos); + int length = (s >> 14); + if(length < 1) + throw new IllegalStateException(); + int r = s & 0x3FFF; + + pos += 4; + System.arraycopy(bytes, pos, valueBuffer, 0, length); + pos += length; + + if(DEBUG) System.err.println("DEBUG: Set " + r + " " + length + " offset=" + (pos-1-24) + " " + Arrays.toString(Arrays.copyOf(valueBuffer,length))); + + try { + set(clusterKey+r, valueBuffer, length); + } catch (DatabaseException e) { + e.printStackTrace(); + } + + } + + byte[] valueBuffer = new byte[65536]; + + private void processSetShort(int op) { + + int s = Bytes.readLE2(bytes, pos); + int length = ((op&7)<<2) + (s >> 14); + if(length < 1) + throw new IllegalStateException(); + if(length > 31) + throw new IllegalStateException(); + int r = s & 0x3FFF; + + if(DEBUG) System.err.println("DEBUG: SetShort " + r + " " + length + " offset=" + (pos-1-24) + " " + Arrays.toString(Arrays.copyOf(valueBuffer,length))); + pos += 2; + + System.arraycopy(bytes, pos, valueBuffer, 0, length); + pos += length; + + try { + set(clusterKey+r, valueBuffer, length); + } catch (DatabaseException e) { + e.printStackTrace(); + } + + } + + private void processStatementResource(ClusterEnum enu, int pOrO) { + if(ClusterEnum.ForeignShort == enu) { + int fs = bytes[pos++]&0xff; + foreignRefs[pOrO] = fs; + } else if(ClusterEnum.Local == enu) { + int lo = bytes[pos++]&0xff; + lows[pOrO] = lo; + } else { + long l1 = Bytes.readLE8(bytes, pos); + pos += 8; + long l2 = Bytes.readLE8(bytes, pos); + pos += 8; + ClusterUID cuid = ClusterUID.make(l1, l2); + foreignClusters[foreignPos] = cuid; + int lo = bytes[pos++]&0xff; + foreignIndices[foreignPos] = lo; + foreignRefs[pOrO] = foreignPos; + foreignPos++; + lows[pOrO] = lo; + } + } + + ClusterUID[] foreignClusters = new ClusterUID[256]; + int[] foreignIndices = new int[256]; + 
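+    // [Sketch] Decoder-side state for foreign references: a ForeignLong
+    // reference carries a full ClusterUID plus index and is recorded in
+    // the arrays above; a subsequent ForeignShort reference is a single
+    // byte indexing the same arrays. The 256-entry capacity matches the
+    // encoder, which flushes once its foreignTable exceeds 252 entries.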
int foreignPos = 0; + int lows[] = new int[2]; + int foreignRefs[] = new int[2]; + + private void processStatement(int op, StmEnum stmEnum, ClusterEnum p, ClusterEnum o) { + + int curPos = pos-1-24; + + processStatementResource(p, 0); + processStatementResource(o, 1); + + int ri = bytes[pos++]&0xff; + int pi = 0; + int oi = 0; + + ClusterUID puid = uid; + ClusterUID ouid = puid; + + if(ClusterEnum.ForeignShort == p && ClusterEnum.ForeignShort == o) { + ri |= (op&0x3F) << 8; + } else { + Data data = ClusterEnum.getData(stmEnum, p, o); + // data.left is the amount of bytes in last two bytes + if(data.bytes == 0) { + ri = ri | ((op&0x3F)<<8); + } else { + int extra = 0; + int opBits = data.bits; + int extraBits = 6-opBits; + if(data.bytes == 1) { + extra = bytes[pos++]&0xff; + int high = extra >> extraBits; + if(ClusterEnum.ForeignShort == p) { + oi = lows[1] + (high<<8); + } else { + pi = lows[0] + (high<<8); + } + } else { + extra = Bytes.readLE2(bytes, pos); + pos += 2; + int high1 = (extra >> extraBits)&((1<<6)-1); + int high2 = (extra >> (extraBits+6))&((1<<6)-1); + if(ClusterEnum.ForeignShort == p) { + oi = lows[1] + (high1<<8); + } else { + pi = lows[0] + (high1<<8); + oi = lows[1] + (high2<<8); + } + } + ri = ri | ((extra&((1< 4095) + throw new IllegalStateException(); + if(pi > 4095) + throw new IllegalStateException(); + if(oi > 4095) + throw new IllegalStateException(); + + if(StmEnum.Add == stmEnum) { + + if(DEBUG) + System.err.println("DEBUG: ClusterChange " + uid + ": Add ri=" + ri + " pi=" + pi + " oi=" + oi + " pc=" + puid + " oc=" + ouid + " offset=" + curPos + " " + p.ordinal + " " + o.ordinal); + + int predicateKey = getResourceKey(puid, pi); + int objectKey = getResourceKey(ouid, oi); + try { + claim(clusterKey+ri, predicateKey, objectKey, puid, ouid); + } catch (DatabaseException e) { + e.printStackTrace(); + } + + } else { + + if(DEBUG) + System.err.println("DEBUG: ClusterChange " + uid + ": Rem ri=" + ri + " pi=" + pi + " oi=" + oi + " pc=" + puid + " oc=" + ouid + " offset=" + curPos + " " + p.ordinal + " " + o.ordinal); + + int predicateKey = getResourceKey(puid, pi); + int objectKey = getResourceKey(ouid, oi); + try { + deny(clusterKey+ri, predicateKey, objectKey, puid, ouid); + } catch (DatabaseException e) { + e.printStackTrace(); + } + + } + + } + + public void process() { + + foreignPos = 0; + + if(DEBUG) System.err.println("DEBUG: process " + uid + " " + len); + + // op resolution for statement operation: + + // 2 first bits + // op: 01 | r8-13 + // op: 10 | r8-13 + + // 3 first bits (000) + // op: 000000 | r12-13 + // op: 000001 | r12-13 + // op: 000010 | r12-13 + // op: 000011 | r12-13 + // op: 000100 | r12-13 + // op: 000101 | r12-13 + // op: 000110 | r12-13 + // op: 000111 | r12-13 + + // 4 first bits + // op: 1100 | r10-13 + // op: 1101 | r10-13 + // op: 1110 | r10-13 + // op: 1111 | r10-13 + // op: 0010 | r10-13 + + // 6 bits + // op: 00110001 = 49 + // op: 00110010 = 50 + // op: 00110011 = 51 + // other: 0011xxxx + + while(pos < len) { + + int op = bytes[pos++]&0xff; + + // common prefix: 0011 + switch(op) { + + case 49: + processStatement(op, StmEnum.Remove, ClusterEnum.Local, ClusterEnum.ForeignShort); + break; + case 50: + processStatement(op, StmEnum.Remove, ClusterEnum.ForeignShort, ClusterEnum.ForeignLong); + break; + case 51: + processStatement(op, StmEnum.Remove, ClusterEnum.ForeignLong, ClusterEnum.ForeignShort); + break; + // 52 = 32+16+4 = 00110100 + case 52: + processCreate(); + break; + // 53 = 32+16+4+1 = 00110101 + case 53: + 
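+                // Set (op 53): a 4-byte LE header packs the value length
+                // (upper 18 bits) and the resource index (low 14 bits),
+                // followed by the value bytes themselves.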
processSet(op); + break; + // 54 = 32+16+4+2 = 00110110 + case 54: + processDelete(); + break; + // 55 = 32+16+4+2+1 = 00110111 + case 55: + processModify(op); + break; + default: + + int bits6 = ((int)op)&0xC0; + switch(bits6) { + + case 0x40: + processStatement(op, StmEnum.Add, ClusterEnum.ForeignShort, ClusterEnum.ForeignShort); + break; + case 0x80: + processStatement(op, StmEnum.Remove, ClusterEnum.ForeignShort, ClusterEnum.ForeignShort); + break; + default: + + int bits5 = ((int)op)&0xE0; + if(bits5 == 0) { + + int bits2 = (((int)op)&0xFC) >> 2; + + // 3 top bits are 0 + // 6 bits of op + + switch(bits2) { + + case 0: + processStatement(op, StmEnum.Add, ClusterEnum.Local, ClusterEnum.Local); + break; + case 1: + processStatement(op, StmEnum.Remove, ClusterEnum.Local, ClusterEnum.Local); + break; + case 2: + processStatement(op, StmEnum.Add, ClusterEnum.Local, ClusterEnum.ForeignLong); + break; + case 3: + processStatement(op, StmEnum.Remove, ClusterEnum.Local, ClusterEnum.ForeignLong); + break; + case 4: + processStatement(op, StmEnum.Add, ClusterEnum.ForeignLong, ClusterEnum.Local); + break; + case 5: + processStatement(op, StmEnum.Remove, ClusterEnum.ForeignLong, ClusterEnum.Local); + break; + case 6: + processStatement(op, StmEnum.Add, ClusterEnum.ForeignLong, ClusterEnum.ForeignLong); + break; + case 7: + processStatement(op, StmEnum.Remove, ClusterEnum.ForeignLong, ClusterEnum.ForeignLong); + break; + + } + + } else { + + // 4 top bits of op + // 4 low bits of payload + + int bits4 = (((int)op)&0xF0)>>4; + switch(bits4) { + case 0b1100: + processStatement(op, StmEnum.Add, ClusterEnum.Local, ClusterEnum.ForeignShort); + break; + case 0b1101: + processStatement(op, StmEnum.Add, ClusterEnum.ForeignShort, ClusterEnum.Local); + break; + case 0b1110: + processStatement(op, StmEnum.Add, ClusterEnum.ForeignShort, ClusterEnum.ForeignLong); + break; + case 0b1111: + processStatement(op, StmEnum.Add, ClusterEnum.ForeignLong, ClusterEnum.ForeignShort); + break; + case 0b0010: + processStatement(op, StmEnum.Remove, ClusterEnum.ForeignShort, ClusterEnum.Local); + break; + case 0b0011: + int bits3 = (((int)op)&0xF8)>>3; + if(bits3 == 7) + processSetShort(op); + break; + } + + } + + } + + } + + } + + } + + + abstract void create() throws DatabaseException; + abstract void delete(int resourceIndex) throws DatabaseException; + abstract void modify(int resourceKey, long offset, int size, byte[] bytes, int pos) throws DatabaseException; + abstract void set(int resourceKey, byte[] bytes, int length) throws DatabaseException; + + abstract void claim(int resourceKey, int predicateKey, int objectKey, ClusterUID puid, ClusterUID ouid) throws DatabaseException; + abstract void deny(int resourceKey, int predicateKey, int objectKey, ClusterUID puid, ClusterUID ouid) throws DatabaseException; + +}
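The opcode layout that process() above dispatches on is easier to see in isolation. A stand-alone sketch of the same prefix classification (the class name and returned labels are illustrative, not part of the patch):

    public class OpClassifier {
        // Mirrors the prefix table in ClusterUpdateProcessorBase.process().
        static String classify(int op) {
            switch (op) {
                case 49: case 50: case 51: return "remove statement (8-bit op)";
                case 52: return "create";
                case 53: return "set";
                case 54: return "delete";
                case 55: return "modify";
            }
            int top2 = op & 0xC0;
            if (top2 == 0x40) return "add statement, ForeignShort/ForeignShort";
            if (top2 == 0x80) return "remove statement, ForeignShort/ForeignShort";
            if ((op & 0xE0) == 0) return "statement, 6-bit op (top 3 bits zero)";
            int bits4 = (op & 0xF0) >> 4;
            if (bits4 == 0b0011)
                return ((op & 0xF8) >> 3) == 7 ? "set short value" : "reserved";
            return "add/remove statement (4-bit op)";
        }
        public static void main(String[] args) {
            System.out.println(classify(52));   // create
            System.out.println(classify(0x45)); // add statement, ForeignShort/ForeignShort
        }
    }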
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessorBase2.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessorBase2.java new file mode 100644 index 000000000..e821b46eb --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/ClusterUpdateProcessorBase2.java @@ -0,0 +1,63 @@ +package org.simantics.acorn.internal; + +import org.simantics.db.service.Bytes; +import org.simantics.db.service.ClusterUID; + +public abstract class ClusterUpdateProcessorBase2 { + + final private byte[] bytes; + private int pos = 0; + final private int len; + final private ClusterUID uid; + + public ClusterUpdateProcessorBase2(byte[] operations) { + this.bytes = operations; + this.len = Bytes.readLE4(bytes, 0) + 4; // the stored length does not count the 4-byte length field itself + int version = Bytes.readLE4(bytes, 4); + assert(version == ClusterChange2.VERSION); + long cuid1 = Bytes.readLE8(bytes, 8); + long cuid2 = Bytes.readLE8(bytes, 16); + pos = 24; + uid = ClusterUID.make(cuid1, cuid2); + } + + public ClusterUID getClusterUID() { + return uid; + } + + private void processSetImmutable(int op) { + int value = bytes[pos++]&0xff; + setImmutable(value > 0); + } + + private void processUndoValue(int op) { + Bytes.readLE4(bytes, pos); + pos+=4; + } + + public void process() { + + while(pos < len) { + + int op = bytes[pos++]&0xff; + + switch(op) { + + case ClusterChange2.SET_IMMUTABLE_OPERATION: + processSetImmutable(op); + break; + case ClusterChange2.UNDO_VALUE_OPERATION: + processUndoValue(op); + break; + default: + throw new IllegalStateException(); + + } + + } + + } + + abstract void setImmutable(boolean value); + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/DebugPolicy.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/DebugPolicy.java new file mode 100644 index 000000000..d694abe83 --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/DebugPolicy.java @@ -0,0 +1,19 @@ +package org.simantics.acorn.internal; + + +/** + * @author Antti Villberg + */ +public final class DebugPolicy { + + public static final boolean REPORT_RESOURCE_ID_ALLOCATION = false; + public static final boolean REPORT_CLUSTER_ID_ALLOCATION = false; + public static final boolean REPORT_CLUSTER_EVENTS = false; + public static final boolean REPORT_CLUSTER_LOADING = false; + public static final boolean REPORT_CLUSTER_LOADING_STACKS = false; + public static final boolean REPORT_CLUSTER_STREAM = false; + public static final boolean CLUSTER_COLLECTION = false; + public static final boolean LOG_SERVER_EVENTS = false; + public static final boolean SHOW_SERVER_EVENTS = false; // Requires LOG_SERVER_EVENTS to be true. + +}
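Both update processors read the same 24-byte stream header: a 4-byte little-endian payload length (which excludes the length field itself), a 4-byte version, and the two 8-byte halves of the cluster UID; operations then run from pos = 24 while pos < len. A dependency-free sketch of that framing (readLE4/readLE8 are stand-ins for org.simantics.db.service.Bytes):

    public class UpdateStreamHeader {
        static int readLE4(byte[] b, int p) {
            return (b[p]&0xFF) | ((b[p+1]&0xFF)<<8) | ((b[p+2]&0xFF)<<16) | ((b[p+3]&0xFF)<<24);
        }
        static long readLE8(byte[] b, int p) {
            return (readLE4(b, p) & 0xFFFFFFFFL) | ((long)readLE4(b, p+4) << 32);
        }
        public static void main(String[] args) {
            byte[] stream = new byte[24];      // header only, empty payload
            stream[0] = 20;                    // stored length excludes this field
            stream[4] = 1;                     // version
            int len = readLE4(stream, 0) + 4;  // total bytes incl. length field
            int version = readLE4(stream, 4);
            long uid1 = readLE8(stream, 8);
            long uid2 = readLE8(stream, 16);
            System.out.println(len + " v" + version + " uid=" + uid1 + "," + uid2);
            // operation bytes would start at pos = 24 and run while pos < len
        }
    }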
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/UndoClusterUpdateProcessor.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/UndoClusterUpdateProcessor.java new file mode 100644 index 000000000..d545e51ad --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/internal/UndoClusterUpdateProcessor.java @@ -0,0 +1,112 @@ +package org.simantics.acorn.internal; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import org.simantics.acorn.ClusterManager; +import org.simantics.acorn.lru.ClusterChangeSet; +import org.simantics.acorn.lru.ClusterStreamChunk; +import org.simantics.acorn.lru.ClusterChangeSet.Entry; +import org.simantics.acorn.lru.ClusterChangeSet.Type; +import org.simantics.db.exception.DatabaseException; +import org.simantics.db.service.ClusterUID; + +public class UndoClusterUpdateProcessor extends ClusterUpdateProcessorBase { + + public final static boolean DEBUG = false; + + final private ClusterChangeSet ccs; + + private int oldValuesIndex = 0; + private int statementMaskIndex = 0; + + final public List<Entry> entries = new ArrayList<Entry>(); + + public UndoClusterUpdateProcessor(ClusterManager client, ClusterStreamChunk chunk, ClusterChangeSet ccs) throws DatabaseException { + super(client, readOperation(client, chunk, ccs)); + this.ccs = ccs; + } + + private static byte[] readOperation(ClusterManager manager, ClusterStreamChunk chunk, ClusterChangeSet ccs) { + +// ClusterStreamChunk chunk; +// manager.streamLRU.acquireMutex(); +// try { +// chunk = ccs.getChunk(manager); +// } catch (Throwable t) { +// throw new IllegalStateException(t); +// } finally { +// manager.streamLRU.releaseMutex(); +// } +// +// chunk.acquireMutex(); +// try { +// chunk.ve + chunk.makeResident(); + return chunk.getOperation(ccs.chunkOffset); +// } catch (Throwable t) { +// throw new IllegalStateException(t); +// } finally { +// chunk.releaseMutex(); +// } + } + + @Override + void create() throws DatabaseException { + } + + @Override + void delete(int ri) throws DatabaseException { + + byte[] old = ccs.oldValues.get(oldValuesIndex); + boolean oldValueEx = ccs.oldValueEx.get(oldValuesIndex) > 0; + oldValuesIndex++; + + if(old != null) { + entries.add(new Entry(ri, oldValueEx, old, null)); + } + + } + + @Override + void modify(int resourceKey, long offset, int size, byte[] bytes, int pos) + throws DatabaseException { + + } + + @Override + void set(int resourceKey, byte[] bytes, int length) + throws DatabaseException { + + byte[] old = ccs.oldValues.get(oldValuesIndex); + boolean oldValueEx = ccs.oldValueEx.get(oldValuesIndex) > 0; + oldValuesIndex++; + + entries.add(new Entry(resourceKey, oldValueEx, old, Arrays.copyOf(valueBuffer, length))); + + } + + @Override + void claim(int resourceKey, int predicateKey, int objectKey, ClusterUID puid, ClusterUID ouid) + throws DatabaseException { + + boolean add = ccs.statementMask.get(statementMaskIndex++) > 0; + if(add) { + entries.add(new Entry(Type.ADD, resourceKey, puid, predicateKey & 0xFFF, ouid, objectKey & 0xFFF)); + } + + } + + @Override + void deny(int resourceKey, int predicateKey, int objectKey, ClusterUID puid, ClusterUID ouid) + throws DatabaseException { + + boolean remove = ccs.statementMask.get(statementMaskIndex++) > 0; + if(remove) { + entries.add(new Entry(Type.REMOVE, resourceKey, puid, predicateKey & 0xFFF, ouid, objectKey & 0xFFF)); + } + + } + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/AccessTime.java 
b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/AccessTime.java new file mode 100644 index 000000000..8a32ef230 --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/AccessTime.java @@ -0,0 +1,23 @@ +package org.simantics.acorn.lru; + +public class AccessTime { + + private long last = 0; + + private static AccessTime INSTANCE = new AccessTime(); + + private AccessTime() { + + } + + public static AccessTime getInstance() { + return INSTANCE; + } + + public synchronized long getAccessTime() { + long result = System.nanoTime(); + last = Math.max(result, last+1); + return last; + } + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/CachingClusterSupport.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/CachingClusterSupport.java new file mode 100644 index 000000000..a2c489901 --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/CachingClusterSupport.java @@ -0,0 +1,160 @@ +package org.simantics.acorn.lru; + +import java.io.InputStream; + +import org.simantics.db.Session; +import org.simantics.db.exception.DatabaseException; +import org.simantics.db.impl.ClusterBase; +import org.simantics.db.impl.ClusterSupport; +import org.simantics.db.impl.IClusterTable; +import org.simantics.db.service.ClusterUID; + +public class CachingClusterSupport implements ClusterSupport { + + private ClusterSupport backend; + + public CachingClusterSupport(ClusterSupport backend) { + this.backend = backend; + } + + @Override + public int createClusterKeyByClusterUID(ClusterUID clusterUID, long clusterId) { + throw new UnsupportedOperationException(); + } + + @Override + public ClusterBase getClusterByClusterUIDOrMake(ClusterUID clusterUID) { + throw new UnsupportedOperationException(); + } + + @Override + public ClusterBase getClusterByClusterId(long clusterId) { + throw new UnsupportedOperationException(); + } + + @Override + public ClusterBase getClusterByClusterKey(int clusterKey) { + throw new UnsupportedOperationException(); + } + + @Override + public int getClusterKeyByClusterUIDOrMake(ClusterUID clusterUID) { + throw new UnsupportedOperationException(); + } + + @Override + public int getClusterKeyByClusterUIDOrMake(long id1, long id2) { + throw new UnsupportedOperationException(); + } + + @Override + public ClusterBase getClusterByResourceKey(int resourceKey) { + throw new UnsupportedOperationException(); + } + + @Override + public long getClusterIdOrCreate(ClusterUID clusterUID) { + throw new UnsupportedOperationException(); + } + + @Override + public void addStatement(Object cluster) { + throw new UnsupportedOperationException(); + } + + @Override + public void cancelStatement(Object cluster) { + throw new UnsupportedOperationException(); + } + + @Override + public void removeStatement(Object cluster) { + throw new UnsupportedOperationException(); + } + + @Override + public void cancelValue(Object cluster) { + throw new UnsupportedOperationException(); + } + + @Override + public void removeValue(Object cluster) { + throw new UnsupportedOperationException(); + } + + @Override + public void setValue(Object cluster, long clusterId, byte[] bytes, int length) { + throw new UnsupportedOperationException(); + } + + @Override + public void modiValue(Object cluster, long clusterId, long voffset, int length, byte[] bytes, int offset) { + throw new UnsupportedOperationException(); + } + + @Override + public void setImmutable(Object cluster, boolean immutable) { + throw new UnsupportedOperationException(); + } + + @Override + 
public void setDeleted(Object cluster, boolean deleted) { + throw new UnsupportedOperationException(); + } + + @Override + public void createResource(Object cluster, short resourceIndex, long clusterId) { + backend.createResource(cluster, resourceIndex, clusterId); + } + + @Override + public void addStatementIndex(Object cluster, int resourceKey, ClusterUID clusterUID, byte op) { + throw new UnsupportedOperationException(); + } + + @Override + public void setStreamOff(boolean setOff) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean getStreamOff() { + throw new UnsupportedOperationException(); + } + + @Override + public InputStream getValueStreamEx(int resourceIndex, long clusterId) throws DatabaseException { + throw new UnsupportedOperationException(); + } + + @Override + public byte[] getValueEx(int resourceIndex, long clusterId) throws DatabaseException { + throw new UnsupportedOperationException(); + } + + @Override + public byte[] getValueEx(int resourceIndex, long clusterId, long voffset, int length) throws DatabaseException { + throw new UnsupportedOperationException(); + } + + @Override + public long getValueSizeEx(int resourceIndex, long clusterId) throws DatabaseException { + throw new UnsupportedOperationException(); + } + + @Override + public int wait4RequestsLess(int limit) throws DatabaseException { + throw new UnsupportedOperationException(); + } + + @Override + public Session getSession() { + throw new UnsupportedOperationException(); + } + + @Override + public IClusterTable getClusterTable() { + throw new UnsupportedOperationException(); + } + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ChangeSetInfo.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ChangeSetInfo.java new file mode 100644 index 000000000..12351a519 --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ChangeSetInfo.java @@ -0,0 +1,113 @@ +package org.simantics.acorn.lru; + +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Arrays; + +import org.simantics.db.service.Bytes; +import org.simantics.utils.datastructures.Pair; + +import gnu.trove.list.array.TByteArrayList; + +public class ChangeSetInfo extends LRUObject<Long, ChangeSetInfo> { + + private byte[] metadataBytes; + private ArrayList<String> clusterChangeSetIds; + + // Stub + public ChangeSetInfo(LRU<Long, ChangeSetInfo> LRU, Path readDir, Long revision, int offset, int length) { + super(LRU, revision, readDir, "clusterStream", offset, length, false, false); + LRU.map(this); + } + + // New + public ChangeSetInfo(LRU<Long, ChangeSetInfo> LRU, Long revision, byte[] bytes, ArrayList<String> clusterChangeSetIds) { + super(LRU, revision, LRU.getDirectory(), "clusterStream", true, true); + this.metadataBytes = bytes; + this.clusterChangeSetIds = clusterChangeSetIds; + LRU.insert(this, accessTime); + } + + public ArrayList<String> getCSSIds() { + if(VERIFY) verifyAccess(); + return clusterChangeSetIds; + } + + public byte[] getMetadataBytes() { + + if(VERIFY) verifyAccess(); + + makeResident(); + + return metadataBytes; + + } + + private static void writeLE(TByteArrayList bytes, int value) { + + bytes.add( (byte) (value & 0xFF)); + bytes.add((byte) ((value >>> 8) & 0xFF)); + bytes.add((byte) ((value >>> 16) & 0xFF)); + bytes.add((byte) ((value >>> 24) & 0xFF)); + + } + + @Override + protected Pair<byte[], Integer> toBytes() { + + TByteArrayList result = new TByteArrayList(); + writeLE(result, metadataBytes.length); + result.add(metadataBytes); + writeLE(result, clusterChangeSetIds.size()); + for(String id : clusterChangeSetIds) { + byte[] bb = id.getBytes(); + writeLE(result, bb.length); + result.add(bb); + } + + release(); + + byte[] ret = result.toArray(); + + return Pair.make(ret, ret.length); + + } + 
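ChangeSetInfo's toBytes()/fromFile() pair uses a simple length-prefixed layout: [metadata length][metadata bytes][id count] followed by one [length][bytes] record per change-set id, all integers little-endian. A dependency-free round-trip sketch of that layout (class and method names are hypothetical):

    import java.io.ByteArrayOutputStream;
    import java.util.ArrayList;
    import java.util.List;

    public class ChangeSetInfoFormat {
        static void writeLE(ByteArrayOutputStream out, int v) {
            out.write(v & 0xFF); out.write((v >>> 8) & 0xFF);
            out.write((v >>> 16) & 0xFF); out.write((v >>> 24) & 0xFF);
        }
        static int readLE4(byte[] b, int p) {
            return (b[p]&0xFF) | ((b[p+1]&0xFF)<<8) | ((b[p+2]&0xFF)<<16) | ((b[p+3]&0xFF)<<24);
        }
        public static byte[] encode(byte[] metadata, List<String> ids) {
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            writeLE(out, metadata.length);
            out.write(metadata, 0, metadata.length);
            writeLE(out, ids.size());
            for (String id : ids) {
                byte[] bb = id.getBytes();
                writeLE(out, bb.length);
                out.write(bb, 0, bb.length);
            }
            return out.toByteArray();
        }
        public static List<String> decodeIds(byte[] data) {
            int offset = 4 + readLE4(data, 0);       // skip the metadata block
            int n = readLE4(data, offset); offset += 4;
            List<String> ids = new ArrayList<>();
            for (int i = 0; i < n; i++) {
                int len = readLE4(data, offset); offset += 4;
                ids.add(new String(data, offset, len)); offset += len;
            }
            return ids;
        }
        public static void main(String[] args) {
            byte[] blob = encode(new byte[] {1,2,3}, java.util.Arrays.asList("0.0", "0.1"));
            System.out.println(decodeIds(blob)); // [0.0, 0.1]
        }
    }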
+ @Override + void release() { + clusterChangeSetIds = null; + metadataBytes = null; + } + + @Override + public void fromFile(byte[] data) { + + clusterChangeSetIds = new ArrayList<String>(); + + int metadataLength = Bytes.readLE4(data, 0); + metadataBytes = Arrays.copyOfRange(data, 4, 4+metadataLength); + int offset = 4+metadataLength; + int numberOfChangeSets = Bytes.readLE4(data, offset); + offset += 4; + for(int i=0;i<numberOfChangeSets;i++) { + int length = Bytes.readLE4(data, offset); + offset += 4; + clusterChangeSetIds.add(new String(Arrays.copyOfRange(data, offset, offset+length))); + offset += length; + } + + } + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterChangeSet.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterChangeSet.java new file mode 100644 --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterChangeSet.java @@ -0,0 +1,120 @@ +package org.simantics.acorn.lru; + +public class ClusterChangeSet { + + final public String id; + final public ClusterUID cuid; + final public String chunkKey; + final public int chunkOffset; + + public TByteArrayList statementMask = new TByteArrayList(); + public TByteArrayList oldValueEx = new TByteArrayList(); + public ArrayList<byte[]> oldValues = new ArrayList<byte[]>(); + + public ClusterChangeSet(String id ,ClusterUID cuid) { + this.id = id; + this.cuid = cuid; + String[] ss = id.split("\\."); + chunkKey = ss[0]; + chunkOffset = Integer.parseInt(ss[1]); + } + + public ClusterStreamChunk getChunk(ClusterManager manager) { + return manager.streamLRU.get(chunkKey); + } + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterInfo.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterInfo.java new file mode 100644 index 000000000..1cd582267 --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterInfo.java @@ -0,0 +1,346 @@ +package org.simantics.acorn.lru; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.file.Path; + +import org.simantics.acorn.ClusterManager; +import org.simantics.acorn.Persistable; +import org.simantics.acorn.cluster.ClusterImpl; +import org.simantics.acorn.cluster.ClusterSmall; +import org.simantics.acorn.cluster.ClusterImpl.ClusterTables; +import org.simantics.acorn.internal.ClusterSupport2; +import org.simantics.compressions.CompressionCodec; +import org.simantics.compressions.Compressions; +import org.simantics.db.ClusterCreator; +import org.simantics.db.exception.DatabaseException; +import org.simantics.db.service.Bytes; +import org.simantics.db.service.ClusterUID; +import org.simantics.utils.datastructures.Pair; + +public class ClusterInfo extends LRUObject<ClusterUID, ClusterInfo> implements Persistable { + + final private ClusterManager manager; + private ClusterImpl cluster; + public int changeSetId; + private ClusterUpdateState updateState; + + public static final String COMPRESSION = "LZ4"; + + // Stub + public ClusterInfo(ClusterManager manager, LRU<ClusterUID, ClusterInfo> LRU, Path readDirectory, ClusterUID uid, int offset, int length) { + super(LRU, uid, readDirectory, uid.toString() + ".cluster", offset, length, false, false); + this.manager = manager; + this.cluster = null; + LRU.map(this); + } + + // New + public ClusterInfo(ClusterManager manager, LRU<ClusterUID, ClusterInfo> LRU, ClusterImpl cluster) { + super(LRU, cluster.getClusterUID(), LRU.getDirectory(), cluster.getClusterUID().toString() + ".cluster", true, true); + this.manager = manager; + this.cluster = cluster; + LRU.insert(this, accessTime); + LRU.swap(getKey()); + } + + public <T> T clone(ClusterUID uid, ClusterCreator<T> creator) throws DatabaseException { + + // Updates have been ensured at this point + + acquireMutex(); + + try { + if(isResident()) { + ClusterTables tables = cluster.store(); + return creator.create(uid, tables.bytes, tables.ints, tables.longs); + } + } catch (IOException e) { + throw new DatabaseException(e); + } catch (Throwable t) { + throw new IllegalStateException(t); + } finally { + releaseMutex(); + } + + // Ensure pending updates here - this may take some time + LRU.waitPending(this, false); + + acquireMutex(); + try { + + if(isResident()) { + ClusterTables tables = cluster.store(); + return creator.create(uid, tables.bytes, tables.ints, tables.longs); + } else { + byte[] data = readFile(); + ClusterTables tables = new ClusterTables(); + loadCluster(getKey(), manager.support, data, tables); + return creator.create(uid, tables.bytes, tables.ints, tables.longs); + } + + } catch (IOException e) { + throw new DatabaseException(e); + } catch (Throwable t) { + throw new IllegalStateException(t); + } finally { + releaseMutex(); + } + + } + 
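The ClusterDecompressor that follows consumes a blob whose inflated image starts with three 4-byte little-endian table lengths (bytes, ints, longs) at offsets 0, 4 and 8, with the three tables packed back to back from offset 12. A small sketch of just that header arithmetic (the LZ4 step is delegated to the Simantics Compressions codec and omitted here; class name illustrative):

    public class ClusterImageLayout {
        static int readLE4(byte[] b, int p) {
            return (b[p]&0xFF) | ((b[p+1]&0xFF)<<8) | ((b[p+2]&0xFF)<<16) | ((b[p+3]&0xFF)<<24);
        }
        // Computes where each table sits inside an inflated cluster image.
        public static void main(String[] args) {
            byte[] image = new byte[12 + 5 + 2*4 + 1*8];
            image[0] = 5;  // byte table length
            image[4] = 2;  // int table length
            image[8] = 1;  // long table length
            int byteLen = readLE4(image, 0);
            int intLen  = readLE4(image, 4);
            int longLen = readLE4(image, 8);
            int bytesAt = 12;
            int intsAt  = bytesAt + byteLen;
            int longsAt = intsAt + (intLen << 2);
            System.out.println(bytesAt + " " + intsAt + " " + longsAt
                + " end=" + (longsAt + (longLen << 3)));
        }
    }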
+ static class ClusterDecompressor { + + byte[] decompressBuffer = new byte[1024*1024]; + + public synchronized ClusterTables readCluster(ClusterUID uid, byte[] compressed) throws IOException { + + int deflatedSize = Bytes.readLE4(compressed, compressed.length-4); + + if(decompressBuffer.length < deflatedSize) + decompressBuffer = new byte[Math.max(3*decompressBuffer.length / 2, deflatedSize)]; + + CompressionCodec codec = Compressions.get(Compressions.LZ4); + + ByteBuffer input = ByteBuffer.wrap(compressed); + ByteBuffer output = ByteBuffer.wrap(decompressBuffer); + + int decompressedSize = codec.decompressBuffer(input, 0, compressed.length-4, output, 0, decompressBuffer.length); + assert(decompressedSize <= decompressBuffer.length); + + int byteLength = Bytes.readLE4(decompressBuffer, 0); + int intLength = Bytes.readLE4(decompressBuffer, 4); + int longLength = Bytes.readLE4(decompressBuffer, 8); + + byte[] bytes = new byte[byteLength]; + int[] ints = new int[intLength]; + long[] longs = new long[longLength]; + + System.arraycopy(decompressBuffer, 12, bytes, 0, byteLength); + + int offset = 12+byteLength; + for(int i=0;i<intLength;i++) { + ints[i] = Bytes.readLE4(decompressBuffer, offset); + offset += 4; + } + for(int i=0;i<longLength;i++) { + longs[i] = Bytes.readLE8(decompressBuffer, offset); + offset += 8; + } + + ClusterTables result = new ClusterTables(); + result.bytes = bytes; + result.ints = ints; + result.longs = longs; + return result; + + } + + } + + @Override + protected Pair<byte[], Integer> toBytes() { + + try { + + byte[] raw = null; + + if(cluster instanceof ClusterSmall) { + raw = cluster.storeBytes(); + } else { + + ClusterTables tables = cluster.store(); + + raw = new byte[12 + tables.bytes.length + (tables.ints.length<<2) + (tables.longs.length<<3)]; + + Bytes.writeLE(raw, 0, tables.bytes.length); + Bytes.writeLE(raw, 4, tables.ints.length); + Bytes.writeLE(raw, 8, tables.longs.length); + + System.arraycopy(tables.bytes, 0, raw, 12, tables.bytes.length); + int offset = 12+tables.bytes.length; + for(int i=0;i<tables.ints.length;i++) { + Bytes.writeLE(raw, offset, tables.ints[i]); + offset += 4; + } + for(int i=0;i<tables.longs.length;i++) { + Bytes.writeLE8(raw, offset, tables.longs[i]); + offset += 8; + } + + } + + CompressionCodec codec = Compressions.get(COMPRESSION); + ByteBuffer input = ByteBuffer.wrap(raw); + ByteBuffer output = ByteBuffer.allocate(raw.length + raw.length/8); + int compressedSize = codec.compressBuffer(input, 0, raw.length, output, 0); + + // Append inflated size - decompression cannot handle offsets in input + final byte[] rawOutput = new byte[compressedSize+4]; + output.get(rawOutput, 0, compressedSize); + Bytes.writeLE(rawOutput, compressedSize, raw.length); + + release(); + + return Pair.make(rawOutput, rawOutput.length); + + } catch (IOException e) { + throw new IllegalStateException(e); + } + + } + +}
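ClusterLRU, which follows, hands out resource keys by packing a cluster key and a 12-bit resource index into a single int: (clusterKey << 12) + index, so each cluster addresses at most 4096 resources (the update processors above enforce indices 1..4095). A minimal sketch of that packing (class name illustrative):

    public class ResourceKeys {
        static int resourceKey(int clusterKey, int index) {
            if (index < 0 || index > 4095) throw new IllegalArgumentException();
            return (clusterKey << 12) + index;
        }
        static int clusterKeyOf(int resourceKey) { return resourceKey >> 12; }
        static int indexOf(int resourceKey)      { return resourceKey & 0xFFF; }
        public static void main(String[] args) {
            int key = resourceKey(7, 42);
            System.out.println(clusterKeyOf(key) + " " + indexOf(key)); // 7 42
        }
    }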
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterLRU.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterLRU.java new file mode 100644 --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterLRU.java @@ -0,0 +1,315 @@ +package org.simantics.acorn.lru; + +public class ClusterLRU extends LRU<ClusterUID, ClusterInfo> { + + final private BijectionMap<ClusterUID, Integer> clusterMapping = new BijectionMap<ClusterUID, Integer>(); + final private ClusterManager manager; + + public ClusterLRU(ClusterManager manager, String identifier, Path writeDir) { + + super(identifier, writeDir); + this.manager = manager; + + clusterMapping.map(ClusterUID.make(0,2), clusterMapping.size() + 1); + + } + + public ClusterInfo getOrCreate(ClusterUID uid, boolean makeIfNull) { + + try { + + acquireMutex(); + + ClusterInfo info = get(uid); + + if (info == null) { + + if(!makeIfNull) throw new IllegalStateException("Asked for an existing cluster " + uid + " that was not found."); + + Integer clusterKey = clusterMapping.getRight(uid); + if (clusterKey == null) { + clusterKey = clusterMapping.size() + 1; + clusterMapping.map(uid, clusterKey); + } + + info = new ClusterInfo(manager, this, ClusterImpl.make(manager.support, + uid, clusterKey, manager.support)); + + } + + return info; + + } catch (Throwable t) { + throw new IllegalStateException(t); + } finally { + + releaseMutex(); + + } + + } + + /* + * This method waits - we have no locks here + */ + public void ensureUpdates(ClusterUID uid) throws DatabaseException { + + ClusterInfo info = getWithoutMutex(uid); + if(info == null) + throw new ClusterDoesNotExistException("Asked a cluster which does not exist: " + uid); + info.waitForUpdates(); + + } + + public ClusterInfo get(ClusterUID uid, boolean makeIfNull, boolean ensureUpdates) throws DatabaseException { + + if (ensureUpdates) { + try { + ensureUpdates(uid); + } catch (ClusterDoesNotExistException e) { + if (makeIfNull) { + Logger.defaultLogError("For debug purposes, creating cluster which does not exist", e); + } else { + throw e; + } + } + } + return getOrCreate(uid, makeIfNull); + } + + public ClusterInfo get(ClusterUID uid, boolean makeIfNull) throws DatabaseException { + return get(uid, makeIfNull, true); + + } + + public int getResourceKey(ClusterUID uid, int index) { + + if(VERIFY) verifyAccess(); + + Integer i = clusterMapping.getRight(uid); + if (i == null) { + i = clusterMapping.size() + 1; + clusterMapping.map(uid, i); + } + return (i << 12) + index; + + } + + public int getResourceKeyWithoutMutex(ClusterUID uid, int index) { + + acquireMutex(); + try { + return getResourceKey(uid, index); + } catch (Throwable t) { + throw new IllegalStateException(t); + } finally { + releaseMutex(); + } + + } + + public int createClusterKeyByClusterUID(ClusterUID uid) { + + if(VERIFY) verifyAccess(); + + Integer i = clusterMapping.getRight(uid); + if (i == null) { + i = clusterMapping.size() + 1; + clusterMapping.map(uid, i); + } + return i; + + } + + public ClusterBase getClusterByClusterUIDOrMake(ClusterUID uid) throws DatabaseException { + + if(VERIFY) verifyAccess(); + + int key = createClusterKeyByClusterUID(uid); + return getClusterByClusterKey(key); + + } + + public int getClusterKeyByClusterUIDOrMake(ClusterUID clusterUID) { + + if(VERIFY) verifyAccess(); + + return createClusterKeyByClusterUID(clusterUID); + + } + + public int getClusterKeyByClusterUIDOrMakeWithoutMutex(ClusterUID clusterUID) { + acquireMutex(); + try { + return getClusterKeyByClusterUIDOrMake(clusterUID); + } catch (Throwable t) { + throw new IllegalStateException(t); + } finally { + releaseMutex(); + } + } + + public ClusterBase getClusterByClusterKey(int clusterKey) throws DatabaseException { + + if(VERIFY) verifyAccess(); + + ClusterUID uid = clusterMapping.getLeft(clusterKey); + ClusterInfo info = get(uid, true); + info.acquireMutex(); + try { + return info.getCluster(); + } catch (Throwable t) { + throw new IllegalStateException(t); + } finally { + info.releaseMutex(); + } + + } + + public ClusterUID getClusterUIDByResourceKey(int resourceKey) + throws DatabaseException { + + if(VERIFY) verifyAccess(); + + int clusterKey = resourceKey >> 12; + return clusterMapping.getLeft(clusterKey); + + } + + public ClusterUID getClusterUIDByResourceKeyWithoutMutex(int resourceKey) throws DatabaseException { + acquireMutex(); + try { + return getClusterUIDByResourceKey(resourceKey); + } catch (Throwable t) { + throw new IllegalStateException(t); + } finally { + releaseMutex(); + } + } + + @SuppressWarnings("unchecked") + public <T> T getClusterByClusterUIDOrMakeProxy(ClusterUID uid) throws DatabaseException { + return (T) getClusterByClusterUIDOrMake(uid); + } + + @SuppressWarnings("unchecked") + public <T> T getClusterProxyByResourceKey(int resourceKey) throws DatabaseException { + + if(VERIFY) verifyAccess(); + + return (T) getClusterByClusterKey(resourceKey >> 12); + + } + + public int getClusterKeyByUID(long id1, long id2) throws DatabaseException { + + if(VERIFY) verifyAccess(); + + return getClusterKeyByClusterUIDOrMake(ClusterUID.make(id1, id2)); + + } + 
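The method/methodWithoutMutex pairs above and below all lean on a binary semaphore that is polled with a timeout so that a stuck owner is reported rather than deadlocking silently. A reduced, stand-alone sketch of that discipline (class name illustrative):

    import java.util.concurrent.Semaphore;
    import java.util.concurrent.TimeUnit;

    public class WatchdogMutex {
        private final Semaphore mutex = new Semaphore(1);
        private volatile Thread owner;
        public void acquire() {
            try {
                // keep retrying, but complain if the lock is held suspiciously long
                while (!mutex.tryAcquire(3, TimeUnit.SECONDS))
                    System.err.println("Mutex is slow to acquire - owner is " + owner);
                owner = Thread.currentThread();
            } catch (InterruptedException e) {
                throw new IllegalStateException(e);
            }
        }
        public void release() { owner = null; mutex.release(); }
    }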
public int getClusterKeyByUIDWithoutMutex(long id1, long id2) throws DatabaseException { + + acquireMutex(); + try { + return getClusterKeyByClusterUIDOrMake(ClusterUID.make(id1, id2)); + } catch (Throwable t) { + throw new IllegalStateException(t); + } finally { + releaseMutex(); + } + + } + + + public static void main(String[] args) throws Exception { + + long start = System.nanoTime(); + + final TIntIntHashMap map = new TIntIntHashMap(0, 0.9f); + + AtomicInteger counter = new AtomicInteger(0); + AtomicBoolean written = new AtomicBoolean(false); + + //final Semaphore ws = new Semaphore(1); + + Thread write = new Thread() { + + @Override + public void run() { + try { + for(int i=0;i<100000000;i++) { + synchronized(map) { +// ws.acquire(); + map.put(i, i); +// ws.release(); + } + //if((i & 0xfffff) == 0) System.err.println("Write " + i); + counter.incrementAndGet(); + } + written.set(true); + } catch (Throwable e) { + e.printStackTrace(); + } + } + + }; + write.start(); + + Thread read = new Thread() { + + @Override + public void run() { + try { + while(!written.get()) { + double r = Math.random(); + double max = counter.get(); + int key = (int)(max*r); + int value = map.get(key); + if(key != value) { + //System.err.println("Read failed " + key + " vs. " + value); + //ws.acquire(); + synchronized(map) { + value = map.get(key); + if(key != value) { + System.err.println("Read failed for real " + key + " vs. " + value); + } + //ws.release(); + } + } + //if((key & 0xfffff) == 0) System.err.println("Read " + key); + } + } catch (Throwable e) { + e.printStackTrace(); + } + } + + }; + read.start(); + + write.join(); + read.join(); + + long duration = System.nanoTime() - start; + System.err.println("took " + 1e-9*duration + "s."); + + } + +}
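ClusterStreamChunk, next, buffers cluster update operations and flips itself to committed once roughly 500KB has accumulated, after which it can be persisted and a fresh chunk started. A toy sketch of that accumulation policy (names illustrative; the real class also tracks per-operation change sets):

    import java.util.ArrayList;
    import java.util.List;

    public class ChunkPolicy {
        static final int MAX_CHUNK_SIZE = 500 * 1024;
        int size = 0;
        boolean committed = false;
        final List<byte[]> operations = new ArrayList<>();

        void add(byte[] op) {
            if (committed) throw new IllegalStateException("chunk already committed");
            operations.add(op);
            size += op.length;
            if (size > MAX_CHUNK_SIZE) committed = true;  // chunk is now closed
        }
    }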
diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterStreamChunk.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterStreamChunk.java new file mode 100644 index 000000000..23cbfb1ce --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/ClusterStreamChunk.java @@ -0,0 +1,302 @@ +package org.simantics.acorn.lru; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.file.Path; +import java.util.ArrayList; + +import org.simantics.acorn.ClusterManager; +import org.simantics.acorn.Persistable; +import org.simantics.acorn.internal.ClusterChange; +import org.simantics.acorn.internal.UndoClusterUpdateProcessor; +import org.simantics.compressions.CompressionCodec; +import org.simantics.compressions.Compressions; +import org.simantics.db.exception.DatabaseException; +import org.simantics.db.service.Bytes; +import org.simantics.utils.datastructures.Pair; + +import gnu.trove.list.array.TByteArrayList; + +public class ClusterStreamChunk extends LRUObject<String, ClusterStreamChunk> implements Persistable { + + // 500KB is a fine chunk + private static int MAX_CHUNK_SIZE = 500*1024; + + int size = 0; + final private ClusterManager manager; + private boolean committed = false; + + public int nextToProcess = 0; + + public ArrayList<ClusterUpdateOperation> operations = new ArrayList<ClusterUpdateOperation>(); + + // Stub + public ClusterStreamChunk(ClusterManager manager, LRU<String, ClusterStreamChunk> LRU, Path readDir, String id, int offset, int length) { + super(LRU, id, readDir, "clusterStream", offset, length, false, false); + this.manager = manager; + LRU.map(this); + } + + // Creation + public ClusterStreamChunk(ClusterManager manager, LRU<String, ClusterStreamChunk> LRU, String id) { + super(LRU, id, LRU.getDirectory(), "clusterStream", true, true); + this.manager = manager; + LRU.insert(this, accessTime); + } + + public UndoClusterUpdateProcessor getUndoProcessor(ClusterManager clusters, int chunkOffset, String ccsId) throws DatabaseException { + + if(VERIFY) verifyAccess(); + + makeResident(true); + + ClusterUpdateOperation op = operations.get(chunkOffset); + if(op == null) throw new IllegalStateException("Cluster Update Operation " + ccsId + " was not found."); + if(op.ccs == null) throw new IllegalStateException("Cluster ChangeSet " + ccsId + " was not found."); + + UndoClusterUpdateProcessor proc = new UndoClusterUpdateProcessor(clusters, this, op.ccs); + if(proc.version != ClusterChange.VERSION) + return null; + + // This cluster and CCS can still be under preparation => wait + clusters.clusterLRU.ensureUpdates(proc.getClusterUID()); + + proc.process(); + + cancelForceResident(); + + return proc; + + } + + public void addOperation(ClusterUpdateOperation op) { + if(committed) + throw new IllegalStateException(); + operations.add(op); + size += op.data.length; +// if(isCommitted()) { +// LRU.refresh(this); +// } + } + + public byte[] getOperation(int index) { + return operations.get(index).data; + } + + public void commit() { + committed = true; + } + + public boolean isCommitted() { + if(size > MAX_CHUNK_SIZE) committed = true; + return committed; + } + + @Override + public boolean canBePersisted() { + if(!super.canBePersisted()) return false; + if(!isCommitted()) return false; + for(ClusterUpdateOperation op : operations) { + if(!op.finished) return false; + } + return true; + } + + private static void writeLE(TByteArrayList bytes, int value) { + + bytes.add( (byte) (value & 0xFF)); + bytes.add((byte) ((value >>> 8) & 0xFF)); + bytes.add((byte) ((value >>> 16) & 0xFF)); + bytes.add((byte) ((value >>> 24) & 0xFF)); + + } + + final public static void writeLE8(TByteArrayList bytes, long value) { + + bytes.add( (byte) (value & 0xFF)); + bytes.add((byte) ((value >>> 8) & 0xFF)); + bytes.add((byte) ((value >>> 16) & 0xFF)); + bytes.add((byte) ((value >>> 24) & 0xFF)); + bytes.add((byte) ((value >>> 32) & 0xFF)); + bytes.add((byte) ((value >>> 40) & 0xFF)); + bytes.add((byte) ((value >>> 48) & 0xFF)); + bytes.add((byte) ((value >>> 56) & 0xFF)); + + } + 
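toBytes(), below, serializes the buffered operations, compresses them, and appends the inflated size to the tail of the blob; it cannot be prepended because the decompressor does not handle offsets in its input. The patch uses the Simantics LZ4 codec; the following sketch shows the same append-the-size framing with java.util.zip so it runs without extra dependencies (class and method names are hypothetical):

    import java.util.Arrays;
    import java.util.zip.Deflater;
    import java.util.zip.Inflater;

    public class SizeTrailedBlob {
        static void writeLE4(byte[] b, int p, int v) {
            b[p] = (byte)v; b[p+1] = (byte)(v>>>8); b[p+2] = (byte)(v>>>16); b[p+3] = (byte)(v>>>24);
        }
        static int readLE4(byte[] b, int p) {
            return (b[p]&0xFF) | ((b[p+1]&0xFF)<<8) | ((b[p+2]&0xFF)<<16) | ((b[p+3]&0xFF)<<24);
        }
        public static byte[] pack(byte[] raw) {
            Deflater d = new Deflater();
            d.setInput(raw); d.finish();
            byte[] buf = new byte[raw.length + raw.length/8 + 64]; // ample for small payloads
            int n = d.deflate(buf);
            byte[] out = Arrays.copyOf(buf, n + 4);
            writeLE4(out, n, raw.length);   // trailing inflated size
            return out;
        }
        public static byte[] unpack(byte[] packed) throws Exception {
            int inflated = readLE4(packed, packed.length - 4);
            Inflater inf = new Inflater();
            inf.setInput(packed, 0, packed.length - 4);
            byte[] out = new byte[inflated];
            inf.inflate(out);
            return out;
        }
        public static void main(String[] args) throws Exception {
            byte[] raw = "cluster update operations".getBytes();
            System.out.println(new String(unpack(pack(raw))));
        }
    }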
+ @Override + protected Pair<byte[], Integer> toBytes() { + + assert(isCommitted()); + + TByteArrayList raw = new TByteArrayList(); + + writeLE(raw, operations.size()); + + for(ClusterUpdateOperation op : operations) { + + writeLE(raw, op.data.length); + raw.add(op.data); + op.data = null; + + writeLE(raw, op.ccs.statementMask.size()); + raw.add(op.ccs.statementMask.toArray()); + writeLE(raw, op.ccs.oldValueEx.size()); + raw.add(op.ccs.oldValueEx.toArray()); + writeLE(raw, op.ccs.oldValues.size()); + + for(byte[] oldValue : op.ccs.oldValues) { + int len = (oldValue != null ? oldValue.length : -1); + writeLE(raw, len); + if(oldValue != null) { + raw.add(oldValue); + } + } + + } + + byte[] raw_ = raw.toArray(); + CompressionCodec codec = Compressions.get(Compressions.LZ4); + ByteBuffer input = ByteBuffer.wrap(raw_); + ByteBuffer output = ByteBuffer.allocate(raw_.length + raw_.length/8); + int compressedSize = codec.compressBuffer(input, 0, raw_.length, output, 0); + + // We append inflated size - cannot prepend since decompression cannot handle offsets in input + final byte[] rawOutput = new byte[compressedSize+4]; + output.get(rawOutput,0,compressedSize); + Bytes.writeLE(rawOutput, compressedSize, raw_.length); + + release(); + + return Pair.make(rawOutput, rawOutput.length); + + } + + @Override + void release() { + + for(ClusterUpdateOperation op : operations) { + op.data = null; + op.ccs = null; + } + + } + + static class StreamDecompressor { + +// byte[] decompressBuffer = new byte[1024*1024]; + + public synchronized byte[] decompressBuffer(byte[] compressed) throws IOException { + + int deflatedSize = Bytes.readLE4(compressed, compressed.length-4); + + byte[] result = new byte[deflatedSize]; + +// if(decompressBuffer.length < deflatedSize) +// decompressBuffer = new byte[Math.max(3*decompressBuffer.length / 2, deflatedSize)]; + + CompressionCodec codec = Compressions.get(Compressions.LZ4); + + ByteBuffer input = ByteBuffer.wrap(compressed); + ByteBuffer output = ByteBuffer.wrap(result); + + int decompressedSize = codec.decompressBuffer(input, 0, compressed.length-4, output, 0, result.length); + assert(decompressedSize == deflatedSize); + + return result; + + } + + + } + + private static StreamDecompressor decompressor = new StreamDecompressor(); + + @Override + public void fromFile(byte[] data_) { + + try { + + byte[] data = decompressor.decompressBuffer(data_); + + operations = new ArrayList<ClusterUpdateOperation>(); + + int offset = 0; + int opLen = Bytes.readLE4(data, offset); + offset += 4; + + for(int i=0;i<opLen;i++) { + + int len = Bytes.readLE4(data, offset); + offset += 4; + byte[] ops = new byte[len]; + System.arraycopy(data, offset, ops, 0, len); + offset += len; + + ClusterUpdateOperation op = new ClusterUpdateOperation(manager, ops); + + int statementMaskLen = Bytes.readLE4(data, offset); + offset += 4; + byte[] statementMask = new byte[statementMaskLen]; + System.arraycopy(data, offset, statementMask, 0, statementMaskLen); + offset += statementMaskLen; + op.ccs.statementMask = new TByteArrayList(statementMask); + + int oldValueExLen = Bytes.readLE4(data, offset); + offset += 4; + byte[] oldValueEx = new byte[oldValueExLen]; + System.arraycopy(data, offset, oldValueEx, 0, oldValueExLen); + offset += oldValueExLen; + op.ccs.oldValueEx = new TByteArrayList(oldValueEx); + + int oldValuesSize = Bytes.readLE4(data, offset); + offset += 4; + + op.ccs.oldValues = new ArrayList<byte[]>(oldValuesSize); + for(int j=0;j<oldValuesSize;j++) { + int oldValueSize = Bytes.readLE4(data, offset); + offset += 4; + if(oldValueSize == -1) { + op.ccs.oldValues.add(null); + } else { + byte[] oldValue = new byte[oldValueSize]; + System.arraycopy(data, offset, oldValue, 0, oldValueSize); + offset += oldValueSize; + op.ccs.oldValues.add(oldValue); + } + } + + operations.add(op); + + } + + } catch (IOException e) { + e.printStackTrace(); + } + + } + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/FileInfo.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/FileInfo.java new file mode 100644 --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/FileInfo.java @@ -0,0 +1,139 @@ +package org.simantics.acorn.lru; + +public class FileInfo extends LRUObject<String, FileInfo> { + + private TByteArrayList bytes; + + // Stub + public FileInfo(LRU<String, FileInfo> LRU, Path readDir, String id, int offset, int length) { + super(LRU, id, readDir, id.toString() + ".extFile", offset, length, false, false); + LRU.map(this); + } + + // New + public FileInfo(LRU<String, FileInfo> LRU, String id, int size) { + super(LRU, id, LRU.getDirectory(), id.toString() + ".extFile", true, true); + this.bytes = new TByteArrayList(size); + LRU.insert(this, accessTime); + } + + public byte[] getResourceFile() { + + if(VERIFY) verifyAccess(); + + makeResident(); + return bytes.toArray(); + + } + + + public ResourceSegment getResourceSegment(final byte[] clusterUID, + final int resourceIndex, final long segmentOffset, short segmentSize) + throws ProCoreException { + + if(VERIFY) verifyAccess(); + + makeResident(); + + try { + + int segSize = segmentSize; + if (segSize < 0) + segSize += 65536; + if (segmentSize == -1) + segSize = Math.min(65535, bytes.size()); + + final long valueSize = bytes.size(); + + final byte[] segment = bytes.toArray((int) segmentOffset, segSize); + + return new ResourceSegment() { + + @Override + public long getValueSize() { + return valueSize; + } + + @Override + public byte[] getSegment() { + return segment; + } + + @Override + public int getResourceIndex() { + return resourceIndex; + } + + @Override + public long getOffset() { + return segmentOffset; + } + + @Override + public byte[] getClusterId() { + return clusterUID; + } + }; + + } catch (Throwable t) { + + t.printStackTrace(); + + } + + throw new UnsupportedOperationException(); + + } + + public void updateData(byte[] newBytes, long offset, long pos, long size) { + + if(VERIFY) verifyAccess(); + makeResident(); + + if(size == 0) { + bytes.remove((int)offset, (int)(bytes.size()-offset)); + } else { + bytes.fill((int) (offset + 
size), (byte) 0); + bytes.set((int) offset, newBytes, (int) pos, (int) size); + } + + setDirty(); + + } + + @Override + public Pair<byte[], Integer> toBytes() { + byte[] result = bytes.toArray(); + release(); + return Pair.make(result, result.length); + } + + @Override + protected void release() { + bytes = null; + } + + @Override + public void fromFile(byte[] data) { + bytes = new TByteArrayList(data); + } + + @Override + protected String getExtension() { + return "extFile"; + } + + @Override + protected boolean overwrite() { + return true; + } + +} \ No newline at end of file diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/LRU.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/LRU.java new file mode 100644 index 000000000..508127db9 --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/LRU.java @@ -0,0 +1,624 @@ +package org.simantics.acorn.lru; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; +import java.util.TreeMap; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.Semaphore; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; + +import org.simantics.acorn.GraphClientImpl2; +import org.simantics.db.common.utils.Logger; + +/* + * The order rule of synchronization for LRU and LRUObject is: + * § Always lock LRUObject first! + * + */ + +public class LRU<MapKey, MapValue extends LRUObject<MapKey, MapValue>> { + + public static boolean VERIFY = true; + + final private long swapTime = 5L*1000000000L; + final private int swapSize = 200; + + final private HashMap<MapKey, MapValue> map = new HashMap<MapKey, MapValue>(); + final private TreeMap<Long, MapKey> priorityQueue = new TreeMap<Long, MapKey>(); + + final private Semaphore mutex = new Semaphore(1); + final private String identifier; + + private Path writeDir; + + private Thread mutexOwner; + + public Map<String, WriteRunnable> pending = new HashMap<String, WriteRunnable>(); + + public LRU(String identifier, Path writeDir) { + this.identifier = identifier; + this.writeDir = writeDir; + resume(); + } + + /* + * Public interface + */ + + public void acquireMutex() { + + try { + + while(!mutex.tryAcquire(3, TimeUnit.SECONDS)) { + System.err.println("Mutex is taking a long time to acquire - owner is " + mutexOwner); + } + + if(VERIFY) + mutexOwner = Thread.currentThread(); + + } catch (InterruptedException e) { + throw new IllegalStateException(e); + } + } + + public void releaseMutex() { + mutex.release(); + mutexOwner = null; + } + + public void shutdown() { + if (GraphClientImpl2.DEBUG) + System.err.println("Shutting down LRU writers " + writers); + writers.shutdown(); + try { + writers.awaitTermination(60, TimeUnit.SECONDS); + } catch (InterruptedException e) { + e.printStackTrace(); + } + + } + + public void resume() { + writers = new ScheduledThreadPoolExecutor(2, new ThreadFactory() { + + @Override + public Thread newThread(Runnable r) { + return new Thread(r, identifier + " File Writer"); + } + + }); + if (GraphClientImpl2.DEBUG) + System.err.println("Resuming LRU writers " + writers); + } + + /* + * This method violates the synchronization order rule between LRU and MapValue + * External synchronization is used to ensure correct operation + */ + public void persist(ArrayList<String> state) { + + acquireMutex(); + + try { + + for (MapValue value : values()) { + value.acquireMutex(); + // for debugging purposes + boolean persisted = false; + try { + // Persist the value if needed + persisted = value.persist(); + } finally { + // WriteRunnable may want to + value.releaseMutex(); + 
} + // Wait pending if value was actually persisted + waitPending(value, false); + // Take lock again + value.acquireMutex(); + try { + // Record the value + state.add(value.getStateKey()); + } catch (Throwable t) { + throw new IllegalStateException(t); + } finally { + value.releaseMutex(); + } + } + + } catch (Throwable t) { + throw new IllegalStateException(t); + } finally { + releaseMutex(); + } + + } + + public MapValue getWithoutMutex(MapKey key) { + + acquireMutex(); + try { + return get(key); + } catch (Throwable t) { + throw new IllegalStateException(t); + } finally { + releaseMutex(); + } + + } + + public MapValue get(MapKey key) { + + if(VERIFY) verifyAccess(); + + return map.get(key); + + } + + public void map(MapValue info) { + + if(VERIFY) verifyAccess(); + + map.put(info.getKey(), info); + + } + + public Collection values() { + + if(VERIFY) verifyAccess(); + + return map.values(); + + } + + public boolean swapForced() { + + acquireMutex(); + + try { + return swap(0, 0, null); + } catch (Throwable t) { + throw new IllegalStateException(t); + } finally { + releaseMutex(); + } + + } + + public boolean swap(long lifeTime, int targetSize) { + + if(VERIFY) verifyAccess(); + + return swap(lifeTime, targetSize, null); + + } + + /* + * This is called under global lock + */ + public void setWriteDir(Path dir) { + + this.writeDir = dir; + + } + + + /* + * Package access + */ + + void insert(MapValue info, long accessTime) { + + if(VERIFY) verifyAccess(); + + map.put(info.getKey(), info); + priorityQueue.put(accessTime, info.getKey()); + + } + + /* + * We have access to ClusterLRU - try to refresh value if available + */ + boolean tryRefresh(MapValue info) { + + if(VERIFY) verifyAccess(); + + if(!info.tryAcquireMutex()) + return false; + + try { + + priorityQueue.remove(info.getLastAccessTime()); + info.accessed(); + map.put(info.getKey(), info); + priorityQueue.put(info.getLastAccessTime(), info.getKey()); + + return true; + + } catch (Throwable t) { + throw new IllegalStateException(t); + } finally { + + info.releaseMutex(); + + } + + } + + /* + * We have access to MapValue and no access to clusterLRU + */ + void refresh(MapValue info, boolean needMutex) { + + if(VERIFY) { + if(!needMutex) verifyAccess(); + info.verifyAccess(); + } + + if(needMutex) + acquireMutex(); + + try { + + priorityQueue.remove(info.getLastAccessTime()); + info.accessed(); + map.put(info.getKey(), info); + priorityQueue.put(info.getLastAccessTime(), info.getKey()); + + } catch (Throwable t) { + throw new IllegalStateException(t); + } finally { + + if(needMutex) + releaseMutex(); + + } + + } + + /* + * Private implementation + */ + + public int size() { + + if(VERIFY) verifyAccess(); + + return priorityQueue.size(); + + } + + boolean swap(MapKey excluded) { + + if(VERIFY) verifyAccess(); + + return swap(swapTime, swapSize, excluded); + + } + + boolean swap(long lifeTime, int targetSize, MapKey excluded) { + + if(VERIFY) verifyAccess(); + + MapValue valueToSwap = getValueToSwap(lifeTime, targetSize, excluded); + if(valueToSwap != null) { + + if(valueToSwap.tryAcquireMutex()) { + + try { + + if(valueToSwap.canBePersisted()) { + valueToSwap.persist(); + return true; + } + + } catch (Throwable t) { + throw new IllegalStateException(t); + } finally { + valueToSwap.releaseMutex(); + } + } + + } + + return false; + + } + + + private MapValue getValueToSwap1(long lifeTime, int targetSize, MapKey excluded) { + + if(VERIFY) verifyAccess(); + + for(int i=0;i<10;i++) { + + long candidate = getSwapCandidate(lifeTime, 
targetSize); + if(candidate == 0) return null; + + MapKey key = priorityQueue.remove(candidate); + if(key.equals(excluded)) { + tryRefresh(map.get(key)); + continue; + } + + return map.get(key); + + } + + return null; + + } + + + private MapValue getValueToSwap(long lifeTime, int targetSize, MapKey excluded) { + + if(VERIFY) verifyAccess(); + + for(int i=0;i<10;i++) { + + // Lock LRU and get a candidate + MapValue value = getValueToSwap1(lifeTime, targetSize, excluded); + if(value == null) return null; + + if(value.tryAcquireMutex()) { + + try { + + // This may lock the object + if(value.canBePersisted()) return value; + // Insert back the value + refresh(value, false); + + } catch (Throwable t) { + throw new IllegalStateException(t); + } finally { + + value.releaseMutex(); + + } + + } + + } + + return null; + + } + + private long getSwapCandidate(long lifeTime, int targetSize) { + + if(VERIFY) verifyAccess(); + + if(priorityQueue.isEmpty()) return 0; + + long currentTime = System.nanoTime(); + Long lowest = priorityQueue.firstKey(); + + if(currentTime - lowest > lifeTime || priorityQueue.size() > targetSize) { + return lowest; + } + + return 0; + + } + + /* + * Tries to persist this object. Can fail if the object cannot be persisted at this time. + * + */ + boolean persist(Object object_) { + + MapValue object = (MapValue)object_; + + if(VERIFY) object.verifyAccess(); + + if(object.isDirty()) { + + // It is possible that this just became unpersistable. Fail here in this case. + if(!object.canBePersisted()) { + return false; + } + + assert(object.isResident()); + + Path f = writeDir.resolve(object.getFileName()); + + WriteRunnable runnable = new WriteRunnable(f, object); + + synchronized(pending) { + WriteRunnable existing = pending.put(object.getKey().toString(), runnable); + assert(existing == null); + } + + writers.execute(runnable); + + object.setResident(false); + object.setDirty(false); + + return true; + + } else if(object.isResident()) { + + object.release(); + object.setResident(false); + return false; + + } + + return false; + + } + + int makeResident(Object object_, boolean keepResident) { + + MapValue object = (MapValue)object_; + + if(VERIFY) object.verifyAccess(); + + try { + + object.setForceResident(keepResident); + + if(object.isResident()) { + refresh(object, true); + return 0; + } + + waitPending(object, true); + + byte[] data = object.readFile(); + + object.fromFile(data); + object.setResident(true); + + acquireMutex(); + try { + refresh(object, false); + swap(swapTime, swapSize, object.getKey()); + } catch (Throwable t) { + throw new IllegalStateException(t); + } finally { + releaseMutex(); + } + + return data.length; + + } catch (IOException e) { + + e.printStackTrace(); + + } + + return 0; + + } + + static int readCounter = 0; + static int writeCounter = 0; + + ScheduledThreadPoolExecutor writers; + + void waitPending(MapValue value, boolean hasMutex) { + + WriteRunnable r = null; + boolean inProgress = false; + synchronized(pending) { + r = pending.get(value.getKey().toString()); + if(r != null) { + synchronized(r) { + if(r.committed) { + // just being written - just need to wait + inProgress = true; + } else { + r.committed = true; + // we do the writing + } + } + } + } + if(r != null) { + if(inProgress) { +// System.err.println("reader waits for WriteRunnable to finish"); + try { + r.s.acquire(); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } else { +// System.err.println("reader took WriteRunnable"); + try { + r.runReally(hasMutex); + } catch 
(Throwable e) { + e.printStackTrace(); + Logger.defaultLogError(e); + } + } + } + + public class WriteRunnable implements Runnable { + + Path bytes; + MapValue impl; + boolean committed = false; + private Semaphore s = new Semaphore(0); + + WriteRunnable(Path bytes, MapValue impl) { + this.bytes = bytes; + this.impl = impl; + } + + @Override + public void run() { + synchronized(impl) { + + synchronized(this) { + + if(committed) return; + + committed = true; + + } + try { + runReally(false); + } catch (Throwable e) { + e.printStackTrace(); + Logger.defaultLogError(e); + } + } + } + + public void runReally(boolean hasMutex) throws IOException { + + if(!hasMutex) + impl.acquireMutex(); + + try { + + // These have been set in method persist + assert(!impl.isResident()); + assert(!impl.isDirty()); + + impl.toFile(bytes); + + synchronized(pending) { + pending.remove(impl.getKey().toString()); + s.release(Integer.MAX_VALUE); + } + } finally { + if(!hasMutex) + impl.releaseMutex(); + } + + } + + } + + public Path getDirectory() { + return writeDir; + } + + /* + * Protected implementation + * + */ + + protected void verifyAccess() { +// assert (mutex.availablePermits() == 0); + if (mutex.availablePermits() != 0) + throw new IllegalStateException("identifier=" + identifier + " mutex has " + mutex.availablePermits() + " available permits, should be 0! Current mutexOwner is " + mutexOwner); + } + + /* + * Private implementation + * + */ + + +} diff --git a/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/LRUObject.java b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/LRUObject.java new file mode 100644 index 000000000..1079bf5f0 --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/acorn/lru/LRUObject.java @@ -0,0 +1,236 @@ +package org.simantics.acorn.lru; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; + +import org.simantics.acorn.FileIO; +import org.simantics.acorn.Persistable; +import org.simantics.utils.datastructures.Pair; + +public abstract class LRUObject<MapKey, MapValue extends LRUObject<MapKey, MapValue>> implements Persistable { + + public static boolean VERIFY = true; + + // Final stuff + final protected LRU<MapKey, MapValue> LRU; + final private Semaphore mutex = new Semaphore(1); + final private MapKey key; + final private String fileName; + + // Mutable stuff + protected long accessTime = AccessTime.getInstance().getAccessTime(); + private int offset; + private int length; + private boolean resident = true; + private boolean dirty = true; + private boolean forceResident = false; + + // DEBUG +// private boolean isForceResidentSetAfterLastGet = false; + + private Path readDirectory; + + private Thread mutexOwner; + + // for loading + public LRUObject(LRU<MapKey, MapValue> LRU, MapKey key, Path readDirectory, String fileName, int offset, int length, boolean dirty, boolean resident) { + this.LRU = LRU; + this.key = key; + this.fileName = fileName; + this.offset = offset; + this.length = length; + this.readDirectory = readDirectory; + this.dirty = dirty; + this.resident = resident; + } + + // for creating + public LRUObject(LRU<MapKey, MapValue> LRU, MapKey key, Path readDirectory, String fileName, boolean dirty, boolean resident) { + this(LRU, key, readDirectory, fileName, -1, -1, dirty, resident); + } + + /* + * Public interface + */ + public MapKey getKey() { + // This can be called without mutex + return key; + } + + public void acquireMutex() { + + try { + + while(!mutex.tryAcquire(3, TimeUnit.SECONDS)) { + System.err.println("Mutex is taking a long time to acquire - 
owner is " + mutexOwner); + } + + if(VERIFY) + mutexOwner = Thread.currentThread(); + + } catch (InterruptedException e) { + throw new IllegalStateException(e); + } + } + + public boolean tryAcquireMutex() { + return mutex.tryAcquire(); + } + + public void releaseMutex() { + mutex.release(); + } + + @Override + public void toFile(Path bytes) throws IOException { + if(VERIFY) verifyAccess(); + Pair pair = toBytes(); + byte[] data = pair.first; + int length = pair.second; + FileIO fio = FileIO.get(bytes); + int offset = fio.saveBytes(data, length, overwrite()); + setPosition(offset, length); + } + + public int makeResident() { + if(VERIFY) verifyAccess(); + return LRU.makeResident(this, false); + } + + public int makeResident(boolean keepResident) { + if(VERIFY) verifyAccess(); + return LRU.makeResident(this, true); + } + + /* + * Package implementation details + */ + + abstract void release(); + abstract String getExtension(); + + String getStateKey() { + String result = getKey().toString() + "#" + getDirectory().getFileName() + "#" + getOffset() + "#" + getLength(); + if(offset == -1) + throw new IllegalStateException(result); + return result; + } + + long getLastAccessTime() { + if(VERIFY) verifyAccess(); + return accessTime; + } + + void accessed() { + if(VERIFY) verifyAccess(); + accessTime = AccessTime.getInstance().getAccessTime(); + } + + boolean persist() { + if(VERIFY) verifyAccess(); + if(LRU.persist(this)) { + readDirectory = LRU.getDirectory(); + return true; + } else { + return false; + } + } + + void setForceResident(boolean value) { + if(VERIFY) verifyAccess(); + forceResident = value; +// isForceResidentSetAfterLastGet = true; + } + + boolean canBePersisted() { + if(VERIFY) verifyAccess(); +// isForceResidentSetAfterLastGet = false; + return !forceResident; + } + + boolean isDirty() { + if(VERIFY) verifyAccess(); + return dirty; + } + + boolean isResident() { + if(VERIFY) verifyAccess(); + return resident; + } + + String getFileName() { + if(VERIFY) verifyAccess(); + return fileName; + } + + void setResident(boolean value) { + if(VERIFY) verifyAccess(); + resident = value; + } + + void setDirty(boolean value) { + if(VERIFY) verifyAccess(); + dirty = value; + } + + byte[] readFile() throws IOException { + if(VERIFY) verifyAccess(); + Path dir = getDirectory(); + Path f = dir.resolve(getFileName()); + FileIO fio = FileIO.get(f); + return fio.readBytes(getOffset(), getLength()); + } + + /* + * Protected implementation details + */ + + abstract protected boolean overwrite(); + + abstract protected Pair toBytes(); + + protected void setDirty() { + if(VERIFY) verifyAccess(); + dirty = true; + } + + protected void verifyAccess() { + assert(mutex.availablePermits() == 0); + } + + protected synchronized void cancelForceResident() { + setForceResident(false); + } + + /* + * Private implementation details + */ + + private int getOffset() { + if(VERIFY) verifyAccess(); + return offset; + } + + private int getLength() { + if(VERIFY) verifyAccess(); + return length; + } + + private void setPosition(int offset, int length) { + if(VERIFY) verifyAccess(); + if(offset == -1) + throw new IllegalStateException(); + this.offset = offset; + this.length = length; + if(overwrite() && offset > 0) + throw new IllegalStateException(); + } + + private Path getDirectory() { + if(VERIFY) verifyAccess(); + return readDirectory; + } + +} \ No newline at end of file diff --git a/bundles/org.simantics.acorn/src/org/simantics/db/javacore/HeadState.java 
diff --git a/bundles/org.simantics.acorn/src/org/simantics/db/javacore/HeadState.java b/bundles/org.simantics.acorn/src/org/simantics/db/javacore/HeadState.java new file mode 100644 index 000000000..5a96be2df --- /dev/null +++ b/bundles/org.simantics.acorn/src/org/simantics/db/javacore/HeadState.java @@ -0,0 +1,73 @@ +package org.simantics.db.javacore; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.Serializable; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.ArrayList; +import java.util.Arrays; + +import org.simantics.acorn.InvalidHeadStateException; + +public class HeadState implements Serializable { + + private static final long serialVersionUID = -4135031566499790077L; + + public int headChangeSetId = 0; + public long transactionId = 1; + public long reservedIds = 3; + + public ArrayList<String> clusters = new ArrayList<>(); + public ArrayList<String> files = new ArrayList<>(); + public ArrayList<String> stream = new ArrayList<>(); + public ArrayList<String> cs = new ArrayList<>(); +// public ArrayList ccs = new ArrayList(); + + public static HeadState load(Path directory) throws InvalidHeadStateException { + Path f = directory.resolve("head.state"); + try { + byte[] bytes = Files.readAllBytes(f); + MessageDigest sha1 = MessageDigest.getInstance("SHA-1"); + int digestLength = sha1.getDigestLength(); + sha1.update(bytes, digestLength, bytes.length - digestLength); + byte[] newChecksum = sha1.digest(); + if (!Arrays.equals(newChecksum, Arrays.copyOfRange(bytes, 0, digestLength))) { + throw new InvalidHeadStateException( + "Checksum " + Arrays.toString(newChecksum) + " does not match expected " + + Arrays.toString(Arrays.copyOfRange(bytes, 0, digestLength)) + " for " + f.toAbsolutePath()); + } + try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bytes, digestLength, bytes.length - digestLength))) { + HeadState state = (HeadState) ois.readObject(); + return state; + } + } catch (IOException i) { + return new HeadState(); + } catch (ClassNotFoundException c) { +// throw new Error("HeadState class not found", c); + return new HeadState(); + } catch (NoSuchAlgorithmException e) { + throw new Error("SHA-1 Algorithm not found", e); + } + } + + public static void validateHeadStateIntegrity(Path headState) throws InvalidHeadStateException, IOException { + try { + byte[] bytes = Files.readAllBytes(headState); + MessageDigest sha1 = MessageDigest.getInstance("SHA-1"); + int digestLength = sha1.getDigestLength(); + sha1.update(bytes, digestLength, bytes.length - digestLength); + byte[] newChecksum = sha1.digest(); + if (!Arrays.equals(newChecksum, Arrays.copyOfRange(bytes, 0, digestLength))) { + throw new InvalidHeadStateException( + "Checksum " + Arrays.toString(newChecksum) + " does not match expected " + + Arrays.toString(Arrays.copyOfRange(bytes, 0, digestLength)) + " for " + headState.toAbsolutePath()); + } + } catch (NoSuchAlgorithmException e) { + throw new Error("SHA-1 digest not found, should not happen", e); + } + } +} -- 2.43.2