newDB
authorChengsong
Sat, 05 Feb 2022 15:30:01 +0000
changeset 412 48876e1092f1
parent 409 f71df68776bb
child 413 b85f8e28fbd8
newDB
thys2/blexer1.sc
--- a/thys2/blexer1.sc	Fri Feb 04 00:05:12 2022 +0000
+++ b/thys2/blexer1.sc	Sat Feb 05 15:30:01 2022 +0000
@@ -384,7 +384,7 @@
       case AALTS(bs1, rs) => {
             val rs_simp = rs.map(strongBsimp(_))
             val flat_res = flats(rs_simp)
-            val dist_res = strongDB(flat_res)//distinctBy(flat_res, erase)
+            val dist_res = distinctBy2(flat_res)//distinctBy(flat_res, erase)
             dist_res match {
               case Nil => AZERO
               case s :: Nil => fuse(bs1, s)
@@ -417,6 +417,33 @@
     }
   } 
 
+  def projectFirstChild(rs: List[ARexp]) : List[ARexp] = rs match { // collect the left child of each ASEQ in rs
+    case (ASEQ(bs, r1, r2p)::rs1) => r1::projectFirstChild(rs1)
+    case Nil => Nil // NOTE(review): match is not exhaustive — a non-ASEQ head throws MatchError; callers must pass ASEQ-only lists
+  }
+
+
+  def distinctBy2(xs: List[ARexp], acc: List[Rexp] = Nil): List[ARexp] = xs match { // dedup by erased form, additionally pruning inside ASEQ-of-AALTS heads
+    case Nil => Nil
+    case (x::xs) => {
+      val res = erase(x) // erased (bit-free) regex is the dedup key
+      if(acc.contains(res))
+        distinctBy2(xs, acc) // whole term already seen — drop it
+      else
+        x match {
+          case ASEQ(bs0, AALTS(bs1, rs), r2) => // prune each alternative r1 separately, paired with the common tail r2
+            val newTerms =  distinctBy2(rs.map(r1 => ASEQ(Nil, r1, r2)), acc)
+            val rsPrime = projectFirstChild(newTerms) // left children of the surviving ASEQs
+            newTerms match {
+              case Nil => distinctBy2(xs, acc) // every alternative was a duplicate — drop the whole term
+              case t::Nil => ASEQ(bs0, fuse(bs1, rsPrime.head), r2)::distinctBy2(xs, erase(t)::acc) // single survivor: collapse the AALTS, fusing its bits
+              case ts => ASEQ(bs0, AALTS(bs1, rsPrime), r2)::distinctBy2(xs, newTerms.map(erase(_)):::acc) // keep pruned AALTS; record all survivors as seen
+            }
+          case x => x::distinctBy2(xs, res::acc) // any other shape: keep and mark as seen
+        }
+    }
+  } 
+
   def prettyRexp(r: Rexp) : String = r match {
       case STAR(r0) => s"${prettyRexp(r0)}*"
       case SEQ(CHAR(c), r2) => c.toString ++ prettyRexp(r2)
@@ -481,6 +508,12 @@
   
   def bdersSimp(s: String, r: Rexp) : ARexp = bders_simp(s.toList, internalise(r))
 
+  def bders_simpS(s: List[Char], r: ARexp) : ARexp = s match { // bitcoded derivatives over s, applying strongBsimp after each step
+    case Nil => r 
+    case c::s => bders_simpS(s, strongBsimp(bder(c, r)))
+  }
+
+  def bdersSimpS(s: String, r: Rexp) : ARexp = bders_simpS(s.toList, internalise(r)) // String front-end: internalise, then derive with strong simplification
 
   def erase(r:ARexp): Rexp = r match{
     case AZERO => ZERO
@@ -510,7 +543,25 @@
     }
   } 
 
+//(r1+r2)r + (r2'+r3)r' ~> (r1+r2)r + (r3)r' (erase r = erase r') (erase r2 = erase r2')
+//s = s1@s2 \in L R  s1 \in L r2 s2 \in L r
+//s = s3@s4  s3 \in L r1 s4 \in L r |s3| < |s1| \nexists s3' s4' s.t. s3'@s4' = s and s3' \in L r1 s4' \in L r
+//(r1+r4)r ~> r1r +_usedToBeSeq r4r 
+// | ss7:  "erase a01 = erase a02 ∧ (distinctBy as2 erase (set (map erase as1)) = as2p)  ⟹ 
+//         (rsa@[ASEQ bs (AALTs bs1 as1) (ASTAR bs01 a01)]@rsb@[ASEQ bs (AALTs bs2 as2) (ASTAR bs02 a02)]@rsc)  s↝
+//         (rsa@[ASEQ bs (AALTs bs1 as1) (ASTAR bs01 a01)]@rsb@[ASEQ bs (AALTs bs2 as2p) (ASTAR bs02 a02)]@rsc)"
+// | ss8:  "erase a01 = erase a02 ∧ (distinctBy [a2] erase (set (map erase as1)) = [])  ⟹ 
+//         (rsa@[ASEQ bs (AALTs bs1 as1) (ASTAR bs01 a01)]@rsb@[ASEQ bs a2 (ASTAR bs02 a02)]@rsc) s↝                                                
+//         (rsa@[ASEQ bs (AALTs bs1 as1) (ASTAR bs01 a01)]@rsb@rsc)"
+// | ss9:  "erase a01 = erase a02 ∧ (distinctBy as2 erase {erase a1} = as2p)  ⟹ 
+//         (rsa@[ASEQ bs a1 (ASTAR bs01 a01)]@rsb@[ASEQ bs (AALTs bs2 as2) (ASTAR bs02 a02)]@rsc) s↝ 
+//         (rsa@[ASEQ bs a1 (ASTAR bs01 a01)]@rsb@[ASEQ bs (AALTs bs2 as2p) (ASTAR bs02 a02)]@rsc)"
 
+
+//# of top-level terms
+//      r1r2 -> r11r2 + r12 -> r21r2 + r22 + r12' -> 4 terms -> 5 terms -> 6..........
+// if string long enough --> #| r1r2 \s s | > unbounded? No ->  #| r1r2 \s s | <  #| pders UNIV r1| + #|pders UNIV r2| <= awidth r1r2
+//r* -> r1r* -> r21r* + r22r* -> 4 terms -> 8 terms..............
   def strongDB(xs: List[ARexp], 
                        acc1: List[Rexp] = Nil, 
                        acc2 : List[(List[Rexp], Rexp)] = Nil): List[ARexp] = xs match {
@@ -538,7 +589,7 @@
                             case newHead::Nil =>
                                 ASTAR(bs0, r0) :: 
                                 strongDB(xs, erase(x)::acc1, 
-                                acc2.updated(i, (oldHeadsUpdated, headListAlready._2)) )//TODO: acc2 already contains headListAlready
+                                acc2.updated(i, (oldHeadsUpdated, headListAlready._2)) )
                             case Nil =>
                                 strongDB(xs, erase(x)::acc1, 
                                 acc2)
@@ -579,6 +630,17 @@
     
 }
 
+def shortRexpOutput(r: Rexp) : String = r match { // compact pretty-printer: ~ for SEQ, + for ALTS, []* for STAR
+    case CHAR(c) => c.toString
+    case ONE => "1"
+    case ZERO => "0"
+    case SEQ(r1, r2) => "[" ++ shortRexpOutput(r1) ++ "~" ++ shortRexpOutput(r2) ++ "]"
+    case ALTS(r1, r2) => "(" ++ shortRexpOutput(r1) ++ "+" ++ shortRexpOutput(r2) ++ ")"
+    case STAR(r) => "[" ++ shortRexpOutput(r) ++ "]*"
+    //case STAR(r) => "STAR(" ++ shortRexpOutput(r) ++ ")"
+    //case RTOP => "RTOP"
+  }
+
 
 def blex_simp(r: ARexp, s: List[Char]) : Bits = s match {
     case Nil => {
@@ -608,28 +670,121 @@
 
   def asize(a: ARexp) = size(erase(a))
 
+//pder related: Antimirov-style linear forms and partial derivatives
+type Mon = (Char, Rexp) // a monomial: head character paired with its continuation regex
+type Lin = Set[Mon] // a linear form: set of monomials
+
+def dot_prod(rs: Set[Rexp], r: Rexp): Set[Rexp] = r match { // append r to every regex in rs, simplifying the ZERO/ONE cases
+    case ZERO => Set()
+    case ONE => rs
+    case r => rs.map((re) => if (re == ONE) r else SEQ(re, r)  )   
+  }
+  def cir_prod(l: Lin, t: Rexp): Lin = t match {// NB: this Lin differs from Antimirov's paper — here it is simply the type of a set of linear forms, not the zero-free subsets
+    case ZERO => Set()
+    case ONE => l
+    case t => l.map( m => m._2 match {case ZERO => m case ONE => (m._1, t) case p => (m._1, SEQ(p, t)) }  ) // append t to each monomial's continuation
+  }
+  def lf(r: Rexp): Lin = r match { // linear form of r: set of (head char, continuation) pairs
+    case ZERO => Set()
+    case ONE => Set()
+    case CHAR(f) => {
+      //val Some(c) = alphabet.find(f) 
+      Set((f, ONE))
+    }
+    case ALTS(r1, r2) => {
+      lf(r1 ) ++ lf(r2)
+    }
+    case STAR(r1) => cir_prod(lf(r1), STAR(r1)) //may try infix notation later......
+    case SEQ(r1, r2) =>{
+      if (nullable(r1))
+        cir_prod(lf(r1), r2) ++ lf(r2)
+      else
+        cir_prod(lf(r1), r2) 
+    }
+  }
+  def lfs(r: Set[Rexp]): Lin = { // union of the linear forms of all regexes in the set
+    r.foldLeft(Set[Mon]())((acc, r) => acc ++ lf(r))
+  }
+
+  def pder(x: Char, t: Rexp): Set[Rexp] = { // partial derivative via linear forms: continuations of monomials headed by x
+    val lft = lf(t)
+    (lft.filter(mon => if(mon._1 == x) true else false)).map(mon => mon._2)
+  }
+  def pders_single(s: List[Char], t: Rexp) : Set[Rexp] = s match { // iterated pder of a single regex over string s
+    case x::xs => pders(xs, pder(x, t))
+    case Nil => Set(t)
+  }
+  def pders(s: List[Char], ts: Set[Rexp]) : Set[Rexp] = s match { // iterated pder of a set of regexes over string s
+    case x::xs => pders(xs, ts.foldLeft(Set[Rexp]())((acc, t) => acc ++ pder(x, t)))
+    case Nil => ts 
+  }
+  def pderss(ss: List[List[Char]], t: Rexp): Set[Rexp] = ss.foldLeft( Set[Rexp]() )( (acc, s) => pders_single(s, t) ++ acc ) // union of pders over a list of strings
+  def pdera(t: Rexp): Set[Rexp] = lf(t).map(mon => mon._2) // one-step pders w.r.t. every character at once
+  //all implementations of partial derivatives that involve set union are potentially buggy
+  //because they don't include the original regular term before it is pdered.
+  //currently only pderas is fixed (it keeps "++ t").
+  def pderas(t: Set[Rexp], d: Int): Set[Rexp] = if(d > 0) pderas(lfs(t).map(mon => mon._2), d - 1) ++ t else lfs(t).map(mon => mon._2) ++ t//repeated application of pderas over the newest set of pders.
+  def pderUNIV(r: Rexp) : Set[Rexp] = pderas(Set(r), awidth(r)) // approximation of pders over all strings, iterated awidth(r) times
+  def awidth(r: Rexp) : Int = r match { // alphabetic width: number of character occurrences in r
+    case CHAR(c) => 1
+    case SEQ(r1, r2) => awidth(r1) + awidth(r2)
+    case ALTS(r1, r2) => awidth(r1) + awidth(r2)
+    case ONE => 0
+    case ZERO => 0
+    case STAR(r) => awidth(r)
+  }
+  //def sigma_lf(l: Set[Mon]) : Rexp = ALTS(l.map(mon => SEQ(CHAR(mon._1),mon._2)).toList)
+  //def sigma(rs: Set[Rexp]) : Rexp = ALTS(rs.toList)
+  def o(r: Rexp) = if (nullable(r)) ONE else ZERO // empty-word function: ONE iff r is nullable
+  //def nlf(t: Rexp) : Rexp = ALTS(List( o(t), sigma_lf(lf(t)) ))
+  def pdp(x: Char, r: Rexp) : Set[Rexp] = r match { // direct (structural) partial derivative of r w.r.t. character x
+    case ZERO => Set[Rexp]()
+    case ONE => Set[Rexp]()
+    case CHAR(f) => if(x == f) Set(ONE) else Set[Rexp]()
+    case ALTS(r1, r2) => pdp(x, r1) ++ pdp(x, r2)
+    case STAR(r1) => pdp(x, r1).map(a => SEQ(a, STAR(r1))) // fixed: was pdp(x, r), which recursed on the STAR scrutinee itself and never terminated
+    case SEQ(a0, b) => if(nullable(a0)) pdp(x, a0).map(a => SEQ(a, b)) ++ pdp(x, b) else pdp(x, a0).map(a => SEQ(a, b)) // nullable head also exposes derivatives of the tail
+  }
+  def pdps(s: List[Char], ts: Set[Rexp]): Set[Rexp] = s match { // iterated pdp of a set of regexes over string s
+    case x::xs => pdps(xs, ts.foldLeft(Set[Rexp]())((acc, t) => acc ++ pdp(x, t))) // fixed: was pder(x, t) — pdps should step with pdp (the direct version), matching its name and pdpss
+    case Nil => ts   
+  }
+  def pdpss(ss: List[List[Char]], t: Rexp): Set[Rexp] = ss.foldLeft( Set[Rexp]())((acc, s) => pdps(s, Set(t)) ++ acc) // union of pdps over a list of strings
+
+
 
 // @arg(doc = "small tests")
-val STARREG = ((STAR("a") | STAR("aa") ).%).%
+val STARREG = (((STAR("a") | (STAR("aa")) | STAR(STAR("aaa") | STAR("aaaa")) | STAR("aaaaa") | (STAR("aaaaaa")) | STAR("aaaaaaa") | STAR("aaaaaaaa")).%).%).%
+//(STAR("a") | ( STAR("aaa")) | STAR("aaaa") | STAR("aaaaa")).%.%.%
 
 @main
 def small() = {
 
-  val prog0 = """aaaaaaaaa"""
-  println(s"test: $prog0")
+
 //   println(lexing_simp(NOTREG, prog0))
 //   val v = lex_simp(NOTREG, prog0.toList)
 //   println(v)
 
 //   val d =  (lex_simp(NOTREG, prog0.toList))
 //   println(d)
+  val pderSTAR = pderUNIV(STARREG)
+  val refSize = pderSTAR.map(size(_)).sum
+  println(refSize)
+  for(i <- 10 to 100){
+    val prog0 = "a" * i
+    println(s"test: $prog0")
+    val bd = bdersSimp(prog0, STARREG)//DB
+    val sbd = bdersSimpS(prog0, STARREG)//strongDB
 
-  val bd = bdersSimp(prog0, STARREG)
-    println(erase(bd))
-    println(asize(bd))
+
+    // println(shortRexpOutput(erase(sbd)))
+    // println(shortRexpOutput(erase(bd)))
+    println("pdersize, original, strongSimp, simp")
+    println(refSize, size(STARREG), asize(sbd), asize(bd))
 
     val vres = blexing_simp( STARREG, prog0)
     println(vres)
+  }
 //   println(vs.length)
 //   println(vs)