forked from datafold/data-diff
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathast_classes.py
1032 lines (766 loc) · 30.2 KB
/
ast_classes.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
from dataclasses import field
from datetime import datetime
from typing import Any, Generator, List, Optional, Sequence, Type, Union, Dict
from runtype import dataclass
from typing_extensions import Self
from ..utils import join_iter, ArithString
from ..abcs import Compilable
from ..abcs.database_types import AbstractTable
from ..abcs.mixins import AbstractMixin_Regex, AbstractMixin_TimeTravel
from ..schema import Schema
from .compiler import Compiler, cv_params, Root, CompileError
from .base import SKIP, DbPath, args_as_tuple, SqeletonError
class QueryBuilderError(SqeletonError):
    "Raised for errors that occur while constructing a query expression"
    pass
class QB_TypeError(QueryBuilderError):
    "Raised when the types of query sub-expressions don't match or can't be inferred"
    pass
class ExprNode(Compilable):
    "Base class for query expression nodes"

    type: Any = None

    def _dfs_values(self):
        # Depth-first walk yielding this node and every ExprNode nested in
        # its attributes. dict(self) is provided by runtype.dataclass.
        yield self
        for attr, children in dict(self).items():
            if attr == "source_table":
                # Skip data-sources, we're only interested in data-parameters
                continue
            if not isinstance(children, (list, tuple)):
                children = [children]
            for child in children:
                if isinstance(child, ExprNode):
                    yield from child._dfs_values()

    def cast_to(self, to):
        "Return an expression that casts this node to the given type"
        return Cast(self, to)
# Query expressions can only interact with objects that are an instance of 'Expr':
# either other expression nodes, or plain Python constants that map to SQL values.
Expr = Union[ExprNode, str, bool, int, float, datetime, ArithString, None]
@dataclass
class Code(ExprNode, Root):
    "Raw SQL code, optionally with named placeholders filled in from `args`"

    code: str
    args: Dict[str, Expr] = None

    def compile(self, c: Compiler) -> str:
        if not self.args:
            return self.code
        compiled_args = {name: c.compile(arg) for name, arg in self.args.items()}
        return self.code.format(**compiled_args)
def _expr_type(e: Expr) -> type:
    "Return the value type of the given expression (node type, or Python type for constants)"
    return e.type if isinstance(e, ExprNode) else type(e)
@dataclass
class Alias(ExprNode):
    "An aliased expression, rendered as '<expr> AS <name>'"

    expr: Expr
    name: str

    def compile(self, c: Compiler) -> str:
        compiled_expr = c.compile(self.expr)
        return f"{compiled_expr} AS {c.quote(self.name)}"

    @property
    def type(self):
        # The alias carries the type of the wrapped expression
        return _expr_type(self.expr)
def _drop_skips(exprs):
    "Return the given expressions as a list, with every SKIP marker removed"
    return [expr for expr in exprs if expr is not SKIP]
def _drop_skips_dict(exprs_dict):
    "Return a copy of the mapping, with every entry whose value is SKIP removed"
    return {name: expr for name, expr in exprs_dict.items() if expr is not SKIP}
class ITable(AbstractTable):
    "Interface for table-like expressions; provides the query-builder methods"

    source_table: Any
    schema: Schema = None

    def select(self, *exprs, distinct=SKIP, optimizer_hints=SKIP, **named_exprs) -> "ITable":
        """Create a new table with the specified fields"""
        exprs = args_as_tuple(exprs)
        exprs = _drop_skips(exprs)
        named_exprs = _drop_skips_dict(named_exprs)
        exprs += _named_exprs_as_aliases(named_exprs)
        resolve_names(self.source_table, exprs)
        return Select.make(self, columns=exprs, distinct=distinct, optimizer_hints=optimizer_hints)

    def where(self, *exprs):
        """Filter the rows, based on the given predicates (joined with AND)"""
        exprs = args_as_tuple(exprs)
        exprs = _drop_skips(exprs)
        if not exprs:
            return self
        resolve_names(self.source_table, exprs)
        return Select.make(self, where_exprs=exprs)

    def order_by(self, *exprs):
        """Order the rows by the given expressions"""
        exprs = _drop_skips(exprs)
        if not exprs:
            return self
        resolve_names(self.source_table, exprs)
        return Select.make(self, order_by_exprs=exprs)

    def limit(self, limit: int):
        """Limit the number of rows returned"""
        if limit is SKIP:
            return self
        return Select.make(self, limit_expr=limit)

    def join(self, target: "ITable"):
        """Join this table with the target table."""
        return Join([self, target])

    def group_by(self, *keys) -> "GroupBy":
        """Group according to the given keys.
        Must be followed by a call to :ref:``GroupBy.agg()``
        """
        keys = _drop_skips(keys)
        resolve_names(self.source_table, keys)
        return GroupBy(self, keys)

    def _get_column(self, name: str):
        if self.schema:
            name = self.schema.get_key(name)  # Get the actual name. Might be case-insensitive.
        return Column(self, name)

    # def __getattr__(self, column):
    #     return self._get_column(column)

    def __getitem__(self, column):
        """Return a Column reference for the given column name"""
        if not isinstance(column, str):
            # Fix: raise with an informative message instead of a bare TypeError()
            raise TypeError(f"Expected a column name (str), got {type(column).__name__}: {column!r}")
        return self._get_column(column)

    def count(self):
        """SELECT count() FROM self"""
        return Select(self, [Count()])

    def union(self, other: "ITable"):
        """SELECT * FROM self UNION other"""
        return TableOp("UNION", self, other)

    def union_all(self, other: "ITable"):
        """SELECT * FROM self UNION ALL other"""
        return TableOp("UNION ALL", self, other)

    def minus(self, other: "ITable"):
        """SELECT * FROM self EXCEPT other"""
        # aka
        return TableOp("EXCEPT", self, other)

    def intersect(self, other: "ITable"):
        """SELECT * FROM self INTERSECT other"""
        return TableOp("INTERSECT", self, other)
@dataclass
class Concat(ExprNode):
    "String concatenation of expressions, with an optional separator"

    exprs: list
    sep: str = None

    def compile(self, c: Compiler) -> str:
        # We coalesce because on some DBs (e.g. MySQL) concat('a', NULL) is NULL
        pieces = []
        for expr in self.exprs:
            as_string = c.dialect.to_string(c.compile(expr))
            pieces.append(f"coalesce({c.compile(Code(as_string))}, '<null>')")
        assert pieces
        if len(pieces) == 1:
            return pieces[0]
        if self.sep:
            pieces = list(join_iter(f"'{self.sep}'", pieces))
        return c.dialect.concat(pieces)
@dataclass
class Count(ExprNode):
    "The count() aggregate, optionally DISTINCT; counts all rows when no expr is given"

    expr: Expr = None
    distinct: bool = False

    type = int

    def compile(self, c: Compiler) -> str:
        if not self.expr:
            inner = "*"
        else:
            inner = c.compile(self.expr)
        return f"count(distinct {inner})" if self.distinct else f"count({inner})"
class LazyOps:
    "Mixin that maps Python operators onto SQL expression nodes"

    def __add__(self, other):
        return BinOp("+", [self, other])

    def __sub__(self, other):
        return BinOp("-", [self, other])

    def __neg__(self):
        return UnaryOp("-", self)

    def __eq__(self, other):
        # Comparing to None uses SQL's 'IS', since '= NULL' is never true
        if other is None:
            return BinBoolOp("IS", [self, None])
        return BinBoolOp("=", [self, other])

    def __gt__(self, other):
        return BinBoolOp(">", [self, other])

    def __ge__(self, other):
        return BinBoolOp(">=", [self, other])

    def __lt__(self, other):
        return BinBoolOp("<", [self, other])

    def __le__(self, other):
        return BinBoolOp("<=", [self, other])

    def __or__(self, other):
        return BinBoolOp("OR", [self, other])

    def __and__(self, other):
        return BinBoolOp("AND", [self, other])

    def is_distinct_from(self, other):
        "NULL-safe inequality test"
        return IsDistinctFrom(self, other)

    def like(self, other):
        return BinBoolOp("LIKE", [self, other])

    def test_regex(self, other):
        return TestRegex(self, other)

    def sum(self):
        return Func("SUM", [self])

    def max(self):
        return Func("MAX", [self])

    def min(self):
        return Func("MIN", [self])
@dataclass
class TestRegex(ExprNode, LazyOps):
    "Test whether `string` matches `pattern`, using the dialect's regex support"

    string: Expr
    pattern: Expr

    def compile(self, c: Compiler) -> str:
        dialect = c.dialect
        if not isinstance(dialect, AbstractMixin_Regex):
            raise NotImplementedError(f"No regex implementation for database '{c.database}'")
        return c.compile(dialect.test_regex(self.string, self.pattern))
@dataclass(eq=False)
class Func(ExprNode, LazyOps):
    "A generic SQL function call, rendered as 'NAME(arg, ...)'"

    name: str
    args: Sequence[Expr]

    def compile(self, c: Compiler) -> str:
        arg_list = ", ".join(c.compile(arg) for arg in self.args)
        return f"{self.name}({arg_list})"
@dataclass
class WhenThen(ExprNode):
    "A single 'WHEN ... THEN ...' branch of a CASE expression"

    when: Expr
    then: Expr

    def compile(self, c: Compiler) -> str:
        compiled_when = c.compile(self.when)
        compiled_then = c.compile(self.then)
        return f"WHEN {compiled_when} THEN {compiled_then}"
@dataclass
class CaseWhen(ExprNode):
    "A SQL CASE expression: a sequence of WHEN..THEN branches, with an optional ELSE"

    cases: Sequence[WhenThen]
    else_expr: Expr = None

    def compile(self, c: Compiler) -> str:
        assert self.cases
        when_thens = " ".join(c.compile(case) for case in self.cases)
        else_expr = (" ELSE " + c.compile(self.else_expr)) if self.else_expr is not None else ""
        return f"CASE {when_thens}{else_expr} END"

    @property
    def type(self):
        """The common type of all branches. Raises QB_TypeError if they differ."""
        then_types = {_expr_type(case.then) for case in self.cases}
        if self.else_expr is not None:
            # Bugfix: `then_types |= _expr_type(...)` tried to update a set with
            # a non-iterable type object (TypeError at runtime); add() inserts
            # the single type. Also test `is not None`, consistent with
            # compile(), so falsy else-expressions (0, "") aren't ignored.
            then_types.add(_expr_type(self.else_expr))
        if len(then_types) > 1:
            raise QB_TypeError(f"Non-matching types in when: {then_types}")
        (t,) = then_types
        return t

    def when(self, *whens: Expr) -> "QB_When":
        """Add a new 'when' clause to the case expression
        Must be followed by a call to `.then()`
        """
        whens = args_as_tuple(whens)
        whens = _drop_skips(whens)
        if not whens:
            raise QueryBuilderError("Expected valid whens")

        # XXX reimplementing api.and_()
        if len(whens) == 1:
            return QB_When(self, whens[0])
        return QB_When(self, BinBoolOp("AND", whens))

    def else_(self, then: Expr) -> Self:
        """Add an 'else' clause to the case expression.
        Can only be called once!
        """
        if self.else_expr is not None:
            raise QueryBuilderError(f"Else clause already specified in {self}")
        return self.replace(else_expr=then)
@dataclass
class QB_When:
    "Partial case-when, used for query-building"

    casewhen: CaseWhen
    when: Expr

    def then(self, then: Expr) -> CaseWhen:
        """Add a 'then' clause after a 'when' was added."""
        new_case = WhenThen(self.when, then)
        return self.casewhen.replace(cases=self.casewhen.cases + [new_case])
@dataclass(eq=False, order=False)
class IsDistinctFrom(ExprNode, LazyOps):
    "NULL-safe inequality between two expressions"

    a: Expr
    b: Expr

    type = bool

    def compile(self, c: Compiler) -> str:
        # NOTE(review): assumes both operands expose a .type (i.e. are ExprNodes) — confirm
        lhs = c.dialect.to_comparable(c.compile(self.a), self.a.type)
        rhs = c.dialect.to_comparable(c.compile(self.b), self.b.type)
        return c.dialect.is_distinct_from(lhs, rhs)
@dataclass(eq=False, order=False)
class BinOp(ExprNode, LazyOps):
    "An infix operator applied between a sequence of arguments"

    op: str
    args: Sequence[Expr]

    def compile(self, c: Compiler) -> str:
        joined = f" {self.op} ".join(c.compile(arg) for arg in self.args)
        return f"({joined})"

    @property
    def type(self):
        # All arguments must share a single type, which becomes the result type
        types = {_expr_type(arg) for arg in self.args}
        if len(types) > 1:
            raise TypeError(f"Expected all args to have the same type, got {types}")
        (t,) = types
        return t
@dataclass
class UnaryOp(ExprNode, LazyOps):
    "A prefix operator applied to a single expression, e.g. negation"

    op: str
    expr: Expr

    def compile(self, c: Compiler) -> str:
        inner = c.compile(self.expr)
        return f"({self.op}{inner})"
class BinBoolOp(BinOp):
    "A binary operator whose result is boolean (comparisons, AND/OR, LIKE, IS)"
    type = bool
@dataclass(eq=False, order=False)
class Column(ExprNode, LazyOps):
    "A reference to a named column of a source table"

    source_table: ITable
    name: str

    @property
    def type(self):
        schema = self.source_table.schema
        if schema is None:
            raise QueryBuilderError(f"Schema required for table {self.source_table}")
        return schema[self.name]

    def compile(self, c: Compiler) -> str:
        quoted = c.quote(self.name)
        if c._table_context and len(c._table_context) > 1:
            # Several tables in scope: qualify the column with its table alias
            matches = [
                t
                for t in c._table_context
                if isinstance(t, TableAlias) and t.source_table is self.source_table
            ]
            if not matches:
                return quoted
            if len(matches) > 1:
                raise CompileError(f"Too many aliases for column {self.name}")
            (alias,) = matches
            return f"{c.quote(alias.name)}.{quoted}"
        return quoted
@dataclass
class TablePath(ExprNode, ITable):
    """A path to a database table, e.g. ('my_schema', 'my_table').

    Also serves as the entry point for DDL/DML statement shorthands
    (create/drop/truncate/insert).
    """

    path: DbPath
    schema: Optional[Schema] = field(default=None, repr=False)

    @property
    def source_table(self) -> Self:
        # A table path is its own data source
        return self

    def compile(self, c: Compiler) -> str:
        path = self.path  # c.database._normalize_table_path(self.name)
        return ".".join(map(c.quote, path))

    # Statement shorthands
    def create(self, source_table: ITable = None, *, if_not_exists: bool = False, primary_keys: List[str] = None):
        """Returns a query expression to create a new table.

        Parameters:
            source_table: a table expression to use for initializing the table.
                          If not provided, the table must have a schema specified.
            if_not_exists: Add a 'if not exists' clause or not. (note: not all dbs support it!)
            primary_keys: List of column names which define the primary key
        """
        if source_table is None and not self.schema:
            raise ValueError("Either schema or source table needed to create table")
        if isinstance(source_table, TablePath):
            # A bare path is implicitly 'SELECT * FROM path'
            source_table = source_table.select()
        return CreateTable(self, source_table, if_not_exists=if_not_exists, primary_keys=primary_keys)

    def drop(self, if_exists=False):
        """Returns a query expression to delete the table.

        Parameters:
            if_exists: Add an 'if exists' clause or not. (note: not all dbs support it!)
        """
        return DropTable(self, if_exists=if_exists)

    def truncate(self):
        """Returns a query expression to truncate the table. (remove all rows)"""
        return TruncateTable(self)

    def insert_rows(self, rows: Sequence, *, columns: List[str] = None):
        """Returns a query expression to insert rows to the table, given as Python values.

        Parameters:
            rows: A list of tuples. Must all have the same width.
            columns: Names of columns being populated. If specified, must have the same length as the tuples.
        """
        # Materialize the rows; accepts any iterable
        rows = list(rows)
        return InsertToTable(self, ConstantTable(rows), columns=columns)

    def insert_row(self, *values, columns: List[str] = None):
        """Returns a query expression to insert a single row to the table, given as Python values.

        Parameters:
            columns: Names of columns being populated. If specified, must have the same length as 'values'
        """
        return InsertToTable(self, ConstantTable([values]), columns=columns)

    def insert_expr(self, expr: Expr):
        """Returns a query expression to insert rows to the table, given as a query expression.

        Parameters:
            expr: query expression to from which to read the rows
        """
        if isinstance(expr, TablePath):
            # A bare path is implicitly 'SELECT * FROM path'
            expr = expr.select()
        return InsertToTable(self, expr)

    def time_travel(
        self, *, before: bool = False, timestamp: datetime = None, offset: int = None, statement: str = None
    ) -> Compilable:
        """Selects historical data from the table

        Parameters:
            before: If false, inclusive of the specified point in time.
                    If True, only return the time before it. (at/before)
            timestamp: A constant timestamp
            offset: the time 'offset' seconds before now
            statement: identifier for statement, e.g. query ID

        Must specify exactly one of `timestamp`, `offset` or `statement`.
        """
        if sum(int(i is not None) for i in (timestamp, offset, statement)) != 1:
            raise ValueError("Must specify exactly one of `timestamp`, `offset` or `statement`.")
        if timestamp is not None:
            assert offset is None and statement is None
            # NOTE(review): the body appears truncated here — no TimeTravel node
            # is returned and the offset/statement branches are missing, so the
            # method currently falls through and returns None. Confirm against
            # the upstream source.
@dataclass
class TableAlias(ExprNode, ITable):
    "A source table given a new name, rendered as '<table> <name>'"

    source_table: ITable
    name: str

    def compile(self, c: Compiler) -> str:
        compiled_source = c.compile(self.source_table)
        return f"{compiled_source} {c.quote(self.name)}"
@dataclass
class Join(ExprNode, ITable, Root):
    "A JOIN between tables, with optional ON predicates and a column selection"

    source_tables: Sequence[ITable]
    op: str = None
    on_exprs: Sequence[Expr] = None
    columns: Sequence[Expr] = None

    @property
    def source_table(self) -> Self:
        # The join itself acts as the data source for chained operations
        return self

    @property
    def schema(self):
        assert self.columns  # TODO Implement SELECT *
        s = self.source_tables[0].schema  # TODO validate types match between both tables
        return type(s)({c.name: c.type for c in self.columns})

    def on(self, *exprs) -> Self:
        """Add an ON clause, for filtering the result of the cartesian product (i.e. the JOIN)"""
        if len(exprs) == 1:
            (single,) = exprs
            if isinstance(single, Generator):
                exprs = tuple(single)

        exprs = _drop_skips(exprs)
        if not exprs:
            return self

        return self.replace(on_exprs=(self.on_exprs or []) + exprs)

    def select(self, *exprs, **named_exprs) -> Union[Self, ITable]:
        """Select fields to return from the JOIN operation

        See Also: ``ITable.select()``
        """
        if self.columns is not None:
            # join-select already applied; wrap in a regular select
            return super().select(*exprs, **named_exprs)

        exprs = _drop_skips(exprs)
        named_exprs = _drop_skips_dict(named_exprs)
        exprs += _named_exprs_as_aliases(named_exprs)
        resolve_names(self.source_table, exprs)
        # TODO Ensure exprs <= self.columns ?
        return self.replace(columns=exprs)

    def compile(self, parent_c: Compiler) -> str:
        # Give every un-aliased source table a fresh unique alias
        aliased = [
            t if isinstance(t, TableAlias) else TableAlias(t, parent_c.new_unique_name())
            for t in self.source_tables
        ]
        c = parent_c.add_table_context(*aliased, in_join=True, in_select=False)

        op = " JOIN " if self.op is None else f" {self.op} JOIN "
        joined = op.join(c.compile(t) for t in aliased)

        if self.on_exprs:
            on = " AND ".join(c.compile(e) for e in self.on_exprs)
            res = f"{joined} ON {on}"
        else:
            res = joined

        columns = "*" if self.columns is None else ", ".join(map(c.compile, self.columns))
        select = f"SELECT {columns} FROM {res}"

        if parent_c.in_select:
            # A subquery inside a SELECT must be given an alias
            select = f"({select}) {c.new_unique_name()}"
        elif parent_c.in_join:
            select = f"({select})"
        return select
@dataclass
class GroupBy(ExprNode, ITable, Root):
    "A GROUP BY clause. Created by ITable.group_by(); must be finalized by .agg()."

    table: ITable
    keys: Sequence[Expr] = None  # IKey?
    values: Sequence[Expr] = None
    having_exprs: Sequence[Expr] = None

    @property
    def source_table(self):
        # The group-by acts as its own data source for chained operations
        return self

    def __post_init__(self):
        # Must group by something, or aggregate something
        assert self.keys or self.values

    def having(self, *exprs) -> Self:
        """Add a 'HAVING' clause to the group-by"""
        exprs = args_as_tuple(exprs)
        exprs = _drop_skips(exprs)
        if not exprs:
            return self
        resolve_names(self.table, exprs)
        return self.replace(having_exprs=(self.having_exprs or []) + exprs)

    def agg(self, *exprs) -> Self:
        """Select aggregated fields for the group-by."""
        exprs = args_as_tuple(exprs)
        exprs = _drop_skips(exprs)
        resolve_names(self.table, exprs)
        return self.replace(values=(self.values or []) + exprs)

    def compile(self, c: Compiler) -> str:
        if self.values is None:
            raise CompileError(".group_by() must be followed by a call to .agg()")

        # Refer to group-by keys by position: "GROUP BY 1, 2, ..."
        # NOTE(review): if only values were given (keys is None), len(self.keys)
        # raises TypeError — confirm whether callers can reach this state.
        keys = [str(i + 1) for i in range(len(self.keys))]
        columns = (self.keys or []) + (self.values or [])
        # If the underlying table is a plain Select with no columns/grouping of
        # its own, fold the group-by into it instead of nesting subqueries.
        if isinstance(self.table, Select) and self.table.columns is None and self.table.group_by_exprs is None:
            return c.compile(
                self.table.replace(
                    columns=columns,
                    group_by_exprs=[Code(k) for k in keys],
                    having_exprs=self.having_exprs,
                )
            )

        keys_str = ", ".join(keys)
        columns_str = ", ".join(c.compile(x) for x in columns)
        having_str = (
            " HAVING " + " AND ".join(map(c.compile, self.having_exprs)) if self.having_exprs is not None else ""
        )
        select = (
            f"SELECT {columns_str} FROM {c.replace(in_select=True).compile(self.table)} GROUP BY {keys_str}{having_str}"
        )

        if c.in_select:
            # A subquery inside a SELECT must be given an alias
            select = f"({select}) {c.new_unique_name()}"
        elif c.in_join:
            select = f"({select})"
        return select
@dataclass
class TableOp(ExprNode, ITable, Root):
    "A set operation between two tables: UNION, UNION ALL, EXCEPT or INTERSECT"

    op: str
    table1: ITable
    table2: ITable

    @property
    def source_table(self):
        # The set operation acts as its own data source
        return self

    @property
    def type(self):
        # TODO ensure types of both tables are compatible
        return self.table1.type

    @property
    def schema(self):
        s1 = self.table1.schema
        s2 = self.table2.schema
        # Both sides must have the same number of columns
        assert len(s1) == len(s2)
        return s1

    def compile(self, parent_c: Compiler) -> str:
        c = parent_c.replace(in_select=False)
        compiled = f"{c.compile(self.table1)} {self.op} {c.compile(self.table2)}"

        if parent_c.in_select:
            # A subquery inside a SELECT must be given an alias
            compiled = f"({compiled}) {c.new_unique_name()}"
        elif parent_c.in_join:
            compiled = f"({compiled})"
        return compiled
@dataclass
class Select(ExprNode, ITable, Root):
    """A SELECT statement, with all its optional clauses.

    Prefer `Select.make()` over direct construction — it flattens nested
    selects where that is safe.
    """

    table: Expr = None
    columns: Sequence[Expr] = None
    where_exprs: Sequence[Expr] = None
    order_by_exprs: Sequence[Expr] = None
    group_by_exprs: Sequence[Expr] = None
    having_exprs: Sequence[Expr] = None
    limit_expr: int = None
    distinct: bool = False
    optimizer_hints: Sequence[Expr] = None

    @property
    def schema(self):
        s = self.table.schema
        if s is None or self.columns is None:
            return s
        return type(s)({c.name: c.type for c in self.columns})

    @property
    def source_table(self):
        # The select acts as its own data source for chained operations
        return self

    def compile(self, parent_c: Compiler) -> str:
        c = parent_c.replace(in_select=True)  # .add_table_context(self.table)

        columns = ", ".join(map(c.compile, self.columns)) if self.columns else "*"
        distinct = "DISTINCT " if self.distinct else ""
        optimizer_hints = c.dialect.optimizer_hints(self.optimizer_hints) if self.optimizer_hints else ""
        select = f"SELECT {optimizer_hints}{distinct}{columns}"

        if self.table:
            select += " FROM " + c.compile(self.table)
        elif c.dialect.PLACEHOLDER_TABLE:
            # Some dialects require a FROM clause even for constant selects
            select += f" FROM {c.dialect.PLACEHOLDER_TABLE}"

        if self.where_exprs:
            select += " WHERE " + " AND ".join(map(c.compile, self.where_exprs))

        if self.group_by_exprs:
            select += " GROUP BY " + ", ".join(map(c.compile, self.group_by_exprs))

        if self.having_exprs:
            assert self.group_by_exprs
            select += " HAVING " + " AND ".join(map(c.compile, self.having_exprs))

        if self.order_by_exprs:
            select += " ORDER BY " + ", ".join(map(c.compile, self.order_by_exprs))

        if self.limit_expr is not None:
            has_order_by = bool(self.order_by_exprs)
            select += " " + c.dialect.offset_limit(0, self.limit_expr, has_order_by=has_order_by)

        if parent_c.in_select:
            # A subquery inside a SELECT must be given an alias
            select = f"({select}) {c.new_unique_name()}"
        elif parent_c.in_join:
            select = f"({select})"
        return select

    @classmethod
    def make(cls, table: ITable, distinct: bool = SKIP, optimizer_hints: str = SKIP, **kwargs):
        """Create a Select over `table`, merging into it when `table` is itself
        a compatible Select (avoids needless subquery nesting)."""
        assert "table" not in kwargs

        if not isinstance(table, cls):  # If not Select
            if distinct is not SKIP:
                kwargs["distinct"] = distinct
            if optimizer_hints is not SKIP:
                kwargs["optimizer_hints"] = optimizer_hints
            return cls(table, **kwargs)

        # We can safely assume isinstance(table, Select)
        if optimizer_hints is not SKIP:
            kwargs["optimizer_hints"] = optimizer_hints
        if distinct is not SKIP:
            # Turning DISTINCT off cannot be merged into an already-distinct
            # select — nest instead. (idiom fix: `is False`, not `== False`)
            if distinct is False and table.distinct:
                return cls(table, **kwargs)
            kwargs["distinct"] = distinct
        if table.limit_expr or table.group_by_exprs:
            # Merging into a limited or grouped select would change its meaning
            return cls(table, **kwargs)

        # Fill in missing attributes, instead of nesting instances
        for k, v in kwargs.items():
            if getattr(table, k) is not None:
                if k == "where_exprs":  # Additive attribute
                    kwargs[k] = getattr(table, k) + v
                elif k in ["distinct", "optimizer_hints"]:
                    pass
                else:
                    raise ValueError(k)

        return table.replace(**kwargs)
@dataclass
class Cte(ExprNode, ITable):
    "A common table expression (WITH ...), registered on the compiler as a subquery"

    source_table: Expr
    name: str = None
    params: Sequence[str] = None

    def compile(self, parent_c: Compiler) -> str:
        c = parent_c.replace(_table_context=[], in_select=False)
        compiled_body = c.compile(self.source_table)

        name = self.name or parent_c.new_unique_name()
        name_params = name if not self.params else f"{name}({', '.join(self.params)})"
        # Register the CTE body; the compiler emits it in the WITH clause
        parent_c._subqueries[name_params] = compiled_body

        return name

    @property
    def schema(self):
        # TODO add cte to schema
        return self.source_table.schema
def _named_exprs_as_aliases(named_exprs):
    "Convert a {name: expr} mapping into a list of Alias nodes"
    return [Alias(expr, name) for name, expr in named_exprs.items()]
def resolve_names(source_table, exprs):
    """Bind every unresolved `this.<name>` placeholder (_ResolveColumn) found
    in `exprs` to the corresponding column of `source_table`.

    Mutates the _ResolveColumn instances in-place; returns None.
    """
    # Cleanup: removed an unused counter variable (`i`) that was incremented
    # but never read.
    for expr in exprs:
        # Iterate recursively and update _ResolveColumn instances with the right expression
        if isinstance(expr, ExprNode):
            for v in expr._dfs_values():
                if isinstance(v, _ResolveColumn):
                    v.resolve(source_table._get_column(v.resolve_name))
@dataclass(frozen=False, eq=False, order=False)
class _ResolveColumn(ExprNode, LazyOps):
    "A lazy column placeholder (created via `this.<name>`), bound later by resolve_names()"

    resolve_name: str
    resolved: Expr = None

    def resolve(self, expr: Expr):
        "Bind this placeholder to a concrete expression. May only be called once."
        if self.resolved is not None:
            raise QueryBuilderError("Already resolved!")
        self.resolved = expr

    def _get_resolved(self) -> Expr:
        if self.resolved is None:
            raise QueryBuilderError(f"Column not resolved: {self.resolve_name}")
        return self.resolved

    def compile(self, c: Compiler) -> str:
        return self._get_resolved().compile(c)

    @property
    def type(self):
        return self._get_resolved().type

    @property
    def name(self):
        return self._get_resolved().name
class This:
    """Builder object for accessing table attributes.

    Automatically evaluates to the 'top-most' table during compilation.
    """

    def __getattr__(self, name):
        # this.foo -> unresolved column 'foo'
        return _ResolveColumn(name)

    def __getitem__(self, name):
        # this['foo', 'bar'] -> list of unresolved columns
        if isinstance(name, (list, tuple)):
            return [_ResolveColumn(n) for n in name]
        return _ResolveColumn(name)
@dataclass
class In(ExprNode):
    "Membership test, rendered as '(<expr> IN (<elems>))'"

    expr: Expr
    list: Sequence[Expr]

    type = bool

    def compile(self, c: Compiler):
        compiled_elems = ", ".join(map(c.compile, self.list))
        return f"({c.compile(self.expr)} IN ({compiled_elems}))"
@dataclass
class Cast(ExprNode):
    "A SQL type cast, rendered as 'cast(<expr> as <type>)'"

    expr: Expr
    target_type: Expr

    def compile(self, c: Compiler) -> str:
        compiled_expr = c.compile(self.expr)
        compiled_type = c.compile(self.target_type)
        return f"cast({compiled_expr} as {compiled_type})"
@dataclass
class Random(ExprNode, LazyOps):
    "A random float, as produced by the dialect's random() function"

    type = float

    def compile(self, c: Compiler) -> str:
        return c.dialect.random()
@dataclass
class ConstantTable(ExprNode):
    "A table of constant Python values, used as the source of an INSERT"

    rows: Sequence[Sequence]

    def compile(self, c: Compiler) -> str:
        # Only meaningful in an INSERT context; see compile_for_insert()
        raise NotImplementedError()

    def compile_for_insert(self, c: Compiler):
        return c.dialect.constant_values(self.rows)
@dataclass
class Explain(ExprNode, Root):
    "An EXPLAIN of the given select, returning the plan as text"

    select: Select

    type = str

    def compile(self, c: Compiler) -> str:
        compiled_select = c.compile(self.select)
        return c.dialect.explain_as_text(compiled_select)
class CurrentTimestamp(ExprNode):
    "The database's current timestamp, via the dialect's current_timestamp()"

    type = datetime

    def compile(self, c: Compiler) -> str:
        return c.dialect.current_timestamp()
@dataclass
class TimeTravel(ITable):
    """Selects historical data from `table`, at a point given by exactly one of
    `timestamp`, `offset` or `statement` (see TablePath.time_travel())."""

    table: TablePath
    before: bool = False
    timestamp: datetime = None
    offset: int = None
    statement: str = None

    def compile(self, c: Compiler) -> str:
        # Consistency fix: time-travel is implemented by the dialect, not the
        # Compiler itself (cf. TestRegex, which tests c.dialect against
        # AbstractMixin_Regex). The original asserted against `c` and called
        # `c.time_travel`, which the Compiler does not provide.
        assert isinstance(c.dialect, AbstractMixin_TimeTravel)
        return c.compile(
            c.dialect.time_travel(
                self.table, before=self.before, timestamp=self.timestamp, offset=self.offset, statement=self.statement
            )
        )
# DDL
class Statement(Compilable, Root):
    "Base class for DDL/DML statements (CREATE, DROP, TRUNCATE, INSERT); yields no value"
    type = None
@dataclass
class CreateTable(Statement):
    "A CREATE TABLE statement, from a source query or from the path's schema"

    path: TablePath
    source_table: Expr = None
    if_not_exists: bool = False
    primary_keys: List[str] = None

    def compile(self, c: Compiler) -> str:
        ne = "IF NOT EXISTS " if self.if_not_exists else ""
        if self.source_table:
            # CREATE TABLE ... AS SELECT ...
            return f"CREATE TABLE {ne}{c.compile(self.path)} AS {c.compile(self.source_table)}"

        # Build the column definitions from the table's schema
        schema = ", ".join(f"{c.dialect.quote(k)} {c.dialect.type_repr(v)}" for k, v in self.path.schema.items())
        if self.primary_keys and c.dialect.SUPPORTS_PRIMARY_KEY:
            pks = ", PRIMARY KEY (%s)" % ", ".join(self.primary_keys)
        else:
            pks = ""
        return f"CREATE TABLE {ne}{c.compile(self.path)}({schema}{pks})"
@dataclass
class DropTable(Statement):
    "A DROP TABLE statement, optionally with IF EXISTS"

    path: TablePath
    if_exists: bool = False

    def compile(self, c: Compiler) -> str:
        prefix = "IF EXISTS " if self.if_exists else ""
        return f"DROP TABLE {prefix}{c.compile(self.path)}"
@dataclass
class TruncateTable(Statement):
    "A TRUNCATE TABLE statement (removes all rows)"

    path: TablePath

    def compile(self, c: Compiler) -> str:
        compiled_path = c.compile(self.path)
        return f"TRUNCATE TABLE {compiled_path}"
@dataclass
class InsertToTable(Statement):
path: TablePath
expr: Expr
columns: List[str] = None
returning_exprs: List[str] = None
def compile(self, c: Compiler) -> str:
    "Render the INSERT statement; constant rows use a dedicated VALUES-style path"
    if isinstance(self.expr, ConstantTable):
        source = self.expr.compile_for_insert(c)
    else:
        source = c.compile(self.expr)

    if self.columns is not None:
        columns = "(%s)" % ", ".join(map(c.quote, self.columns))
    else:
        columns = ""

    return f"INSERT INTO {c.compile(self.path)}{columns} {source}"
def returning(self, *exprs) -> Self:
"""Add a 'RETURNING' clause to the insert expression.
Note: Not all databases support this feature!
"""
if self.returning_exprs: