Dataset class for Sequel::DataObjects::Database objects.
| columns | -> | columns_without_introspection |
Enable column introspection for every dataset.
# File lib/sequel/extensions/columns_introspection.rb, line 84
84: def self.introspect_all_columns
85: Sequel::Deprecation.deprecate('Sequel::Dataset.introspect_all_columns', "Please use Database.extension :columns_introspection to load the extension into all databases")
86: include ColumnsIntrospection
87: remove_method(:columns) if instance_methods(false).map{|x| x.to_s}.include?('columns')
88: end
Yields a paginated dataset for each page and returns the receiver. Does a count to find the total number of records for this dataset.
# File lib/sequel/extensions/pagination.rb, line 36
36: def each_page(page_size)
37: Sequel::Deprecation.deprecate('Loading the pagination extension globally', "Please use Database/Dataset#extension to load the extension into this dataset") unless is_a?(DatasetPagination)
38: raise(Error, "You cannot paginate a dataset that already has a limit") if @opts[:limit]
39: record_count = count
40: total_pages = (record_count / page_size.to_f).ceil
41: (1..total_pages).each{|page_no| yield paginate(page_no, page_size, record_count)}
42: self
43: end
Return a cloned nullified dataset.
# File lib/sequel/extensions/null_dataset.rb, line 90
90: def nullify
91: clone.nullify!
92: end
Nullify the current dataset
# File lib/sequel/extensions/null_dataset.rb, line 95
95: def nullify!
96: Sequel::Deprecation.deprecate('Loading the null_dataset extension globally', "Please use Database/Dataset#extension to load the extension into this dataset") unless is_a?(Nullifiable)
97: extend NullDataset
98: end
Returns a paginated dataset. The returned dataset is limited to the page size at the correct offset, and extended with the Pagination module. If a record count is not provided, does a count of total number of records for this dataset.
# File lib/sequel/extensions/pagination.rb, line 26
26: def paginate(page_no, page_size, record_count=nil)
27: Sequel::Deprecation.deprecate('Loading the pagination extension globally', "Please use Database/Dataset#extension to load the extension into this dataset") unless is_a?(DatasetPagination)
28: raise(Error, "You cannot paginate a dataset that already has a limit") if @opts[:limit]
29: paginated = limit(page_size, (page_no - 1) * page_size)
30: paginated.extend(Pagination)
31: paginated.set_pagination_info(page_no, page_size, record_count || count)
32: end
Pretty prints the records in the dataset as plain-text table.
# File lib/sequel/extensions/pretty_table.rb, line 30
30: def print(*cols)
31: Sequel::Deprecation.deprecate('Loading the pretty_table extension globally', "Please use Database/Dataset#extension to load the extension into this dataset") unless is_a?(DatasetPrinter)
32: ds = naked
33: rows = ds.all
34: Sequel::PrettyTable.print(rows, cols.empty? ? ds.columns : cols)
35: end
Translates a query block into a dataset. Query blocks are an alternative to Sequel's usual method chaining, by using instance_eval with a proxy object:
dataset = DB[:items].query do
select :x, :y, :z
filter{(x > 1) & (y > 2)}
reverse :z
end
Which is the same as:
dataset = DB[:items].select(:x, :y, :z).filter{(x > 1) & (y > 2)}.reverse(:z)
# File lib/sequel/extensions/query.rb, line 47
47: def query(&block)
48: Sequel::Deprecation.deprecate('Loading the query extension globally', "Please use Database/Dataset#extension to load the extension into this dataset") unless is_a?(DatasetQuery)
49: query = Query.new(self)
50: query.instance_eval(&block)
51: query.dataset
52: end
Remove columns from the list of selected columns. If any of the currently selected columns use expressions/aliases, this will remove selected columns with the given aliases. It will also remove entries from the selection that match exactly:
# Assume columns a, b, and c in items table DB[:items] # SELECT * FROM items DB[:items].select_remove(:c) # SELECT a, b FROM items DB[:items].select(:a, :b___c, :c___b).select_remove(:c) # SELECT a, c AS b FROM items DB[:items].select(:a, :b___c, :c___b).select_remove(:c___b) # SELECT a, b AS c FROM items
Note that there are a few cases where this method may not work correctly:
There may be other cases where this method does not work correctly, use it with caution.
# File lib/sequel/extensions/select_remove.rb, line 40
40: def select_remove(*cols)
41: Sequel::Deprecation.deprecate('Loading the select_remove extension globally', "Please use Database/Dataset#extension to load the extension into this dataset") unless is_a?(SelectRemove)
42: if (sel = @opts[:select]) && !sel.empty?
43: select(*(columns.zip(sel).reject{|c, s| cols.include?(c)}.map{|c, s| s} - cols))
44: else
45: select(*(columns - cols))
46: end
47: end
Return a string that can be processed by the dot program (included with graphviz) in order to see a visualization of the dataset's abstract syntax tree.
# File lib/sequel/extensions/to_dot.rb, line 152
152: def to_dot
153: Sequel::Deprecation.deprecate('Loading the to_dot extension globally', "Please use Database/Dataset#extension to load the extension into this dataset") unless is_a?(ToDot::DatasetMethods)
154: ToDot.output(self)
155: end
| MUTATION_METHODS | = | QUERY_METHODS - [:paginate, :naked, :from_self] | All methods that should have a ! method added that modifies the receiver. |
| identifier_input_method | [W] | Set the method to call on identifiers going into the database for this dataset |
| identifier_output_method | [W] | Set the method to call on identifiers coming from the database for this dataset |
| quote_identifiers | [W] | Whether to quote identifiers for this dataset |
| row_proc | [RW] | The row_proc for this database, should be any object that responds to call with a single hash argument and returns the object you want each to return. |
Setup mutation (e.g. filter!) methods. These operate the same as the non-! methods, but replace the options of the current dataset with the options of the resulting dataset.
Do not call this method with untrusted input, as that can result in arbitrary code execution.
# File lib/sequel/dataset/mutation.rb, line 17
17: def self.def_mutation_method(*meths)
18: options = meths.pop if meths.last.is_a?(Hash)
19: mod = options[:module] if options
20: mod ||= self
21: meths.each do |meth|
22: mod.class_eval("def #{meth}!(*args, &block); mutation_method(:#{meth}, *args, &block) end", __FILE__, __LINE__)
23: end
24: end
Load an extension into the receiver. In addition to requiring the extension file, this also modifies the dataset to work with the extension (usually extending it with a module defined in the extension file). If no related extension file exists or the extension does not have specific support for Database objects, an Error will be raised. Returns self.
# File lib/sequel/dataset/mutation.rb, line 47
47: def extension!(*exts)
48: Sequel.extension(*exts)
49: exts.each do |ext|
50: if pr = Sequel.synchronize{EXTENSIONS[ext]}
51: pr.call(self)
52: else
53: raise(Error, "Extension #{ext} does not have specific support handling individual datasets")
54: end
55: end
56: self
57: end
These methods all return modified copies of the receiver.
| EXTENSIONS | = | {} | Hash of extension name symbols to callable objects to load the extension into the Dataset object (usually by extending it with a module defined in the extension). | |
| COLUMN_CHANGE_OPTS | = | [:select, :sql, :from, :join].freeze | The dataset options that require the removal of cached columns if changed. | |
| NON_SQL_OPTIONS | = | [:server, :defaults, :overrides, :graph, :eager_graph, :graph_aliases] | Which options don't affect the SQL generation. Used by simple_select_all? to determine if this is a simple SELECT * FROM table. | |
| CONDITIONED_JOIN_TYPES | = | [:inner, :full_outer, :right_outer, :left_outer, :full, :right, :left] | These symbols have _join methods created (e.g. inner_join) that call join_table with the symbol, passing along the arguments and block from the method call. | |
| UNCONDITIONED_JOIN_TYPES | = | [:natural, :natural_left, :natural_right, :natural_full, :cross] | These symbols have _join methods created (e.g. natural_join) that call join_table with the symbol. They only accept a single table argument which is passed to join_table, and they raise an error if called with a block. | |
| JOIN_METHODS | = | (CONDITIONED_JOIN_TYPES + UNCONDITIONED_JOIN_TYPES).map{|x| "#{x}_join".to_sym} + [:join, :join_table] | All methods that return modified datasets with a joined table added. | |
| QUERY_METHODS | = | (<<-METHS).split.map{|x| x.to_sym} + JOIN_METHODS add_graph_aliases and distinct except exclude exclude_having exclude_where filter for_update from from_self graph grep group group_and_count group_by having intersect invert limit lock_style naked or order order_append order_by order_more order_prepend paginate qualify query reverse reverse_order select select_all select_append select_group select_more server set_defaults set_graph_aliases set_overrides unfiltered ungraphed ungrouped union unlimited unordered where with with_recursive with_sql METHS | Methods that return modified datasets |
Register an extension callback for Dataset objects. ext should be the extension name symbol, and mod should either be a Module that the dataset is extended with, or a callable object called with the database object. If mod is not provided, a block can be provided and is treated as the mod object.
If mod is a module, this also registers a Database extension that will extend all of the database's datasets.
# File lib/sequel/dataset/query.rb, line 55
55: def self.register_extension(ext, mod=nil, &block)
56: if mod
57: raise(Error, "cannot provide both mod and block to Dataset.register_extension") if block
58: if mod.is_a?(Module)
59: block = proc{|ds| ds.extend(mod)}
60: Sequel::Database.register_extension(ext){|db| db.extend_datasets(mod)}
61: else
62: block = mod
63: end
64: end
65: Sequel.synchronize{EXTENSIONS[ext] = block}
66: end
Adds a further filter to an existing filter using AND. If no filter exists an error is raised. This method is identical to filter except it expects an existing filter.
DB[:table].filter(:a).and(:b) # SELECT * FROM table WHERE a AND b
# File lib/sequel/dataset/query.rb, line 73
73: def and(*cond, &block)
74: unless @opts[:having] || @opts[:where]
75: Sequel::Deprecation.deprecate('Dataset#and will no longer raise for an unfiltered dataset starting in Sequel 4.')
76: raise(InvalidOperation, "No existing filter found.")
77: end
78: if @opts[:having]
79: Sequel::Deprecation.deprecate('Dataset#and will no longer modify the HAVING clause starting in Sequel 4. Switch to using Dataset#having or use the filter_having extension.')
80: having(*cond, &block)
81: else
82: where(*cond, &block)
83: end
84: end
Returns a new clone of the dataset with the given options merged. If the options changed include options in COLUMN_CHANGE_OPTS, the cached columns are deleted. This method should generally not be called directly by user code.
# File lib/sequel/dataset/query.rb, line 90
90: def clone(opts = nil)
91: c = super()
92: if opts
93: c.instance_variable_set(:@opts, @opts.merge(opts))
94: c.instance_variable_set(:@columns, nil) if @columns && !opts.each_key{|o| break if COLUMN_CHANGE_OPTS.include?(o)}
95: else
96: c.instance_variable_set(:@opts, @opts.dup)
97: end
98: c
99: end
Returns a copy of the dataset with the SQL DISTINCT clause. The DISTINCT clause is used to remove duplicate rows from the output. If arguments are provided, uses a DISTINCT ON clause, in which case it will only be distinct on those columns, instead of all returned columns. Raises an error if arguments are given and DISTINCT ON is not supported.
DB[:items].distinct # SQL: SELECT DISTINCT * FROM items DB[:items].order(:id).distinct(:id) # SQL: SELECT DISTINCT ON (id) * FROM items ORDER BY id
# File lib/sequel/dataset/query.rb, line 110
110: def distinct(*args)
111: raise(InvalidOperation, "DISTINCT ON not supported") if !args.empty? && !supports_distinct_on?
112: clone(:distinct => args)
113: end
Adds an EXCEPT clause using a second dataset object. An EXCEPT compound dataset returns all rows in the current dataset that are not in the given dataset. Raises an InvalidOperation if the operation is not supported. Options:
| :alias : | Use the given value as the from_self alias |
| :all : | Set to true to use EXCEPT ALL instead of EXCEPT, so duplicate rows can occur |
| :from_self : | Set to false to not wrap the returned dataset in a from_self, use with care. |
DB[:items].except(DB[:other_items]) # SELECT * FROM (SELECT * FROM items EXCEPT SELECT * FROM other_items) AS t1 DB[:items].except(DB[:other_items], :all=>true, :from_self=>false) # SELECT * FROM items EXCEPT ALL SELECT * FROM other_items DB[:items].except(DB[:other_items], :alias=>:i) # SELECT * FROM (SELECT * FROM items EXCEPT SELECT * FROM other_items) AS i
# File lib/sequel/dataset/query.rb, line 132
132: def except(dataset, opts={})
133: unless opts.is_a?(Hash)
134: Sequel::Deprecation.deprecate('Passing a non-hash as the second argument to Dataset#except', "Please switch to an options hash with the :all option")
135: opts = {:all=>opts}
136: end
137: raise(InvalidOperation, "EXCEPT not supported") unless supports_intersect_except?
138: raise(InvalidOperation, "EXCEPT ALL not supported") if opts[:all] && !supports_intersect_except_all?
139: compound_clone(:except, dataset, opts)
140: end
Performs the inverse of Dataset#filter. Note that if you have multiple filter conditions, this is not the same as a negation of all conditions.
DB[:items].exclude(:category => 'software') # SELECT * FROM items WHERE (category != 'software') DB[:items].exclude(:category => 'software', :id=>3) # SELECT * FROM items WHERE ((category != 'software') OR (id != 3))
# File lib/sequel/dataset/query.rb, line 150
150: def exclude(*cond, &block)
151: Sequel::Deprecation.deprecate('Dataset#exclude will no longer modify the HAVING clause starting in Sequel 4. Switch to using Dataset#exclude_having or use the filter_having extension.') if @opts[:having]
152: _filter_or_exclude(true, @opts[:having] ? :having : :where, *cond, &block)
153: end
Inverts the given conditions and adds them to the HAVING clause.
DB[:items].select_group(:name).exclude_having{count(name) < 2}
# SELECT name FROM items GROUP BY name HAVING (count(name) >= 2)
# File lib/sequel/dataset/query.rb, line 159
159: def exclude_having(*cond, &block)
160: _filter_or_exclude(true, :having, *cond, &block)
161: end
Inverts the given conditions and adds them to the WHERE clause.
DB[:items].select_group(:name).exclude_where(:category => 'software')
# SELECT * FROM items WHERE (category != 'software')
DB[:items].select_group(:name).
exclude_having{count(name) < 2}.
exclude_where(:category => 'software')
# SELECT name FROM items WHERE (category != 'software')
# GROUP BY name HAVING (count(name) >= 2)
# File lib/sequel/dataset/query.rb, line 173
173: def exclude_where(*cond, &block)
174: _filter_or_exclude(true, :where, *cond, &block)
175: end
Returns a copy of the dataset with the given conditions imposed upon it. If the query already has a HAVING clause, then the conditions are imposed in the HAVING clause. If not, then they are imposed in the WHERE clause.
filter accepts the following argument types:
filter also takes a block, which should return one of the above argument types, and is treated the same way. This block yields a virtual row object, which is easy to use to create identifiers and functions. For more details on the virtual row support, see the "Virtual Rows" guide
If both a block and regular argument are provided, they get ANDed together.
Examples:
DB[:items].filter(:id => 3)
# SELECT * FROM items WHERE (id = 3)
DB[:items].filter('price < ?', 100)
# SELECT * FROM items WHERE price < 100
DB[:items].filter([[:id, [1,2,3]], [:id, 0..10]])
# SELECT * FROM items WHERE ((id IN (1, 2, 3)) AND ((id >= 0) AND (id <= 10)))
DB[:items].filter('price < 100')
# SELECT * FROM items WHERE price < 100
DB[:items].filter(:active)
# SELECT * FROM items WHERE active
DB[:items].filter{price < 100}
# SELECT * FROM items WHERE (price < 100)
Multiple filter calls can be chained for scoping:
software = dataset.filter(:category => 'software').filter{price < 100}
# SELECT * FROM items WHERE ((category = 'software') AND (price < 100))
See the "Dataset Filtering" guide for more examples and details.
# File lib/sequel/dataset/query.rb, line 234
234: def filter(*cond, &block)
235: Sequel::Deprecation.deprecate('Dataset#filter will no longer modify the HAVING clause starting in Sequel 4. Switch to using Dataset#having or use the filter_having extension.') if @opts[:having]
236: _filter(@opts[:having] ? :having : :where, *cond, &block)
237: end
Returns a copy of the dataset with the source changed. If no source is given, removes all tables. If multiple sources are given, it is the same as using a CROSS JOIN (cartesian product) between all tables.
DB[:items].from # SQL: SELECT * DB[:items].from(:blah) # SQL: SELECT * FROM blah DB[:items].from(:blah, :foo) # SQL: SELECT * FROM blah, foo
# File lib/sequel/dataset/query.rb, line 253
253: def from(*source)
254: table_alias_num = 0
255: sources = []
256: ctes = nil
257: source.each do |s|
258: case s
259: when Hash
260: Sequel::Deprecation.deprecate('Dataset#from will no longer treat an input hash as an alias specifier. Switch to aliasing using Sequel.as or use the hash_aliases extension.')
261: s.each{|k,v| sources << SQL::AliasedExpression.new(k,v)}
262: when Dataset
263: if hoist_cte?(s)
264: ctes ||= []
265: ctes += s.opts[:with]
266: s = s.clone(:with=>nil)
267: end
268: sources << SQL::AliasedExpression.new(s, dataset_alias(table_alias_num+=1))
269: when Symbol
270: sch, table, aliaz = split_symbol(s)
271: if aliaz
272: s = sch ? SQL::QualifiedIdentifier.new(sch, table) : SQL::Identifier.new(table)
273: sources << SQL::AliasedExpression.new(s, aliaz.to_sym)
274: else
275: sources << s
276: end
277: else
278: sources << s
279: end
280: end
281: o = {:from=>sources.empty? ? nil : sources}
282: o[:with] = (opts[:with] || []) + ctes if ctes
283: o[:num_dataset_sources] = table_alias_num if table_alias_num > 0
284: clone(o)
285: end
Returns a dataset selecting from the current dataset. Supplying the :alias option controls the alias of the result.
ds = DB[:items].order(:name).select(:id, :name) # SELECT id,name FROM items ORDER BY name ds.from_self # SELECT * FROM (SELECT id, name FROM items ORDER BY name) AS t1 ds.from_self(:alias=>:foo) # SELECT * FROM (SELECT id, name FROM items ORDER BY name) AS foo
# File lib/sequel/dataset/query.rb, line 298
298: def from_self(opts={})
299: fs = {}
300: @opts.keys.each{|k| fs[k] = nil unless NON_SQL_OPTIONS.include?(k)}
301: clone(fs).from(opts[:alias] ? as(opts[:alias]) : self)
302: end
Match any of the columns to any of the patterns. The terms can be strings (which use LIKE) or regular expressions (which are only supported on MySQL and PostgreSQL). Note that the total number of pattern matches will be Array(columns).length * Array(terms).length, which could cause performance issues.
Options (all are boolean):
| :all_columns : | All columns must be matched to any of the given patterns. |
| :all_patterns : | All patterns must match at least one of the columns. |
| :case_insensitive : | Use a case insensitive pattern match (the default is case sensitive if the database supports it). |
If both :all_columns and :all_patterns are true, all columns must match all patterns.
Examples:
dataset.grep(:a, '%test%') # SELECT * FROM items WHERE (a LIKE '%test%') dataset.grep([:a, :b], %w'%test% foo') # SELECT * FROM items WHERE ((a LIKE '%test%') OR (a LIKE 'foo') OR (b LIKE '%test%') OR (b LIKE 'foo')) dataset.grep([:a, :b], %w'%foo% %bar%', :all_patterns=>true) # SELECT * FROM a WHERE (((a LIKE '%foo%') OR (b LIKE '%foo%')) AND ((a LIKE '%bar%') OR (b LIKE '%bar%'))) dataset.grep([:a, :b], %w'%foo% %bar%', :all_columns=>true) # SELECT * FROM a WHERE (((a LIKE '%foo%') OR (a LIKE '%bar%')) AND ((b LIKE '%foo%') OR (b LIKE '%bar%'))) dataset.grep([:a, :b], %w'%foo% %bar%', :all_patterns=>true, :all_columns=>true) # SELECT * FROM a WHERE ((a LIKE '%foo%') AND (b LIKE '%foo%') AND (a LIKE '%bar%') AND (b LIKE '%bar%'))
# File lib/sequel/dataset/query.rb, line 335
335: def grep(columns, patterns, opts={})
336: if opts[:all_patterns]
337: conds = Array(patterns).map do |pat|
338: SQL::BooleanExpression.new(opts[:all_columns] ? :AND : :OR, *Array(columns).map{|c| SQL::StringExpression.like(c, pat, opts)})
339: end
340: filter(SQL::BooleanExpression.new(opts[:all_patterns] ? :AND : :OR, *conds))
341: else
342: conds = Array(columns).map do |c|
343: SQL::BooleanExpression.new(:OR, *Array(patterns).map{|pat| SQL::StringExpression.like(c, pat, opts)})
344: end
345: filter(SQL::BooleanExpression.new(opts[:all_columns] ? :AND : :OR, *conds))
346: end
347: end
Returns a copy of the dataset with the results grouped by the value of the given columns. If a block is given, it is treated as a virtual row block, similar to filter.
DB[:items].group(:id) # SELECT * FROM items GROUP BY id
DB[:items].group(:id, :name) # SELECT * FROM items GROUP BY id, name
DB[:items].group{[a, sum(b)]} # SELECT * FROM items GROUP BY a, sum(b)
# File lib/sequel/dataset/query.rb, line 356
356: def group(*columns, &block)
357: virtual_row_columns(columns, block)
358: clone(:group => (columns.compact.empty? ? nil : columns))
359: end
Returns a dataset grouped by the given column with count by group. Column aliases may be supplied, and will be included in the select clause. If a block is given, it is treated as a virtual row block, similar to filter.
Examples:
DB[:items].group_and_count(:name).all
# SELECT name, count(*) AS count FROM items GROUP BY name
# => [{:name=>'a', :count=>1}, ...]
DB[:items].group_and_count(:first_name, :last_name).all
# SELECT first_name, last_name, count(*) AS count FROM items GROUP BY first_name, last_name
# => [{:first_name=>'a', :last_name=>'b', :count=>1}, ...]
DB[:items].group_and_count(:first_name___name).all
# SELECT first_name AS name, count(*) AS count FROM items GROUP BY first_name
# => [{:name=>'a', :count=>1}, ...]
DB[:items].group_and_count{substr(first_name, 1, 1).as(initial)}.all
# SELECT substr(first_name, 1, 1) AS initial, count(*) AS count FROM items GROUP BY substr(first_name, 1, 1)
# => [{:initial=>'a', :count=>1}, ...]
# File lib/sequel/dataset/query.rb, line 387
387: def group_and_count(*columns, &block)
388: select_group(*columns, &block).select_more(COUNT_OF_ALL_AS_COUNT)
389: end
Adds the appropriate CUBE syntax to GROUP BY.
# File lib/sequel/dataset/query.rb, line 392
392: def group_cube
393: raise Error, "GROUP BY CUBE not supported on #{db.database_type}" unless supports_group_cube?
394: clone(:group_options=>:cube)
395: end
Adds the appropriate ROLLUP syntax to GROUP BY.
# File lib/sequel/dataset/query.rb, line 398
398: def group_rollup
399: raise Error, "GROUP BY ROLLUP not supported on #{db.database_type}" unless supports_group_rollup?
400: clone(:group_options=>:rollup)
401: end
Returns a copy of the dataset with the HAVING conditions changed. See filter for argument types.
DB[:items].group(:sum).having(:sum=>10) # SELECT * FROM items GROUP BY sum HAVING (sum = 10)
# File lib/sequel/dataset/query.rb, line 407
407: def having(*cond, &block)
408: _filter(:having, *cond, &block)
409: end
Adds an INTERSECT clause using a second dataset object. An INTERSECT compound dataset returns all rows in both the current dataset and the given dataset. Raises an InvalidOperation if the operation is not supported. Options:
| :alias : | Use the given value as the from_self alias |
| :all : | Set to true to use INTERSECT ALL instead of INTERSECT, so duplicate rows can occur |
| :from_self : | Set to false to not wrap the returned dataset in a from_self, use with care. |
DB[:items].intersect(DB[:other_items]) # SELECT * FROM (SELECT * FROM items INTERSECT SELECT * FROM other_items) AS t1 DB[:items].intersect(DB[:other_items], :all=>true, :from_self=>false) # SELECT * FROM items INTERSECT ALL SELECT * FROM other_items DB[:items].intersect(DB[:other_items], :alias=>:i) # SELECT * FROM (SELECT * FROM items INTERSECT SELECT * FROM other_items) AS i
# File lib/sequel/dataset/query.rb, line 428
428: def intersect(dataset, opts={})
429: unless opts.is_a?(Hash)
430: Sequel::Deprecation.deprecate('Passing a non-hash as the second argument to Dataset#intersect', "Please switch to an options hash with the :all option")
431: opts = {:all=>opts}
432: end
433: raise(InvalidOperation, "INTERSECT not supported") unless supports_intersect_except?
434: raise(InvalidOperation, "INTERSECT ALL not supported") if opts[:all] && !supports_intersect_except_all?
435: compound_clone(:intersect, dataset, opts)
436: end
Inverts the current filter.
DB[:items].filter(:category => 'software').invert # SELECT * FROM items WHERE (category != 'software') DB[:items].filter(:category => 'software', :id=>3).invert # SELECT * FROM items WHERE ((category != 'software') OR (id != 3))
# File lib/sequel/dataset/query.rb, line 445
445: def invert
446: having, where = @opts[:having], @opts[:where]
447: unless having || where
448: Sequel::Deprecation.deprecate('Dataset#invert will no longer raise for an unfiltered dataset starting in Sequel 4.')
449: raise(Error, "No current filter")
450: end
451: o = {}
452: o[:having] = SQL::BooleanExpression.invert(having) if having
453: o[:where] = SQL::BooleanExpression.invert(where) if where
454: clone(o)
455: end
Alias of inner_join
# File lib/sequel/dataset/query.rb, line 458
458: def join(*args, &block)
459: inner_join(*args, &block)
460: end
Returns a joined dataset. Not usually called directly, users should use the appropriate join method (e.g. join, left_join, natural_join, cross_join) which fills in the type argument.
Takes the following arguments:
Examples:
DB[:a].join_table(:cross, :b)
# SELECT * FROM a CROSS JOIN b
DB[:a].join_table(:inner, DB[:b], :c=>d)
# SELECT * FROM a INNER JOIN (SELECT * FROM b) AS t1 ON (t1.c = a.d)
DB[:a].join_table(:left, :b___c, [:d])
# SELECT * FROM a LEFT JOIN b AS c USING (d)
DB[:a].natural_join(:b).join_table(:inner, :c) do |ta, jta, js|
(Sequel.qualify(ta, :d) > Sequel.qualify(jta, :e)) & {Sequel.qualify(ta, :f)=>DB.from(js.first.table).select(:g)}
end
# SELECT * FROM a NATURAL JOIN b INNER JOIN c
# ON ((c.d > b.e) AND (c.f IN (SELECT g FROM b)))
# File lib/sequel/dataset/query.rb, line 516
516: def join_table(type, table, expr=nil, options={}, &block)
517: if hoist_cte?(table)
518: s, ds = hoist_cte(table)
519: return s.join_table(type, ds, expr, options, &block)
520: end
521:
522: using_join = expr.is_a?(Array) && !expr.empty? && expr.all?{|x| x.is_a?(Symbol)}
523: if using_join && !supports_join_using?
524: h = {}
525: expr.each{|e| h[e] = e}
526: return join_table(type, table, h, options)
527: end
528:
529: case options
530: when Hash
531: table_alias = options[:table_alias]
532: last_alias = options[:implicit_qualifier]
533: qualify_type = options[:qualify]
534: when Symbol, String, SQL::Identifier
535: Sequel::Deprecation.deprecate('Passing a non-hash as the options hash to Dataset#join_table', "Please switch to an options hash with the :table_alias option")
536: table_alias = options
537: last_alias = nil
538: else
539: raise Error, "invalid options format for join_table: #{options.inspect}"
540: end
541:
542: if table.is_a?(Dataset)
543: if table_alias.nil?
544: table_alias_num = (@opts[:num_dataset_sources] || 0) + 1
545: table_alias = dataset_alias(table_alias_num)
546: end
547: table_name = table_alias
548: else
549: table, implicit_table_alias = split_alias(table)
550: table_alias ||= implicit_table_alias
551: table_name = table_alias || table
552: end
553:
554: join = if expr.nil? and !block
555: SQL::JoinClause.new(type, table, table_alias)
556: elsif using_join
557: raise(Sequel::Error, "can't use a block if providing an array of symbols as expr") if block
558: SQL::JoinUsingClause.new(expr, type, table, table_alias)
559: else
560: last_alias ||= @opts[:last_joined_table] || first_source_alias
561: if Sequel.condition_specifier?(expr)
562: expr = expr.collect do |k, v|
563: qualify_type = default_join_table_qualification if qualify_type.nil?
564: case qualify_type
565: when false
566: nil # Do no qualification
567: when :deep
568: k = Sequel::Qualifier.new(self, table_name).transform(k)
569: v = Sequel::Qualifier.new(self, last_alias).transform(v)
570: else
571: k = qualified_column_name(k, table_name) if k.is_a?(Symbol)
572: v = qualified_column_name(v, last_alias) if v.is_a?(Symbol)
573: end
574: [k,v]
575: end
576: expr = SQL::BooleanExpression.from_value_pairs(expr)
577: end
578: if block
579: expr2 = yield(table_name, last_alias, @opts[:join] || [])
580: expr = expr ? SQL::BooleanExpression.new(:AND, expr, expr2) : expr2
581: end
582: SQL::JoinOnClause.new(expr, type, table, table_alias)
583: end
584:
585: opts = {:join => (@opts[:join] || []) + [join], :last_joined_table => table_name}
586: opts[:num_dataset_sources] = table_alias_num if table_alias_num
587: clone(opts)
588: end
If given an integer, the dataset will contain only the first l results. If given a range, it will contain only those at offsets within that range. If a second argument is given, it is used as an offset. To use an offset without a limit, pass nil as the first argument.
DB[:items].limit(10) # SELECT * FROM items LIMIT 10 DB[:items].limit(10, 20) # SELECT * FROM items LIMIT 10 OFFSET 20 DB[:items].limit(10...20) # SELECT * FROM items LIMIT 10 OFFSET 10 DB[:items].limit(10..20) # SELECT * FROM items LIMIT 11 OFFSET 10 DB[:items].limit(nil, 20) # SELECT * FROM items OFFSET 20
# File lib/sequel/dataset/query.rb, line 607
607: def limit(l, o = (no_offset = true; nil))
608: return from_self.limit(l, o) if @opts[:sql]
609:
610: if l.is_a?(Range)
611: o = l.first
612: l = l.last - l.first + (l.exclude_end? ? 0 : 1)
613: end
614: l = l.to_i if l.is_a?(String) && !l.is_a?(LiteralString)
615: if l.is_a?(Integer)
616: raise(Error, 'Limits must be greater than or equal to 1') unless l >= 1
617: end
618: opts = {:limit => l}
619: if o
620: o = o.to_i if o.is_a?(String) && !o.is_a?(LiteralString)
621: if o.is_a?(Integer)
622: raise(Error, 'Offsets must be greater than or equal to 0') unless o >= 0
623: end
624: opts[:offset] = o
625: elsif !no_offset
626: opts[:offset] = nil
627: end
628: clone(opts)
629: end
Returns a cloned dataset with the given lock style. If style is a string, it will be used directly. You should never pass a string to this method that is derived from user input, as that can lead to SQL injection.
A symbol may be used for database independent locking behavior, but all supported symbols have separate methods (e.g. for_update).
DB[:items].lock_style('FOR SHARE NOWAIT') # SELECT * FROM items FOR SHARE NOWAIT
# File lib/sequel/dataset/query.rb, line 640
640: def lock_style(style)
641: clone(:lock => style)
642: end
Returns a cloned dataset without a row_proc.
ds = DB[:items]
ds.row_proc = proc{|r| r.invert}
ds.all # => [{2=>:id}]
ds.naked.all # => [{:id=>2}]
# File lib/sequel/dataset/query.rb, line 650
650: def naked
651: ds = clone
652: ds.row_proc = nil
653: ds
654: end
Adds an alternate filter to an existing filter using OR. If no filter exists an Error is raised.
DB[:items].filter(:a).or(:b) # SELECT * FROM items WHERE a OR b
# File lib/sequel/dataset/query.rb, line 660
660: def or(*cond, &block)
661: clause = (@opts[:having] ? :having : :where)
662: unless @opts[clause]
663: Sequel::Deprecation.deprecate('Dataset#or will no longer raise for an unfiltered dataset starting in Sequel 4.')
664: raise(InvalidOperation, "No existing filter found.")
665: end
666: Sequel::Deprecation.deprecate('Dataset#or will no longer modify the HAVING clause starting in Sequel 4. You can use the filter_having extension to continue to use the current behavior.') if clause == :having
667: cond = cond.first if cond.size == 1
668: if cond.respond_to?(:empty?) && cond.empty? && !block
669: clone
670: else
671: clone(clause => SQL::BooleanExpression.new(:OR, @opts[clause], filter_expr(cond, &block)))
672: end
673: end
Returns a copy of the dataset with the order changed. If the dataset has an existing order, it is ignored and overwritten with this order. If a nil is given the returned dataset has no order. This can accept multiple arguments of varying kinds, such as SQL functions. If a block is given, it is treated as a virtual row block, similar to filter.
DB[:items].order(:name) # SELECT * FROM items ORDER BY name
DB[:items].order(:a, :b) # SELECT * FROM items ORDER BY a, b
DB[:items].order(Sequel.lit('a + b')) # SELECT * FROM items ORDER BY a + b
DB[:items].order(:a + :b) # SELECT * FROM items ORDER BY (a + b)
DB[:items].order(Sequel.desc(:name)) # SELECT * FROM items ORDER BY name DESC
DB[:items].order(Sequel.asc(:name, :nulls=>:last)) # SELECT * FROM items ORDER BY name ASC NULLS LAST
DB[:items].order{sum(name).desc} # SELECT * FROM items ORDER BY sum(name) DESC
DB[:items].order(nil) # SELECT * FROM items
# File lib/sequel/dataset/query.rb, line 689
689: def order(*columns, &block)
690: virtual_row_columns(columns, block)
691: clone(:order => (columns.compact.empty?) ? nil : columns)
692: end
Alias of order_more, for naming consistency with order_prepend.
# File lib/sequel/dataset/query.rb, line 695
695: def order_append(*columns, &block)
696: order_more(*columns, &block)
697: end
Returns a copy of the dataset with the order columns added to the end of the existing order.
DB[:items].order(:a).order(:b) # SELECT * FROM items ORDER BY b
DB[:items].order(:a).order_more(:b) # SELECT * FROM items ORDER BY a, b
# File lib/sequel/dataset/query.rb, line 709
709: def order_more(*columns, &block)
710: columns = @opts[:order] + columns if @opts[:order]
711: order(*columns, &block)
712: end
Returns a copy of the dataset with the order columns added to the beginning of the existing order.
DB[:items].order(:a).order(:b) # SELECT * FROM items ORDER BY b
DB[:items].order(:a).order_prepend(:b) # SELECT * FROM items ORDER BY b, a
# File lib/sequel/dataset/query.rb, line 719
719: def order_prepend(*columns, &block)
720: ds = order(*columns, &block)
721: @opts[:order] ? ds.order_more(*@opts[:order]) : ds
722: end
Qualify to the given table, or first source if no table is given.
DB[:items].filter(:id=>1).qualify # SELECT items.* FROM items WHERE (items.id = 1)
DB[:items].filter(:id=>1).qualify(:i) # SELECT i.* FROM items WHERE (i.id = 1)
# File lib/sequel/dataset/query.rb, line 731
731: def qualify(table=first_source)
732: o = @opts
733: return clone if o[:sql]
734: h = {}
735: (o.keys & QUALIFY_KEYS).each do |k|
736: h[k] = qualified_expression(o[k], table)
737: end
738: h[:select] = [SQL::ColumnAll.new(table)] if !o[:select] || o[:select].empty?
739: clone(h)
740: end
Return a copy of the dataset with unqualified identifiers in the SELECT, WHERE, GROUP, HAVING, and ORDER clauses qualified by the given table. If no columns are currently selected, select all columns of the given table.
DB[:items].filter(:id=>1).qualify_to(:i) # SELECT i.* FROM items WHERE (i.id = 1)
# File lib/sequel/dataset/query.rb, line 749
749: def qualify_to(table)
750: Sequel::Deprecation.deprecate('Dataset#qualify_to', 'Switch to Dataset#qualify or use the sequel_3_dataset_methods extension')
751: qualify(table)
752: end
Qualify the dataset to its current first source. This is useful if you have unqualified identifiers in the query that all refer to the first source, and you want to join to another table which has columns with the same name as columns in the current dataset. See qualify_to.
DB[:items].filter(:id=>1).qualify_to_first_source # SELECT items.* FROM items WHERE (items.id = 1)
# File lib/sequel/dataset/query.rb, line 762
762: def qualify_to_first_source
763: Sequel::Deprecation.deprecate('Dataset#qualify_to_first_source', 'Switch to Dataset#qualify or use the sequel_3_dataset_methods extension')
764: qualify
765: end
Modify the RETURNING clause, only supported on a few databases. If returning is used, instead of insert returning the autogenerated primary key or update/delete returning the number of modified rows, results are returned using fetch_rows.
DB[:items].returning # RETURNING *
DB[:items].returning(nil) # RETURNING NULL
DB[:items].returning(:id, :name) # RETURNING id, name
# File lib/sequel/dataset/query.rb, line 775
775: def returning(*values)
776: clone(:returning=>values)
777: end
Returns a copy of the dataset with the order reversed. If no order is given, the existing order is inverted.
DB[:items].reverse(:id) # SELECT * FROM items ORDER BY id DESC
DB[:items].reverse{foo(bar)} # SELECT * FROM items ORDER BY foo(bar) DESC
DB[:items].order(:id).reverse # SELECT * FROM items ORDER BY id DESC
DB[:items].order(:id).reverse(Sequel.desc(:name)) # SELECT * FROM items ORDER BY name ASC
# File lib/sequel/dataset/query.rb, line 786
786: def reverse(*order, &block)
787: virtual_row_columns(order, block)
788: order(*invert_order(order.empty? ? @opts[:order] : order))
789: end
Returns a copy of the dataset with the columns selected changed to the given columns. This also takes a virtual row block, similar to filter.
DB[:items].select(:a) # SELECT a FROM items
DB[:items].select(:a, :b) # SELECT a, b FROM items
DB[:items].select{[a, sum(b)]} # SELECT a, sum(b) FROM items
# File lib/sequel/dataset/query.rb, line 803
803: def select(*columns, &block)
804: virtual_row_columns(columns, block)
805: m = []
806: columns.each do |i|
807: if i.is_a?(Hash)
808: Sequel::Deprecation.deprecate('Dataset#select will no longer treat an input hash as an alias specifier. Switch to aliasing using Sequel.as or use the hash_aliases extension.')
809: m.concat(i.map{|k, v| SQL::AliasedExpression.new(k,v)})
810: else
811: m << i
812: end
813: end
814: clone(:select => m)
815: end
Returns a copy of the dataset selecting the wildcard if no arguments are given. If arguments are given, treat them as tables and select all columns (using the wildcard) from each table.
DB[:items].select(:a).select_all # SELECT * FROM items
DB[:items].select_all(:items) # SELECT items.* FROM items
DB[:items].select_all(:items, :foo) # SELECT items.*, foo.* FROM items
# File lib/sequel/dataset/query.rb, line 824
824: def select_all(*tables)
825: if tables.empty?
826: clone(:select => nil)
827: else
828: select(*tables.map{|t| i, a = split_alias(t); a || i}.map{|t| SQL::ColumnAll.new(t)})
829: end
830: end
Returns a copy of the dataset with the given columns added to the existing selected columns. If no columns are currently selected, it will select the columns given in addition to *.
DB[:items].select(:a).select(:b) # SELECT b FROM items
DB[:items].select(:a).select_append(:b) # SELECT a, b FROM items
DB[:items].select_append(:b) # SELECT *, b FROM items
# File lib/sequel/dataset/query.rb, line 839
839: def select_append(*columns, &block)
840: cur_sel = @opts[:select]
841: if !cur_sel || cur_sel.empty?
842: unless supports_select_all_and_column?
843: return select_all(*(Array(@opts[:from]) + Array(@opts[:join]))).select_more(*columns, &block)
844: end
845: cur_sel = [WILDCARD]
846: end
847: select(*(cur_sel + columns), &block)
848: end
Set both the select and group clauses with the given columns. Column aliases may be supplied, and will be included in the select clause. This also takes a virtual row block similar to filter.
DB[:items].select_group(:a, :b)
# SELECT a, b FROM items GROUP BY a, b
DB[:items].select_group(:c___a){f(c2)}
# SELECT c AS a, f(c2) FROM items GROUP BY c, f(c2)
# File lib/sequel/dataset/query.rb, line 859
859: def select_group(*columns, &block)
860: virtual_row_columns(columns, block)
861: select(*columns).group(*columns.map{|c| unaliased_identifier(c)})
862: end
Returns a copy of the dataset with the given columns added to the existing selected columns. If no columns are currently selected it will just select the columns given.
DB[:items].select(:a).select(:b) # SELECT b FROM items
DB[:items].select(:a).select_more(:b) # SELECT a, b FROM items
DB[:items].select_more(:b) # SELECT b FROM items
# File lib/sequel/dataset/query.rb, line 871
871: def select_more(*columns, &block)
872: if @opts[:select]
873: columns = @opts[:select] + columns
874: else
875: Sequel::Deprecation.deprecate('Dataset#select_more will no longer remove the wildcard selection from the Dataset starting in Sequel 4. Switch to using Dataset#select if you want that behavior.')
876: end
877: select(*columns, &block)
878: end
Set the server for this dataset to use. Used to pick a specific database shard to run a query against, or to override the default (where SELECT uses :read_only database and all other queries use the :default database). This method is always available but is only useful when database sharding is being used.
DB[:items].all # Uses the :read_only or :default server
DB[:items].delete # Uses the :default server
DB[:items].server(:blah).delete # Uses the :blah server
# File lib/sequel/dataset/query.rb, line 889
889: def server(servr)
890: clone(:server=>servr)
891: end
Set the default values for insert and update statements. The values hash passed to insert or update are merged into this hash, so any values in the hash passed to insert or update will override values passed to this method.
DB[:items].set_defaults(:a=>'a', :c=>'c').insert(:a=>'d', :b=>'b')
# INSERT INTO items (a, c, b) VALUES ('d', 'c', 'b')
# File lib/sequel/dataset/query.rb, line 899
899: def set_defaults(hash)
900: Sequel::Deprecation.deprecate('Dataset#set_defaults', 'Please use the dataset_set_overrides extension if you want to continue using it')
901: clone(:defaults=>(@opts[:defaults]||{}).merge(hash))
902: end
Set values that override hash arguments given to insert and update statements. This hash is merged into the hash provided to insert or update, so values will override any values given in the insert/update hashes.
DB[:items].set_overrides(:a=>'a', :c=>'c').insert(:a=>'d', :b=>'b')
# INSERT INTO items (a, c, b) VALUES ('a', 'c', 'b')
# File lib/sequel/dataset/query.rb, line 910
910: def set_overrides(hash)
911: Sequel::Deprecation.deprecate('Dataset#set_overrides', 'Please use the dataset_set_overrides extension if you want to continue using it')
912: clone(:overrides=>hash.merge(@opts[:overrides]||{}))
913: end
Unbind bound variables from this dataset‘s filter and return an array of two objects. The first object is a modified dataset where the filter has been replaced with one that uses bound variable placeholders. The second object is the hash of unbound variables. You can then prepare and execute (or just call) the dataset with the bound variables to get results.
ds, bv = DB[:items].filter(:a=>1).unbind
ds # SELECT * FROM items WHERE (a = $a)
bv # {:a => 1}
ds.call(:select, bv)
# File lib/sequel/dataset/query.rb, line 925
925: def unbind
926: u = Unbinder.new
927: ds = clone(:where=>u.transform(opts[:where]), :join=>u.transform(opts[:join]))
928: [ds, u.binds]
929: end
Adds a UNION clause using a second dataset object. A UNION compound dataset returns all rows in either the current dataset or the given dataset. Options:
| :alias : | Use the given value as the from_self alias |
| :all : | Set to true to use UNION ALL instead of UNION, so duplicate rows can occur |
| :from_self : | Set to false to not wrap the returned dataset in a from_self, use with care. |
DB[:items].union(DB[:other_items]) # SELECT * FROM (SELECT * FROM items UNION SELECT * FROM other_items) AS t1
DB[:items].union(DB[:other_items], :all=>true, :from_self=>false) # SELECT * FROM items UNION ALL SELECT * FROM other_items
DB[:items].union(DB[:other_items], :alias=>:i) # SELECT * FROM (SELECT * FROM items UNION SELECT * FROM other_items) AS i
# File lib/sequel/dataset/query.rb, line 963
963: def union(dataset, opts={})
964: unless opts.is_a?(Hash)
965: Sequel::Deprecation.deprecate('Passing a non-hash as the second argument to Dataset#union', "Please switch to an options hash with the :all option")
966: opts = {:all=>opts}
967: end
968: compound_clone(:union, dataset, opts)
969: end
Add a condition to the WHERE clause. See filter for argument types.
DB[:items].group(:a).having(:a).filter(:b) # SELECT * FROM items GROUP BY a HAVING a AND b
DB[:items].group(:a).having(:a).where(:b) # SELECT * FROM items WHERE b GROUP BY a HAVING a
# File lib/sequel/dataset/query.rb, line 992
992: def where(*cond, &block)
993: _filter(:where, *cond, &block)
994: end
Add a common table expression (CTE) with the given name and a dataset that defines the CTE. A common table expression acts as an inline view for the query. Options:
| :args : | Specify the arguments/columns for the CTE, should be an array of symbols. |
| :recursive : | Specify that this is a recursive CTE |
DB[:items].with(:items, DB[:syx].filter(:name.like('A%')))
# WITH items AS (SELECT * FROM syx WHERE (name LIKE 'A%')) SELECT * FROM items
# File lib/sequel/dataset/query.rb, line 1004
1004: def with(name, dataset, opts={})
1005: raise(Error, 'This dataset does not support common table expressions') unless supports_cte?
1006: if hoist_cte?(dataset)
1007: s, ds = hoist_cte(dataset)
1008: s.with(name, ds, opts)
1009: else
1010: clone(:with=>(@opts[:with]||[]) + [opts.merge(:name=>name, :dataset=>dataset)])
1011: end
1012: end
Add a recursive common table expression (CTE) with the given name, a dataset that defines the nonrecursive part of the CTE, and a dataset that defines the recursive part of the CTE. Options:
| :args : | Specify the arguments/columns for the CTE, should be an array of symbols. |
| :union_all : | Set to false to use UNION instead of UNION ALL combining the nonrecursive and recursive parts. |
DB[:t].with_recursive(:t,
DB[:i1].select(:id, :parent_id).filter(:parent_id=>nil),
DB[:i1].join(:t, :id=>:parent_id).select(:i1__id, :i1__parent_id),
:args=>[:id, :parent_id])
# WITH RECURSIVE "t"("id", "parent_id") AS (
# SELECT "id", "parent_id" FROM "i1" WHERE ("parent_id" IS NULL)
# UNION ALL
# SELECT "i1"."id", "i1"."parent_id" FROM "i1" INNER JOIN "t" ON ("t"."id" = "i1"."parent_id")
# ) SELECT * FROM "t"
# File lib/sequel/dataset/query.rb, line 1030
1030: def with_recursive(name, nonrecursive, recursive, opts={})
1031: raise(Error, 'This dataset does not support common table expressions') unless supports_cte?
1032: if hoist_cte?(nonrecursive)
1033: s, ds = hoist_cte(nonrecursive)
1034: s.with_recursive(name, ds, recursive, opts)
1035: elsif hoist_cte?(recursive)
1036: s, ds = hoist_cte(recursive)
1037: s.with_recursive(name, nonrecursive, ds, opts)
1038: else
1039: clone(:with=>(@opts[:with]||[]) + [opts.merge(:recursive=>true, :name=>name, :dataset=>nonrecursive.union(recursive, {:all=>opts[:union_all] != false, :from_self=>false}))])
1040: end
1041: end
Returns a copy of the dataset with the static SQL used. This is useful if you want to keep the same row_proc/graph, but change the SQL used to custom SQL.
DB[:items].with_sql('SELECT * FROM foo') # SELECT * FROM foo
You can use placeholders in your SQL and provide arguments for those placeholders:
DB[:items].with_sql('SELECT ? FROM foo', 1) # SELECT 1 FROM foo
You can also provide a method name and arguments to call to get the SQL:
DB[:items].with_sql(:insert_sql, :b=>1) # INSERT INTO items (b) VALUES (1)
# File lib/sequel/dataset/query.rb, line 1055
1055: def with_sql(sql, *args)
1056: if sql.is_a?(Symbol)
1057: sql = send(sql, *args)
1058: else
1059: sql = SQL::PlaceholderLiteralString.new(sql, args) unless args.empty?
1060: end
1061: clone(:sql=>sql)
1062: end
Add the dataset to the list of compounds
# File lib/sequel/dataset/query.rb, line 1067
1067: def compound_clone(type, dataset, opts)
1068: if hoist_cte?(dataset)
1069: s, ds = hoist_cte(dataset)
1070: return s.compound_clone(type, ds, opts)
1071: end
1072: ds = compound_from_self.clone(:compounds=>Array(@opts[:compounds]).map{|x| x.dup} + [[type, dataset.compound_from_self, opts[:all]]])
1073: opts[:from_self] == false ? ds : ds.from_self(opts)
1074: end
Return true if the dataset has a non-nil value for any key in opts.
# File lib/sequel/dataset/query.rb, line 1077
1077: def options_overlap(opts)
1078: !(@opts.collect{|k,v| k unless v.nil?}.compact & opts).empty?
1079: end
Whether this dataset is a simple SELECT * FROM table.
# File lib/sequel/dataset/query.rb, line 1082
1082: def simple_select_all?
1083: o = @opts.reject{|k,v| v.nil? || NON_SQL_OPTIONS.include?(k)}
1084: o.length == 1 && (f = o[:from]) && f.length == 1 && (f.first.is_a?(Symbol) || f.first.is_a?(SQL::AliasedExpression))
1085: end
These methods all execute the dataset‘s SQL on the database. They don‘t return modified datasets, so if used in a method chain they should be the last method called.
| ACTION_METHODS | = | (<<-METHS).split.map{|x| x.to_sym} << [] []= all avg count columns columns! delete each empty? fetch_rows first first! get import insert insert_multiple interval last map max min multi_insert paged_each range select_hash select_hash_groups select_map select_order_map set single_record single_value sum to_csv to_hash to_hash_groups truncate update METHS | Action methods defined by Sequel that execute code on the database. |
Inserts the given argument into the database. Returns self so it can be used safely when chaining:
DB[:items] << {:id=>0, :name=>'Zero'} << DB[:old_items].select(:id, :name)
# File lib/sequel/dataset/actions.rb, line 24
24: def <<(arg)
25: insert(arg)
26: self
27: end
Returns the first record matching the conditions. Examples:
DB[:table][:id=>1] # SELECT * FROM table WHERE (id = 1) LIMIT 1
# => {:id=>1}
# File lib/sequel/dataset/actions.rb, line 33
33: def [](*conditions)
34: raise(Error, ARRAY_ACCESS_ERROR_MSG) if (conditions.length == 1 and conditions.first.is_a?(Integer)) or conditions.length == 0
35: first(*conditions)
36: end
Update all records matching the conditions with the values specified. Returns the number of rows affected.
DB[:table][:id=>1] = {:id=>2} # UPDATE table SET id = 2 WHERE id = 1
# => 1 # number of rows affected
# File lib/sequel/dataset/actions.rb, line 43
43: def []=(conditions, values)
44: Sequel::Deprecation.deprecate('Dataset#[]=', 'Please load the sequel_3_dataset_methods extension to continue using it')
45: filter(conditions).update(values)
46: end
Returns an array with all records in the dataset. If a block is given, the array is iterated over after all items have been loaded.
DB[:table].all # SELECT * FROM table
# => [{:id=>1, ...}, {:id=>2, ...}, ...]
# Iterate over all rows in the table
DB[:table].all{|row| p row}
# File lib/sequel/dataset/actions.rb, line 56
56: def all(&block)
57: a = []
58: each{|r| a << r}
59: post_load(a)
60: a.each(&block) if block
61: a
62: end
Returns the average value for the given column/expression. Uses a virtual row block if no argument is given.
DB[:table].avg(:number) # SELECT avg(number) FROM table LIMIT 1
# => 3
DB[:table].avg{function(column)} # SELECT avg(function(column)) FROM table LIMIT 1
# => 1
# File lib/sequel/dataset/actions.rb, line 71
71: def avg(column=Sequel.virtual_row(&Proc.new))
72: aggregate_dataset.get{avg(column).as(:avg)}
73: end
Returns the columns in the result set in order as an array of symbols. If the columns are currently cached, returns the cached value. Otherwise, a SELECT query is performed to retrieve a single row in order to get the columns.
If you are looking for all columns for a single table and maybe some information about each column (e.g. database type), see Database#schema.
DB[:table].columns # => [:id, :name]
# File lib/sequel/dataset/actions.rb, line 84
84: def columns
85: return @columns if @columns
86: ds = unfiltered.unordered.naked.clone(:distinct => nil, :limit => 1, :offset=>nil)
87: ds.each{break}
88: @columns = ds.instance_variable_get(:@columns)
89: @columns || []
90: end
Returns the number of records in the dataset. If an argument is provided, it is used as the argument to count. If a block is provided, it is treated as a virtual row, and the result is used as the argument to count.
DB[:table].count # SELECT count(*) AS count FROM table LIMIT 1
# => 3
DB[:table].count(:column) # SELECT count(column) AS count FROM table LIMIT 1
# => 2
DB[:table].count{foo(column)} # SELECT count(foo(column)) AS count FROM table LIMIT 1
# => 1
# File lib/sequel/dataset/actions.rb, line 113
113: def count(arg=(no_arg=true), &block)
114: if no_arg
115: if block
116: arg = Sequel.virtual_row(&block)
117: aggregate_dataset.get{count(arg).as(count)}
118: else
119: aggregate_dataset.get{count(:*){}.as(count)}.to_i
120: end
121: elsif block
122: raise Error, 'cannot provide both argument and block to Dataset#count'
123: else
124: aggregate_dataset.get{count(arg).as(count)}
125: end
126: end
Deletes the records in the dataset. The returned value should be number of records deleted, but that is adapter dependent.
DB[:table].delete # DELETE FROM table
# => 3
# File lib/sequel/dataset/actions.rb, line 133
133: def delete(&block)
134: sql = delete_sql
135: if uses_returning?(:delete)
136: returning_fetch_rows(sql, &block)
137: else
138: execute_dui(sql)
139: end
140: end
Iterates over the records in the dataset as they are yielded from the database adapter, and returns self.
DB[:table].each{|row| p row} # SELECT * FROM table
Note that this method is not safe to use on many adapters if you are running additional queries inside the provided block. If you are running queries inside the block, you should use all instead of each for the outer queries, or use a separate thread or shard inside each.
# File lib/sequel/dataset/actions.rb, line 151
151: def each
152: if @opts[:graph]
153: graph_each{|r| yield r}
154: elsif row_proc = @row_proc
155: fetch_rows(select_sql){|r| yield row_proc.call(r)}
156: else
157: fetch_rows(select_sql){|r| yield r}
158: end
159: self
160: end
Returns true if no records exist in the dataset, false otherwise
DB[:table].empty? # SELECT 1 AS one FROM table LIMIT 1
# => false
# File lib/sequel/dataset/actions.rb, line 166
166: def empty?
167: get(Sequel::SQL::AliasedExpression.new(1, :one)).nil?
168: end
Executes a select query and fetches records, yielding each record to the supplied block. The yielded records should be hashes with symbol keys. This method probably should not be called by user code; use each instead.
# File lib/sequel/dataset/actions.rb, line 174
174: def fetch_rows(sql)
175: Sequel::Deprecation.deprecate('Dataset#fetch_rows default implementation and Sequel::NotImplemented', 'All dataset instances can be assumed to implement fetch_rows')
176: raise NotImplemented, NOTIMPL_MSG
177: end
If an integer argument is given, it is interpreted as a limit, and then returns all matching records up to that limit. If no argument is passed, it returns the first matching record. If any other type of argument(s) is passed, it is given to filter and the first matching record is returned. If a block is given, it is used to filter the dataset before returning anything.
If there are no records in the dataset, returns nil (or an empty array if an integer argument is given).
Examples:
DB[:table].first # SELECT * FROM table LIMIT 1
# => {:id=>7}
DB[:table].first(2) # SELECT * FROM table LIMIT 2
# => [{:id=>6}, {:id=>4}]
DB[:table].first(:id=>2) # SELECT * FROM table WHERE (id = 2) LIMIT 1
# => {:id=>2}
DB[:table].first("id = 3") # SELECT * FROM table WHERE (id = 3) LIMIT 1
# => {:id=>3}
DB[:table].first("id = ?", 4) # SELECT * FROM table WHERE (id = 4) LIMIT 1
# => {:id=>4}
DB[:table].first{id > 2} # SELECT * FROM table WHERE (id > 2) LIMIT 1
# => {:id=>5}
DB[:table].first("id > ?", 4){id < 6} # SELECT * FROM table WHERE ((id > 4) AND (id < 6)) LIMIT 1
# => {:id=>5}
DB[:table].first(2){id < 2} # SELECT * FROM table WHERE (id < 2) LIMIT 2
# => [{:id=>1}]
# File lib/sequel/dataset/actions.rb, line 214
214: def first(*args, &block)
215: ds = block ? filter(&block) : self
216:
217: if args.empty?
218: ds.single_record
219: else
220: args = (args.size == 1) ? args.first : args
221: if args.is_a?(Integer)
222: ds.limit(args).all
223: else
224: ds.filter(args).single_record
225: end
226: end
227: end
Calls first. If first returns nil (signaling that no row matches), raise a Sequel::NoMatchingRow exception.
# File lib/sequel/dataset/actions.rb, line 231
231: def first!(*args, &block)
232: first(*args, &block) || raise(Sequel::NoMatchingRow)
233: end
Return the column value for the first matching record in the dataset. Raises an error if both an argument and block is given.
DB[:table].get(:id) # SELECT id FROM table LIMIT 1
# => 3
ds.get{sum(id)} # SELECT sum(id) FROM table LIMIT 1
# => 6
You can pass an array of arguments to return multiple arguments, but you must make sure each element in the array has an alias that Sequel can determine:
DB[:table].get([:id, :name]) # SELECT id, name FROM table LIMIT 1
# => [3, 'foo']
DB[:table].get{[sum(id).as(sum), name]} # SELECT sum(id) AS sum, name FROM table LIMIT 1
# => [6, 'foo']
# File lib/sequel/dataset/actions.rb, line 253
253: def get(column=(no_arg=true; nil), &block)
254: ds = naked
255: if block
256: raise(Error, ARG_BLOCK_ERROR_MSG) unless no_arg
257: ds = ds.select(&block)
258: column = ds.opts[:select]
259: column = nil if column.is_a?(Array) && column.length < 2
260: else
261: ds = if column.is_a?(Array)
262: ds.select(*column)
263: else
264: ds.select(column)
265: end
266: end
267:
268: if column.is_a?(Array)
269: if r = ds.single_record
270: r.values_at(*hash_key_symbols(column))
271: end
272: else
273: ds.single_value
274: end
275: end
Inserts multiple records into the associated table. This method can be used to efficiently insert a large number of records into a table in a single query if the database supports it. Inserts are automatically wrapped in a transaction.
This method is called with a columns array and an array of value arrays:
DB[:table].import([:x, :y], [[1, 2], [3, 4]])
# INSERT INTO table (x, y) VALUES (1, 2)
# INSERT INTO table (x, y) VALUES (3, 4)
This method also accepts a dataset instead of an array of value arrays:
DB[:table].import([:x, :y], DB[:table2].select(:a, :b))
# INSERT INTO table (x, y) SELECT a, b FROM table2
Options:
| :commit_every : | Open a new transaction for every given number of records. For example, if you provide a value of 50, will commit after every 50 records. |
| :server : | Set the server/shard to use for the transaction and insert queries. |
| :slice : | Same as :commit_every, :commit_every takes precedence. |
# File lib/sequel/dataset/actions.rb, line 300
300: def import(columns, values, opts={})
301: return @db.transaction{insert(columns, values)} if values.is_a?(Dataset)
302:
303: return if values.empty?
304: raise(Error, IMPORT_ERROR_MSG) if columns.empty?
305: ds = opts[:server] ? server(opts[:server]) : self
306:
307: if slice_size = opts[:commit_every] || opts[:slice]
308: offset = 0
309: rows = []
310: while offset < values.length
311: rows << ds._import(columns, values[offset, slice_size], opts)
312: offset += slice_size
313: end
314: rows.flatten
315: else
316: ds._import(columns, values, opts)
317: end
318: end
Inserts values into the associated table. The returned value is generally the value of the primary key for the inserted row, but that is adapter dependent.
insert handles a number of different argument formats:
| no arguments or single empty hash : | Uses DEFAULT VALUES |
| single hash : | Most common format, treats keys as columns and values as values |
| single array : | Treats entries as values, with no columns |
| two arrays : | Treats first array as columns, second array as values |
| single Dataset : | Treats as an insert based on a selection from the dataset given, with no columns |
| array and dataset : | Treats as an insert based on a selection from the dataset given, with the columns given by the array. |
Examples:
DB[:items].insert
# INSERT INTO items DEFAULT VALUES
DB[:items].insert({})
# INSERT INTO items DEFAULT VALUES
DB[:items].insert([1,2,3])
# INSERT INTO items VALUES (1, 2, 3)
DB[:items].insert([:a, :b], [1,2])
# INSERT INTO items (a, b) VALUES (1, 2)
DB[:items].insert(:a => 1, :b => 2)
# INSERT INTO items (a, b) VALUES (1, 2)
DB[:items].insert(DB[:old_items])
# INSERT INTO items SELECT * FROM old_items
DB[:items].insert([:a, :b], DB[:old_items])
# INSERT INTO items (a, b) SELECT * FROM old_items
# File lib/sequel/dataset/actions.rb, line 355
355: def insert(*values, &block)
356: sql = insert_sql(*values)
357: if uses_returning?(:insert)
358: returning_fetch_rows(sql, &block)
359: else
360: execute_insert(sql)
361: end
362: end
Inserts multiple values. If a block is given it is invoked for each item in the given array before inserting it. See multi_insert as a possibly faster version that may be able to insert multiple records in one SQL statement (if supported by the database). Returns an array of primary keys of inserted rows.
DB[:table].insert_multiple([{:x=>1}, {:x=>2}])
# => [4, 5]
# INSERT INTO table (x) VALUES (1)
# INSERT INTO table (x) VALUES (2)
DB[:table].insert_multiple([{:x=>1}, {:x=>2}]){|row| row[:y] = row[:x] * 2; row }
# => [6, 7]
# INSERT INTO table (x, y) VALUES (1, 2)
# INSERT INTO table (x, y) VALUES (2, 4)
# File lib/sequel/dataset/actions.rb, line 379
379: def insert_multiple(array, &block)
380: Sequel::Deprecation.deprecate('Dataset#insert_multiple', 'Please load the sequel_3_dataset_methods extension to continue using it')
381: if block
382: array.map{|i| insert(block.call(i))}
383: else
384: array.map{|i| insert(i)}
385: end
386: end
Returns the interval between minimum and maximum values for the given column/expression. Uses a virtual row block if no argument is given.
DB[:table].interval(:id) # SELECT (max(id) - min(id)) FROM table LIMIT 1
# => 6
DB[:table].interval{function(column)} # SELECT (max(function(column)) - min(function(column))) FROM table LIMIT 1
# => 7
# File lib/sequel/dataset/actions.rb, line 395
395: def interval(column=Sequel.virtual_row(&Proc.new))
396: aggregate_dataset.get{(max(column) - min(column)).as(:interval)}
397: end
Reverses the order and then runs first with the given arguments and block. Note that this will not necessarily give you the last record in the dataset, unless you have an unambiguous order. If there is not currently an order for this dataset, raises an Error.
DB[:table].order(:id).last # SELECT * FROM table ORDER BY id DESC LIMIT 1
# => {:id=>10}
DB[:table].order(Sequel.desc(:id)).last(2) # SELECT * FROM table ORDER BY id ASC LIMIT 2
# => [{:id=>1}, {:id=>2}]
# File lib/sequel/dataset/actions.rb, line 409
409: def last(*args, &block)
410: raise(Error, 'No order specified') unless @opts[:order]
411: reverse.first(*args, &block)
412: end
Maps column values for each record in the dataset (if a column name is given), or performs the stock mapping functionality of Enumerable otherwise. Raises an Error if both an argument and block are given.
DB[:table].map(:id) # SELECT * FROM table
# => [1, 2, 3, ...]
DB[:table].map{|r| r[:id] * 2} # SELECT * FROM table
# => [2, 4, 6, ...]
You can also provide an array of column names:
DB[:table].map([:id, :name]) # SELECT * FROM table
# => [[1, 'A'], [2, 'B'], [3, 'C'], ...]
# File lib/sequel/dataset/actions.rb, line 428
428: def map(column=nil, &block)
429: if column
430: raise(Error, ARG_BLOCK_ERROR_MSG) if block
431: return naked.map(column) if row_proc
432: if column.is_a?(Array)
433: super(){|r| r.values_at(*column)}
434: else
435: super(){|r| r[column]}
436: end
437: else
438: super(&block)
439: end
440: end
Returns the maximum value for the given column/expression. Uses a virtual row block if no argument is given.
DB[:table].max(:id) # SELECT max(id) FROM table LIMIT 1
# => 10
DB[:table].max{function(column)} # SELECT max(function(column)) FROM table LIMIT 1
# => 7
# File lib/sequel/dataset/actions.rb, line 449
449: def max(column=Sequel.virtual_row(&Proc.new))
450: aggregate_dataset.get{max(column).as(:max)}
451: end
Returns the minimum value for the given column/expression. Uses a virtual row block if no argument is given.
DB[:table].min(:id) # SELECT min(id) FROM table LIMIT 1
# => 1
DB[:table].min{function(column)} # SELECT min(function(column)) FROM table LIMIT 1
# => 0
# File lib/sequel/dataset/actions.rb, line 460
460: def min(column=Sequel.virtual_row(&Proc.new))
461: aggregate_dataset.get{min(column).as(:min)}
462: end
This is a front end for import that allows you to submit an array of hashes instead of arrays of columns and values:
DB[:table].multi_insert([{:x => 1}, {:x => 2}])
# INSERT INTO table (x) VALUES (1)
# INSERT INTO table (x) VALUES (2)
Be aware that all hashes should have the same keys if you use this calling method, otherwise some columns could be missed or set to null instead of to default values.
This respects the same options as import.
# File lib/sequel/dataset/actions.rb, line 476
476: def multi_insert(hashes, opts={})
477: return if hashes.empty?
478: columns = hashes.first.keys
479: import(columns, hashes.map{|h| columns.map{|c| h[c]}}, opts)
480: end
Yields each row in the dataset, but internally uses multiple queries as needed with limit and offset to process the entire result set without keeping all rows in the dataset in memory, even if the underlying driver buffers all query results in memory.
Because this uses multiple queries internally, in order to remain consistent, it also uses a transaction internally. Additionally, to make sure that all rows in the dataset are yielded and none are yielded twice, the dataset must have an unambiguous order. Sequel requires that datasets using this method have an order, but it cannot ensure that the order is unambiguous.
Options:
| :rows_per_fetch : | The number of rows to fetch per query. Defaults to 1000. |
# File lib/sequel/dataset/actions.rb, line 495
495: def paged_each(opts={})
496: unless @opts[:order]
497: raise Sequel::Error, "Dataset#paged_each requires the dataset be ordered"
498: end
499:
500: total_limit = @opts[:limit]
501: offset = @opts[:offset] || 0
502:
503: if server = @opts[:server]
504: opts = opts.merge(:server=>server)
505: end
506:
507: rows_per_fetch = opts[:rows_per_fetch] || 1000
508: num_rows_yielded = rows_per_fetch
509: total_rows = 0
510:
511: db.transaction(opts) do
512: while num_rows_yielded == rows_per_fetch && (total_limit.nil? || total_rows < total_limit)
513: if total_limit && total_rows + rows_per_fetch > total_limit
514: rows_per_fetch = total_limit - total_rows
515: end
516:
517: num_rows_yielded = 0
518: limit(rows_per_fetch, offset).each do |row|
519: num_rows_yielded += 1
520: total_rows += 1 if total_limit
521: yield row
522: end
523:
524: offset += rows_per_fetch
525: end
526: end
527:
528: self
529: end
Returns a Range instance made from the minimum and maximum values for the given column/expression. Uses a virtual row block if no argument is given.
DB[:table].range(:id) # SELECT max(id) AS v1, min(id) AS v2 FROM table LIMIT 1
# => 1..10
DB[:table].range{function(column)} # SELECT max(function(column)) AS v1, min(function(column)) AS v2 FROM table LIMIT 1
# => 0..7
# File lib/sequel/dataset/actions.rb, line 538
538: def range(column=Sequel.virtual_row(&Proc.new))
539: if r = aggregate_dataset.select{[min(column).as(v1), max(column).as(v2)]}.first
540: (r[:v1]..r[:v2])
541: end
542: end
Returns a hash with key_column values as keys and value_column values as values. Similar to to_hash, but only selects the columns given.
DB[:table].select_hash(:id, :name) # SELECT id, name FROM table
# => {1=>'a', 2=>'b', ...}
You can also provide an array of column names for either the key_column, the value column, or both:
DB[:table].select_hash([:id, :foo], [:name, :bar]) # SELECT * FROM table
# {[1, 3]=>['a', 'c'], [2, 4]=>['b', 'd'], ...}
When using this method, you must be sure that each expression has an alias that Sequel can determine. Usually you can do this by calling the as method on the expression and providing an alias.
# File lib/sequel/dataset/actions.rb, line 559
559: def select_hash(key_column, value_column)
560: _select_hash(:to_hash, key_column, value_column)
561: end
Returns a hash with key_column values as keys and an array of value_column values. Similar to to_hash_groups, but only selects the columns given.
DB[:table].select_hash_groups(:name, :id) # SELECT id, name FROM table
# => {'a'=>[1, 4, ...], 'b'=>[2, ...], ...}
You can also provide an array of column names for either the key_column, the value column, or both:
DB[:table].select_hash_groups([:first, :middle], [:last, :id]) # SELECT * FROM table
# {['a', 'b']=>[['c', 1], ['d', 2], ...], ...}
When using this method, you must be sure that each expression has an alias that Sequel can determine. Usually you can do this by calling the as method on the expression and providing an alias.
# File lib/sequel/dataset/actions.rb, line 578
578: def select_hash_groups(key_column, value_column)
579: _select_hash(:to_hash_groups, key_column, value_column)
580: end
Selects the column given (either as an argument or as a block), and returns an array of all values of that column in the dataset. If you give a block argument that returns an array with multiple entries, the contents of the resulting array are undefined. Raises an Error if called with both an argument and a block.
DB[:table].select_map(:id) # SELECT id FROM table
# => [3, 5, 8, 1, ...]
DB[:table].select_map{id * 2} # SELECT (id * 2) FROM table
# => [6, 10, 16, 2, ...]
You can also provide an array of column names:
DB[:table].select_map([:id, :name]) # SELECT id, name FROM table # => [[1, 'A'], [2, 'B'], [3, 'C'], ...]
If you provide an array of expressions, you must be sure that each entry in the array has an alias that Sequel can determine. Usually you can do this by calling the as method on the expression and providing an alias.
# File lib/sequel/dataset/actions.rb, line 602
602: def select_map(column=nil, &block)
603: _select_map(column, false, &block)
604: end
The same as select_map, but in addition orders the array by the column.
DB[:table].select_order_map(:id) # SELECT id FROM table ORDER BY id
# => [1, 2, 3, 4, ...]
DB[:table].select_order_map{id * 2} # SELECT (id * 2) FROM table ORDER BY (id * 2)
# => [2, 4, 6, 8, ...]
You can also provide an array of column names:
DB[:table].select_order_map([:id, :name]) # SELECT id, name FROM table ORDER BY id, name # => [[1, 'A'], [2, 'B'], [3, 'C'], ...]
If you provide an array of expressions, you must be sure that each entry in the array has an alias that Sequel can determine. Usually you can do this by calling the as method on the expression and providing an alias.
# File lib/sequel/dataset/actions.rb, line 622
622: def select_order_map(column=nil, &block)
623: _select_map(column, true, &block)
624: end
Alias for update, but not aliased directly so subclasses don‘t have to override both methods.
# File lib/sequel/dataset/actions.rb, line 628
628: def set(*args)
629: Sequel::Deprecation.deprecate('Dataset#set', 'Please switch to Dataset#update or load the sequel_3_dataset_methods extension to continue using it')
630: update(*args)
631: end
Returns the sum for the given column/expression. Uses a virtual row block if no column is given.
DB[:table].sum(:id) # SELECT sum(id) FROM table LIMIT 1
# => 55
DB[:table].sum{function(column)} # SELECT sum(function(column)) FROM table LIMIT 1
# => 10
# File lib/sequel/dataset/actions.rb, line 657
657: def sum(column=Sequel.virtual_row(&Proc.new))
658: aggregate_dataset.get{sum(column).as(:sum)}
659: end
Returns a string in CSV format containing the dataset records. By default the CSV representation includes the column titles in the first line. You can turn that off by passing false as the include_column_titles argument.
This does not use a CSV library or handle quoting of values in any way. If any values in any of the rows could include commas or line endings, you shouldn‘t use this.
puts DB[:table].to_csv # SELECT * FROM table # id,name # 1,Jim # 2,Bob
# File lib/sequel/dataset/actions.rb, line 674
674: def to_csv(include_column_titles = true)
675: Sequel::Deprecation.deprecate('Dataset#to_csv', 'Please load the sequel_3_dataset_methods extension to continue using it')
676: n = naked
677: cols = n.columns
678: csv = ''
679: csv << "#{cols.join(COMMA_SEPARATOR)}\r\n" if include_column_titles
680: n.each{|r| csv << "#{cols.collect{|c| r[c]}.join(COMMA_SEPARATOR)}\r\n"}
681: csv
682: end
Returns a hash with one column used as key and another used as value. If rows have duplicate values for the key column, the latter row(s) will overwrite the value of the previous row(s). If the value_column is not given or nil, uses the entire hash as the value.
DB[:table].to_hash(:id, :name) # SELECT * FROM table
# {1=>'Jim', 2=>'Bob', ...}
DB[:table].to_hash(:id) # SELECT * FROM table
# {1=>{:id=>1, :name=>'Jim'}, 2=>{:id=>2, :name=>'Bob'}, ...}
You can also provide an array of column names for either the key_column, the value column, or both:
DB[:table].to_hash([:id, :foo], [:name, :bar]) # SELECT * FROM table
# {[1, 3]=>['Jim', 'bo'], [2, 4]=>['Bob', 'be'], ...}
DB[:table].to_hash([:id, :name]) # SELECT * FROM table
# {[1, 'Jim']=>{:id=>1, :name=>'Jim'}, [2, 'Bob']=>{:id=>2, :name=>'Bob'}, ...}
# File lib/sequel/dataset/actions.rb, line 703
703: def to_hash(key_column, value_column = nil)
704: h = {}
705: if value_column
706: return naked.to_hash(key_column, value_column) if row_proc
707: if value_column.is_a?(Array)
708: if key_column.is_a?(Array)
709: each{|r| h[r.values_at(*key_column)] = r.values_at(*value_column)}
710: else
711: each{|r| h[r[key_column]] = r.values_at(*value_column)}
712: end
713: else
714: if key_column.is_a?(Array)
715: each{|r| h[r.values_at(*key_column)] = r[value_column]}
716: else
717: each{|r| h[r[key_column]] = r[value_column]}
718: end
719: end
720: elsif key_column.is_a?(Array)
721: each{|r| h[r.values_at(*key_column)] = r}
722: else
723: each{|r| h[r[key_column]] = r}
724: end
725: h
726: end
Returns a hash with one column used as key and the values being an array of column values. If the value_column is not given or nil, uses the entire hash as the value.
DB[:table].to_hash_groups(:name, :id) # SELECT * FROM table
# {'Jim'=>[1, 4, 16, ...], 'Bob'=>[2], ...}
DB[:table].to_hash_groups(:name) # SELECT * FROM table
# {'Jim'=>[{:id=>1, :name=>'Jim'}, {:id=>4, :name=>'Jim'}, ...], 'Bob'=>[{:id=>2, :name=>'Bob'}], ...}
You can also provide an array of column names for either the key_column, the value column, or both:
DB[:table].to_hash_groups([:first, :middle], [:last, :id]) # SELECT * FROM table
# {['Jim', 'Bob']=>[['Smith', 1], ['Jackson', 4], ...], ...}
DB[:table].to_hash_groups([:first, :middle]) # SELECT * FROM table
# {['Jim', 'Bob']=>[{:id=>1, :first=>'Jim', :middle=>'Bob', :last=>'Smith'}, ...], ...}
# File lib/sequel/dataset/actions.rb, line 746
746: def to_hash_groups(key_column, value_column = nil)
747: h = {}
748: if value_column
749: return naked.to_hash_groups(key_column, value_column) if row_proc
750: if value_column.is_a?(Array)
751: if key_column.is_a?(Array)
752: each{|r| (h[r.values_at(*key_column)] ||= []) << r.values_at(*value_column)}
753: else
754: each{|r| (h[r[key_column]] ||= []) << r.values_at(*value_column)}
755: end
756: else
757: if key_column.is_a?(Array)
758: each{|r| (h[r.values_at(*key_column)] ||= []) << r[value_column]}
759: else
760: each{|r| (h[r[key_column]] ||= []) << r[value_column]}
761: end
762: end
763: elsif key_column.is_a?(Array)
764: each{|r| (h[r.values_at(*key_column)] ||= []) << r}
765: else
766: each{|r| (h[r[key_column]] ||= []) << r}
767: end
768: h
769: end
Truncates the dataset. Returns nil.
DB[:table].truncate # TRUNCATE table # => nil
# File lib/sequel/dataset/actions.rb, line 775
775: def truncate
776: execute_ddl(truncate_sql)
777: end
Updates values for the dataset. The returned value is generally the number of rows updated, but that is adapter dependent. values should be a hash where the keys are columns to set and values are the values to which to set the columns.
DB[:table].update(:x=>nil) # UPDATE table SET x = NULL # => 10 DB[:table].update(:x=>:x+1, :y=>0) # UPDATE table SET x = (x + 1), y = 0 # => 10
# File lib/sequel/dataset/actions.rb, line 789
789: def update(values={}, &block)
790: sql = update_sql(values)
791: if uses_returning?(:update)
792: returning_fetch_rows(sql, &block)
793: else
794: execute_dui(sql)
795: end
796: end
Execute the given SQL and return the number of rows deleted. This exists solely as an optimization, replacing with_sql(sql).delete. It‘s significantly faster as it does not require cloning the current dataset.
# File lib/sequel/dataset/actions.rb, line 801
801: def with_sql_delete(sql)
802: execute_dui(sql)
803: end
Internals of import. If primary key values are requested, use separate insert commands for each row. Otherwise, call multi_insert_sql and execute each statement it gives separately.
# File lib/sequel/dataset/actions.rb, line 810
810: def _import(columns, values, opts)
811: trans_opts = opts.merge(:server=>@opts[:server])
812: if opts[:return] == :primary_key
813: @db.transaction(trans_opts){values.map{|v| insert(columns, v)}}
814: else
815: stmts = multi_insert_sql(columns, values)
816: @db.transaction(trans_opts){stmts.each{|st| execute_dui(st)}}
817: end
818: end
Return an array of arrays of values given by the symbols in ret_cols.
# File lib/sequel/dataset/actions.rb, line 821
821: def _select_map_multiple(ret_cols)
822: map{|r| r.values_at(*ret_cols)}
823: end
These methods don‘t fit cleanly into another section.
| NOTIMPL_MSG | = | "This method must be overridden in Sequel adapters".freeze |
| ARRAY_ACCESS_ERROR_MSG | = | 'You cannot call Dataset#[] with an integer or with no arguments.'.freeze |
| ARG_BLOCK_ERROR_MSG | = | 'Must use either an argument or a block, not both'.freeze |
| IMPORT_ERROR_MSG | = | 'Using Sequel::Dataset#import an empty column array is not allowed'.freeze |
Constructs a new Dataset instance with an associated database and options. Datasets are usually constructed by invoking the Database#[] method:
DB[:posts]
Sequel::Dataset is an abstract class that is not useful by itself. Each database adapter provides a subclass of Sequel::Dataset, and has the Database#dataset method return an instance of that subclass.
# File lib/sequel/dataset/misc.rb, line 50
50: def initialize(db, opts = (no_arg_given=true; nil))
51: @db = db
52: # REMOVE40
53: Sequel::Deprecation.deprecate('Passing the opts argument to Database#dataset or Dataset#initialize', 'Clone the dataset afterward to change the opts') unless no_arg_given
54: @opts = opts || {}.extend(DeprecateModifyHash)
55: end
REMOVE40
# File lib/sequel/dataset/misc.rb, line 21
21: def db=(v)
22: Sequel::Deprecation.deprecate('Dataset#db=', 'Please load the sequel_3_dataset_methods extension to continue using it')
23: @db = v
24: end
Yield a dataset for each server in the connection pool that is tied to that server. Intended for use in sharded environments where all servers need to be modified with the same data:
DB[:configs].where(:key=>'setting').each_server{|ds| ds.update(:value=>'new_value')}
# File lib/sequel/dataset/misc.rb, line 73
73: def each_server
74: db.servers.each{|s| yield server(s)}
75: end
Returns the string with the LIKE metacharacters (% and _) escaped. Useful for when the LIKE term is a user-provided string where metacharacters should not be recognized. Example:
ds.escape_like("foo\\%_") # 'foo\\\%\_'
# File lib/sequel/dataset/misc.rb, line 82
82: def escape_like(string)
83: string.gsub(/[\\%_]/){|m| "\\#{m}"}
84: end
Alias of first_source_alias
# File lib/sequel/dataset/misc.rb, line 87
87: def first_source
88: first_source_alias
89: end
The first source (primary table) for this dataset. If the dataset doesn‘t have a table, raises an Error. If the table is aliased, returns the aliased name.
DB[:table].first_source_alias # => :table DB[:table___t].first_source_alias # => :t
# File lib/sequel/dataset/misc.rb, line 99
99: def first_source_alias
100: source = @opts[:from]
101: if source.nil? || source.empty?
102: raise Error, 'No source specified for query'
103: end
104: case s = source.first
105: when SQL::AliasedExpression
106: s.aliaz
107: when Symbol
108: _, _, aliaz = split_symbol(s)
109: aliaz ? aliaz.to_sym : s
110: else
111: s
112: end
113: end
The first source (primary table) for this dataset. If the dataset doesn‘t have a table, raises an error. If the table is aliased, returns the original table, not the alias
DB[:table].first_source_table # => :table DB[:table___t].first_source_table # => :table
# File lib/sequel/dataset/misc.rb, line 124
124: def first_source_table
125: source = @opts[:from]
126: if source.nil? || source.empty?
127: raise Error, 'No source specified for query'
128: end
129: case s = source.first
130: when SQL::AliasedExpression
131: s.expression
132: when Symbol
133: sch, table, aliaz = split_symbol(s)
134: aliaz ? (sch ? SQL::QualifiedIdentifier.new(sch, table) : table.to_sym) : s
135: else
136: s
137: end
138: end
The String instance method to call on identifiers before sending them to the database.
# File lib/sequel/dataset/misc.rb, line 148
148: def identifier_input_method
149: if defined?(@identifier_input_method)
150: @identifier_input_method
151: elsif db.respond_to?(:identifier_input_method)
152: @identifier_input_method = db.identifier_input_method
153: else
154: Sequel::Deprecation.deprecate('Calling Dataset#identifier_input_method for a dataset where the database doesn\'t implement identifier_input_method will raise a NoMethodError in Sequel 4.')
155: @identifier_input_method = nil
156: end
157: end
The String instance method to call on identifiers before sending them to the database.
# File lib/sequel/dataset/misc.rb, line 161
161: def identifier_output_method
162: if defined?(@identifier_output_method)
163: @identifier_output_method
164: elsif db.respond_to?(:identifier_output_method)
165: @identifier_output_method = db.identifier_output_method
166: else
167: Sequel::Deprecation.deprecate('Calling Dataset#identifier_output_method for a dataset where the database doesn\'t implement identifier_output_method will raise a NoMethodError in Sequel 4.')
168: @identifier_output_method = nil
169: end
170: end
# File lib/sequel/dataset/misc.rb, line 25
25: def opts=(v)
26: Sequel::Deprecation.deprecate('Dataset#opts=', 'Please load the sequel_3_dataset_methods extension to continue using it')
27: @opts = v
28: end
Splits a possible implicit alias in c, handling both SQL::AliasedExpressions and Symbols. Returns an array of two elements, with the first being the main expression, and the second being the alias.
# File lib/sequel/dataset/misc.rb, line 189
189: def split_alias(c)
190: case c
191: when Symbol
192: c_table, column, aliaz = split_symbol(c)
193: [c_table ? SQL::QualifiedIdentifier.new(c_table, column.to_sym) : column.to_sym, aliaz]
194: when SQL::AliasedExpression
195: [c.expression, c.aliaz]
196: when SQL::JoinClause
197: [c.table, c.table_alias]
198: else
199: [c, nil]
200: end
201: end
Creates a unique table alias that hasn‘t already been used in the dataset. table_alias can be any type of object accepted by alias_symbol. The symbol returned will be the implicit alias in the argument, possibly appended with "_N" if the implicit alias has already been used, where N is an integer starting at 0 and increasing until an unused one is found.
You can provide a second addition array argument containing symbols that should not be considered valid table aliases. The current aliases for the FROM and JOIN tables are automatically included in this array.
DB[:table].unused_table_alias(:t) # => :t DB[:table].unused_table_alias(:table) # => :table_0 DB[:table, :table_0].unused_table_alias(:table) # => :table_1 DB[:table, :table_0].unused_table_alias(:table, [:table_1, :table_2]) # => :table_3
# File lib/sequel/dataset/misc.rb, line 225
225: def unused_table_alias(table_alias, used_aliases = [])
226: table_alias = alias_symbol(table_alias)
227: used_aliases += opts[:from].map{|t| alias_symbol(t)} if opts[:from]
228: used_aliases += opts[:join].map{|j| j.table_alias ? alias_alias_symbol(j.table_alias) : alias_symbol(j.table)} if opts[:join]
229: if used_aliases.include?(table_alias)
230: i = 0
231: loop do
232: ta = :"#{table_alias}_#{i}"
233: return ta unless used_aliases.include?(ta)
234: i += 1
235: end
236: else
237: table_alias
238: end
239: end
On some adapters, these use native prepared statements and bound variables, on others support is emulated. For details, see the "Prepared Statements/Bound Variables" guide.
| PREPARED_ARG_PLACEHOLDER | = | LiteralString.new('?').freeze |
Set the bind variables to use for the call. If bind variables have already been set for this dataset, they are updated with the contents of bind_vars.
DB[:table].filter(:id=>:$id).bind(:id=>1).call(:first)
# SELECT * FROM table WHERE id = ? LIMIT 1 -- (1)
# => {:id=>1}
# File lib/sequel/dataset/prepared_statements.rb, line 217
217: def bind(bind_vars={})
218: clone(:bind_vars=>@opts[:bind_vars] ? @opts[:bind_vars].merge(bind_vars) : bind_vars)
219: end
For the given type (:select, :first, :insert, :insert_select, :update, or :delete), run the sql with the bind variables specified in the hash. values is a hash passed to insert or update (if one of those types is used), which may contain placeholders.
DB[:table].filter(:id=>:$id).call(:first, :id=>1)
# SELECT * FROM table WHERE id = ? LIMIT 1 -- (1)
# => {:id=>1}
# File lib/sequel/dataset/prepared_statements.rb, line 228
228: def call(type, bind_variables={}, *values, &block)
229: prepare(type, nil, *values).call(bind_variables, &block)
230: end
Prepare an SQL statement for later execution. Takes a type similar to call, and the name symbol of the prepared statement. While name defaults to nil, it should always be provided as a symbol for the name of the prepared statement, as some databases require that prepared statements have names.
This returns a clone of the dataset extended with PreparedStatementMethods, which you can call with the hash of bind variables to use. The prepared statement is also stored in the associated database, where it can be called by name. The following usage is identical:
ps = DB[:table].filter(:name=>:$name).prepare(:first, :select_by_name)
ps.call(:name=>'Blah')
# SELECT * FROM table WHERE name = ? -- ('Blah')
# => {:id=>1, :name=>'Blah'}
DB.call(:select_by_name, :name=>'Blah') # Same thing
# File lib/sequel/dataset/prepared_statements.rb, line 250
250: def prepare(type, name=nil, *values)
251: ps = to_prepared_statement(type, values)
252: db.set_prepared_statement(name, ps) if name
253: ps
254: end
Return a cloned copy of the current dataset extended with PreparedStatementMethods, setting the type and modify values.
# File lib/sequel/dataset/prepared_statements.rb, line 260
260: def to_prepared_statement(type, values=nil)
261: ps = bind
262: ps.extend(PreparedStatementMethods)
263: ps.orig_dataset = self
264: ps.prepared_type = type
265: ps.prepared_modify_values = values
266: ps
267: end
Dataset graphing changes the dataset to yield hashes where keys are table name symbols and values are hashes representing the columns related to that table. All of these methods return modified copies of the receiver.
Adds the given graph aliases to the list of graph aliases to use, unlike set_graph_aliases, which replaces the list (the equivalent of select_more when graphing). See set_graph_aliases.
DB[:table].add_graph_aliases(:some_alias=>[:table, :column]) # SELECT ..., table.column AS some_alias
# File lib/sequel/dataset/graph.rb, line 16
16: def add_graph_aliases(graph_aliases)
17: unless ga = opts[:graph_aliases]
18: unless opts[:graph] && (ga = opts[:graph][:column_aliases])
19: Sequel::Deprecation.deprecate('Calling Dataset#add_graph_aliases before #graph or #set_graph_aliases', 'Please call it after #graph or #set_graph_aliases')
20: end
21: end
22: columns, graph_aliases = graph_alias_columns(graph_aliases)
23: select_more(*columns).clone(:graph_aliases => ga.merge(graph_aliases))
24: end
Similar to Dataset#join_table, but uses unambiguous aliases for selected columns and keeps metadata about the aliases for use in other methods.
Arguments:
| dataset : | Can be a symbol (specifying a table), another dataset, or an object that responds to dataset and returns a symbol or a dataset |
| join_conditions : | Any condition(s) allowed by join_table. |
| block : | A block that is passed to join_table. |
Options:
| :from_self_alias : | The alias to use when the receiver is not a graphed dataset but it contains multiple FROM tables or a JOIN. In this case, the receiver is wrapped in a from_self before graphing, and this option determines the alias to use. |
| :implicit_qualifier : | The qualifier of implicit conditions, see join_table. |
| :join_type : | The type of join to use (passed to join_table). Defaults to :left_outer. |
| :qualify: | The type of qualification to do, see join_table. |
| :select : | An array of columns to select. When not used, selects all columns in the given dataset. When set to false, selects no columns and is like simply joining the tables, though graph keeps some metadata about the join that makes it important to use graph instead of join_table. |
| :table_alias : | The alias to use for the table. If not specified, doesn‘t alias the table. You will get an error if the alias (or table) name is used more than once. |
# File lib/sequel/dataset/graph.rb, line 51
51: def graph(dataset, join_conditions = nil, options = {}, &block)
52: # Allow the use of a dataset or symbol as the first argument
53: # Find the table name/dataset based on the argument
54: table_alias = options[:table_alias]
55: case dataset
56: when Symbol
57: table = dataset
58: dataset = @db[dataset]
59: table_alias ||= table
60: when ::Sequel::Dataset
61: if dataset.simple_select_all?
62: table = dataset.opts[:from].first
63: table_alias ||= table
64: else
65: table = dataset
66: table_alias ||= dataset_alias((@opts[:num_dataset_sources] || 0)+1)
67: end
68: else
69: raise Error, "The dataset argument should be a symbol or dataset"
70: end
71:
72: # Raise Sequel::Error with explanation that the table alias has been used
73: raise_alias_error = lambda do
74: raise(Error, "this #{options[:table_alias] ? 'alias' : 'table'} has already been been used, please specify " \
75: "#{options[:table_alias] ? 'a different alias' : 'an alias via the :table_alias option'}")
76: end
77:
78: # Only allow table aliases that haven't been used
79: raise_alias_error.call if @opts[:graph] && @opts[:graph][:table_aliases] && @opts[:graph][:table_aliases].include?(table_alias)
80:
81: # Use a from_self if this is already a joined table
82: ds = (!@opts[:graph] && (@opts[:from].length > 1 || @opts[:join])) ? from_self(:alias=>options[:from_self_alias] || first_source) : self
83:
84: # Join the table early in order to avoid cloning the dataset twice
85: ds = ds.join_table(options[:join_type] || :left_outer, table, join_conditions, :table_alias=>table_alias, :implicit_qualifier=>options[:implicit_qualifier], :qualify=>options[:qualify], &block)
86: opts = ds.opts
87:
88: # Whether to include the table in the result set
89: add_table = options[:select] == false ? false : true
90: # Whether to add the columns to the list of column aliases
91: add_columns = !ds.opts.include?(:graph_aliases)
92:
93: # Setup the initial graph data structure if it doesn't exist
94: if graph = opts[:graph]
95: opts[:graph] = graph = graph.dup
96: select = opts[:select].dup
97: [:column_aliases, :table_aliases, :column_alias_num].each{|k| graph[k] = graph[k].dup}
98: else
99: master = alias_symbol(ds.first_source_alias)
100: raise_alias_error.call if master == table_alias
101: # Master hash storing all .graph related information
102: graph = opts[:graph] = {}
103: # Associates column aliases back to tables and columns
104: column_aliases = graph[:column_aliases] = {}
105: # Associates table alias (the master is never aliased)
106: table_aliases = graph[:table_aliases] = {master=>self}
107: # Keep track of the alias numbers used
108: ca_num = graph[:column_alias_num] = Hash.new(0)
109: # All columns in the master table are never
110: # aliased, but are not included if set_graph_aliases
111: # has been used.
112: if add_columns
113: if (select = @opts[:select]) && !select.empty? && !(select.length == 1 && (select.first.is_a?(SQL::ColumnAll)))
114: select = select.each do |sel|
115: column = case sel
116: when Symbol
117: _, c, a = split_symbol(sel)
118: (a || c).to_sym
119: when SQL::Identifier
120: sel.value.to_sym
121: when SQL::QualifiedIdentifier
122: column = sel.column
123: column = column.value if column.is_a?(SQL::Identifier)
124: column.to_sym
125: when SQL::AliasedExpression
126: column = sel.aliaz
127: column = column.value if column.is_a?(SQL::Identifier)
128: column.to_sym
129: else
130: raise Error, "can't figure out alias to use for graphing for #{sel.inspect}"
131: end
132: column_aliases[column] = [master, column]
133: end
134: select = qualified_expression(select, master)
135: else
136: select = columns.map do |column|
137: column_aliases[column] = [master, column]
138: SQL::QualifiedIdentifier.new(master, column)
139: end
140: end
141: end
142: end
143:
144: # Add the table alias to the list of aliases
145: # Even if it isn't been used in the result set,
146: # we add a key for it with a nil value so we can check if it
147: # is used more than once
148: table_aliases = graph[:table_aliases]
149: table_aliases[table_alias] = add_table ? dataset : nil
150:
151: # Add the columns to the selection unless we are ignoring them
152: if add_table && add_columns
153: column_aliases = graph[:column_aliases]
154: ca_num = graph[:column_alias_num]
155: # Which columns to add to the result set
156: cols = options[:select] || dataset.columns
157: # If the column hasn't been used yet, don't alias it.
158: # If it has been used, try table_column.
159: # If that has been used, try table_column_N
160: # using the next value of N that we know hasn't been
161: # used
162: cols.each do |column|
163: col_alias, identifier = if column_aliases[column]
164: column_alias = :"#{table_alias}_#{column}"
165: if column_aliases[column_alias]
166: column_alias_num = ca_num[column_alias]
167: column_alias = :"#{column_alias}_#{column_alias_num}"
168: ca_num[column_alias] += 1
169: end
170: [column_alias, SQL::AliasedExpression.new(SQL::QualifiedIdentifier.new(table_alias, column), column_alias)]
171: else
172: ident = SQL::QualifiedIdentifier.new(table_alias, column)
173: [column, ident]
174: end
175: column_aliases[col_alias] = [table_alias, column]
176: select.push(identifier)
177: end
178: end
179: add_columns ? ds.select(*select) : ds
180: end
This allows you to manually specify the graph aliases to use when using graph. You can use it to only select certain columns, and have those columns mapped to specific aliases in the result set. This is the equivalent of select for a graphed dataset, and must be used instead of select whenever graphing is used.
| graph_aliases : | Should be a hash with keys being symbols of column aliases, and values being either symbols or arrays with one to three elements. If the value is a symbol, it is assumed to be the same as a one element array containing that symbol. The first element of the array should be the table alias symbol. The second should be the actual column name symbol. If the array only has a single element the column name symbol will be assumed to be the same as the corresponding hash key. If the array has a third element, it is used as the value returned, instead of table_alias.column_name. |
DB[:artists].graph(:albums, :artist_id=>:id).
set_graph_aliases(:name=>:artists,
:album_name=>[:albums, :name],
:forty_two=>[:albums, :fourtwo, 42]).first
# SELECT artists.name, albums.name AS album_name, 42 AS forty_two ...
# File lib/sequel/dataset/graph.rb, line 205
205: def set_graph_aliases(graph_aliases)
206: columns, graph_aliases = graph_alias_columns(graph_aliases)
207: ds = select(*columns)
208: ds.opts[:graph_aliases] = graph_aliases
209: ds
210: end
These methods all return booleans, with most describing whether or not the dataset supports a feature.
Whether this dataset quotes identifiers.
# File lib/sequel/dataset/features.rb, line 10
10: def quote_identifiers?
11: if defined?(@quote_identifiers)
12: @quote_identifiers
13: elsif db.respond_to?(:quote_identifiers?)
14: @quote_identifiers = db.quote_identifiers?
15: else
16: Sequel::Deprecation.deprecate('Calling Dataset#quote_identifiers? for a dataset where the database doesn\'t implement quote_identifiers? will raise a NoMethodError in Sequel 4.')
17: @quote_identifiers = false
18: end
19: end
Whether you must use a column alias list for recursive CTEs (false by default).
# File lib/sequel/dataset/features.rb, line 30
30: def recursive_cte_requires_column_aliases?
31: false
32: end
Whether type specifiers are required for prepared statement/bound variable argument placeholders (i.e. :bv__integer)
# File lib/sequel/dataset/features.rb, line 42
42: def requires_placeholder_type_specifiers?
43: false
44: end
Whether the dataset supports common table expressions (the WITH clause). If given, type can be :select, :insert, :update, or :delete, in which case it determines whether WITH is supported for the respective statement type.
# File lib/sequel/dataset/features.rb, line 49
49: def supports_cte?(type=:select)
50: send(:"#{type}_clause_methods").include?(:"#{type}_with_sql")
51: end
Whether the dataset supports common table expressions (the WITH clause) in subqueries. If false, applies the WITH clause to the main query, which can cause issues if multiple WITH clauses use the same name.
# File lib/sequel/dataset/features.rb, line 56
56: def supports_cte_in_subqueries?
57: false
58: end
Whether the dataset supports the IS TRUE syntax.
# File lib/sequel/dataset/features.rb, line 92
92: def supports_is_true?
93: true
94: end
Whether the dataset supports the JOIN table USING (column1, …) syntax.
# File lib/sequel/dataset/features.rb, line 97
97: def supports_join_using?
98: true
99: end
Whether modifying joined datasets is supported.
# File lib/sequel/dataset/features.rb, line 102
102: def supports_modifying_joins?
103: false
104: end
Whether the dataset supports pattern matching by regular expressions.
# File lib/sequel/dataset/features.rb, line 119
119: def supports_regexp?
120: false
121: end
Whether the RETURNING clause is supported for the given type of query. type can be :insert, :update, or :delete.
# File lib/sequel/dataset/features.rb, line 125
125: def supports_returning?(type)
126: send(:"#{type}_clause_methods").include?(:"#{type}_returning_sql")
127: end
Whether the database supports SELECT *, column FROM table
# File lib/sequel/dataset/features.rb, line 130
130: def supports_select_all_and_column?
131: true
132: end
Whether the dataset supports timezones in literal timestamps
# File lib/sequel/dataset/features.rb, line 135
135: def supports_timestamp_timezones?
136: false
137: end
Whether the dataset supports fractional seconds in literal timestamps
# File lib/sequel/dataset/features.rb, line 140
140: def supports_timestamp_usecs?
141: true
142: end
Returns a DELETE SQL query string. See delete.
dataset.filter{|o| o.price >= 100}.delete_sql
# => "DELETE FROM items WHERE (price >= 100)"
# File lib/sequel/dataset/sql.rb, line 12
12: def delete_sql
13: return static_sql(opts[:sql]) if opts[:sql]
14: check_modification_allowed!
15: clause_sql(:delete)
16: end
Returns an EXISTS clause for the dataset as a LiteralString.
DB.select(1).where(DB[:items].exists) # SELECT 1 WHERE (EXISTS (SELECT * FROM items))
# File lib/sequel/dataset/sql.rb, line 22
22: def exists
23: SQL::PlaceholderLiteralString.new(EXISTS, [self], true)
24: end
Returns an INSERT SQL query string. See insert.
DB[:items].insert_sql(:a=>1) # => "INSERT INTO items (a) VALUES (1)"
# File lib/sequel/dataset/sql.rb, line 30
30: def insert_sql(*values)
31: return static_sql(@opts[:sql]) if @opts[:sql]
32:
33: check_modification_allowed!
34:
35: columns = []
36:
37: case values.size
38: when 0
39: return insert_sql({})
40: when 1
41: case vals = values.at(0)
42: when Hash
43: vals = @opts[:defaults].merge(vals) if @opts[:defaults]
44: vals = vals.merge(@opts[:overrides]) if @opts[:overrides]
45: values = []
46: vals.each do |k,v|
47: columns << k
48: values << v
49: end
50: when Dataset, Array, LiteralString
51: values = vals
52: end
53: when 2
54: if (v0 = values.at(0)).is_a?(Array) && ((v1 = values.at(1)).is_a?(Array) || v1.is_a?(Dataset) || v1.is_a?(LiteralString))
55: columns, values = v0, v1
56: raise(Error, "Different number of values and columns given to insert_sql") if values.is_a?(Array) and columns.length != values.length
57: end
58: end
59:
60: if values.is_a?(Array) && values.empty? && !insert_supports_empty_values?
61: columns = [columns().last]
62: values = [DEFAULT]
63: end
64: clone(:columns=>columns, :values=>values)._insert_sql
65: end
Returns a literal representation of a value to be used as part of an SQL expression.
DB[:items].literal("abc'def\\") #=> "'abc''def\\\\'"
DB[:items].literal(:items__id) #=> "items.id"
DB[:items].literal([1, 2, 3]) #=> "(1, 2, 3)"
DB[:items].literal(DB[:items]) #=> "(SELECT * FROM items)"
DB[:items].literal(:x + 1 > :y) #=> "((x + 1) > y)"
If an unsupported object is given, an Error is raised.
# File lib/sequel/dataset/sql.rb, line 77
77: def literal_append(sql, v)
78: case v
79: when Symbol
80: literal_symbol_append(sql, v)
81: when String
82: case v
83: when LiteralString
84: sql << v
85: when SQL::Blob
86: literal_blob_append(sql, v)
87: else
88: literal_string_append(sql, v)
89: end
90: when Integer
91: sql << literal_integer(v)
92: when Hash
93: literal_hash_append(sql, v)
94: when SQL::Expression
95: literal_expression_append(sql, v)
96: when Float
97: sql << literal_float(v)
98: when BigDecimal
99: sql << literal_big_decimal(v)
100: when NilClass
101: sql << literal_nil
102: when TrueClass
103: sql << literal_true
104: when FalseClass
105: sql << literal_false
106: when Array
107: literal_array_append(sql, v)
108: when Time
109: sql << (v.is_a?(SQLTime) ? literal_sqltime(v) : literal_time(v))
110: when DateTime
111: sql << literal_datetime(v)
112: when Date
113: sql << literal_date(v)
114: when Dataset
115: literal_dataset_append(sql, v)
116: else
117: literal_other_append(sql, v)
118: end
119: end
Returns an array of insert statements for inserting multiple records. This method is used by multi_insert to format insert statements and expects a keys array and an array of value arrays.
This method should be overridden by descendants if they support inserting multiple records in a single SQL statement.
# File lib/sequel/dataset/sql.rb, line 127
127: def multi_insert_sql(columns, values)
128: values.map{|r| insert_sql(columns, r)}
129: end
Same as select_sql, not aliased directly to make subclassing simpler.
# File lib/sequel/dataset/sql.rb, line 140
140: def sql
141: select_sql
142: end
Returns a TRUNCATE SQL query string. See truncate
DB[:items].truncate_sql # => 'TRUNCATE items'
# File lib/sequel/dataset/sql.rb, line 147
147: def truncate_sql
148: if opts[:sql]
149: static_sql(opts[:sql])
150: else
151: check_truncation_allowed!
152: raise(InvalidOperation, "Can't truncate filtered datasets") if opts[:where] || opts[:having]
153: t = ''
154: source_list_append(t, opts[:from])
155: _truncate_sql(t)
156: end
157: end
Formats an UPDATE statement using the given values. See update.
DB[:items].update_sql(:price => 100, :category => 'software') # => "UPDATE items SET price = 100, category = 'software'"
Raises an Error if the dataset is grouped or includes more than one table.
# File lib/sequel/dataset/sql.rb, line 166
166: def update_sql(values = {})
167: return static_sql(opts[:sql]) if opts[:sql]
168: check_modification_allowed!
169: clone(:values=>values)._update_sql
170: end
These methods, while public, are not designed to be used directly by the end user.
| EMULATED_FUNCTION_MAP | = | {} | Map of emulated function names to native function names. | |
| WILDCARD | = | LiteralString.new('*').freeze | ||
| ALL | = | ' ALL'.freeze | ||
| AND_SEPARATOR | = | " AND ".freeze | ||
| APOS | = | "'".freeze | ||
| APOS_RE | = | /'/.freeze | ||
| ARRAY_EMPTY | = | '(NULL)'.freeze | ||
| AS | = | ' AS '.freeze | ||
| ASC | = | ' ASC'.freeze | ||
| BACKSLASH | = | "\\".freeze | ||
| BOOL_FALSE | = | "'f'".freeze | ||
| BOOL_TRUE | = | "'t'".freeze | ||
| BRACKET_CLOSE | = | ']'.freeze | ||
| BRACKET_OPEN | = | '['.freeze | ||
| CASE_ELSE | = | " ELSE ".freeze | ||
| CASE_END | = | " END)".freeze | ||
| CASE_OPEN | = | '(CASE'.freeze | ||
| CASE_THEN | = | " THEN ".freeze | ||
| CASE_WHEN | = | " WHEN ".freeze | ||
| CAST_OPEN | = | 'CAST('.freeze | ||
| COLUMN_REF_RE1 | = | Sequel::COLUMN_REF_RE1 | ||
| COLUMN_REF_RE2 | = | Sequel::COLUMN_REF_RE2 | ||
| COLUMN_REF_RE3 | = | Sequel::COLUMN_REF_RE3 | ||
| COMMA | = | ', '.freeze | ||
| COMMA_SEPARATOR | = | COMMA | ||
| CONDITION_FALSE | = | '(1 = 0)'.freeze | ||
| CONDITION_TRUE | = | '(1 = 1)'.freeze | ||
| COUNT_FROM_SELF_OPTS | = | [:distinct, :group, :sql, :limit, :offset, :compounds] | ||
| COUNT_OF_ALL_AS_COUNT | = | SQL::Function.new(:count, WILDCARD).as(:count) | ||
| DATASET_ALIAS_BASE_NAME | = | 't'.freeze | ||
| DEFAULT | = | LiteralString.new('DEFAULT').freeze | ||
| DEFAULT_VALUES | = | " DEFAULT VALUES".freeze | ||
| DELETE | = | 'DELETE'.freeze | ||
| DELETE_CLAUSE_METHODS | = | clause_methods(:delete, %w'delete from where') | ||
| DESC | = | ' DESC'.freeze | ||
| DISTINCT | = | " DISTINCT".freeze | ||
| DOT | = | '.'.freeze | ||
| DOUBLE_APOS | = | "''".freeze | ||
| DOUBLE_QUOTE | = | '""'.freeze | ||
| EQUAL | = | ' = '.freeze | ||
| ESCAPE | = | " ESCAPE ".freeze | ||
| EXTRACT | = | 'extract('.freeze | ||
| EXISTS | = | ['EXISTS '.freeze].freeze | ||
| FOR_UPDATE | = | ' FOR UPDATE'.freeze | ||
| FORMAT_DATE | = | "'%Y-%m-%d'".freeze | ||
| FORMAT_DATE_STANDARD | = | "DATE '%Y-%m-%d'".freeze | ||
| FORMAT_OFFSET | = | "%+03i%02i".freeze | ||
| FORMAT_TIMESTAMP_RE | = | /%[Nz]/.freeze | ||
| FORMAT_TIMESTAMP_USEC | = | ".%06d".freeze | ||
| FORMAT_USEC | = | '%N'.freeze | ||
| FRAME_ALL | = | "ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING".freeze | ||
| FRAME_ROWS | = | "ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW".freeze | ||
| FROM | = | ' FROM '.freeze | ||
| FUNCTION_EMPTY | = | '()'.freeze | ||
| GROUP_BY | = | " GROUP BY ".freeze | ||
| HAVING | = | " HAVING ".freeze | ||
| INSERT | = | "INSERT".freeze | ||
| INSERT_CLAUSE_METHODS | = | clause_methods(:insert, %w'insert into columns values') | ||
| INTO | = | " INTO ".freeze | ||
| IS_LITERALS | = | {nil=>'NULL'.freeze, true=>'TRUE'.freeze, false=>'FALSE'.freeze}.freeze | ||
| IS_OPERATORS | = | ::Sequel::SQL::ComplexExpression::IS_OPERATORS | ||
| LIKE_OPERATORS | = | ::Sequel::SQL::ComplexExpression::LIKE_OPERATORS | ||
| LIMIT | = | " LIMIT ".freeze | ||
| N_ARITY_OPERATORS | = | ::Sequel::SQL::ComplexExpression::N_ARITY_OPERATORS | ||
| NOT_SPACE | = | 'NOT '.freeze | ||
| NULL | = | "NULL".freeze | ||
| NULLS_FIRST | = | " NULLS FIRST".freeze | ||
| NULLS_LAST | = | " NULLS LAST".freeze | ||
| OFFSET | = | " OFFSET ".freeze | ||
| ON | = | ' ON '.freeze | ||
| ON_PAREN | = | " ON (".freeze | ||
| ORDER_BY | = | " ORDER BY ".freeze | ||
| ORDER_BY_NS | = | "ORDER BY ".freeze | ||
| OVER | = | ' OVER '.freeze | ||
| PAREN_CLOSE | = | ')'.freeze | ||
| PAREN_OPEN | = | '('.freeze | ||
| PAREN_SPACE_OPEN | = | ' ('.freeze | ||
| PARTITION_BY | = | "PARTITION BY ".freeze | ||
| QUALIFY_KEYS | = | [:select, :where, :having, :order, :group] | ||
| QUESTION_MARK | = | '?'.freeze | ||
| QUESTION_MARK_RE | = | /\?/.freeze | ||
| QUOTE | = | '"'.freeze | ||
| QUOTE_RE | = | /"/.freeze | ||
| RETURNING | = | " RETURNING ".freeze | ||
| SELECT | = | 'SELECT'.freeze | ||
| SELECT_CLAUSE_METHODS | = | clause_methods(:select, %w'with select distinct columns from join where group having compounds order limit lock') | ||
| SET | = | ' SET '.freeze | ||
| SPACE | = | ' '.freeze | ||
| SQL_WITH | = | "WITH ".freeze | ||
| SPACE_WITH | = | " WITH ".freeze | ||
| TILDE | = | '~'.freeze | ||
| TIMESTAMP_FORMAT | = | "'%Y-%m-%d %H:%M:%S%N%z'".freeze | ||
| STANDARD_TIMESTAMP_FORMAT | = | "TIMESTAMP #{TIMESTAMP_FORMAT}".freeze | ||
| TWO_ARITY_OPERATORS | = | ::Sequel::SQL::ComplexExpression::TWO_ARITY_OPERATORS | ||
| REGEXP_OPERATORS | = | ::Sequel::SQL::ComplexExpression::REGEXP_OPERATORS | ||
| UNDERSCORE | = | '_'.freeze | ||
| UPDATE | = | 'UPDATE'.freeze | ||
| UPDATE_CLAUSE_METHODS | = | clause_methods(:update, %w'update table set where') | ||
| USING | = | ' USING ('.freeze | ||
| VALUES | = | " VALUES ".freeze | ||
| V190 | = | '1.9.0'.freeze | ||
| WHERE | = | " WHERE ".freeze | ||
| PUBLIC_APPEND_METHODS | = | (<<-END).split.map{|x| x.to_sym} literal aliased_expression_sql array_sql boolean_constant_sql case_expression_sql cast_sql column_all_sql complex_expression_sql constant_sql delayed_evaluation_sql function_sql join_clause_sql join_on_clause_sql join_using_clause_sql negative_boolean_constant_sql ordered_expression_sql placeholder_literal_string_sql qualified_identifier_sql quote_identifier quote_schema_table quoted_identifier subscript_sql window_sql window_function_sql END | ||
| PRIVATE_APPEND_METHODS | = | (<<-END).split.map{|x| x.to_sym} as_sql column_list compound_dataset_sql expression_list literal_array literal_blob literal_dataset literal_expression literal_hash literal_other literal_string literal_symbol source_list subselect_sql table_ref END | ||
| DatasetClass | = | self | ||
| PREPARED_ARG_PLACEHOLDER | = | ':'.freeze | ||
| DatasetClass | = | self | ||
| DatasetClass | = | self | ||
| DatasetClass | = | self | ||
| JAVA_SQL_TIMESTAMP | = | Java::JavaSQL::Timestamp | Cache Java class constants to speed up lookups | |
| JAVA_SQL_TIME | = | Java::JavaSQL::Time | ||
| JAVA_SQL_DATE | = | Java::JavaSQL::Date | ||
| JAVA_SQL_BLOB | = | Java::JavaSQL::Blob | ||
| JAVA_SQL_CLOB | = | Java::JavaSQL::Clob | ||
| JAVA_BUFFERED_READER | = | Java::JavaIo::BufferedReader | ||
| JAVA_BIG_DECIMAL | = | Java::JavaMath::BigDecimal | ||
| JAVA_BYTE_ARRAY | = | Java::byte[] | ||
| JAVA_UUID | = | Java::JavaUtil::UUID | ||
| TYPE_TRANSLATOR_INSTANCE | = | tt = TYPE_TRANSLATOR.new | ||
| DECIMAL_METHOD | = | tt.method(:decimal) | Cache type translator methods so that duplicate Method objects are not created. | |
| TIME_METHOD | = | tt.method(:time) | ||
| DATE_METHOD | = | tt.method(:date) | ||
| BUFFERED_READER_METHOD | = | tt.method(:buffered_reader) | ||
| BYTE_ARRAY_METHOD | = | tt.method(:byte_array) | ||
| BLOB_METHOD | = | tt.method(:blob) | ||
| CLOB_METHOD | = | tt.method(:clob) | ||
| UUID_METHOD | = | tt.method(:uuid) | ||
| DatasetClass | = | self |
| convert_types | [RW] | Whether to convert some Java types to ruby types when retrieving rows. Uses the database‘s setting by default, can be set to false to roughly double performance when fetching rows. |
For each of the methods in the given array, define a method with that name that returns a string with the SQL fragment that the related *_append method would add.
Do not call this method with untrusted input, as that can result in arbitrary code execution.
# File lib/sequel/dataset/sql.rb, line 340
340: def self.def_append_methods(meths)
341: Sequel::Deprecation.deprecate('Dataset.def_append_methods', "There is no replacement planned")
342: meths.each do |meth|
343: class_eval("def \#{meth}(*args, &block)\ns = ''\n\#{meth}_append(s, *args, &block)\ns\nend\n", __FILE__, __LINE__ + 1)
344: end
345: end
SQL fragment for BooleanConstants
# File lib/sequel/dataset/sql.rb, line 395
395: def boolean_constant_sql_append(sql, constant)
396: if (constant == true || constant == false) && !supports_where_true?
397: sql << (constant == true ? CONDITION_TRUE : CONDITION_FALSE)
398: else
399: literal_append(sql, constant)
400: end
401: end
SQL fragment for CaseExpression
# File lib/sequel/dataset/sql.rb, line 404
404: def case_expression_sql_append(sql, ce)
405: sql << CASE_OPEN
406: if ce.expression?
407: sql << SPACE
408: literal_append(sql, ce.expression)
409: end
410: w = CASE_WHEN
411: t = CASE_THEN
412: ce.conditions.each do |c,r|
413: sql << w
414: literal_append(sql, c)
415: sql << t
416: literal_append(sql, r)
417: end
418: sql << CASE_ELSE
419: literal_append(sql, ce.default)
420: sql << CASE_END
421: end
SQL fragment for the complex expression.
# File lib/sequel/dataset/sql.rb, line 437
437: def complex_expression_sql_append(sql, op, args)
438: case op
439: when *IS_OPERATORS
440: r = args.at(1)
441: if r.nil? || supports_is_true?
442: raise(InvalidOperation, 'Invalid argument used for IS operator') unless val = IS_LITERALS[r]
443: sql << PAREN_OPEN
444: literal_append(sql, args.at(0))
445: sql << SPACE << op.to_s << SPACE
446: sql << val << PAREN_CLOSE
447: elsif op == :IS
448: complex_expression_sql_append(sql, :"=", args)
449: else
450: complex_expression_sql_append(sql, :OR, [SQL::BooleanExpression.new(:"!=", *args), SQL::BooleanExpression.new(:IS, args.at(0), nil)])
451: end
452: when :IN, :"NOT IN"
453: cols = args.at(0)
454: vals = args.at(1)
455: col_array = true if cols.is_a?(Array)
456: if vals.is_a?(Array)
457: val_array = true
458: empty_val_array = vals == []
459: end
460: if empty_val_array
461: literal_append(sql, empty_array_value(op, cols))
462: elsif col_array
463: if !supports_multiple_column_in?
464: if val_array
465: expr = SQL::BooleanExpression.new(:OR, *vals.to_a.map{|vs| SQL::BooleanExpression.from_value_pairs(cols.to_a.zip(vs).map{|c, v| [c, v]})})
466: literal_append(sql, op == :IN ? expr : ~expr)
467: else
468: old_vals = vals
469: vals = vals.naked if vals.is_a?(Sequel::Dataset)
470: vals = vals.to_a
471: val_cols = old_vals.columns
472: complex_expression_sql_append(sql, op, [cols, vals.map!{|x| x.values_at(*val_cols)}])
473: end
474: else
475: # If the columns and values are both arrays, use array_sql instead of
476: # literal so that if values is an array of two element arrays, it
477: # will be treated as a value list instead of a condition specifier.
478: sql << PAREN_OPEN
479: literal_append(sql, cols)
480: sql << SPACE << op.to_s << SPACE
481: if val_array
482: array_sql_append(sql, vals)
483: else
484: literal_append(sql, vals)
485: end
486: sql << PAREN_CLOSE
487: end
488: else
489: sql << PAREN_OPEN
490: literal_append(sql, cols)
491: sql << SPACE << op.to_s << SPACE
492: literal_append(sql, vals)
493: sql << PAREN_CLOSE
494: end
495: when :LIKE, :'NOT LIKE'
496: sql << PAREN_OPEN
497: literal_append(sql, args.at(0))
498: sql << SPACE << op.to_s << SPACE
499: literal_append(sql, args.at(1))
500: sql << ESCAPE
501: literal_append(sql, BACKSLASH)
502: sql << PAREN_CLOSE
503: when :ILIKE, :'NOT ILIKE'
504: complex_expression_sql_append(sql, (op == :ILIKE ? :LIKE : :'NOT LIKE'), args.map{|v| Sequel.function(:UPPER, v)})
505: when *TWO_ARITY_OPERATORS
506: if REGEXP_OPERATORS.include?(op) && !supports_regexp?
507: raise InvalidOperation, "Pattern matching via regular expressions is not supported on #{db.database_type}"
508: end
509: sql << PAREN_OPEN
510: literal_append(sql, args.at(0))
511: sql << SPACE << op.to_s << SPACE
512: literal_append(sql, args.at(1))
513: sql << PAREN_CLOSE
514: when *N_ARITY_OPERATORS
515: sql << PAREN_OPEN
516: c = false
517: op_str = " #{op} "
518: args.each do |a|
519: sql << op_str if c
520: literal_append(sql, a)
521: c ||= true
522: end
523: sql << PAREN_CLOSE
524: when :NOT
525: sql << NOT_SPACE
526: literal_append(sql, args.at(0))
527: when :NOOP
528: literal_append(sql, args.at(0))
529: when :'B~'
530: sql << TILDE
531: literal_append(sql, args.at(0))
532: when :extract
533: sql << EXTRACT << args.at(0).to_s << FROM
534: literal_append(sql, args.at(1))
535: sql << PAREN_CLOSE
536: else
537: raise(InvalidOperation, "invalid operator #{op}")
538: end
539: end
SQL fragment specifying an emulated SQL function call. By default, assumes just the function name may need to be emulated, adapters should set an EMULATED_FUNCTION_MAP hash mapping emulated functions to native functions in their dataset class to setup the emulation.
# File lib/sequel/dataset/sql.rb, line 557
557: def emulated_function_sql_append(sql, f)
558: _function_sql_append(sql, native_function_name(f.f), f.args)
559: end
Yield a hash for each row in the dataset.
# File lib/sequel/adapters/sqlite.rb, line 349
349: def fetch_rows(sql)
350: execute(sql) do |result|
351: i = -1
352: cps = db.conversion_procs
353: type_procs = result.types.map{|t| cps[base_type_name(t)]}
354: cols = result.columns.map{|c| i+=1; [output_identifier(c), i, type_procs[i]]}
355: @columns = cols.map{|c| c.first}
356: result.each do |values|
357: row = {}
358: cols.each do |name,id,type_proc|
359: v = values[id]
360: if type_proc && v
361: v = type_proc.call(v)
362: end
363: row[name] = v
364: end
365: yield row
366: end
367: end
368: end
Yield all rows matching this dataset. If the dataset is set to split multiple statements, yield arrays of hashes one per statement instead of yielding results for all statements as hashes.
# File lib/sequel/adapters/mysql.rb, line 295
295: def fetch_rows(sql)
296: execute(sql) do |r|
297: i = -1
298: cps = db.conversion_procs
299: cols = r.fetch_fields.map do |f|
300: # Pretend tinyint is another integer type if its length is not 1, to
301: # avoid casting to boolean if Sequel::MySQL.convert_tinyint_to_bool
302: # is set.
303: type_proc = f.type == 1 && cast_tinyint_integer?(f) ? cps[2] : cps[f.type]
304: [output_identifier(f.name), type_proc, i+=1]
305: end
306: @columns = cols.map{|c| c.first}
307: if opts[:split_multiple_result_sets]
308: s = []
309: yield_rows(r, cols){|h| s << h}
310: yield s
311: else
312: yield_rows(r, cols){|h| yield h}
313: end
314: end
315: self
316: end
Set the columns and yield the hashes to the block.
# File lib/sequel/adapters/swift.rb, line 132
132: def fetch_rows(sql)
133: execute(sql) do |res|
134: col_map = {}
135: @columns = res.fields.map do |c|
136: col_map[c] = output_identifier(c)
137: end
138: res.each do |r|
139: h = {}
140: r.each do |k, v|
141: h[col_map[k]] = v.is_a?(StringIO) ? SQL::Blob.new(v.read) : v
142: end
143: yield h
144: end
145: end
146: self
147: end
Execute the SQL on the database and yield the rows as hashes with symbol keys.
# File lib/sequel/adapters/do.rb, line 155
155: def fetch_rows(sql)
156: execute(sql) do |reader|
157: cols = @columns = reader.fields.map{|f| output_identifier(f)}
158: while(reader.next!) do
159: h = {}
160: cols.zip(reader.values).each{|k, v| h[k] = v}
161: yield h
162: end
163: end
164: self
165: end
Don't allow graphing a dataset that splits multiple statements
# File lib/sequel/adapters/mysql.rb, line 319
319: def graph(*)
320: raise(Error, "Can't graph a dataset that splits multiple result sets") if opts[:split_multiple_result_sets]
321: super
322: end
SQL fragment specifying a JOIN clause without ON or USING.
# File lib/sequel/dataset/sql.rb, line 567
567: def join_clause_sql_append(sql, jc)
568: table = jc.table
569: table_alias = jc.table_alias
570: table_alias = nil if table == table_alias
571: sql << SPACE << join_type_sql(jc.join_type) << SPACE
572: identifier_append(sql, table)
573: as_sql_append(sql, table_alias) if table_alias
574: end
SQL fragment for the ordered expression, used in the ORDER BY clause.
# File lib/sequel/dataset/sql.rb, line 599
599: def ordered_expression_sql_append(sql, oe)
600: literal_append(sql, oe.expression)
601: sql << (oe.descending ? DESC : ASC)
602: case oe.nulls
603: when :first
604: sql << NULLS_FIRST
605: when :last
606: sql << NULLS_LAST
607: end
608: end
SQL fragment for a literal string with placeholders
# File lib/sequel/dataset/sql.rb, line 611
611: def placeholder_literal_string_sql_append(sql, pls)
612: args = pls.args
613: str = pls.str
614: sql << PAREN_OPEN if pls.parens
615: if args.is_a?(Hash)
616: re = /:(#{args.keys.map{|k| Regexp.escape(k.to_s)}.join('|')})\b/
617: loop do
618: previous, q, str = str.partition(re)
619: sql << previous
620: literal_append(sql, args[($1||q[1..-1].to_s).to_sym]) unless q.empty?
621: break if str.empty?
622: end
623: elsif str.is_a?(Array)
624: len = args.length
625: str.each_with_index do |s, i|
626: sql << s
627: literal_append(sql, args[i]) unless i == len
628: end
629: unless str.length == args.length || str.length == args.length + 1
630: Sequel::Deprecation.deprecate("Using a mismatched number of placeholders (#{str.length}) and placeholder arguments (#{args.length}) is deprecated and will raise an Error in Sequel 4.")
631: end
632: else
633: i = -1
634: loop do
635: previous, q, str = str.partition(QUESTION_MARK)
636: sql << previous
637: literal_append(sql, args.at(i+=1)) unless q.empty?
638: if str.empty?
639: unless i + 1 == args.length
640: Sequel::Deprecation.deprecate("Using a mismatched number of placeholders (#{i+1}) and placeholder arguments (#{args.length}) is deprecated and will raise an Error in Sequel 4.")
641: end
642: break
643: end
644: end
645: end
646: sql << PAREN_CLOSE if pls.parens
647: end
Create a named prepared statement that is stored in the database (and connection) for reuse.
# File lib/sequel/adapters/jdbc.rb, line 677
677: def prepare(type, name=nil, *values)
678: ps = to_prepared_statement(type, values)
679: ps.extend(PreparedStatementMethods)
680: if name
681: ps.prepared_statement_name = name
682: db.set_prepared_statement(name, ps)
683: end
684: ps
685: end
Prepare the given type of query with the given name and store it in the database. Note that a new native prepared statement is created on each call to this prepared statement.
# File lib/sequel/adapters/sqlite.rb, line 373
373: def prepare(type, name=nil, *values)
374: ps = to_prepared_statement(type, values)
375: ps.extend(PreparedStatementMethods)
376: if name
377: ps.prepared_statement_name = name
378: db.set_prepared_statement(name, ps)
379: end
380: ps
381: end
SQL fragment for the qualified identifier, specifying a table and a column (or schema and table). If 3 arguments are given, the 2nd should be the table/qualifier and the third should be column/qualified. If 2 arguments are given, the 2nd should be an SQL::QualifiedIdentifier.
# File lib/sequel/dataset/sql.rb, line 653
653: def qualified_identifier_sql_append(sql, table, column=(c = table.column; table = table.table; c))
654: identifier_append(sql, table)
655: sql << DOT
656: identifier_append(sql, column)
657: end
Adds quoting to identifiers (columns and tables). If identifiers are not being quoted, returns name as a string. If identifiers are being quoted quote the name with quoted_identifier.
# File lib/sequel/dataset/sql.rb, line 662
662: def quote_identifier_append(sql, name)
663: if name.is_a?(LiteralString)
664: sql << name
665: else
666: name = name.value if name.is_a?(SQL::Identifier)
667: name = input_identifier(name)
668: if quote_identifiers?
669: quoted_identifier_append(sql, name)
670: else
671: sql << name
672: end
673: end
674: end
Separates the schema from the table and returns a string with them quoted (if quoting identifiers)
# File lib/sequel/dataset/sql.rb, line 678
678: def quote_schema_table_append(sql, table)
679: schema, table = schema_and_table(table)
680: if schema
681: quote_identifier_append(sql, schema)
682: sql << DOT
683: end
684: quote_identifier_append(sql, table)
685: end
This method quotes the given name with the SQL standard double quote. Should be overridden by subclasses to provide quoting not matching the SQL standard, such as backtick (used by MySQL and SQLite).
# File lib/sequel/dataset/sql.rb, line 690
690: def quoted_identifier_append(sql, name)
691: sql << QUOTE << name.to_s.gsub(QUOTE_RE, DOUBLE_QUOTE) << QUOTE
692: end
Split the schema information from the table, returning two strings, one for the schema and one for the table. The returned schema may be nil, but the table will always have a string value.
Note that this function does not handle tables with more than one level of qualification (e.g. database.schema.table on Microsoft SQL Server).
# File lib/sequel/dataset/sql.rb, line 701
701: def schema_and_table(table_name, sch=(db._default_schema if db))
702: sch = sch.to_s if sch
703: case table_name
704: when Symbol
705: s, t, _ = split_symbol(table_name)
706: [s||sch, t]
707: when SQL::QualifiedIdentifier
708: [table_name.table.to_s, table_name.column.to_s]
709: when SQL::Identifier
710: [sch, table_name.value.to_s]
711: when String
712: [sch, table_name]
713: else
714: raise Error, 'table_name should be a Symbol, SQL::QualifiedIdentifier, SQL::Identifier, or String'
715: end
716: end
Makes each yield arrays of rows, with each array containing the rows for a given result set. Does not work with graphing. So you can submit SQL with multiple statements and easily determine which statement returned which results.
Modifies the row_proc of the returned dataset so that it still works as expected (running on the hashes instead of on the arrays of hashes). If you modify the row_proc afterward, note that it will receive an array of hashes instead of a hash.
# File lib/sequel/adapters/mysql.rb, line 333
333: def split_multiple_result_sets
334: raise(Error, "Can't split multiple statements on a graphed dataset") if opts[:graph]
335: ds = clone(:split_multiple_result_sets=>true)
336: ds.row_proc = proc{|x| x.map{|h| row_proc.call(h)}} if row_proc
337: ds
338: end
Splits table_name into an array of strings.
ds.split_qualifiers(:s) # ['s']
ds.split_qualifiers(:t__s) # ['t', 's']
ds.split_qualifiers(Sequel.qualify(:d, :t__s)) # ['d', 't', 's']
ds.split_qualifiers(Sequel.qualify(:h__d, :t__s)) # ['h', 'd', 't', 's']
# File lib/sequel/dataset/sql.rb, line 724
724: def split_qualifiers(table_name, *args)
725: case table_name
726: when SQL::QualifiedIdentifier
727: split_qualifiers(table_name.table, nil) + split_qualifiers(table_name.column, nil)
728: else
729: sch, table = schema_and_table(table_name, *args)
730: sch ? [sch, table] : [table]
731: end
732: end
The SQL fragment for the given window's options.
# File lib/sequel/dataset/sql.rb, line 743
743: def window_sql_append(sql, opts)
744: raise(Error, 'This dataset does not support window functions') unless supports_window_functions?
745: sql << PAREN_OPEN
746: window, part, order, frame = opts.values_at(:window, :partition, :order, :frame)
747: space = false
748: space_s = SPACE
749: if window
750: literal_append(sql, window)
751: space = true
752: end
753: if part
754: sql << space_s if space
755: sql << PARTITION_BY
756: expression_list_append(sql, Array(part))
757: space = true
758: end
759: if order
760: sql << space_s if space
761: sql << ORDER_BY_NS
762: expression_list_append(sql, Array(order))
763: space = true
764: end
765: case frame
766: when nil
767: # nothing
768: when :all
769: sql << space_s if space
770: sql << FRAME_ALL
771: when :rows
772: sql << space_s if space
773: sql << FRAME_ROWS
774: when String
775: sql << space_s if space
776: sql << frame
777: else
778: raise Error, "invalid window frame clause, should be :all, :rows, a string, or nil"
779: end
780: sql << PAREN_CLOSE
781: end
These methods modify the receiving dataset and should be used with care.