-rw-r--r--  ChangeLog                         6
-rw-r--r--  lib/csv.rb                     1892
-rw-r--r--  test/csv/line_endings.gz        bin 0 -> 59 bytes
-rw-r--r--  test/csv/tc_csv_parsing.rb      164
-rw-r--r--  test/csv/tc_csv_writing.rb       96
-rw-r--r--  test/csv/tc_data_converters.rb  260
-rw-r--r--  test/csv/tc_features.rb         177
-rw-r--r--  test/csv/tc_headers.rb          261
-rw-r--r--  test/csv/tc_interface.rb        235
-rw-r--r--  test/csv/tc_row.rb              289
-rw-r--r--  test/csv/tc_serialization.rb    155
-rw-r--r--  test/csv/tc_table.rb            392
-rw-r--r--  test/csv/ts_all.rb               19
13 files changed, 3946 insertions, 0 deletions
diff --git a/ChangeLog b/ChangeLog
index 9812ff35a8..931d290442 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,9 @@
+Tue Dec 25 11:45:34 2007 James Edward Gray II <jeg2@ruby-lang.org>
+
+ * lib/csv.rb: Import the FasterCSV source as the new CSV class.
+
+ * test/csv/*: Added all applicable tests from FasterCSV.
+
Tue Dec 25 11:33:52 2007 Nobuyoshi Nakada <nobu@ruby-lang.org>
* error.c (report_bug): uses ruby_description.
diff --git a/lib/csv.rb b/lib/csv.rb
new file mode 100644
index 0000000000..a37bc700b2
--- /dev/null
+++ b/lib/csv.rb
@@ -0,0 +1,1892 @@
+#!/usr/local/bin/ruby -w
+
+# = csv.rb -- CSV Reading and Writing
+#
+# Created by James Edward Gray II on 2005-10-31.
+# Copyright 2005 James Edward Gray II. You can redistribute or modify this code
+# under the terms of Ruby's license.
+#
+# See CSV for documentation.
+#
+# == Description
+#
+# Welcome to the new and improved CSV.
+#
+# This version of the CSV library began its life as FasterCSV. FasterCSV was
+# intended as a replacement to Ruby's then standard CSV library. It was
+# designed to address concerns users of that library had and it had three
+# primary goals:
+#
+# 1. Be significantly faster than CSV while remaining a pure Ruby library.
+# 2. Use a smaller and easier to maintain code base. (FasterCSV eventually
+# grew larger, but was also considerably richer in features. The parsing
+# core remains quite small.)
+# 3. Improve on the CSV interface.
+#
+# Obviously, the last one is subjective. I did try to defer to the original
+# interface whenever I didn't have a compelling reason to change it though, so
+# hopefully this won't be too radically different.
+#
+# We must have met our goals because FasterCSV was renamed to CSV and replaced
+# the original library.
+#
+# == What's Different From the Old CSV?
+#
+# I'm sure I'll miss something, but I'll try to mention most of the major
+# differences I am aware of, to help others quickly get up to speed:
+#
+# === CSV Parsing
+#
+# * This library has a stricter parser and will throw MalformedCSVErrors on
+# problematic data.
+# * This library has a less liberal idea of a line ending than CSV. What you
+# set as the <tt>:row_sep</tt> is law. It can auto-detect your line endings
+# though.
+# * The old library returned empty lines as <tt>[nil]</tt>. This library calls
+# them <tt>[]</tt>.
+# * This library has a much faster parser.
+#
+# === Interface
+#
+# * CSV now uses Hash-style parameters to set options.
+# * CSV no longer has generate_row() or parse_row().
+# * The old CSV's Reader and Writer classes have been dropped.
+# * CSV::open() is now more like Ruby's open().
+# * CSV objects now support most standard IO methods.
+# * CSV now has a new() method used to wrap objects like String and IO for
+# reading and writing.
+# * CSV::generate() is different from the old method.
+# * CSV no longer supports partial reads. It works line-by-line.
+# * CSV no longer allows the instance methods to override the separators for
+# performance reasons. They must be set in the constructor.
+#
+# If you use this library and find yourself missing any functionality I have
+# trimmed, please {let me know}[mailto:james@grayproductions.net].
+#
+# == Documentation
+#
+# See CSV for documentation.
+#
+# == What is CSV, really?
+#
+# CSV maintains a pretty strict definition of CSV taken directly from
+# {the RFC}[http://www.ietf.org/rfc/rfc4180.txt]. I relax the rules in only one
+# place and that is to make using this library easier. CSV will parse all valid
+# CSV.
+#
+# What you don't want to do is feed CSV invalid data. Because of the way the
+# CSV format works, it's common for a parser to need to read until the end of
+# the file to be sure a field is invalid. This eats a lot of time and memory.
+#
+# Luckily, when working with invalid CSV, Ruby's built-in methods will almost
+# always be superior in every way. For example, parsing non-quoted fields is as
+# easy as:
+#
+# data.split(",")
+#
+# == Questions and/or Comments
+#
+# Feel free to email {James Edward Gray II}[mailto:james@grayproductions.net]
+# with any questions.
+
+require "forwardable"
+require "English"
+require "enumerator"
+require "date"
+require "stringio"
+
+#
+# This class provides a complete interface to CSV files and data. It offers
+# tools to enable you to read and write to and from Strings or IO objects, as
+# needed.
+#
+# == Reading
+#
+# === From a File
+#
+# ==== A Line at a Time
+#
+# CSV.foreach("path/to/file.csv") do |row|
+# # use row here...
+# end
+#
+# ==== All at Once
+#
+# arr_of_arrs = CSV.read("path/to/file.csv")
+#
+# === From a String
+#
+# ==== A Line at a Time
+#
+# CSV.parse("CSV,data,String") do |row|
+# # use row here...
+# end
+#
+# ==== All at Once
+#
+# arr_of_arrs = CSV.parse("CSV,data,String")
+#
+# == Writing
+#
+# === To a File
+#
+# CSV.open("path/to/file.csv", "w") do |csv|
+# csv << ["row", "of", "CSV", "data"]
+# csv << ["another", "row"]
+# # ...
+# end
+#
+# === To a String
+#
+# csv_string = CSV.generate do |csv|
+# csv << ["row", "of", "CSV", "data"]
+# csv << ["another", "row"]
+# # ...
+# end
+#
+# == Convert a Single Line
+#
+# csv_string = ["CSV", "data"].to_csv # to CSV
+# csv_array = "CSV,String".parse_csv # from CSV
+#
+# == Shortcut Interface
+#
+# CSV { |csv_out| csv_out << %w{my data here} } # to $stdout
+# CSV(csv = "") { |csv_str| csv_str << %w{my data here} } # to a String
+# CSV($stderr) { |csv_err| csv_err << %w{my data here} } # to $stderr
+#
+class CSV
+ # The version of the installed library.
+ VERSION = "2.0.0".freeze
+
+ #
+ # A CSV::Row is part Array and part Hash. It retains an order for the fields
+ # and allows duplicates just as an Array would, but also allows you to access
+ # fields by name just as you could if they were in a Hash.
+ #
+ # All rows returned by CSV will be constructed from this class, if header row
+ # processing is activated.
+ #
+ class Row
+ #
+ # Construct a new CSV::Row from +headers+ and +fields+, which are expected
+ # to be Arrays. If one Array is shorter than the other, it will be padded
+ # with +nil+ objects.
+ #
+ # The optional +header_row+ parameter can be set to +true+ to indicate, via
+ # CSV::Row.header_row?() and CSV::Row.field_row?(), that this is a header
+ # row. Otherwise, the row is assumed to be a field row.
+ #
+ # A CSV::Row object supports the following Array methods through delegation:
+ #
+ # * empty?()
+ # * length()
+ # * size()
+ #
+ def initialize(headers, fields, header_row = false)
+ @header_row = header_row
+
+ # handle extra headers or fields
+ @row = if headers.size > fields.size
+ headers.zip(fields)
+ else
+ fields.zip(headers).map { |pair| pair.reverse }
+ end
+ end
+
+ # Internal data format used to compare equality.
+ attr_reader :row
+ protected :row
+
+ ### Array Delegation ###
+
+ extend Forwardable
+ def_delegators :@row, :empty?, :length, :size
+
+ # Returns +true+ if this is a header row.
+ def header_row?
+ @header_row
+ end
+
+ # Returns +true+ if this is a field row.
+ def field_row?
+ not header_row?
+ end
+
+ # Returns the headers of this row.
+ def headers
+ @row.map { |pair| pair.first }
+ end
+
+ #
+ # :call-seq:
+ # field( header )
+ # field( header, offset )
+ # field( index )
+ #
+ # This method will fetch the field value by +header+ or +index+. If a field
+ # is not found, +nil+ is returned.
+ #
+ # When provided, +offset+ ensures that a header match occurs on or later
+ # than the +offset+ index. You can use this to find duplicate headers,
+ # without resorting to hard-coding exact indices.
+ #
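+ # An illustrative sketch (the headers and values here are made up):
+ #
+ #   row = CSV::Row.new(%w{A B A}, [1, 2, 3])
+ #   row.field("A")     # => 1
+ #   row.field("A", 1)  # => 3  (the duplicate "A" at or after index 1)
+ #   row.field(1)       # => 2
+ #   row["missing"]     # => nil
+ #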
+ def field(header_or_index, minimum_index = 0)
+ # locate the pair
+ finder = header_or_index.is_a?(Integer) ? :[] : :assoc
+ pair = @row[minimum_index..-1].send(finder, header_or_index)
+
+ # return the field if we have a pair
+ pair.nil? ? nil : pair.last
+ end
+ alias_method :[], :field
+
+ #
+ # :call-seq:
+ # []=( header, value )
+ # []=( header, offset, value )
+ # []=( index, value )
+ #
+ # Looks up the field by the semantics described in CSV::Row.field() and
+ # assigns the +value+.
+ #
+ # Assigning past the end of the row with an index will set all pairs in
+ # between to <tt>[nil, nil]</tt>. Assigning to an unused header appends the new
+ # pair.
+ #
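+ # A small sketch (the headers and values are made up):
+ #
+ #   row = CSV::Row.new(%w{A B}, [1, 2])
+ #   row["A"] = 10   # overwrite an existing field
+ #   row["C"] = 3    # append a new header/field pair
+ #   row[4]   = 5    # pads index 3 with a [nil, nil] pair
+ #   row.fields      # => [10, 2, 3, nil, 5]
+ #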
+ def []=(*args)
+ value = args.pop
+
+ if args.first.is_a? Integer
+ if @row[args.first].nil? # extending past the end with index
+ @row[args.first] = [nil, value]
+ @row.map! { |pair| pair.nil? ? [nil, nil] : pair }
+ else # normal index assignment
+ @row[args.first][1] = value
+ end
+ else
+ index = index(*args)
+ if index.nil? # appending a field
+ self << [args.first, value]
+ else # normal header assignment
+ @row[index][1] = value
+ end
+ end
+ end
+
+ #
+ # :call-seq:
+ # <<( field )
+ # <<( header_and_field_array )
+ # <<( header_and_field_hash )
+ #
+ # If a two-element Array is provided, it is assumed to be a header and field
+ # and the pair is appended. A Hash works the same way with the key being
+ # the header and the value being the field. Anything else is assumed to be
+ # a lone field which is appended with a +nil+ header.
+ #
+ # This method returns the row for chaining.
+ #
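+ # For example (an illustrative sketch):
+ #
+ #   row = CSV::Row.new(Array.new, Array.new)
+ #   row << "lone field" << ["header", "field"] << {"key" => "value"}
+ #   row.headers  # => [nil, "header", "key"]
+ #   row.fields   # => ["lone field", "field", "value"]
+ #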
+ def <<(arg)
+ if arg.is_a?(Array) and arg.size == 2 # appending a header and field
+ @row << arg
+ elsif arg.is_a?(Hash) # append header and field pairs
+ arg.each { |pair| @row << pair }
+ else # append field value
+ @row << [nil, arg]
+ end
+
+ self # for chaining
+ end
+
+ #
+ # A shortcut for appending multiple fields. Equivalent to:
+ #
+ # args.each { |arg| csv_row << arg }
+ #
+ # This method returns the row for chaining.
+ #
+ def push(*args)
+ args.each { |arg| self << arg }
+
+ self # for chaining
+ end
+
+ #
+ # :call-seq:
+ # delete( header )
+ # delete( header, offset )
+ # delete( index )
+ #
+ # Used to remove a pair from the row by +header+ or +index+. The pair is
+ # located as described in CSV::Row.field(). The deleted pair is returned,
+ # or +nil+ if a pair could not be found.
+ #
+ def delete(header_or_index, minimum_index = 0)
+ if header_or_index.is_a? Integer # by index
+ @row.delete_at(header_or_index)
+ else # by header
+ @row.delete_at(index(header_or_index, minimum_index))
+ end
+ end
+
+ #
+ # The provided +block+ is passed a header and field for each pair in the row
+ # and expected to return +true+ or +false+, depending on whether the pair
+ # should be deleted.
+ #
+ # This method returns the row for chaining.
+ #
+ def delete_if(&block)
+ @row.delete_if(&block)
+
+ self # for chaining
+ end
+
+ #
+ # This method accepts any number of arguments which can be headers, indices,
+ # Ranges of either, or two-element Arrays containing a header and offset.
+ # Each argument will be replaced with a field lookup as described in
+ # CSV::Row.field().
+ #
+ # If called with no arguments, all fields are returned.
+ #
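+ # An illustrative sketch (the data is made up):
+ #
+ #   row = CSV::Row.new(%w{A B C}, [1, 2, 3])
+ #   row.fields            # => [1, 2, 3]
+ #   row.fields("A", 2)    # => [1, 3]
+ #   row.fields("A".."B")  # => [1, 2]
+ #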
+ def fields(*headers_and_or_indices)
+ if headers_and_or_indices.empty? # return all fields--no arguments
+ @row.map { |pair| pair.last }
+ else # or work like values_at()
+ headers_and_or_indices.inject(Array.new) do |all, h_or_i|
+ all + if h_or_i.is_a? Range
+ index_begin = h_or_i.begin.is_a?(Integer) ? h_or_i.begin :
+ index(h_or_i.begin)
+ index_end = h_or_i.end.is_a?(Integer) ? h_or_i.end :
+ index(h_or_i.end)
+ new_range = h_or_i.exclude_end? ? (index_begin...index_end) :
+ (index_begin..index_end)
+ fields.values_at(new_range)
+ else
+ [field(*Array(h_or_i))]
+ end
+ end
+ end
+ end
+ alias_method :values_at, :fields
+
+ #
+ # :call-seq:
+ # index( header )
+ # index( header, offset )
+ #
+ # This method will return the index of a field with the provided +header+.
+ # The +offset+ can be used to locate duplicate header names, as described in
+ # CSV::Row.field().
+ #
+ def index(header, minimum_index = 0)
+ # find the pair
+ index = headers[minimum_index..-1].index(header)
+ # return the index at the right offset, if we found one
+ index.nil? ? nil : index + minimum_index
+ end
+
+ # Returns +true+ if +name+ is a header for this row, and +false+ otherwise.
+ def header?(name)
+ headers.include? name
+ end
+ alias_method :include?, :header?
+
+ #
+ # Returns +true+ if +data+ matches a field in this row, and +false+
+ # otherwise.
+ #
+ def field?(data)
+ fields.include? data
+ end
+
+ include Enumerable
+
+ #
+ # Yields each pair of the row as header and field tuples (much like
+ # iterating over a Hash).
+ #
+ # Support for Enumerable.
+ #
+ # This method returns the row for chaining.
+ #
+ def each(&block)
+ @row.each(&block)
+
+ self # for chaining
+ end
+
+ #
+ # Returns +true+ if this row contains the same headers and fields in the
+ # same order as +other+.
+ #
+ def ==(other)
+ @row == other.row
+ end
+
+ #
+ # Collapses the row into a simple Hash. Be warned that this discards field
+ # order and clobbers duplicate fields.
+ #
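+ # For example (note how the duplicate "B" header keeps only its last field):
+ #
+ #   CSV::Row.new(%w{A B B}, [1, 2, 3]).to_hash  # => {"A" => 1, "B" => 3}
+ #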
+ def to_hash
+ # flatten just one level of the internal Array
+ Hash[*@row.inject(Array.new) { |ary, pair| ary.push(*pair) }]
+ end
+
+ #
+ # Returns the row as a CSV String. Headers are not used. Equivalent to:
+ #
+ # csv_row.fields.to_csv( options )
+ #
+ def to_csv(options = Hash.new)
+ fields.to_csv(options)
+ end
+ alias_method :to_s, :to_csv
+ end
+
+ #
+ # A CSV::Table is a two-dimensional data structure for representing CSV
+ # documents. Tables allow you to work with the data by row or column,
+ # manipulate the data, and even convert the results back to CSV, if needed.
+ #
+ # All tables returned by CSV will be constructed from this class, if header
+ # row processing is activated.
+ #
+ class Table
+ #
+ # Construct a new CSV::Table from +array_of_rows+, which are expected
+ # to be CSV::Row objects. All rows are assumed to have the same headers.
+ #
+ # A CSV::Table object supports the following Array methods through
+ # delegation:
+ #
+ # * empty?()
+ # * length()
+ # * size()
+ #
+ def initialize(array_of_rows)
+ @table = array_of_rows
+ @mode = :col_or_row
+ end
+
+ # The current access mode for indexing and iteration.
+ attr_reader :mode
+
+ # Internal data format used to compare equality.
+ attr_reader :table
+ protected :table
+
+ ### Array Delegation ###
+
+ extend Forwardable
+ def_delegators :@table, :empty?, :length, :size
+
+ #
+ # Returns a duplicate table object, in column mode. This is handy for
+ # chaining in a single call without changing the table mode, but be aware
+ # that this method can consume a fair amount of memory for bigger data sets.
+ #
+ # This method returns the duplicate table for chaining. Don't chain
+ # destructive methods (like []=()) this way though, since you are working
+ # with a duplicate.
+ #
+ def by_col
+ self.class.new(@table.dup).by_col!
+ end
+
+ #
+ # Switches the mode of this table to column mode. All calls to indexing and
+ # iteration methods will work with columns until the mode is changed again.
+ #
+ # This method returns the table and is safe to chain.
+ #
+ def by_col!
+ @mode = :col
+
+ self
+ end
+
+ #
+ # Returns a duplicate table object, in mixed mode. This is handy for
+ # chaining in a single call without changing the table mode, but be aware
+ # that this method can consume a fair amount of memory for bigger data sets.
+ #
+ # This method returns the duplicate table for chaining. Don't chain
+ # destructive methods (like []=()) this way though, since you are working
+ # with a duplicate.
+ #
+ def by_col_or_row
+ self.class.new(@table.dup).by_col_or_row!
+ end
+
+ #
+ # Switches the mode of this table to mixed mode. All calls to indexing and
+ # iteration methods will use the default intelligent indexing system until
+ # the mode is changed again. In mixed mode an index is assumed to be a row
+ # reference while anything else is assumed to be column access by headers.
+ #
+ # This method returns the table and is safe to chain.
+ #
+ def by_col_or_row!
+ @mode = :col_or_row
+
+ self
+ end
+
+ #
+ # Returns a duplicate table object, in row mode. This is handy for chaining
+ # in a single call without changing the table mode, but be aware that this
+ # method can consume a fair amount of memory for bigger data sets.
+ #
+ # This method returns the duplicate table for chaining. Don't chain
+ # destructive methods (like []=()) this way though, since you are working
+ # with a duplicate.
+ #
+ def by_row
+ self.class.new(@table.dup).by_row!
+ end
+
+ #
+ # Switches the mode of this table to row mode. All calls to indexing and
+ # iteration methods will work with rows until the mode is changed again.
+ #
+ # This method returns the table and is safe to chain.
+ #
+ def by_row!
+ @mode = :row
+
+ self
+ end
+
+ #
+ # Returns the headers for the first row of this table (assumed to match all
+ # other rows). An empty Array is returned for empty tables.
+ #
+ def headers
+ if @table.empty?
+ Array.new
+ else
+ @table.first.headers
+ end
+ end
+
+ #
+ # In the default mixed mode, this method returns rows for index access and
+ # columns for header access. You can force the index association by first
+ # calling by_col!() or by_row!().
+ #
+ # Columns are returned as an Array of values. Altering that Array has no
+ # effect on the table.
+ #
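+ # An illustrative sketch (the CSV data is made up):
+ #
+ #   table = CSV.parse("A,B\n1,2\n3,4\n", :headers => true)
+ #   table[0].fields  # => ["1", "2"]  (row access by index)
+ #   table["A"]       # => ["1", "3"]  (column access by header)
+ #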
+ def [](index_or_header)
+ if @mode == :row or # by index
+ (@mode == :col_or_row and index_or_header.is_a? Integer)
+ @table[index_or_header]
+ else # by header
+ @table.map { |row| row[index_or_header] }
+ end
+ end
+
+ #
+ # In the default mixed mode, this method assigns rows for index access and
+ # columns for header access. You can force the index association by first
+ # calling by_col!() or by_row!().
+ #
+ # Rows may be set to an Array of values (which will inherit the table's
+ # headers()) or a CSV::Row.
+ #
+ # Columns may be set to a single value, which is copied to each row of the
+ # column, or an Array of values. Arrays of values are assigned to rows top
+ # to bottom in row major order. Excess values are ignored and if the Array
+ # does not have a value for each row the extra rows will receive a +nil+.
+ #
+ # Assigning to an existing column or row clobbers the data. Assigning to
+ # new columns creates them at the right end of the table.
+ #
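+ # A small sketch (the data and column names are made up):
+ #
+ #   table = CSV.parse("A,B\n1,2\n3,4\n", :headers => true)
+ #   table["C"] = %w{x y}    # add a new column, one value per row
+ #   table[1]   = %w{5 6 z}  # replace a row (inherits the table's headers())
+ #   table["A"] = "0"        # set every row's "A" field to "0"
+ #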
+ def []=(index_or_header, value)
+ if @mode == :row or # by index
+ (@mode == :col_or_row and index_or_header.is_a? Integer)
+ if value.is_a? Array
+ @table[index_or_header] = Row.new(headers, value)
+ else
+ @table[index_or_header] = value
+ end
+ else # set column
+ if value.is_a? Array # multiple values
+ @table.each_with_index do |row, i|
+ if row.header_row?
+ row[index_or_header] = index_or_header
+ else
+ row[index_or_header] = value[i]
+ end
+ end
+ else # repeated value
+ @table.each do |row|
+ if row.header_row?
+ row[index_or_header] = index_or_header
+ else
+ row[index_or_header] = value
+ end
+ end
+ end
+ end
+ end
+
+ #
+ # The mixed mode default is to treat a list of indices as row access,
+ # returning the rows indicated. Anything else is considered columnar
+ # access. For columnar access, the return set has an Array for each row
+ # with the values indicated by the headers in each Array. You can force
+ # column or row mode using by_col!() or by_row!().
+ #
+ # You cannot mix column and row access.
+ #
+ def values_at(*indices_or_headers)
+ if @mode == :row or # by indices
+ ( @mode == :col_or_row and indices_or_headers.all? do |index|
+ index.is_a?(Integer) or
+ ( index.is_a?(Range) and
+ index.first.is_a?(Integer) and
+ index.last.is_a?(Integer) )
+ end )
+ @table.values_at(*indices_or_headers)
+ else # by headers
+ @table.map { |row| row.values_at(*indices_or_headers) }
+ end
+ end
+
+ #
+ # Adds a new row to the bottom end of this table. You can provide an Array,
+ # which will be converted to a CSV::Row (inheriting the table's headers()),
+ # or a CSV::Row.
+ #
+ # This method returns the table for chaining.
+ #
+ def <<(row_or_array)
+ if row_or_array.is_a? Array # append Array
+ @table << Row.new(headers, row_or_array)
+ else # append Row
+ @table << row_or_array
+ end
+
+ self # for chaining
+ end
+
+ #
+ # A shortcut for appending multiple rows. Equivalent to:
+ #
+ # rows.each { |row| self << row }
+ #
+ # This method returns the table for chaining.
+ #
+ def push(*rows)
+ rows.each { |row| self << row }
+
+ self # for chaining
+ end
+
+ #
+ # Removes and returns the indicated column or row. In the default mixed
+ # mode indices refer to rows and everything else is assumed to be a column
+ # header. Use by_col!() or by_row!() to force the lookup.
+ #
+ def delete(index_or_header)
+ if @mode == :row or # by index
+ (@mode == :col_or_row and index_or_header.is_a? Integer)
+ @table.delete_at(index_or_header)
+ else # by header
+ @table.map { |row| row.delete(index_or_header).last }
+ end
+ end
+
+ #
+ # Removes any column or row for which the block returns +true+. In the
+ # default mixed mode or row mode, iteration is the standard row major
+ # walking of rows. In column mode, iteration will +yield+ two-element
+ # tuples containing the column name and an Array of values for that column.
+ #
+ # This method returns the table for chaining.
+ #
+ def delete_if(&block)
+ if @mode == :row or @mode == :col_or_row # by index
+ @table.delete_if(&block)
+ else # by header
+ to_delete = Array.new
+ headers.each_with_index do |header, i|
+ to_delete << header if block[[header, self[header]]]
+ end
+ to_delete.map { |header| delete(header) }
+ end
+
+ self # for chaining
+ end
+
+ include Enumerable
+
+ #
+ # In the default mixed mode or row mode, iteration is the standard row major
+ # walking of rows. In column mode, iteration will +yield+ two-element
+ # tuples containing the column name and an Array of values for that column.
+ #
+ # This method returns the table for chaining.
+ #
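+ # For example, in column mode (an illustrative sketch):
+ #
+ #   table.by_col!.each do |header, column|
+ #     # +header+ is the column name, +column+ an Array of its fields
+ #   end
+ #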
+ def each(&block)
+ if @mode == :col
+ headers.each { |header| block[[header, self[header]]] }
+ else
+ @table.each(&block)
+ end
+
+ self # for chaining
+ end
+
+ # Returns +true+ if all rows of this table ==() +other+'s rows.
+ def ==(other)
+ @table == other.table
+ end
+
+ #
+ # Returns the table as an Array of Arrays. Headers will be the first row,
+ # then all of the field rows will follow.
+ #
+ def to_a
+ @table.inject([headers]) do |array, row|
+ if row.header_row?
+ array
+ else
+ array + [row.fields]
+ end
+ end
+ end
+
+ #
+ # Returns the table as a complete CSV String. Headers will be listed first,
+ # then all of the field rows.
+ #
+ def to_csv(options = Hash.new)
+ @table.inject([headers.to_csv(options)]) do |rows, row|
+ if row.header_row?
+ rows
+ else
+ rows + [row.fields.to_csv(options)]
+ end
+ end.join
+ end
+ alias_method :to_s, :to_csv
+ end
+
+ # The error thrown when the parser encounters illegal CSV formatting.
+ class MalformedCSVError < RuntimeError; end
+
+ #
+ # A FieldInfo Struct contains details about a field's position in the data
+ # source it was read from. CSV will pass this Struct to some blocks that make
+ # decisions based on field structure. See CSV.convert_fields() for an
+ # example.
+ #
+ # <b><tt>index</tt></b>:: The zero-based index of the field in its row.
+ # <b><tt>line</tt></b>:: The line of the data source this row is from.
+ # <b><tt>header</tt></b>:: The header for the column, when available.
+ #
+ FieldInfo = Struct.new(:index, :line, :header)
+
+ # A Regexp used to find and convert some common Date formats.
+ DateMatcher = / \A(?: (\w+,?\s+)?\w+\s+\d{1,2},?\s+\d{2,4} |
+ \d{4}-\d{2}-\d{2} )\z /x
+ # A Regexp used to find and convert some common DateTime formats.
+ DateTimeMatcher =
+ / \A(?: (\w+,?\s+)?\w+\s+\d{1,2}\s+\d{1,2}:\d{1,2}:\d{1,2},?\s+\d{2,4} |
+ \d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2} )\z /x
+ #
+ # This Hash holds the built-in converters of CSV that can be accessed by name.
+ # You can select Converters with CSV.convert() or through the +options+ Hash
+ # passed to CSV::new().
+ #
+ # <b><tt>:integer</tt></b>:: Converts any field Integer() accepts.
+ # <b><tt>:float</tt></b>:: Converts any field Float() accepts.
+ # <b><tt>:numeric</tt></b>:: A combination of <tt>:integer</tt>
+ # and <tt>:float</tt>.
+ # <b><tt>:date</tt></b>:: Converts any field Date::parse() accepts.
+ # <b><tt>:date_time</tt></b>:: Converts any field DateTime::parse() accepts.
+ # <b><tt>:all</tt></b>:: All built-in converters. A combination of
+ # <tt>:date_time</tt> and <tt>:numeric</tt>.
+ #
+ # This Hash is intentionally left unfrozen and users should feel free to add
+ # values to it that can be accessed by all CSV objects.
+ #
+ # To add a combo field, the value should be an Array of names. Combo fields
+ # can be nested with other combo fields.
+ #
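+ # For example, a custom converter and a combo that uses it might be registered
+ # as follows (the names <tt>:strip</tt> and <tt>:clean_numbers</tt> are made up):
+ #
+ #   CSV::Converters[:strip]         = lambda { |f| f.strip }
+ #   CSV::Converters[:clean_numbers] = [:strip, :numeric]
+ #   CSV.parse_line(" 1 , two ", :converters => :clean_numbers)  # => [1, "two"]
+ #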
+ Converters = { :integer => lambda { |f| Integer(f) rescue f },
+ :float => lambda { |f| Float(f) rescue f },
+ :numeric => [:integer, :float],
+ :date => lambda { |f|
+ f =~ DateMatcher ? (Date.parse(f) rescue f) : f
+ },
+ :date_time => lambda { |f|
+ f =~ DateTimeMatcher ? (DateTime.parse(f) rescue f) : f
+ },
+ :all => [:date_time, :numeric] }
+
+ #
+ # This Hash holds the built-in header converters of CSV that can be accessed
+ # by name. You can select HeaderConverters with CSV.header_convert() or
+ # through the +options+ Hash passed to CSV::new().
+ #
+ # <b><tt>:downcase</tt></b>:: Calls downcase() on the header String.
+ # <b><tt>:symbol</tt></b>:: The header String is downcased, spaces are
+ # replaced with underscores, non-word characters
+ # are dropped, and finally to_sym() is called.
+ #
+ # This Hash is intentionally left unfrozen and users should feel free to add
+ # values to it that can be accessed by all CSV objects.
+ #
+ # To add a combo field, the value should be an Array of names. Combo fields
+ # can be nested with other combo fields.
+ #
+ HeaderConverters = {
+ :downcase => lambda { |h| h.downcase },
+ :symbol => lambda { |h|
+ h.downcase.tr(" ", "_").delete("^a-z0-9_").to_sym
+ }
+ }
+
+ #
+ # The options used when no overrides are given by calling code. They are:
+ #
+ # <b><tt>:col_sep</tt></b>:: <tt>","</tt>
+ # <b><tt>:row_sep</tt></b>:: <tt>:auto</tt>
+ # <b><tt>:quote_char</tt></b>:: <tt>'"'</tt>
+ # <b><tt>:converters</tt></b>:: +nil+
+ # <b><tt>:unconverted_fields</tt></b>:: +nil+
+ # <b><tt>:headers</tt></b>:: +false+
+ # <b><tt>:return_headers</tt></b>:: +false+
+ # <b><tt>:header_converters</tt></b>:: +nil+
+ # <b><tt>:skip_blanks</tt></b>:: +false+
+ # <b><tt>:force_quotes</tt></b>:: +false+
+ #
+ DEFAULT_OPTIONS = { :col_sep => ",",
+ :row_sep => :auto,
+ :quote_char => '"',
+ :converters => nil,
+ :unconverted_fields => nil,
+ :headers => false,
+ :return_headers => false,
+ :header_converters => nil,
+ :skip_blanks => false,
+ :force_quotes => false }.freeze
+
+ #
+ # This method allows you to serialize an Array of Ruby objects to a String or
+ # File of CSV data. This is not as powerful as Marshal or YAML, but perhaps
+ # useful for spreadsheet and database interaction.
+ #
+ # Out of the box, this method is intended to work with simple data objects or
+ # Structs. It will serialize a list of instance variables and/or
+ # Struct.members().
+ #
+ # If you need more complicated serialization, you can control the process
+ # by adding methods to the class to be serialized.
+ #
+ # A class method csv_meta() is responsible for returning the first row of the
+ # document (as an Array). This row is considered to be a Hash of the form
+ # key_1,value_1,key_2,value_2,... CSV::load() expects to find a class key
+ # with a value of the stringified class name and CSV::dump() will create this,
+ # if you do not define this method. This method is only called on the first
+ # object of the Array.
+ #
+ # The next method you can provide is an instance method called csv_headers().
+ # This method is expected to return the second line of the document (again as
+ # an Array), which is to be used to give each column a header. By default,
+ # CSV::load() will set an instance variable if the field header starts with an
+ # @ character or call send() passing the header as the method name and
+ # the field value as an argument. This method is only called on the first
+ # object of the Array.
+ #
+ # Finally, you can provide an instance method called csv_dump(), which will
+ # be passed the headers. This should return an Array of fields that can be
+ # serialized for this object. This method is called once for every object in
+ # the Array.
+ #
+ # The +io+ parameter can be used to serialize to a File, and +options+ can be
+ # anything CSV::new() accepts.
+ #
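+ # A minimal sketch with a simple Struct (the class and data are made up):
+ #
+ #   Point = Struct.new(:x, :y)
+ #   csv   = CSV.dump([Point.new(1, 2), Point.new(3, 4)])
+ #   # The first row holds the meta data ("class,Point"), the second the headers
+ #   # (here the Struct setters), and the remaining rows hold the fields.
+ #   points = CSV.load(csv)  # rebuilds the objects (with String fields)
+ #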
+ def self.dump(ary_of_objs, io = "", options = Hash.new)
+ obj_template = ary_of_objs.first
+
+ csv = new(io, options)
+
+ # write meta information
+ begin
+ csv << obj_template.class.csv_meta
+ rescue NoMethodError
+ csv << [:class, obj_template.class]
+ end
+
+ # write headers
+ begin
+ headers = obj_template.csv_headers
+ rescue NoMethodError
+ headers = obj_template.instance_variables.sort
+ if obj_template.class.ancestors.find { |cls| cls.to_s =~ /\AStruct\b/ }
+ headers += obj_template.members.map { |mem| "#{mem}=" }.sort
+ end
+ end
+ csv << headers
+
+ # serialize each object
+ ary_of_objs.each do |obj|
+ begin
+ csv << obj.csv_dump(headers)
+ rescue NoMethodError
+ csv << headers.map do |var|
+ if var[0] == ?@
+ obj.instance_variable_get(var)
+ else
+ obj[var[0..-2]]
+ end
+ end
+ end
+ end
+
+ if io.is_a? String
+ csv.string
+ else
+ csv.close
+ end
+ end
+
+ #
+ # :call-seq:
+ # filter( options = Hash.new ) { |row| ... }
+ # filter( input, options = Hash.new ) { |row| ... }
+ # filter( input, output, options = Hash.new ) { |row| ... }
+ #
+ # This method is a convenience for building Unix-like filters for CSV data.
+ # Each row is yielded to the provided block which can alter it as needed.
+ # After the block returns, the row is appended to +output+ altered or not.
+ #
+ # The +input+ and +output+ arguments can be anything CSV::new() accepts
+ # (generally String or IO objects). If not given, they default to
+ # <tt>ARGF</tt> and <tt>$stdout</tt>.
+ #
+ # The +options+ parameter is also filtered down to CSV::new() after some
+ # clever key parsing. Any key beginning with <tt>:in_</tt> or
+ # <tt>:input_</tt> will have that leading identifier stripped and will only
+ # be used in the +options+ Hash for the +input+ object. Keys starting with
+ # <tt>:out_</tt> or <tt>:output_</tt> affect only +output+. All other keys
+ # are assigned to both objects.
+ #
+ # The <tt>:output_row_sep</tt> +option+ defaults to
+ # <tt>$INPUT_RECORD_SEPARATOR</tt> (<tt>$/</tt>).
+ #
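+ # For example, doubling every field of some numeric data (an illustrative
+ # sketch; the input String and +out+ buffer are made up):
+ #
+ #   out = ""
+ #   CSV.filter("1,2,3\n4,5,6\n", out) do |row|
+ #     row.map! { |field| Integer(field) * 2 }
+ #   end
+ #   out  # => "2,4,6\n8,10,12\n"
+ #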
+ def self.filter(*args)
+ # parse options for input, output, or both
+ in_options, out_options = Hash.new, {:row_sep => $INPUT_RECORD_SEPARATOR}
+ if args.last.is_a? Hash
+ args.pop.each do |key, value|
+ case key.to_s
+ when /\Ain(?:put)?_(.+)\Z/
+ in_options[$1.to_sym] = value
+ when /\Aout(?:put)?_(.+)\Z/
+ out_options[$1.to_sym] = value
+ else
+ in_options[key] = value
+ out_options[key] = value
+ end
+ end
+ end
+ # build input and output wrappers
+ input = new(args.shift || ARGF, in_options)
+ output = new(args.shift || $stdout, out_options)
+
+ # read, yield, write
+ input.each do |row|
+ yield row
+ output << row
+ end
+ end
+
+ #
+ # This method is intended as the primary interface for reading CSV files. You
+ # pass a +path+ and any +options+ you wish to set for the read. Each row of
+ # the file will be passed to the provided +block+ in turn.
+ #
+ # The +options+ parameter can be anything CSV::new() understands.
+ #
+ def self.foreach(path, options = Hash.new, &block)
+ open(path, options) do |csv|
+ csv.each(&block)
+ end
+ end
+
+ #
+ # :call-seq:
+ # generate( str, options = Hash.new ) { |csv| ... }
+ # generate( options = Hash.new ) { |csv| ... }
+ #
+ # This method wraps a String you provide, or an empty default String, in a
+ # CSV object which is passed to the provided block. You can use the block to
+ # append CSV rows to the String and when the block exits, the final String
+ # will be returned.
+ #
+ # Note that a passed String *is* modified by this method. Call dup() before
+ # passing if you need a new String.
+ #
+ # The +options+ parameter can be anything CSV::new() understands.
+ #
+ def self.generate(*args)
+ # add a default empty String, if none was given
+ if args.first.is_a? String
+ io = StringIO.new(args.shift)
+ io.seek(0, IO::SEEK_END)
+ args.unshift(io)
+ else
+ args.unshift("")
+ end
+ csv = new(*args) # wrap
+ yield csv # yield for appending
+ csv.string # return final String
+ end
+
+ #
+ # This method is a shortcut for converting a single row (Array) into a CSV
+ # String.
+ #
+ # The +options+ parameter can be anything CSV::new() understands.
+ #
+ # The <tt>:row_sep</tt> +option+ defaults to <tt>$INPUT_RECORD_SEPARATOR</tt>
+ # (<tt>$/</tt>) when calling this method.
+ #
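+ # For example (assuming the default <tt>$/</tt> of <tt>"\n"</tt>):
+ #
+ #   CSV.generate_line(["one", nil, "thr,ee"])    # => "one,,\"thr,ee\"\n"
+ #   CSV.generate_line(%w{a b}, :col_sep => ";")  # => "a;b\n"
+ #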
+ def self.generate_line(row, options = Hash.new)
+ options = {:row_sep => $INPUT_RECORD_SEPARATOR}.merge(options)
+ (new("", options) << row).string
+ end
+
+ #
+ # This method will return a CSV instance, just like CSV::new(), but the
+ # instance will be cached and returned for all future calls to this method for
+ # the same +data+ object (tested by Object#object_id()) with the same
+ # +options+.
+ #
+ # If a block is given, the instance is passed to the block and the return
+ # value becomes the return value of the block.
+ #
+ def self.instance(data = $stdout, options = Hash.new)
+ # create a _signature_ for this method call, data object and options
+ sig = [data.object_id] +
+ options.values_at(*DEFAULT_OPTIONS.keys.sort_by { |sym| sym.to_s })
+
+ # fetch or create the instance for this signature
+ @@instances ||= Hash.new
+ instance = (@@instances[sig] ||= new(data, options))
+
+ if block_given?
+ yield instance # run block, if given, returning result
+ else
+ instance # or return the instance
+ end
+ end
+
+ #
+ # This method is the reading counterpart to CSV::dump(). See that method for
+ # a detailed description of the process.
+ #
+ # You can customize loading by adding a class method called csv_load() which
+ # will be passed a Hash of meta information, an Array of headers, and an Array
+ # of fields for the object the method is expected to return.
+ #
+ # Remember that all fields will be Strings after this load. If you need
+ # something else, use +options+ to setup converters or provide a custom
+ # csv_load() implementation.
+ #
+ def self.load(io_or_str, options = Hash.new)
+ csv = new(io_or_str, options)
+
+ # load meta information
+ meta = Hash[*csv.shift]
+ cls = meta["class"].split("::").inject(Object) do |c, const|
+ c.const_get(const)
+ end
+
+ # load headers
+ headers = csv.shift
+
+ # unserialize each object stored in the file
+ results = csv.inject(Array.new) do |all, row|
+ begin
+ obj = cls.csv_load(meta, headers, row)
+ rescue NoMethodError
+ obj = cls.allocate
+ headers.zip(row) do |name, value|
+ if name[0] == ?@
+ obj.instance_variable_set(name, value)
+ else
+ obj.send(name, value)
+ end
+ end
+ end
+ all << obj
+ end
+
+ csv.close unless io_or_str.is_a? String
+
+ results
+ end
+
+ #
+ # :call-seq:
+ # open( filename, mode="r", options = Hash.new ) { |csv| ... }
+ # open( filename, mode="r", options = Hash.new )
+ #
+ # This method opens an IO object, and wraps that with CSV. This is intended
+ # as the primary interface for writing a CSV file.
+ #
+ # You may pass any +args+ Ruby's open() understands followed by an optional
+ # Hash containing any +options+ CSV::new() understands.
+ #
+ # This method works like Ruby's open() call, in that it will pass a CSV object
+ # to a provided block and close it when the block terminates, or it will
+ # return the CSV object when no block is provided. (*Note*: This is different
+ # from the Ruby 1.8 CSV library which passed rows to the block. Use
+ # CSV::foreach() for that behavior.)
+ #
+ # An opened CSV object will delegate to many IO methods, for convenience. You
+ # may call:
+ #
+ # * binmode()
+ # * close()
+ # * close_read()
+ # * close_write()
+ # * closed?()
+ # * eof()
+ # * eof?()
+ # * fcntl()
+ # * fileno()
+ # * flush()
+ # * fsync()
+ # * ioctl()
+ # * isatty()
+ # * pid()
+ # * pos()
+ # * reopen()
+ # * seek()
+ # * stat()
+ # * sync()
+ # * sync=()
+ # * tell()
+ # * to_i()
+ # * to_io()
+ # * tty?()
+ #
+ def self.open(*args)
+ # find the +options+ Hash
+ options = if args.last.is_a? Hash then args.pop else Hash.new end
+ # wrap a File opened with the remaining +args+
+ csv = new(File.open(*args), options)
+
+ # handle blocks like Ruby's open(), not like the CSV library
+ if block_given?
+ begin
+ yield csv
+ ensure
+ csv.close
+ end
+ else
+ csv
+ end
+ end
+
+ #
+ # :call-seq:
+ # parse( str, options = Hash.new ) { |row| ... }
+ # parse( str, options = Hash.new )
+ #
+ # This method can be used to easily parse CSV out of a String. You may either
+ # provide a +block+ which will be called with each row of the String in turn,
+ # or just use the returned Array of Arrays (when no +block+ is given).
+ #
+ # You pass your +str+ to read from, and an optional +options+ Hash containing
+ # anything CSV::new() understands.
+ #
+ def self.parse(*args, &block)
+ csv = new(*args)
+ if block.nil? # slurp contents, if no block is given
+ begin
+ csv.read
+ ensure
+ csv.close
+ end
+ else # or pass each row to a provided block
+ csv.each(&block)
+ end
+ end
+
+ #
+ # This method is a shortcut for converting a single line of a CSV String into
+ # an Array. Note that if +line+ contains multiple rows, anything
+ # beyond the first row is ignored.
+ #
+ # The +options+ parameter can be anything CSV::new() understands.
+ #
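+ # For example:
+ #
+ #   CSV.parse_line("1,,\"a,b\"")  # => ["1", nil, "a,b"]
+ #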
+ def self.parse_line(line, options = Hash.new)
+ new(line, options).shift
+ end
+
+ #
+ # Use to slurp a CSV file into an Array of Arrays. Pass the +path+ to the
+ # file and any +options+ CSV::new() understands.
+ #
+ def self.read(path, options = Hash.new)
+ open(path, options) { |csv| csv.read }
+ end
+
+ # Alias for CSV::read().
+ def self.readlines(*args)
+ read(*args)
+ end
+
+ #
+ # A shortcut for:
+ #
+ # CSV.read( path, { :headers => true,
+ # :converters => :numeric,
+ # :header_converters => :symbol }.merge(options) )
+ #
+ def self.table(path, options = Hash.new)
+ read( path, { :headers => true,
+ :converters => :numeric,
+ :header_converters => :symbol }.merge(options) )
+ end
+
+ #
+ # This constructor will wrap either a String or IO object passed in +data+ for
+ # reading and/or writing. In addition to the CSV instance methods, several IO
+ # methods are delegated. (See CSV::open() for a complete list.) If you pass
+ # a String for +data+, you can later retrieve it (after writing to it, for
+ # example) with CSV.string().
+ #
+ # Note that a wrapped String will be positioned at the beginning (for
+ # reading). If you want it at the end (for writing), use CSV::generate().
+ # If you want any other positioning, pass a preset StringIO object instead.
+ #
+ # You may set any reading and/or writing preferences in the +options+ Hash.
+ # Available options are:
+ #
+ # <b><tt>:col_sep</tt></b>:: The String placed between each field.
+ # <b><tt>:row_sep</tt></b>:: The String appended to the end of each
+ # row. This can be set to the special
+ # <tt>:auto</tt> setting, which requests
+ # that CSV automatically discover this
+ # from the data. Auto-discovery reads
+ # ahead in the data looking for the next
+ # <tt>"\r\n"</tt>, <tt>"\n"</tt>, or
+ # <tt>"\r"</tt> sequence. A sequence
+ # will be selected even if it occurs in
+ # a quoted field, assuming that you
+ # would have the same line endings
+ # there. If none of those sequences is
+ # found, +data+ is <tt>ARGF</tt>,
+ # <tt>STDIN</tt>, <tt>STDOUT</tt>, or
+ # <tt>STDERR</tt>, or the stream is only
+ # available for output, the default
+ # <tt>$INPUT_RECORD_SEPARATOR</tt>
+ # (<tt>$/</tt>) is used. Obviously,
+ # discovery takes a little time. Set
+ # manually if speed is important.
+ # <b><tt>:quote_char</tt></b>:: The character used to quote fields.
+ # This has to be a single character
+ # String. This is useful for
+ # applications that incorrectly use
+ # <tt>'</tt> as the quote character
+ # instead of the correct <tt>"</tt>.
+ # CSV will always consider a double
+ # sequence of this character to be an
+ # escaped quote.
+ # <b><tt>:converters</tt></b>:: An Array of names from the Converters
+ # Hash and/or lambdas that handle custom
+ # conversion. A single converter
+ # doesn't have to be in an Array.
+ # <b><tt>:unconverted_fields</tt></b>:: If set to +true+, an
+ # unconverted_fields() method will be
+ # added to all returned rows (Array or
+ # CSV::Row) that will return the fields
+ # as they were before conversion. Note
+ # that <tt>:headers</tt> supplied by
+ # Array or String were not fields of the
+ # document and thus will have an empty
+ # Array attached.
+ # <b><tt>:headers</tt></b>:: If set to <tt>:first_row</tt> or
+ # +true+, the initial row of the CSV
+ # file will be treated as a row of
+ # headers. If set to an Array, the
+ # contents will be used as the headers.
+ # If set to a String, the String is run
+ # through a call of CSV::parse_line() to
+ # produce an Array of headers. This
+ # setting causes CSV.shift() to return
+ # rows as CSV::Row objects instead of
+ # Arrays and CSV.read() to return
+ # CSV::Table objects instead of an Array
+ # of Arrays.
+ # <b><tt>:return_headers</tt></b>:: When +false+, header rows are silently
+ # swallowed. If set to +true+, header
+ # rows are returned in a CSV::Row object
+ # with identical headers and
+ # fields (save that the fields do not go
+ # through the converters).
+ # <b><tt>:header_converters</tt></b>:: Identical in functionality to
+ # <tt>:converters</tt> save that the
+ # conversions are only made to header
+ # rows.
+ # <b><tt>:skip_blanks</tt></b>:: When set to a +true+ value, CSV will
+ # skip over any rows with no content.
+ # <b><tt>:force_quotes</tt></b>:: When set to a +true+ value, CSV will
+ # quote all CSV fields it creates.
+ #
+ # See CSV::DEFAULT_OPTIONS for the default settings.
+ #
+ # Options cannot be overridden in the instance methods for performance reasons,
+ # so be sure to set what you want here.
+ #
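+ # A short sketch of wrapping a String with headers and converters (the data is
+ # made up):
+ #
+ #   csv = CSV.new( "Name,Value\nfoo,1\n",
+ #                  :headers => true, :converters => :numeric )
+ #   row = csv.shift
+ #   row["Name"]   # => "foo"
+ #   row["Value"]  # => 1
+ #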
+ def initialize(data, options = Hash.new)
+ # build the options for this read/write
+ options = DEFAULT_OPTIONS.merge(options)
+
+ # create the IO object we will read from
+ @io = if data.is_a? String then StringIO.new(data) else data end
+
+ init_separators(options)
+ init_parsers(options)
+ init_converters(options)
+ init_headers(options)
+
+ unless options.empty?
+ raise ArgumentError, "Unknown options: #{options.keys.join(', ')}."
+ end
+
+ # track our own lineno since IO gets confused about line-ends in CSV fields
+ @lineno = 0
+ end
+
+ #
+ # The line number of the last row read from this file. Fields with nested
+ # line-end characters will not affect this count.
+ #
+ attr_reader :lineno
+
+ ### IO and StringIO Delegation ###
+
+ extend Forwardable
+ def_delegators :@io, :binmode, :close, :close_read, :close_write, :closed?,
+ :eof, :eof?, :fcntl, :fileno, :flush, :fsync, :ioctl,
+ :isatty, :pid, :pos, :reopen, :seek, :stat, :string,
+ :sync, :sync=, :tell, :to_i, :to_io, :tty?
+
+ # Rewinds the underlying IO object and resets CSV's lineno() counter.
+ def rewind
+ @headers = nil
+ @lineno = 0
+
+ @io.rewind
+ end
+
+ ### End Delegation ###
+
+ #
+ # The primary write method for wrapped Strings and IOs, +row+ (an Array or
+ # CSV::Row) is converted to CSV and appended to the data source. When a
+ # CSV::Row is passed, only the row's fields() are appended to the output.
+ #
+ # The data source must be open for writing.
+ #
+ def <<(row)
+ # handle CSV::Row objects and Hashes
+ row = case row
+ when self.class::Row then row.fields
+ when Hash then @headers.map { |header| row[header] }
+ else row
+ end
+
+ @headers = row if header_row?
+ @lineno += 1
+
+ @io << row.map(&@quote).join(@col_sep) + @row_sep # quote and separate
+
+ self # for chaining
+ end
+ alias_method :add_row, :<<
+ alias_method :puts, :<<
+
+ #
+ # :call-seq:
+ # convert( name )
+ # convert { |field| ... }
+ # convert { |field, field_info| ... }
+ #
+ # You can use this method to install a CSV::Converters built-in, or provide a
+ # block that handles a custom conversion.
+ #
+ # If you provide a block that takes one argument, it will be passed the field
+ # and is expected to return the converted value or the field itself. If your
+ # block takes two arguments, it will also be passed a FieldInfo Struct,
+ # containing details about the field. Again, the block should return a
+ # converted field or the field itself.
+ #
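+ # For example (an illustrative sketch; the data is made up):
+ #
+ #   csv = CSV.new("1,2.5,three\n")
+ #   csv.convert(:numeric)
+ #   csv.convert { |field| field.upcase }  # only reached while still a String
+ #   csv.shift  # => [1, 2.5, "THREE"]
+ #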
+ def convert(name = nil, &converter)
+ add_converter(:converters, self.class::Converters, name, &converter)
+ end
+
+ #
+ # :call-seq:
+ # header_convert( name )
+ # header_convert { |field| ... }
+ # header_convert { |field, field_info| ... }
+ #
+ # Identical to CSV.convert(), but for header rows.
+ #
+ # Note that this method must be called before header rows are read to have any
+ # effect.
+ #
+ def header_convert(name = nil, &converter)
+ add_converter( :header_converters,
+ self.class::HeaderConverters,
+ name,
+ &converter )
+ end
+
+ include Enumerable
+
+ #
+ # Yields each row of the data source in turn.
+ #
+ # Support for Enumerable.
+ #
+ # The data source must be open for reading.
+ #
+ def each
+ while row = shift
+ yield row
+ end
+ end
+
+ #
+ # Slurps the remaining rows and returns an Array of Arrays.
+ #
+ # The data source must be open for reading.
+ #
+ def read
+ rows = to_a
+ if @use_headers
+ Table.new(rows)
+ else
+ rows
+ end
+ end
+ alias_method :readlines, :read
+
+ # Returns +true+ if the next row read will be a header row.
+ def header_row?
+ @use_headers and @headers.nil?
+ end
+
+ #
+ # The primary read method for wrapped Strings and IOs, a single row is pulled
+ # from the data source, parsed and returned as an Array of fields (if header
+ # rows are not used) or a CSV::Row (when header rows are used).
+ #
+ # The data source must be open for reading.
+ #
+ def shift
+ #########################################################################
+ ### This method is purposefully kept a bit long as simple conditional ###
+ ### checks are faster than numerous (expensive) method calls. ###
+ #########################################################################
+
+ # handle headers not based on document content
+ if header_row? and @return_headers and
+ [Array, String].include? @use_headers.class
+ if @unconverted_fields
+ return add_unconverted_fields(parse_headers, Array.new)
+ else
+ return parse_headers
+ end
+ end
+
+ # begin with a blank line, so we can always add to it
+ line = ""
+
+ #
+ # it can take multiple calls to <tt>@io.gets()</tt> to get a full line,
+ # because of \r and/or \n characters embedded in quoted fields
+ #
+ loop do
+ # add another read to the line
+ line += @io.gets(@row_sep) rescue return nil
+ # copy the line so we can chop it up in parsing
+ parse = line.dup
+ parse.sub!(@parsers[:line_end], "")
+
+ #
+ # I believe a blank line should be an <tt>Array.new</tt>, not Ruby 1.8
+ # CSV's <tt>[nil]</tt>
+ #
+ if parse.empty?
+ @lineno += 1
+ if @skip_blanks
+ line = ""
+ next
+ elsif @unconverted_fields
+ return add_unconverted_fields(Array.new, Array.new)
+ elsif @use_headers
+ return self.class::Row.new(Array.new, Array.new)
+ else
+ return Array.new
+ end
+ end
+
+ #
+ # shave leading empty fields if needed, because the main parser chokes
+ # on these
+ #
+ csv = if parse.sub!(@parsers[:leading_fields], "")
+ [nil] * ($&.length / @col_sep.length)
+ else
+ Array.new
+ end
+ #
+ # then parse the main fields with a hyper-tuned Regexp from
+ # Mastering Regular Expressions, Second Edition
+ #
+ parse.gsub!(@parsers[:csv_row]) do
+ csv << if $1.nil? # we found an unquoted field
+ if $2.empty? # switch empty unquoted fields to +nil+...
+ nil # for Ruby 1.8 CSV compatibility
+ else
+ # I decided to take a strict approach to CSV parsing...
+ if $2.count("\r\n").zero? # verify correctness of field...
+ $2
+ else
+ # or throw an Exception
+ raise MalformedCSVError, "Unquoted fields do not allow " +
+ "\\r or \\n (line #{lineno + 1})."
+ end
+ end
+ else # we found a quoted field...
+ $1.gsub(@quote_char * 2, @quote_char) # unescape contents
+ end
+ "" # gsub!'s replacement, clear the field
+ end
+
+ # if parse is empty?(), we found all the fields on the line...
+ if parse.empty?
+ @lineno += 1
+
+ # save unconverted fields, if needed...
+ unconverted = csv.dup if @unconverted_fields
+
+ # convert fields, if needed...
+ csv = convert_fields(csv) unless @use_headers or @converters.empty?
+ # parse out header rows and handle CSV::Row conversions...
+ csv = parse_headers(csv) if @use_headers
+
+ # inject unconverted fields and accessor, if requested...
+ if @unconverted_fields and not csv.respond_to? :unconverted_fields
+ add_unconverted_fields(csv, unconverted)
+ end
+
+ # return the results
+ break csv
+ end
+ # if we're not empty?() but at eof?(), a quoted field wasn't closed...
+ if @io.eof?
+ raise MalformedCSVError, "Unclosed quoted field on line #{lineno + 1}."
+ end
+ # otherwise, we need to loop and pull some more data to complete the row
+ end
+ end
+ alias_method :gets, :shift
+ alias_method :readline, :shift
+
+ private
+
+ #
+ # Stores the indicated separators for later use.
+ #
+ # If auto-discovery was requested for <tt>@row_sep</tt>, this method will read
+ # ahead in the <tt>@io</tt> and try to find one. +ARGF+, +STDIN+, +STDOUT+,
+ # +STDERR+ and any stream open for output only will get a default
+ # <tt>@row_sep</tt> of <tt>$INPUT_RECORD_SEPARATOR</tt> (<tt>$/</tt>).
+ #
+ # This method also establishes the quoting rules used for CSV output.
+ #
+ def init_separators(options)
+ # store the selected separators
+ @col_sep = options.delete(:col_sep)
+ @row_sep = options.delete(:row_sep)
+ @quote_char = options.delete(:quote_char)
+
+ if @quote_char.length != 1
+ raise ArgumentError, ":quote_char has to be a single character String"
+ end
+
+ # automatically discover row separator when requested
+ if @row_sep == :auto
+ if [ARGF, STDIN, STDOUT, STDERR].include?(@io) or
+ (defined?(Zlib) and @io.class == Zlib::GzipWriter)
+ @row_sep = $INPUT_RECORD_SEPARATOR
+ else
+ begin
+ saved_pos = @io.pos # remember where we were
+ while @row_sep == :auto
+ #
+ # if we run out of data, it's probably a single line
+ # (use a sensible default)
+ #
+ if @io.eof?
+ @row_sep = $INPUT_RECORD_SEPARATOR
+ break
+ end
+
+ # read ahead a bit
+ sample = @io.read(1024)
+ sample += @io.read(1) if sample[-1..-1] == "\r" and not @io.eof?
+
+ # try to find a standard separator
+ if sample =~ /\r\n?|\n/
+ @row_sep = $&
+ break
+ end
+ end
+ # tricky seek() clone to work around GzipReader's lack of seek()
+ @io.rewind
+ # reset back to the remembered position
+ while saved_pos > 1024 # avoid loading a lot of data into memory
+ @io.read(1024)
+ saved_pos -= 1024
+ end
+ @io.read(saved_pos) if saved_pos.nonzero?
+ rescue IOError # stream not opened for reading
+ @row_sep = $INPUT_RECORD_SEPARATOR
+ end
+ end
+ end
+
+ # establish quoting rules
+ do_quote = lambda do |field|
+ @quote_char +
+ String(field).gsub(@quote_char, @quote_char * 2) +
+ @quote_char
+ end
+ @quote = if options.delete(:force_quotes)
+ do_quote
+ else
+ lambda do |field|
+ if field.nil? # represent +nil+ fields as empty unquoted fields
+ ""
+ else
+ field = String(field) # Stringify fields
+ # represent empty fields as empty quoted fields
+ if field.empty? or
+ field.count("\r\n#{@col_sep}#{@quote_char}").nonzero?
+ do_quote.call(field)
+ else
+ field # unquoted field
+ end
+ end
+ end
+ end
+ end
+
+ # Pre-compiles parsers and stores them by name for access during reads.
+ def init_parsers(options)
+ # store the parser behaviors
+ @skip_blanks = options.delete(:skip_blanks)
+
+ # prebuild Regexps for faster parsing
+ esc_col_sep = Regexp.escape(@col_sep)
+ esc_row_sep = Regexp.escape(@row_sep)
+ esc_quote = Regexp.escape(@quote_char)
+ @parsers = {
+ :leading_fields =>
+ /\A(?:#{esc_col_sep})+/, # for empty leading fields
+ :csv_row =>
+ ### The Primary Parser ###
+ / \G(?:^|#{esc_col_sep}) # anchor the match
+ (?: #{esc_quote}( (?>[^#{esc_quote}]*) # find quoted fields
+ (?> #{esc_quote*2}
+ [^#{esc_quote}]* )* )#{esc_quote}
+ | # ... or ...
+ ([^#{esc_quote}#{esc_col_sep}]*) # unquoted fields
+ )/x,
+ ### End Primary Parser ###
+ :line_end =>
+ /#{esc_row_sep}\z/ # safer than chomp!()
+ }
+ end
+
+ #
+ # Loads any converters requested during construction.
+ #
+ # If +field_name+ is set to <tt>:converters</tt> (the default), field converters
+ # are set. When +field_name+ is <tt>:header_converters</tt> header converters
+ # are added instead.
+ #
+ # The <tt>:unconverted_fields</tt> option is also activated for
+ # <tt>:converters</tt> calls, if requested.
+ #
+ def init_converters(options, field_name = :converters)
+ if field_name == :converters
+ @unconverted_fields = options.delete(:unconverted_fields)
+ end
+
+ instance_variable_set("@#{field_name}", Array.new)
+
+ # find the correct method to add the converters
+ convert = method(field_name.to_s.sub(/ers\Z/, ""))
+
+ # load converters
+ unless options[field_name].nil?
+ # allow a single converter not wrapped in an Array
+ unless options[field_name].is_a? Array
+ options[field_name] = [options[field_name]]
+ end
+ # load each converter...
+ options[field_name].each do |converter|
+ if converter.is_a? Proc # custom code block
+ convert.call(&converter)
+ else # by name
+ convert.call(converter)
+ end
+ end
+ end
+
+ options.delete(field_name)
+ end
+
+ # Stores header row settings and loads header converters, if needed.
+ def init_headers(options)
+ @use_headers = options.delete(:headers)
+ @return_headers = options.delete(:return_headers)
+
+ # headers must be delayed until shift(), in case they need a row of content
+ @headers = nil
+
+ init_converters(options, :header_converters)
+ end
+
+ #
+ # The actual work method for adding converters, used by both CSV.convert() and
+ # CSV.header_convert().
+ #
+ # This method requires the +var_name+ of the instance variable to place the
+ # converters in, the +const+ Hash to lookup named converters in, and the
+ # normal parameters of the CSV.convert() and CSV.header_convert() methods.
+ #
+ def add_converter(var_name, const, name = nil, &converter)
+ if name.nil? # custom converter
+ instance_variable_get("@#{var_name}") << converter
+ else # named converter
+ combo = const[name]
+ case combo
+ when Array # combo converter
+ combo.each do |converter_name|
+ add_converter(var_name, const, converter_name)
+ end
+ else # individual named converter
+ instance_variable_get("@#{var_name}") << combo
+ end
+ end
+ end
+
+ #
+ # Processes +fields+ with <tt>@converters</tt>, or <tt>@header_converters</tt>
+ # if +headers+ is passed as +true+, returning the converted field set. Any
+ # converter that changes the field into something other than a String halts
+ # the pipeline of conversion for that field. This is primarily an efficiency
+ # shortcut.
+ #
+ def convert_fields(fields, headers = false)
+ # see if we are converting headers or fields
+ converters = headers ? @header_converters : @converters
+
+ fields.enum_for(:each_with_index).map do |field, index| # map_with_index
+ converters.each do |converter|
+ field = if converter.arity == 1 # straight field converter
+ converter[field]
+ else # FieldInfo converter
+ header = @use_headers && !headers ? @headers[index] : nil
+ converter[field, FieldInfo.new(index, lineno, header)]
+ end
+ break unless field.is_a? String # short-circuit pipeline for speed
+ end
+ field # return final state of each field, converted or original
+ end
+ end
+
+ #
+ # This method is used to turn a finished +row+ into a CSV::Row. Header rows
+ # are also dealt with here, either by returning a CSV::Row with identical
+ # headers and fields (save that the fields do not go through the converters)
+ # or by reading past them to return a field row. Headers are also saved in
+ # <tt>@headers</tt> for use in future rows.
+ #
+ # When +nil+, +row+ is assumed to be a header row not based on an actual row
+ # of the stream.
+ #
+ def parse_headers(row = nil)
+ if @headers.nil? # header row
+ @headers = case @use_headers # save headers
+ when Array then @use_headers # Array of headers
+ when String then self.class.parse_line(@use_headers) # CSV header String
+ else row # first row headers
+ end
+
+ # prepare converted and unconverted copies
+ row = @headers if row.nil?
+ @headers = convert_fields(@headers, true)
+
+ if @return_headers # return headers
+ return self.class::Row.new(@headers, row, true)
+ elsif not [Array, String].include? @use_headers.class # skip to field row
+ return shift
+ end
+ end
+
+ self.class::Row.new(@headers, convert_fields(row)) # field row
+ end
+
+ #
+  # This method injects an instance variable <tt>unconverted_fields</tt> into
+ # +row+ and an accessor method for it called unconverted_fields(). The
+ # variable is set to the contents of +fields+.
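+  #
+  # For example (mirroring the converter tests), a row parsed with
+  # <tt>:unconverted_fields => true</tt> gains that accessor:
+  #
+  #   row = CSV.parse_line( "1,2", :converters         => :numeric,
+  #                                :unconverted_fields => true )
+  #   row                     #=> [1, 2]
+  #   row.unconverted_fields  #=> ["1", "2"]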
+ #
+ def add_unconverted_fields(row, fields)
+ class << row
+ attr_reader :unconverted_fields
+ end
+ row.instance_eval { @unconverted_fields = fields }
+ row
+ end
+end
+
+# Another name for CSV::instance().
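+#
+# For example (a sketch based on the interface tests; with no arguments the
+# shared instance wraps STDOUT):
+#
+#   out = String.new
+#   CSV(out, :col_sep => ";") << %w[a b c]   # out is now "a;b;c\n"
+#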
+def CSV(*args, &block)
+ CSV.instance(*args, &block)
+end
+
+class Array
+ # Equivalent to <tt>CSV::generate_line(self, options)</tt>.
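+  #
+  # For example (output assumes the default <tt>$/</tt> row separator of "\n"):
+  #
+  #   ["CSV", "data"].to_csv                   #=> "CSV,data\n"
+  #   ["CSV", "data"].to_csv(:col_sep => ";")  #=> "CSV;data\n"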
+ def to_csv(options = Hash.new)
+ CSV.generate_line(self, options)
+ end
+end
+
+class String
+ # Equivalent to <tt>CSV::parse_line(self, options)</tt>.
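+  #
+  # For example (per the interface tests):
+  #
+  #   "1;2;3".parse_csv(:col_sep => ";")  #=> ["1", "2", "3"]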
+ def parse_csv(options = Hash.new)
+ CSV.parse_line(self, options)
+ end
+end
diff --git a/test/csv/line_endings.gz b/test/csv/line_endings.gz
new file mode 100644
index 0000000000..39e1729ee4
--- /dev/null
+++ b/test/csv/line_endings.gz
Binary files differ
diff --git a/test/csv/tc_csv_parsing.rb b/test/csv/tc_csv_parsing.rb
new file mode 100644
index 0000000000..9eb2e398d4
--- /dev/null
+++ b/test/csv/tc_csv_parsing.rb
@@ -0,0 +1,164 @@
+#!/usr/local/bin/ruby -w
+
+# tc_csv_parsing.rb
+#
+# Created by James Edward Gray II on 2005-10-31.
+# Copyright 2005 James Edward Gray II. You can redistribute or modify this code
+# under the terms of Ruby's license.
+
+require "test/unit"
+
+require "csv"
+
+#
+# The following tests are my interpretation of the
+# {CSV RFC}[http://www.ietf.org/rfc/rfc4180.txt]. I only deviate from that
+# document in one place (intentionally) and that is to make the default row
+# separator <tt>$/</tt>.
+#
+class TestCSVParsing < Test::Unit::TestCase
+ def test_mastering_regex_example
+ ex = %Q{Ten Thousand,10000, 2710 ,,"10,000","It's ""10 Grand"", baby",10K}
+ assert_equal( [ "Ten Thousand", "10000", " 2710 ", nil, "10,000",
+ "It's \"10 Grand\", baby", "10K" ],
+ CSV.parse_line(ex) )
+ end
+
+ # Old Ruby 1.8 CSV library tests.
+ def test_std_lib_csv
+ [ ["\t", ["\t"]],
+ ["foo,\"\"\"\"\"\",baz", ["foo", "\"\"", "baz"]],
+ ["foo,\"\"\"bar\"\"\",baz", ["foo", "\"bar\"", "baz"]],
+ ["\"\"\"\n\",\"\"\"\n\"", ["\"\n", "\"\n"]],
+ ["foo,\"\r\n\",baz", ["foo", "\r\n", "baz"]],
+ ["\"\"", [""]],
+ ["foo,\"\"\"\",baz", ["foo", "\"", "baz"]],
+ ["foo,\"\r.\n\",baz", ["foo", "\r.\n", "baz"]],
+ ["foo,\"\r\",baz", ["foo", "\r", "baz"]],
+ ["foo,\"\",baz", ["foo", "", "baz"]],
+ ["\",\"", [","]],
+ ["foo", ["foo"]],
+ [",,", [nil, nil, nil]],
+ [",", [nil, nil]],
+ ["foo,\"\n\",baz", ["foo", "\n", "baz"]],
+ ["foo,,baz", ["foo", nil, "baz"]],
+ ["\"\"\"\r\",\"\"\"\r\"", ["\"\r", "\"\r"]],
+ ["\",\",\",\"", [",", ","]],
+ ["foo,bar,", ["foo", "bar", nil]],
+ [",foo,bar", [nil, "foo", "bar"]],
+ ["foo,bar", ["foo", "bar"]],
+ [";", [";"]],
+ ["\t,\t", ["\t", "\t"]],
+ ["foo,\"\r\n\r\",baz", ["foo", "\r\n\r", "baz"]],
+ ["foo,\"\r\n\n\",baz", ["foo", "\r\n\n", "baz"]],
+ ["foo,\"foo,bar\",baz", ["foo", "foo,bar", "baz"]],
+ [";,;", [";", ";"]] ].each do |csv_test|
+ assert_equal(csv_test.last, CSV.parse_line(csv_test.first))
+ end
+
+ [ ["foo,\"\"\"\"\"\",baz", ["foo", "\"\"", "baz"]],
+ ["foo,\"\"\"bar\"\"\",baz", ["foo", "\"bar\"", "baz"]],
+ ["foo,\"\r\n\",baz", ["foo", "\r\n", "baz"]],
+ ["\"\"", [""]],
+ ["foo,\"\"\"\",baz", ["foo", "\"", "baz"]],
+ ["foo,\"\r.\n\",baz", ["foo", "\r.\n", "baz"]],
+ ["foo,\"\r\",baz", ["foo", "\r", "baz"]],
+ ["foo,\"\",baz", ["foo", "", "baz"]],
+ ["foo", ["foo"]],
+ [",,", [nil, nil, nil]],
+ [",", [nil, nil]],
+ ["foo,\"\n\",baz", ["foo", "\n", "baz"]],
+ ["foo,,baz", ["foo", nil, "baz"]],
+ ["foo,bar", ["foo", "bar"]],
+ ["foo,\"\r\n\n\",baz", ["foo", "\r\n\n", "baz"]],
+ ["foo,\"foo,bar\",baz", ["foo", "foo,bar", "baz"]] ].each do |csv_test|
+ assert_equal(csv_test.last, CSV.parse_line(csv_test.first))
+ end
+ end
+
+ # From: http://ruby-talk.org/cgi-bin/scat.rb/ruby/ruby-core/6496
+ def test_aras_edge_cases
+ [ [%Q{a,b}, ["a", "b"]],
+ [%Q{a,"""b"""}, ["a", "\"b\""]],
+ [%Q{a,"""b"}, ["a", "\"b"]],
+ [%Q{a,"b"""}, ["a", "b\""]],
+ [%Q{a,"\nb"""}, ["a", "\nb\""]],
+ [%Q{a,"""\nb"}, ["a", "\"\nb"]],
+ [%Q{a,"""\nb\n"""}, ["a", "\"\nb\n\""]],
+ [%Q{a,"""\nb\n""",\nc}, ["a", "\"\nb\n\"", nil]],
+ [%Q{a,,,}, ["a", nil, nil, nil]],
+ [%Q{,}, [nil, nil]],
+ [%Q{"",""}, ["", ""]],
+ [%Q{""""}, ["\""]],
+ [%Q{"""",""}, ["\"",""]],
+ [%Q{,""}, [nil,""]],
+ [%Q{,"\r"}, [nil,"\r"]],
+ [%Q{"\r\n,"}, ["\r\n,"]],
+ [%Q{"\r\n,",}, ["\r\n,", nil]] ].each do |edge_case|
+ assert_equal(edge_case.last, CSV.parse_line(edge_case.first))
+ end
+ end
+
+ def test_james_edge_cases
+ # A read at eof? should return nil.
+ assert_equal(nil, CSV.parse_line(""))
+ #
+ # With Ruby 1.8 CSV it's impossible to tell an empty line from a line
+ # containing a single +nil+ field. The old CSV library returns
+ # <tt>[nil]</tt> in these cases, but <tt>Array.new</tt> makes more sense to
+ # me.
+ #
+ assert_equal(Array.new, CSV.parse_line("\n1,2,3\n"))
+ end
+
+ def test_malformed_csv
+ assert_raise(CSV::MalformedCSVError) do
+ CSV.parse_line("1,2\r,3", :row_sep => "\n")
+ end
+
+ bad_data = <<-END_DATA.gsub(/^ +/, "")
+ line,1,abc
+ line,2,"def\nghi"
+
+ line,4,some\rjunk
+ line,5,jkl
+ END_DATA
+ lines = bad_data.to_a
+ assert_equal(6, lines.size)
+ assert_match(/\Aline,4/, lines.find { |l| l =~ /some\rjunk/ })
+
+ csv = CSV.new(bad_data)
+ begin
+ loop do
+ assert_not_nil(csv.shift)
+ assert_send([csv.lineno, :<, 4])
+ end
+ rescue CSV::MalformedCSVError
+ assert_equal( "Unquoted fields do not allow \\r or \\n (line 4).",
+ $!.message )
+ end
+
+ assert_raise(CSV::MalformedCSVError) { CSV.parse_line('1,2,"3...') }
+
+ bad_data = <<-END_DATA.gsub(/^ +/, "")
+ line,1,abc
+ line,2,"def\nghi"
+
+ line,4,8'10"
+ line,5,jkl
+ END_DATA
+ lines = bad_data.to_a
+ assert_equal(6, lines.size)
+ assert_match(/\Aline,4/, lines.find { |l| l =~ /8'10"/ })
+
+ csv = CSV.new(bad_data)
+ begin
+ loop do
+ assert_not_nil(csv.shift)
+ assert_send([csv.lineno, :<, 4])
+ end
+ rescue CSV::MalformedCSVError
+ assert_equal("Unclosed quoted field on line 4.", $!.message)
+ end
+ end
+end
diff --git a/test/csv/tc_csv_writing.rb b/test/csv/tc_csv_writing.rb
new file mode 100644
index 0000000000..4677284306
--- /dev/null
+++ b/test/csv/tc_csv_writing.rb
@@ -0,0 +1,96 @@
+#!/usr/local/bin/ruby -w
+
+# tc_csv_writing.rb
+#
+# Created by James Edward Gray II on 2005-10-31.
+# Copyright 2005 James Edward Gray II. You can redistribute or modify this code
+# under the terms of Ruby's license.
+
+require "test/unit"
+
+require "csv"
+
+class TestCSVWriting < Test::Unit::TestCase
+ def test_writing
+ [ ["\t", ["\t"]],
+ ["foo,\"\"\"\"\"\",baz", ["foo", "\"\"", "baz"]],
+ ["foo,\"\"\"bar\"\"\",baz", ["foo", "\"bar\"", "baz"]],
+ ["\"\"\"\n\",\"\"\"\n\"", ["\"\n", "\"\n"]],
+ ["foo,\"\r\n\",baz", ["foo", "\r\n", "baz"]],
+ ["\"\"", [""]],
+ ["foo,\"\"\"\",baz", ["foo", "\"", "baz"]],
+ ["foo,\"\r.\n\",baz", ["foo", "\r.\n", "baz"]],
+ ["foo,\"\r\",baz", ["foo", "\r", "baz"]],
+ ["foo,\"\",baz", ["foo", "", "baz"]],
+ ["\",\"", [","]],
+ ["foo", ["foo"]],
+ [",,", [nil, nil, nil]],
+ [",", [nil, nil]],
+ ["foo,\"\n\",baz", ["foo", "\n", "baz"]],
+ ["foo,,baz", ["foo", nil, "baz"]],
+ ["\"\"\"\r\",\"\"\"\r\"", ["\"\r", "\"\r"]],
+ ["\",\",\",\"", [",", ","]],
+ ["foo,bar,", ["foo", "bar", nil]],
+ [",foo,bar", [nil, "foo", "bar"]],
+ ["foo,bar", ["foo", "bar"]],
+ [";", [";"]],
+ ["\t,\t", ["\t", "\t"]],
+ ["foo,\"\r\n\r\",baz", ["foo", "\r\n\r", "baz"]],
+ ["foo,\"\r\n\n\",baz", ["foo", "\r\n\n", "baz"]],
+ ["foo,\"foo,bar\",baz", ["foo", "foo,bar", "baz"]],
+ [";,;", [";", ";"]],
+ ["foo,\"\"\"\"\"\",baz", ["foo", "\"\"", "baz"]],
+ ["foo,\"\"\"bar\"\"\",baz", ["foo", "\"bar\"", "baz"]],
+ ["foo,\"\r\n\",baz", ["foo", "\r\n", "baz"]],
+ ["\"\"", [""]],
+ ["foo,\"\"\"\",baz", ["foo", "\"", "baz"]],
+ ["foo,\"\r.\n\",baz", ["foo", "\r.\n", "baz"]],
+ ["foo,\"\r\",baz", ["foo", "\r", "baz"]],
+ ["foo,\"\",baz", ["foo", "", "baz"]],
+ ["foo", ["foo"]],
+ [",,", [nil, nil, nil]],
+ [",", [nil, nil]],
+ ["foo,\"\n\",baz", ["foo", "\n", "baz"]],
+ ["foo,,baz", ["foo", nil, "baz"]],
+ ["foo,bar", ["foo", "bar"]],
+ ["foo,\"\r\n\n\",baz", ["foo", "\r\n\n", "baz"]],
+ ["foo,\"foo,bar\",baz", ["foo", "foo,bar", "baz"]],
+ [%Q{a,b}, ["a", "b"]],
+ [%Q{a,"""b"""}, ["a", "\"b\""]],
+ [%Q{a,"""b"}, ["a", "\"b"]],
+ [%Q{a,"b"""}, ["a", "b\""]],
+ [%Q{a,"\nb"""}, ["a", "\nb\""]],
+ [%Q{a,"""\nb"}, ["a", "\"\nb"]],
+ [%Q{a,"""\nb\n"""}, ["a", "\"\nb\n\""]],
+ [%Q{a,"""\nb\n""",}, ["a", "\"\nb\n\"", nil]],
+ [%Q{a,,,}, ["a", nil, nil, nil]],
+ [%Q{,}, [nil, nil]],
+ [%Q{"",""}, ["", ""]],
+ [%Q{""""}, ["\""]],
+ [%Q{"""",""}, ["\"",""]],
+ [%Q{,""}, [nil,""]],
+ [%Q{,"\r"}, [nil,"\r"]],
+ [%Q{"\r\n,"}, ["\r\n,"]],
+ [%Q{"\r\n,",}, ["\r\n,", nil]] ].each do |test_case|
+ assert_equal(test_case.first + $/, CSV.generate_line(test_case.last))
+ end
+ end
+
+ def test_col_sep
+ assert_equal( "a;b;;c\n", CSV.generate_line( ["a", "b", nil, "c"],
+ :col_sep => ";" ) )
+ assert_equal( "a\tb\t\tc\n", CSV.generate_line( ["a", "b", nil, "c"],
+ :col_sep => "\t" ) )
+ end
+
+ def test_row_sep
+ assert_equal( "a,b,,c\r\n", CSV.generate_line( ["a", "b", nil, "c"],
+ :row_sep => "\r\n" ) )
+ end
+
+ def test_force_quotes
+ assert_equal( %Q{"1","b","","already ""quoted"""\n},
+ CSV.generate_line( [1, "b", nil, %Q{already "quoted"}],
+ :force_quotes => true ) )
+ end
+end
diff --git a/test/csv/tc_data_converters.rb b/test/csv/tc_data_converters.rb
new file mode 100644
index 0000000000..24c6b6b76f
--- /dev/null
+++ b/test/csv/tc_data_converters.rb
@@ -0,0 +1,260 @@
+#!/usr/local/bin/ruby -w
+
+# tc_data_converters.rb
+#
+# Created by James Edward Gray II on 2005-10-31.
+# Copyright 2005 James Edward Gray II. You can redistribute or modify this code
+# under the terms of Ruby's license.
+
+require "test/unit"
+
+require "csv"
+
+class TestDataConverters < Test::Unit::TestCase
+ def setup
+ @data = "Numbers,:integer,1,:float,3.015"
+ @parser = CSV.new(@data)
+
+ @custom = lambda { |field| field =~ /\A:(\S.*?)\s*\Z/ ? $1.to_sym : field }
+
+ @win_safe_time_str = Time.now.strftime("%a %b %d %H:%M:%S %Y")
+ end
+
+ def test_builtin_integer_converter
+ # does convert
+ [-5, 1, 10000000000].each do |n|
+ assert_equal(n, CSV::Converters[:integer][n.to_s])
+ end
+
+ # does not convert
+ (%w{junk 1.0} + [""]).each do |str|
+ assert_equal(str, CSV::Converters[:integer][str])
+ end
+ end
+
+ def test_builtin_float_converter
+ # does convert
+ [-5.1234, 0, 2.3e-11].each do |n|
+ assert_equal(n, CSV::Converters[:float][n.to_s])
+ end
+
+ # does not convert
+ (%w{junk 1..0 .015F} + [""]).each do |str|
+ assert_equal(str, CSV::Converters[:float][str])
+ end
+ end
+
+ def test_builtin_date_converter
+ # does convert
+ assert_instance_of(
+ Date,
+ CSV::Converters[:date][@win_safe_time_str.sub(/\d+:\d+:\d+ /, "")]
+ )
+
+ # does not convert
+ assert_instance_of(String, CSV::Converters[:date]["junk"])
+ end
+
+ def test_builtin_date_time_converter
+ # does convert
+ assert_instance_of( DateTime,
+ CSV::Converters[:date_time][@win_safe_time_str] )
+
+ # does not convert
+ assert_instance_of(String, CSV::Converters[:date_time]["junk"])
+ end
+
+ def test_convert_with_builtin
+ # setup parser...
+ assert(@parser.respond_to?(:convert))
+ assert_nothing_raised(Exception) { @parser.convert(:integer) }
+
+ # and use
+ assert_equal(["Numbers", ":integer", 1, ":float", "3.015"], @parser.shift)
+
+ setup # reset
+
+ # setup parser...
+ assert_nothing_raised(Exception) { @parser.convert(:float) }
+
+ # and use
+ assert_equal(["Numbers", ":integer", 1.0, ":float", 3.015], @parser.shift)
+ end
+
+ def test_convert_order
+ # floats first, then integers...
+ assert_nothing_raised(Exception) do
+ @parser.convert(:float)
+ @parser.convert(:integer)
+ end
+
+ # gets us nothing but floats
+ assert_equal( [String, String, Float, String, Float],
+ @parser.shift.map { |field| field.class } )
+
+ setup # reset
+
+    # integers take precedence...
+ assert_nothing_raised(Exception) do
+ @parser.convert(:integer)
+ @parser.convert(:float)
+ end
+
+ # gives us proper number conversion
+ assert_equal( [String, String, Fixnum, String, Float],
+ @parser.shift.map { |field| field.class } )
+ end
+
+ def test_builtin_numeric_combo_converter
+ # setup parser...
+ assert_nothing_raised(Exception) { @parser.convert(:numeric) }
+
+ # and use
+ assert_equal( [String, String, Fixnum, String, Float],
+ @parser.shift.map { |field| field.class } )
+ end
+
+ def test_builtin_all_nested_combo_converter
+ # setup parser...
+ @data << ",#{@win_safe_time_str}" # add a DateTime field
+ @parser = CSV.new(@data) # reset parser
+ assert_nothing_raised(Exception) { @parser.convert(:all) }
+
+ # and use
+ assert_equal( [String, String, Fixnum, String, Float, DateTime],
+ @parser.shift.map { |field| field.class } )
+ end
+
+ def test_convert_with_custom_code
+ # define custom converter...
+ assert_nothing_raised(Exception) do
+ @parser.convert { |field| field =~ /\A:(\S.*?)\s*\Z/ ? $1.to_sym : field }
+ end
+
+ # and use
+ assert_equal(["Numbers", :integer, "1", :float, "3.015"], @parser.shift)
+
+ setup # reset
+
+ # mix built-in and custom...
+ assert_nothing_raised(Exception) { @parser.convert(:numeric) }
+ assert_nothing_raised(Exception) { @parser.convert(&@custom) }
+
+ # and use
+ assert_equal(["Numbers", :integer, 1, :float, 3.015], @parser.shift)
+ end
+
+ def test_convert_with_custom_code_using_field_info
+ # define custom converter that uses field information...
+ assert_nothing_raised(Exception) do
+ @parser.convert do |field, info|
+ assert_equal(1, info.line)
+ info.index == 4 ? Float(field).floor : field
+ end
+ end
+
+ # and use
+ assert_equal(["Numbers", ":integer", "1", ":float", 3], @parser.shift)
+ end
+
+ def test_convert_with_custom_code_using_field_info_header
+ @parser = CSV.new(@data, :headers => %w{one two three four five})
+
+ # define custom converter that uses field header information...
+ assert_nothing_raised(Exception) do
+ @parser.convert do |field, info|
+ info.header == "three" ? Integer(field) * 100 : field
+ end
+ end
+
+ # and use
+ assert_equal( ["Numbers", ":integer", 100, ":float", "3.015"],
+ @parser.shift.fields )
+ end
+
+ def test_shortcut_interface
+ assert_equal( ["Numbers", ":integer", 1, ":float", 3.015],
+ CSV.parse_line(@data, :converters => :numeric) )
+
+ assert_equal( ["Numbers", ":integer", 1, ":float", 3.015],
+ CSV.parse_line(@data, :converters => [:integer, :float]) )
+
+ assert_equal( ["Numbers", :integer, 1, :float, 3.015],
+ CSV.parse_line(@data, :converters => [:numeric, @custom]) )
+ end
+
+ def test_unconverted_fields
+ [ [ @data,
+ ["Numbers", :integer, 1, :float, 3.015],
+ %w{Numbers :integer 1 :float 3.015} ],
+ ["\n", Array.new, Array.new] ].each do |test, fields, unconverted|
+ row = nil
+ assert_nothing_raised(Exception) do
+ row = CSV.parse_line( test,
+ :converters => [:numeric, @custom],
+ :unconverted_fields => true )
+ end
+ assert_not_nil(row)
+ assert_equal(fields, row)
+ assert_respond_to(row, :unconverted_fields)
+ assert_equal(unconverted, row.unconverted_fields)
+ end
+
+ data = <<-END_CSV.gsub(/^\s+/, "")
+ first,second,third
+ 1,2,3
+ END_CSV
+ row = nil
+ assert_nothing_raised(Exception) do
+ row = CSV.parse_line( data,
+ :converters => :numeric,
+ :unconverted_fields => true,
+ :headers => :first_row )
+ end
+ assert_not_nil(row)
+ assert_equal([["first", 1], ["second", 2], ["third", 3]], row.to_a)
+ assert_respond_to(row, :unconverted_fields)
+ assert_equal(%w{1 2 3}, row.unconverted_fields)
+
+ assert_nothing_raised(Exception) do
+ row = CSV.parse_line( data,
+ :converters => :numeric,
+ :unconverted_fields => true,
+ :headers => :first_row,
+ :return_headers => true )
+ end
+ assert_not_nil(row)
+ assert_equal( [%w{first first}, %w{second second}, %w{third third}],
+ row.to_a )
+ assert_respond_to(row, :unconverted_fields)
+ assert_equal(%w{first second third}, row.unconverted_fields)
+
+ assert_nothing_raised(Exception) do
+ row = CSV.parse_line( data,
+ :converters => :numeric,
+ :unconverted_fields => true,
+ :headers => :first_row,
+ :return_headers => true,
+ :header_converters => :symbol )
+ end
+ assert_not_nil(row)
+ assert_equal( [[:first, "first"], [:second, "second"], [:third, "third"]],
+ row.to_a )
+ assert_respond_to(row, :unconverted_fields)
+ assert_equal(%w{first second third}, row.unconverted_fields)
+
+ assert_nothing_raised(Exception) do
+ row = CSV.parse_line( data,
+ :converters => :numeric,
+ :unconverted_fields => true,
+ :headers => %w{my new headers},
+ :return_headers => true,
+ :header_converters => :symbol )
+ end
+ assert_not_nil(row)
+ assert_equal( [[:my, "my"], [:new, "new"], [:headers, "headers"]],
+ row.to_a )
+ assert_respond_to(row, :unconverted_fields)
+ assert_equal(Array.new, row.unconverted_fields)
+ end
+end
diff --git a/test/csv/tc_features.rb b/test/csv/tc_features.rb
new file mode 100644
index 0000000000..6766839e10
--- /dev/null
+++ b/test/csv/tc_features.rb
@@ -0,0 +1,177 @@
+#!/usr/local/bin/ruby -w
+
+# tc_features.rb
+#
+# Created by James Edward Gray II on 2005-10-31.
+# Copyright 2005 James Edward Gray II. You can redistribute or modify this code
+# under the terms of Ruby's license.
+
+require "test/unit"
+require "zlib"
+
+require "csv"
+
+class TestCSVFeatures < Test::Unit::TestCase
+ TEST_CASES = [ [%Q{a,b}, ["a", "b"]],
+ [%Q{a,"""b"""}, ["a", "\"b\""]],
+ [%Q{a,"""b"}, ["a", "\"b"]],
+ [%Q{a,"b"""}, ["a", "b\""]],
+ [%Q{a,"\nb"""}, ["a", "\nb\""]],
+ [%Q{a,"""\nb"}, ["a", "\"\nb"]],
+ [%Q{a,"""\nb\n"""}, ["a", "\"\nb\n\""]],
+ [%Q{a,"""\nb\n""",\nc}, ["a", "\"\nb\n\"", nil]],
+ [%Q{a,,,}, ["a", nil, nil, nil]],
+ [%Q{,}, [nil, nil]],
+ [%Q{"",""}, ["", ""]],
+ [%Q{""""}, ["\""]],
+ [%Q{"""",""}, ["\"",""]],
+ [%Q{,""}, [nil,""]],
+ [%Q{,"\r"}, [nil,"\r"]],
+ [%Q{"\r\n,"}, ["\r\n,"]],
+ [%Q{"\r\n,",}, ["\r\n,", nil]] ]
+
+ def setup
+ @sample_data = <<-END_DATA.gsub(/^ +/, "")
+ line,1,abc
+ line,2,"def\nghi"
+
+ line,4,jkl
+ END_DATA
+ @csv = CSV.new(@sample_data)
+ end
+
+ def test_col_sep
+ [";", "\t"].each do |sep|
+ TEST_CASES.each do |test_case|
+ assert_equal( test_case.last.map { |t| t.tr(",", sep) unless t.nil? },
+ CSV.parse_line( test_case.first.tr(",", sep),
+ :col_sep => sep ) )
+ end
+ end
+ assert_equal([",,,", nil], CSV.parse_line(",,,;", :col_sep => ";"))
+ end
+
+ def test_row_sep
+ assert_raise(CSV::MalformedCSVError) do
+ CSV.parse_line("1,2,3\n,4,5\r\n", :row_sep => "\r\n")
+ end
+ assert_equal( ["1", "2", "3\n", "4", "5"],
+ CSV.parse_line(%Q{1,2,"3\n",4,5\r\n}, :row_sep => "\r\n"))
+ end
+
+ def test_quote_char
+ TEST_CASES.each do |test_case|
+ assert_equal( test_case.last.map { |t| t.tr('"', "'") unless t.nil? },
+ CSV.parse_line( test_case.first.tr('"', "'"),
+ :quote_char => "'" ) )
+ end
+ end
+
+ def test_row_sep_auto_discovery
+ ["\r\n", "\n", "\r"].each do |line_end|
+ data = "1,2,3#{line_end}4,5#{line_end}"
+ discovered = CSV.new(data).instance_eval { @row_sep }
+ assert_equal(line_end, discovered)
+ end
+
+ assert_equal("\n", CSV.new("\n\r\n\r").instance_eval { @row_sep })
+
+ assert_equal($/, CSV.new("").instance_eval { @row_sep })
+
+ assert_equal($/, CSV.new(STDERR).instance_eval { @row_sep })
+ end
+
+ def test_lineno
+ assert_equal(5, @sample_data.to_a.size)
+
+ 4.times do |line_count|
+ assert_equal(line_count, @csv.lineno)
+ assert_not_nil(@csv.shift)
+ assert_equal(line_count + 1, @csv.lineno)
+ end
+ assert_nil(@csv.shift)
+ end
+
+ def test_readline
+ test_lineno
+
+ @csv.rewind
+
+ test_lineno
+ end
+
+ def test_unknown_options
+ assert_raise(ArgumentError) { CSV.new(String.new, :unknown => :error) }
+ end
+
+ def test_skip_blanks
+ assert_equal(4, @csv.to_a.size)
+
+ @csv = CSV.new(@sample_data, :skip_blanks => true)
+
+ count = 0
+ @csv.each do |row|
+ count += 1
+ assert_equal("line", row.first)
+ end
+ assert_equal(3, count)
+ end
+
+ # reported by Kev Jackson
+ def test_failing_to_escape_col_sep_bug_fix
+ assert_nothing_raised(Exception) { CSV.new(String.new, :col_sep => "|") }
+ end
+
+ # reported by Chris Roos
+ def test_failing_to_reset_headers_in_rewind_bug_fix
+ csv = CSV.new("forename,surname", :headers => true, :return_headers => true)
+ csv.each { |row| assert row.header_row? }
+ csv.rewind
+ csv.each { |row| assert row.header_row? }
+ end
+
+ # reported by Dave Burt
+ def test_leading_empty_fields_with_multibyte_col_sep_bug_fix
+ data = <<-END_DATA.gsub(/^\s+/, "")
+ <=><=>A<=>B<=>C
+ 1<=>2<=>3
+ END_DATA
+ parsed = CSV.parse(data, :col_sep => "<=>")
+ assert_equal([[nil, nil, "A", "B", "C"], ["1", "2", "3"]], parsed)
+ end
+
+ def test_gzip_reader_bug_fix
+ zipped = nil
+ assert_nothing_raised(NoMethodError) do
+ zipped = CSV.new(
+ Zlib::GzipReader.open(
+ File.join(File.dirname(__FILE__), "line_endings.gz")
+ )
+ )
+ end
+ assert_equal("\r\n", zipped.instance_eval { @row_sep })
+ end
+
+ def test_gzip_writer_bug_fix
+ file = File.join(File.dirname(__FILE__), "temp.gz")
+ zipped = nil
+ assert_nothing_raised(NoMethodError) do
+ zipped = CSV.new(Zlib::GzipWriter.open(file))
+ end
+ zipped << %w[one two three]
+ zipped << [1, 2, 3]
+ zipped.close
+
+ assert( Zlib::GzipReader.open(file) { |f| f.read }.
+ include?($INPUT_RECORD_SEPARATOR),
+ "@row_sep did not default" )
+ File.unlink(file)
+ end
+
+ def test_version
+ assert_not_nil(CSV::VERSION)
+ assert_instance_of(String, CSV::VERSION)
+ assert(CSV::VERSION.frozen?)
+ assert_match(/\A\d\.\d\.\d\Z/, CSV::VERSION)
+ end
+end
diff --git a/test/csv/tc_headers.rb b/test/csv/tc_headers.rb
new file mode 100644
index 0000000000..74e2f54ad4
--- /dev/null
+++ b/test/csv/tc_headers.rb
@@ -0,0 +1,261 @@
+#!/usr/local/bin/ruby -w
+
+# tc_headers.rb
+#
+# Created by James Edward Gray II on 2005-10-31.
+# Copyright 2005 James Edward Gray II. You can redistribute or modify this code
+# under the terms of Ruby's license.
+
+require "test/unit"
+
+require "csv"
+
+class TestCSVHeaders < Test::Unit::TestCase
+ def setup
+ @data = <<-END_CSV.gsub(/^\s+/, "")
+ first,second,third
+ A,B,C
+ 1,2,3
+ END_CSV
+ end
+
+ def test_first_row
+ [:first_row, true].each do |setting| # two names for the same setting
+ # activate headers
+ csv = nil
+ assert_nothing_raised(Exception) do
+ csv = CSV.parse(@data, :headers => setting)
+ end
+
+ # first data row - skipping headers
+ row = csv[0]
+ assert_not_nil(row)
+ assert_instance_of(CSV::Row, row)
+ assert_equal([%w{first A}, %w{second B}, %w{third C}], row.to_a)
+
+ # second data row
+ row = csv[1]
+ assert_not_nil(row)
+ assert_instance_of(CSV::Row, row)
+ assert_equal([%w{first 1}, %w{second 2}, %w{third 3}], row.to_a)
+
+ # empty
+ assert_nil(csv[2])
+ end
+ end
+
+ def test_array_of_headers
+ # activate headers
+ csv = nil
+ assert_nothing_raised(Exception) do
+ csv = CSV.parse(@data, :headers => [:my, :new, :headers])
+ end
+
+ # first data row - skipping headers
+ row = csv[0]
+ assert_not_nil(row)
+ assert_instance_of(CSV::Row, row)
+ assert_equal( [[:my, "first"], [:new, "second"], [:headers, "third"]],
+ row.to_a )
+
+ # second data row
+ row = csv[1]
+ assert_not_nil(row)
+ assert_instance_of(CSV::Row, row)
+ assert_equal([[:my, "A"], [:new, "B"], [:headers, "C"]], row.to_a)
+
+ # third data row
+ row = csv[2]
+ assert_not_nil(row)
+ assert_instance_of(CSV::Row, row)
+ assert_equal([[:my, "1"], [:new, "2"], [:headers, "3"]], row.to_a)
+
+ # empty
+ assert_nil(csv[3])
+
+ # with return and convert
+ assert_nothing_raised(Exception) do
+ csv = CSV.parse(@data, :headers => [:my, :new, :headers],
+ :return_headers => true,
+ :header_converters => lambda { |h| h.to_s } )
+ end
+ row = csv[0]
+ assert_not_nil(row)
+ assert_instance_of(CSV::Row, row)
+ assert_equal([["my", :my], ["new", :new], ["headers", :headers]], row.to_a)
+ assert(row.header_row?)
+ assert(!row.field_row?)
+ end
+
+ def test_csv_header_string
+ # activate headers
+ csv = nil
+ assert_nothing_raised(Exception) do
+ csv = CSV.parse(@data, :headers => "my,new,headers")
+ end
+
+ # first data row - skipping headers
+ row = csv[0]
+ assert_not_nil(row)
+ assert_instance_of(CSV::Row, row)
+ assert_equal([%w{my first}, %w{new second}, %w{headers third}], row.to_a)
+
+ # second data row
+ row = csv[1]
+ assert_not_nil(row)
+ assert_instance_of(CSV::Row, row)
+ assert_equal([%w{my A}, %w{new B}, %w{headers C}], row.to_a)
+
+ # third data row
+ row = csv[2]
+ assert_not_nil(row)
+ assert_instance_of(CSV::Row, row)
+ assert_equal([%w{my 1}, %w{new 2}, %w{headers 3}], row.to_a)
+
+ # empty
+ assert_nil(csv[3])
+
+ # with return and convert
+ assert_nothing_raised(Exception) do
+ csv = CSV.parse(@data, :headers => "my,new,headers",
+ :return_headers => true,
+ :header_converters => :symbol )
+ end
+ row = csv[0]
+ assert_not_nil(row)
+ assert_instance_of(CSV::Row, row)
+ assert_equal([[:my, "my"], [:new, "new"], [:headers, "headers"]], row.to_a)
+ assert(row.header_row?)
+ assert(!row.field_row?)
+ end
+
+ def test_return_headers
+ # activate headers and request they are returned
+ csv = nil
+ assert_nothing_raised(Exception) do
+ csv = CSV.parse(@data, :headers => true, :return_headers => true)
+ end
+
+ # header row
+ row = csv[0]
+ assert_not_nil(row)
+ assert_instance_of(CSV::Row, row)
+ assert_equal( [%w{first first}, %w{second second}, %w{third third}],
+ row.to_a )
+ assert(row.header_row?)
+ assert(!row.field_row?)
+
+ # first data row - skipping headers
+ row = csv[1]
+ assert_not_nil(row)
+ assert_instance_of(CSV::Row, row)
+ assert_equal([%w{first A}, %w{second B}, %w{third C}], row.to_a)
+ assert(!row.header_row?)
+ assert(row.field_row?)
+
+ # second data row
+ row = csv[2]
+ assert_not_nil(row)
+ assert_instance_of(CSV::Row, row)
+ assert_equal([%w{first 1}, %w{second 2}, %w{third 3}], row.to_a)
+ assert(!row.header_row?)
+ assert(row.field_row?)
+
+ # empty
+ assert_nil(csv[3])
+ end
+
+ def test_converters
+ # create test data where headers and fields look alike
+ data = <<-END_MATCHING_CSV.gsub(/^\s+/, "")
+ 1,2,3
+ 1,2,3
+ END_MATCHING_CSV
+
+ # normal converters do not affect headers
+ csv = CSV.parse( data, :headers => true,
+ :return_headers => true,
+ :converters => :numeric )
+ assert_equal([%w{1 1}, %w{2 2}, %w{3 3}], csv[0].to_a)
+ assert_equal([["1", 1], ["2", 2], ["3", 3]], csv[1].to_a)
+ assert_nil(csv[2])
+
+ # header converters do affect headers (only)
+ assert_nothing_raised(Exception) do
+ csv = CSV.parse( data, :headers => true,
+ :return_headers => true,
+ :converters => :numeric,
+ :header_converters => :symbol )
+ end
+ assert_equal([[:"1", "1"], [:"2", "2"], [:"3", "3"]], csv[0].to_a)
+ assert_equal([[:"1", 1], [:"2", 2], [:"3", 3]], csv[1].to_a)
+ assert_nil(csv[2])
+ end
+
+ def test_builtin_downcase_converter
+ csv = CSV.parse( "One,TWO Three", :headers => true,
+ :return_headers => true,
+ :header_converters => :downcase )
+ assert_equal(%w{one two\ three}, csv.headers)
+ end
+
+ def test_builtin_symbol_converter
+ csv = CSV.parse( "One,TWO Three", :headers => true,
+ :return_headers => true,
+ :header_converters => :symbol )
+ assert_equal([:one, :two_three], csv.headers)
+ end
+
+ def test_custom_converter
+ converter = lambda { |header| header.tr(" ", "_") }
+ csv = CSV.parse( "One,TWO Three",
+ :headers => true,
+ :return_headers => true,
+ :header_converters => converter )
+ assert_equal(%w{One TWO_Three}, csv.headers)
+ end
+
+ def test_table_support
+ csv = nil
+ assert_nothing_raised(Exception) do
+ csv = CSV.parse(@data, :headers => true)
+ end
+
+ assert_instance_of(CSV::Table, csv)
+ end
+
+ def test_skip_blanks
+ @data = <<-END_CSV.gsub(/^ +/, "")
+
+
+ A,B,C
+
+ 1,2,3
+
+
+
+ END_CSV
+
+ expected = [%w[1 2 3]]
+ CSV.parse(@data, :headers => true, :skip_blanks => true) do |row|
+ assert_equal(expected.shift, row.fields)
+ end
+
+ expected = [%w[A B C], %w[1 2 3]]
+ CSV.parse( @data,
+ :headers => true,
+ :return_headers => true,
+ :skip_blanks => true ) do |row|
+ assert_equal(expected.shift, row.fields)
+ end
+ end
+
+ def test_blank_row_bug_fix
+ @data += "\n#{@data}" # add a blank row
+
+ # ensure that everything returned is a Row object
+ CSV.parse(@data, :headers => true) do |row|
+ assert_instance_of(CSV::Row, row)
+ end
+ end
+end
diff --git a/test/csv/tc_interface.rb b/test/csv/tc_interface.rb
new file mode 100644
index 0000000000..e8cc920f9d
--- /dev/null
+++ b/test/csv/tc_interface.rb
@@ -0,0 +1,235 @@
+#!/usr/local/bin/ruby -w
+
+# tc_interface.rb
+#
+# Created by James Edward Gray II on 2005-10-31.
+# Copyright 2005 James Edward Gray II. You can redistribute or modify this code
+# under the terms of Ruby's license.
+
+require "test/unit"
+
+require "csv"
+
+class TestCSVInterface < Test::Unit::TestCase
+ def setup
+ @path = File.join(File.dirname(__FILE__), "temp_test_data.csv")
+
+ File.open(@path, "w") do |file|
+ file << "1\t2\t3\r\n"
+ file << "4\t5\r\n"
+ end
+
+ @expected = [%w{1 2 3}, %w{4 5}]
+ end
+
+ def teardown
+ File.unlink(@path)
+ end
+
+ ### Test Read Interface ###
+
+ def test_foreach
+ CSV.foreach(@path, :col_sep => "\t", :row_sep => "\r\n") do |row|
+ assert_equal(@expected.shift, row)
+ end
+ end
+
+ def test_open_and_close
+ csv = CSV.open(@path, "r+", :col_sep => "\t", :row_sep => "\r\n")
+ assert_not_nil(csv)
+ assert_instance_of(CSV, csv)
+ assert_equal(false, csv.closed?)
+ csv.close
+ assert(csv.closed?)
+
+ ret = CSV.open(@path) do |csv|
+ assert_instance_of(CSV, csv)
+ "Return value."
+ end
+ assert(csv.closed?)
+ assert_equal("Return value.", ret)
+ end
+
+ def test_parse
+ data = File.read(@path)
+ assert_equal( @expected,
+ CSV.parse(data, :col_sep => "\t", :row_sep => "\r\n") )
+
+ CSV.parse(data, :col_sep => "\t", :row_sep => "\r\n") do |row|
+ assert_equal(@expected.shift, row)
+ end
+ end
+
+ def test_parse_line
+ row = CSV.parse_line("1;2;3", :col_sep => ";")
+ assert_not_nil(row)
+ assert_instance_of(Array, row)
+ assert_equal(%w{1 2 3}, row)
+
+ # shortcut interface
+ row = "1;2;3".parse_csv(:col_sep => ";")
+ assert_not_nil(row)
+ assert_instance_of(Array, row)
+ assert_equal(%w{1 2 3}, row)
+ end
+
+ def test_read_and_readlines
+ assert_equal( @expected,
+ CSV.read(@path, :col_sep => "\t", :row_sep => "\r\n") )
+ assert_equal( @expected,
+ CSV.readlines(@path, :col_sep => "\t", :row_sep => "\r\n") )
+
+
+ data = CSV.open(@path, :col_sep => "\t", :row_sep => "\r\n") do |csv|
+ csv.read
+ end
+ assert_equal(@expected, data)
+ data = CSV.open(@path, :col_sep => "\t", :row_sep => "\r\n") do |csv|
+ csv.readlines
+ end
+ assert_equal(@expected, data)
+ end
+
+ def test_table
+ table = CSV.table(@path, :col_sep => "\t", :row_sep => "\r\n")
+ assert_instance_of(CSV::Table, table)
+ assert_equal([[:"1", :"2", :"3"], [4, 5, nil]], table.to_a)
+ end
+
+ def test_shift # aliased as gets() and readline()
+ CSV.open(@path, "r+", :col_sep => "\t", :row_sep => "\r\n") do |csv|
+ assert_equal(@expected.shift, csv.shift)
+ assert_equal(@expected.shift, csv.shift)
+ assert_equal(nil, csv.shift)
+ end
+ end
+
+ ### Test Write Interface ###
+
+ def test_generate
+ str = CSV.generate do |csv| # default empty String
+ assert_instance_of(CSV, csv)
+ assert_equal(csv, csv << [1, 2, 3])
+ assert_equal(csv, csv << [4, nil, 5])
+ end
+ assert_not_nil(str)
+ assert_instance_of(String, str)
+ assert_equal("1,2,3\n4,,5\n", str)
+
+ CSV.generate(str) do |csv| # appending to a String
+ assert_equal(csv, csv << ["last", %Q{"row"}])
+ end
+ assert_equal(%Q{1,2,3\n4,,5\nlast,"""row"""\n}, str)
+ end
+
+ def test_generate_line
+ line = CSV.generate_line(%w{1 2 3}, :col_sep => ";")
+ assert_not_nil(line)
+ assert_instance_of(String, line)
+ assert_equal("1;2;3\n", line)
+
+ # shortcut interface
+ line = %w{1 2 3}.to_csv(:col_sep => ";")
+ assert_not_nil(line)
+ assert_instance_of(String, line)
+ assert_equal("1;2;3\n", line)
+ end
+
+ def test_write_header_detection
+ File.unlink(@path)
+
+ headers = %w{a b c}
+ CSV.open(@path, "w", :headers => true) do |csv|
+ csv << headers
+ csv << %w{1 2 3}
+ assert_equal(headers, csv.instance_variable_get(:@headers))
+ end
+ end
+
+ def test_write_lineno
+ File.unlink(@path)
+
+ CSV.open(@path, "w") do |csv|
+ lines = 20
+ lines.times { csv << %w{a b c} }
+ assert_equal(lines, csv.lineno)
+ end
+ end
+
+ def test_write_hash
+ File.unlink(@path)
+
+ lines = [{:a => 1, :b => 2, :c => 3}, {:a => 4, :b => 5, :c => 6}]
+ CSV.open( @path, "w", :headers => true,
+ :converters => :all,
+ :header_converters => :symbol ) do |csv|
+ csv << lines.first.keys
+ lines.each { |line| csv << line }
+ end
+ CSV.open( @path, "w", :headers => true,
+ :converters => :all,
+ :header_converters => :symbol ) do |csv|
+ csv.each { |line| assert_equal(lines.shift, line.to_hash) }
+ end
+ end
+
+ def test_append # aliased add_row() and puts()
+ File.unlink(@path)
+
+ CSV.open(@path, "w", :col_sep => "\t", :row_sep => "\r\n") do |csv|
+ @expected.each { |row| csv << row }
+ end
+
+ test_shift
+
+ # same thing using CSV::Row objects
+ File.unlink(@path)
+
+ CSV.open(@path, "w", :col_sep => "\t", :row_sep => "\r\n") do |csv|
+ @expected.each { |row| csv << CSV::Row.new(Array.new, row) }
+ end
+
+ test_shift
+ end
+
+ ### Test Read and Write Interface ###
+
+ def test_filter
+ assert_respond_to(CSV, :filter)
+
+ expected = [[1, 2, 3], [4, 5]]
+ CSV.filter( "1;2;3\n4;5\n", (result = String.new),
+ :in_col_sep => ";", :out_col_sep => ",",
+ :converters => :all ) do |row|
+ assert_equal(row, expected.shift)
+ row.map! { |n| n * 2 }
+ row << "Added\r"
+ end
+ assert_equal("2,4,6,\"Added\r\"\n8,10,\"Added\r\"\n", result)
+ end
+
+ def test_instance
+ csv = String.new
+
+ first = nil
+ assert_nothing_raised(Exception) do
+ first = CSV.instance(csv, :col_sep => ";")
+ first << %w{a b c}
+ end
+
+ assert_equal("a;b;c\n", csv)
+
+ second = nil
+ assert_nothing_raised(Exception) do
+ second = CSV.instance(csv, :col_sep => ";")
+ second << [1, 2, 3]
+ end
+
+ assert_equal(first.object_id, second.object_id)
+ assert_equal("a;b;c\n1;2;3\n", csv)
+
+ # shortcuts
+ assert_equal(STDOUT, CSV.instance.instance_eval { @io })
+ assert_equal(STDOUT, CSV { |csv| csv.instance_eval { @io } })
+ end
+end
diff --git a/test/csv/tc_row.rb b/test/csv/tc_row.rb
new file mode 100644
index 0000000000..a9b7f042b2
--- /dev/null
+++ b/test/csv/tc_row.rb
@@ -0,0 +1,289 @@
+#!/usr/local/bin/ruby -w
+
+# tc_row.rb
+#
+# Created by James Edward Gray II on 2005-10-31.
+# Copyright 2005 James Edward Gray II. You can redistribute or modify this code
+# under the terms of Ruby's license.
+
+require "test/unit"
+
+require "csv"
+
+class TestCSVRow < Test::Unit::TestCase
+ def setup
+ @row = CSV::Row.new(%w{A B C A A}, [1, 2, 3, 4])
+ end
+
+ def test_initialize
+ # basic
+ row = CSV::Row.new(%w{A B C}, [1, 2, 3])
+ assert_not_nil(row)
+ assert_instance_of(CSV::Row, row)
+ assert_equal([["A", 1], ["B", 2], ["C", 3]], row.to_a)
+
+ # missing headers
+ row = CSV::Row.new(%w{A}, [1, 2, 3])
+ assert_not_nil(row)
+ assert_instance_of(CSV::Row, row)
+ assert_equal([["A", 1], [nil, 2], [nil, 3]], row.to_a)
+
+ # missing fields
+ row = CSV::Row.new(%w{A B C}, [1, 2])
+ assert_not_nil(row)
+ assert_instance_of(CSV::Row, row)
+ assert_equal([["A", 1], ["B", 2], ["C", nil]], row.to_a)
+ end
+
+ def test_row_type
+ # field rows
+ row = CSV::Row.new(%w{A B C}, [1, 2, 3]) # implicit
+ assert(!row.header_row?)
+ assert(row.field_row?)
+ row = CSV::Row.new(%w{A B C}, [1, 2, 3], false) # explicit
+ assert(!row.header_row?)
+ assert(row.field_row?)
+
+ # header row
+ row = CSV::Row.new(%w{A B C}, [1, 2, 3], true)
+ assert(row.header_row?)
+ assert(!row.field_row?)
+ end
+
+ def test_headers
+ assert_equal(%w{A B C A A}, @row.headers)
+ end
+
+ def test_field
+ # by name
+ assert_equal(2, @row.field("B"))
+ assert_equal(2, @row["B"]) # alias
+
+ # by index
+ assert_equal(3, @row.field(2))
+
+ # missing
+ assert_nil(@row.field("Missing"))
+ assert_nil(@row.field(10))
+
+ # minimum index
+ assert_equal(1, @row.field("A"))
+ assert_equal(1, @row.field("A", 0))
+ assert_equal(4, @row.field("A", 1))
+ assert_equal(4, @row.field("A", 2))
+ assert_equal(4, @row.field("A", 3))
+ assert_equal(nil, @row.field("A", 4))
+ assert_equal(nil, @row.field("A", 5))
+ end
+
+ def test_set_field
+ # set field by name
+ assert_equal(100, @row["A"] = 100)
+
+ # set field by index
+ assert_equal(300, @row[3] = 300)
+
+ # set field by name and minimum index
+ assert_equal([:a, :b, :c], @row["A", 4] = [:a, :b, :c])
+
+ # verify the changes
+ assert_equal( [ ["A", 100],
+ ["B", 2],
+ ["C", 3],
+ ["A", 300],
+ ["A", [:a, :b, :c]] ], @row.to_a )
+
+ # assigning an index past the end
+ assert_equal("End", @row[10] = "End")
+ assert_equal( [ ["A", 100],
+ ["B", 2],
+ ["C", 3],
+ ["A", 300],
+ ["A", [:a, :b, :c]],
+ [nil, nil],
+ [nil, nil],
+ [nil, nil],
+ [nil, nil],
+ [nil, nil],
+ [nil, "End"] ], @row.to_a )
+
+ # assigning a new field by header
+ assert_equal("New", @row[:new] = "New")
+ assert_equal( [ ["A", 100],
+ ["B", 2],
+ ["C", 3],
+ ["A", 300],
+ ["A", [:a, :b, :c]],
+ [nil, nil],
+ [nil, nil],
+ [nil, nil],
+ [nil, nil],
+ [nil, nil],
+ [nil, "End"],
+ [:new, "New"] ], @row.to_a )
+ end
+
+ def test_append
+ # add a value
+ assert_equal(@row, @row << "Value")
+ assert_equal( [ ["A", 1],
+ ["B", 2],
+ ["C", 3],
+ ["A", 4],
+ ["A", nil],
+ [nil, "Value"] ], @row.to_a )
+
+ # add a pair
+ assert_equal(@row, @row << %w{Header Field})
+ assert_equal( [ ["A", 1],
+ ["B", 2],
+ ["C", 3],
+ ["A", 4],
+ ["A", nil],
+ [nil, "Value"],
+ %w{Header Field} ], @row.to_a )
+
+ # a pair with Hash syntax
+ assert_equal(@row, @row << {:key => :value})
+ assert_equal( [ ["A", 1],
+ ["B", 2],
+ ["C", 3],
+ ["A", 4],
+ ["A", nil],
+ [nil, "Value"],
+ %w{Header Field},
+ [:key, :value] ], @row.to_a )
+
+ # multiple fields at once
+ assert_equal(@row, @row.push(100, 200, [:last, 300]))
+ assert_equal( [ ["A", 1],
+ ["B", 2],
+ ["C", 3],
+ ["A", 4],
+ ["A", nil],
+ [nil, "Value"],
+ %w{Header Field},
+ [:key, :value],
+ [nil, 100],
+ [nil, 200],
+ [:last, 300] ], @row.to_a )
+ end
+
+ def test_delete
+ # by index
+ assert_equal(["B", 2], @row.delete(1))
+
+ # by header
+ assert_equal(["C", 3], @row.delete("C"))
+
+ # using a block
+ assert_equal(@row, @row.delete_if { |h, f| h == "A" and not f.nil? })
+ assert_equal([["A", nil]], @row.to_a)
+ end
+
+ def test_fields
+ # all fields
+ assert_equal([1, 2, 3, 4, nil], @row.fields)
+
+ # by header
+ assert_equal([1, 3], @row.fields("A", "C"))
+
+ # by index
+ assert_equal([2, 3, nil], @row.fields(1, 2, 10))
+
+ # by both
+ assert_equal([2, 3, 4], @row.fields("B", "C", 3))
+
+ # with minimum indices
+ assert_equal([2, 3, 4], @row.fields("B", "C", ["A", 3]))
+
+ # by header range
+ assert_equal([2, 3], @row.values_at("B".."C"))
+ end
+
+ def test_index
+ # basic usage
+ assert_equal(0, @row.index("A"))
+ assert_equal(1, @row.index("B"))
+ assert_equal(2, @row.index("C"))
+ assert_equal(nil, @row.index("Z"))
+
+ # with minimum index
+ assert_equal(0, @row.index("A"))
+ assert_equal(0, @row.index("A", 0))
+ assert_equal(3, @row.index("A", 1))
+ assert_equal(3, @row.index("A", 2))
+ assert_equal(3, @row.index("A", 3))
+ assert_equal(4, @row.index("A", 4))
+ assert_equal(nil, @row.index("A", 5))
+ end
+
+ def test_queries
+ # headers
+ assert(@row.header?("A"))
+ assert(@row.header?("C"))
+ assert(!@row.header?("Z"))
+ assert(@row.include?("A")) # alias
+
+ # fields
+ assert(@row.field?(4))
+ assert(@row.field?(nil))
+ assert(!@row.field?(10))
+ end
+
+ def test_each
+ # array style
+ ary = @row.to_a
+ @row.each do |pair|
+ assert_equal(ary.first.first, pair.first)
+ assert_equal(ary.shift.last, pair.last)
+ end
+
+ # hash style
+ ary = @row.to_a
+ @row.each do |header, field|
+ assert_equal(ary.first.first, header)
+ assert_equal(ary.shift.last, field)
+ end
+
+ # verify that we can chain the call
+ assert_equal(@row, @row.each { })
+ end
+
+ def test_enumerable
+ assert_equal( [["A", 1], ["A", 4], ["A", nil]],
+ @row.select { |pair| pair.first == "A" } )
+
+ assert_equal(10, @row.inject(0) { |sum, (header, n)| sum + (n || 0) })
+ end
+
+ def test_to_a
+ row = CSV::Row.new(%w{A B C}, [1, 2, 3]).to_a
+ assert_instance_of(Array, row)
+ row.each do |pair|
+ assert_instance_of(Array, pair)
+ assert_equal(2, pair.size)
+ end
+ assert_equal([["A", 1], ["B", 2], ["C", 3]], row)
+ end
+
+ def test_to_hash
+ assert_equal({"A" => nil, "B" => 2, "C" => 3}, @row.to_hash)
+ end
+
+ def test_to_csv
+ # normal conversion
+ assert_equal("1,2,3,4,\n", @row.to_csv)
+ assert_equal("1,2,3,4,\n", @row.to_s) # alias
+
+ # with options
+ assert_equal( "1|2|3|4|\r\n",
+ @row.to_csv(:col_sep => "|", :row_sep => "\r\n") )
+ end
+
+ def test_array_delegation
+ assert(!@row.empty?, "Row was empty.")
+
+ assert_equal([@row.headers.size, @row.fields.size].max, @row.size)
+ end
+end
diff --git a/test/csv/tc_serialization.rb b/test/csv/tc_serialization.rb
new file mode 100644
index 0000000000..45e26bc0b5
--- /dev/null
+++ b/test/csv/tc_serialization.rb
@@ -0,0 +1,155 @@
+#!/usr/local/bin/ruby -w
+
+# tc_serialization.rb
+#
+# Created by James Edward Gray II on 2005-10-31.
+# Copyright 2005 James Edward Gray II. You can redistribute or modify this code
+# under the terms of Ruby's license.
+
+require "test/unit"
+
+require "csv"
+
+# An example of how to provide custom CSV serialization.
+class Hash
+ def self.csv_load( meta, headers, fields )
+ self[*headers.zip(fields).flatten.map { |e| eval(e) }]
+ end
+
+ def csv_headers
+ keys.map { |key| key.inspect }
+ end
+
+ def csv_dump( headers )
+ headers.map { |header| fetch(eval(header)).inspect }
+ end
+end
+
+class TestSerialization < Test::Unit::TestCase
+
+ ### Classes Used to Test Serialization ###
+
+ class ReadOnlyName
+ def initialize( first, last )
+ @first, @last = first, last
+ end
+
+ attr_reader :first, :last
+
+ def ==( other )
+ %w{first last}.all? { |att| send(att) == other.send(att) }
+ end
+ end
+
+ Name = Struct.new(:first, :last)
+
+ class FullName < Name
+ def initialize( first, last, suffix = nil )
+ super(first, last)
+
+ @suffix = suffix
+ end
+
+ attr_accessor :suffix
+
+ def ==( other )
+ %w{first last suffix}.all? { |att| send(att) == other.send(att) }
+ end
+ end
+
+ ### Tests ###
+
+ def test_class_dump
+ @names = [ %w{James Gray},
+ %w{Dana Gray},
+ %w{Greg Brown} ].map do |first, last|
+ ReadOnlyName.new(first, last)
+ end
+
+ assert_nothing_raised(Exception) do
+ @data = CSV.dump(@names)
+ end
+ assert_equal(<<-END_CLASS_DUMP.gsub(/^\s*/, ""), @data)
+ class,TestSerialization::ReadOnlyName
+ @first,@last
+ James,Gray
+ Dana,Gray
+ Greg,Brown
+ END_CLASS_DUMP
+ end
+
+ def test_struct_dump
+ @names = [ %w{James Gray},
+ %w{Dana Gray},
+ %w{Greg Brown} ].map do |first, last|
+ Name.new(first, last)
+ end
+
+ assert_nothing_raised(Exception) do
+ @data = CSV.dump(@names)
+ end
+ assert_equal(<<-END_STRUCT_DUMP.gsub(/^\s*/, ""), @data)
+ class,TestSerialization::Name
+ first=,last=
+ James,Gray
+ Dana,Gray
+ Greg,Brown
+ END_STRUCT_DUMP
+ end
+
+ def test_inherited_struct_dump
+ @names = [ %w{James Gray II},
+ %w{Dana Gray},
+ %w{Greg Brown} ].map do |first, last, suffix|
+ FullName.new(first, last, suffix)
+ end
+
+ assert_nothing_raised(Exception) do
+ @data = CSV.dump(@names)
+ end
+ assert_equal(<<-END_STRUCT_DUMP.gsub(/^\s*/, ""), @data)
+ class,TestSerialization::FullName
+ @suffix,first=,last=
+ II,James,Gray
+ ,Dana,Gray
+ ,Greg,Brown
+ END_STRUCT_DUMP
+ end
+
+ def test_load
+ %w{ test_class_dump
+ test_struct_dump
+ test_inherited_struct_dump }.each do |test|
+ send(test)
+ CSV.load(@data).each do |loaded|
+ assert_instance_of(@names.first.class, loaded)
+ assert_equal(@names.shift, loaded)
+ end
+ end
+ end
+
+ def test_io
+ test_class_dump
+
+ data_file = File.join(File.dirname(__FILE__), "temp_test_data.csv")
+ CSV.dump(@names, File.open(data_file, "w"))
+
+ assert(File.exist?(data_file))
+ assert_equal(<<-END_IO_DUMP.gsub(/^\s*/, ""), File.read(data_file))
+ class,TestSerialization::ReadOnlyName
+ @first,@last
+ James,Gray
+ Dana,Gray
+ Greg,Brown
+ END_IO_DUMP
+
+ assert_equal(@names, CSV.load(File.open(data_file)))
+
+ File.unlink(data_file)
+ end
+
+ def test_custom_dump_and_load
+ obj = {1 => "simple", :test => Hash}
+ assert_equal(obj, CSV.load(CSV.dump([obj])).first)
+ end
+end
diff --git a/test/csv/tc_table.rb b/test/csv/tc_table.rb
new file mode 100644
index 0000000000..028274d97f
--- /dev/null
+++ b/test/csv/tc_table.rb
@@ -0,0 +1,392 @@
+#!/usr/local/bin/ruby -w
+
+# tc_table.rb
+#
+# Created by James Edward Gray II on 2005-10-31.
+# Copyright 2005 James Edward Gray II. You can redistribute or modify this code
+# under the terms of Ruby's license.
+
+require "test/unit"
+
+require "csv"
+
+class TestCSVTable < Test::Unit::TestCase
+ def setup
+ @rows = [ CSV::Row.new(%w{A B C}, [1, 2, 3]),
+ CSV::Row.new(%w{A B C}, [4, 5, 6]),
+ CSV::Row.new(%w{A B C}, [7, 8, 9]) ]
+ @table = CSV::Table.new(@rows)
+
+ @header_table = CSV::Table.new(
+ [CSV::Row.new(%w{A B C}, %w{A B C}, true)] + @rows
+ )
+ end
+
+  def test_initialize
+ assert_not_nil(@table)
+ assert_instance_of(CSV::Table, @table)
+ end
+
+ def test_modes
+ assert_equal(:col_or_row, @table.mode)
+
+ # non-destructive changes, intended for one shot calls
+ cols = @table.by_col
+ assert_equal(:col_or_row, @table.mode)
+ assert_equal(:col, cols.mode)
+ assert_equal(@table, cols)
+
+ rows = @table.by_row
+ assert_equal(:col_or_row, @table.mode)
+ assert_equal(:row, rows.mode)
+ assert_equal(@table, rows)
+
+ # destructive mode changing calls
+ assert_equal(@table, @table.by_row!)
+ assert_equal(:row, @table.mode)
+ assert_equal(@table, @table.by_col_or_row!)
+ assert_equal(:col_or_row, @table.mode)
+ end
+
+ def test_headers
+ assert_equal(@rows.first.headers, @table.headers)
+ end
+
+ def test_index
+ ##################
+ ### Mixed Mode ###
+ ##################
+ # by row
+ @rows.each_index { |i| assert_equal(@rows[i], @table[i]) }
+ assert_equal(nil, @table[100]) # empty row
+
+ # by col
+ @rows.first.headers.each do |header|
+ assert_equal(@rows.map { |row| row[header] }, @table[header])
+ end
+ assert_equal([nil] * @rows.size, @table["Z"]) # empty col
+
+ # by cell, row then col
+ assert_equal(2, @table[0][1])
+ assert_equal(6, @table[1]["C"])
+
+ # by cell, col then row
+ assert_equal(5, @table["B"][1])
+ assert_equal(9, @table["C"][2])
+
+ # with headers (by col)
+ assert_equal(["B", 2, 5, 8], @header_table["B"])
+
+ ###################
+ ### Column Mode ###
+ ###################
+ @table.by_col!
+
+ assert_equal([2, 5, 8], @table[1])
+ assert_equal([2, 5, 8], @table["B"])
+
+ ################
+ ### Row Mode ###
+ ################
+ @table.by_row!
+
+ assert_equal(@rows[1], @table[1])
+ assert_raise(TypeError) { @table["B"] }
+
+ ############################
+ ### One Shot Mode Change ###
+ ############################
+ assert_equal(@rows[1], @table[1])
+ assert_equal([2, 5, 8], @table.by_col[1])
+ assert_equal(@rows[1], @table[1])
+ end
+
+ def test_set_row_or_column
+ ##################
+ ### Mixed Mode ###
+ ##################
+ # set row
+ @table[2] = [10, 11, 12]
+ assert_equal([%w[A B C], [1, 2, 3], [4, 5, 6], [10, 11, 12]], @table.to_a)
+
+ @table[3] = CSV::Row.new(%w[A B C], [13, 14, 15])
+ assert_equal( [%w[A B C], [1, 2, 3], [4, 5, 6], [10, 11, 12], [13, 14, 15]],
+ @table.to_a )
+
+ # set col
+ @table["Type"] = "data"
+ assert_equal( [ %w[A B C Type],
+ [1, 2, 3, "data"],
+ [4, 5, 6, "data"],
+ [10, 11, 12, "data"],
+ [13, 14, 15, "data"] ],
+ @table.to_a )
+
+ @table["Index"] = [1, 2, 3]
+ assert_equal( [ %w[A B C Type Index],
+ [1, 2, 3, "data", 1],
+ [4, 5, 6, "data", 2],
+ [10, 11, 12, "data", 3],
+ [13, 14, 15, "data", nil] ],
+ @table.to_a )
+
+ @table["B"] = [100, 200]
+ assert_equal( [ %w[A B C Type Index],
+ [1, 100, 3, "data", 1],
+ [4, 200, 6, "data", 2],
+ [10, nil, 12, "data", 3],
+ [13, nil, 15, "data", nil] ],
+ @table.to_a )
+
+ # verify resulting table
+ assert_equal(<<-END_RESULT.gsub(/^\s+/, ""), @table.to_csv)
+ A,B,C,Type,Index
+ 1,100,3,data,1
+ 4,200,6,data,2
+ 10,,12,data,3
+ 13,,15,data,
+ END_RESULT
+
+ # with headers
+ @header_table["Type"] = "data"
+ assert_equal(%w[Type data data data], @header_table["Type"])
+
+ ###################
+ ### Column Mode ###
+ ###################
+ @table.by_col!
+
+ @table[1] = [2, 5, 11, 14]
+ assert_equal( [ %w[A B C Type Index],
+ [1, 2, 3, "data", 1],
+ [4, 5, 6, "data", 2],
+ [10, 11, 12, "data", 3],
+ [13, 14, 15, "data", nil] ],
+ @table.to_a )
+
+ @table["Extra"] = "new stuff"
+ assert_equal( [ %w[A B C Type Index Extra],
+ [1, 2, 3, "data", 1, "new stuff"],
+ [4, 5, 6, "data", 2, "new stuff"],
+ [10, 11, 12, "data", 3, "new stuff"],
+ [13, 14, 15, "data", nil, "new stuff"] ],
+ @table.to_a )
+
+ ################
+ ### Row Mode ###
+ ################
+ @table.by_row!
+
+ @table[1] = (1..6).to_a
+ assert_equal( [ %w[A B C Type Index Extra],
+ [1, 2, 3, "data", 1, "new stuff"],
+ [1, 2, 3, 4, 5, 6],
+ [10, 11, 12, "data", 3, "new stuff"],
+ [13, 14, 15, "data", nil, "new stuff"] ],
+ @table.to_a )
+
+ assert_raise(TypeError) { @table["Extra"] = nil }
+ end
+
+ def test_each
+ ######################
+ ### Mixed/Row Mode ###
+ ######################
+ i = 0
+ @table.each do |row|
+ assert_equal(@rows[i], row)
+ i += 1
+ end
+
+ # verify that we can chain the call
+ assert_equal(@table, @table.each { })
+
+ ###################
+ ### Column Mode ###
+ ###################
+ @table.by_col!
+
+ headers = @table.headers
+ @table.each do |header, column|
+ assert_equal(headers.shift, header)
+ assert_equal(@table[header], column)
+ end
+
+ ############################
+ ### One Shot Mode Change ###
+ ############################
+ @table.by_col_or_row!
+
+ @table.each { |row| assert_instance_of(CSV::Row, row) }
+ @table.by_col.each { |tuple| assert_instance_of(Array, tuple) }
+ @table.each { |row| assert_instance_of(CSV::Row, row) }
+ end
+
+ def test_enumerable
+ assert_equal( @rows.values_at(0, 2),
+ @table.select { |row| (row["B"] % 2).zero? } )
+
+ assert_equal(@rows[1], @table.find { |row| row["C"] > 5 })
+ end
+
+ def test_to_a
+ assert_equal([%w[A B C], [1, 2, 3], [4, 5, 6], [7, 8, 9]], @table.to_a)
+
+ # with headers
+ assert_equal( [%w[A B C], [1, 2, 3], [4, 5, 6], [7, 8, 9]],
+ @header_table.to_a )
+ end
+
+ def test_to_csv
+ csv = <<-END_CSV.gsub(/^\s+/, "")
+ A,B,C
+ 1,2,3
+ 4,5,6
+ 7,8,9
+ END_CSV
+
+ # normal conversion
+ assert_equal(csv, @table.to_csv)
+ assert_equal(csv, @table.to_s) # alias
+
+ # with options
+ assert_equal( csv.gsub(",", "|").gsub("\n", "\r\n"),
+ @table.to_csv(:col_sep => "|", :row_sep => "\r\n") )
+
+ # with headers
+ assert_equal(csv, @header_table.to_csv)
+ end
+
+ def test_append
+ # verify that we can chain the call
+ assert_equal(@table, @table << [10, 11, 12])
+
+ # Array append
+ assert_equal(CSV::Row.new(%w[A B C], [10, 11, 12]), @table[-1])
+
+ # Row append
+ assert_equal(@table, @table << CSV::Row.new(%w[A B C], [13, 14, 15]))
+ assert_equal(CSV::Row.new(%w[A B C], [13, 14, 15]), @table[-1])
+ end
+
+ def test_delete
+ ##################
+ ### Mixed Mode ###
+ ##################
+ # delete a row
+ assert_equal(@rows[1], @table.delete(1))
+
+ # delete a col
+ assert_equal(@rows.map { |row| row["A"] }, @table.delete("A"))
+
+ # verify resulting table
+ assert_equal(<<-END_RESULT.gsub(/^\s+/, ""), @table.to_csv)
+ B,C
+ 2,3
+ 8,9
+ END_RESULT
+
+ ###################
+ ### Column Mode ###
+ ###################
+ setup
+ @table.by_col!
+
+ assert_equal(@rows.map { |row| row[0] }, @table.delete(0))
+ assert_equal(@rows.map { |row| row["C"] }, @table.delete("C"))
+
+ # verify resulting table
+ assert_equal(<<-END_RESULT.gsub(/^\s+/, ""), @table.to_csv)
+ B
+ 2
+ 5
+ 8
+ END_RESULT
+
+ ################
+ ### Row Mode ###
+ ################
+ setup
+ @table.by_row!
+
+ assert_equal(@rows[1], @table.delete(1))
+ assert_raise(TypeError) { @table.delete("C") }
+
+ # verify resulting table
+ assert_equal(<<-END_RESULT.gsub(/^\s+/, ""), @table.to_csv)
+ A,B,C
+ 1,2,3
+ 7,8,9
+ END_RESULT
+ end
+
+ def test_delete_if
+ ######################
+ ### Mixed/Row Mode ###
+ ######################
+ # verify that we can chain the call
+ assert_equal(@table, @table.delete_if { |row| (row["B"] % 2).zero? })
+
+ # verify resulting table
+ assert_equal(<<-END_RESULT.gsub(/^\s+/, ""), @table.to_csv)
+ A,B,C
+ 4,5,6
+ END_RESULT
+
+ ###################
+ ### Column Mode ###
+ ###################
+ setup
+ @table.by_col!
+
+ assert_equal(@table, @table.delete_if { |h, v| h > "A" })
+ assert_equal(<<-END_RESULT.gsub(/^\s+/, ""), @table.to_csv)
+ A
+ 1
+ 4
+ 7
+ END_RESULT
+ end
+
+ def test_values_at
+ ##################
+ ### Mixed Mode ###
+ ##################
+ # rows
+ assert_equal(@rows.values_at(0, 2), @table.values_at(0, 2))
+ assert_equal(@rows.values_at(1..2), @table.values_at(1..2))
+
+ # cols
+ assert_equal([[1, 3], [4, 6], [7, 9]], @table.values_at("A", "C"))
+ assert_equal([[2, 3], [5, 6], [8, 9]], @table.values_at("B".."C"))
+
+ ###################
+ ### Column Mode ###
+ ###################
+ @table.by_col!
+
+ assert_equal([[1, 3], [4, 6], [7, 9]], @table.values_at(0, 2))
+ assert_equal([[1, 3], [4, 6], [7, 9]], @table.values_at("A", "C"))
+
+ ################
+ ### Row Mode ###
+ ################
+ @table.by_row!
+
+ assert_equal(@rows.values_at(0, 2), @table.values_at(0, 2))
+ assert_raise(TypeError) { @table.values_at("A", "C") }
+
+ ############################
+ ### One Shot Mode Change ###
+ ############################
+ assert_equal(@rows.values_at(0, 2), @table.values_at(0, 2))
+ assert_equal([[1, 3], [4, 6], [7, 9]], @table.by_col.values_at(0, 2))
+ assert_equal(@rows.values_at(0, 2), @table.values_at(0, 2))
+ end
+
+ def test_array_delegation
+ assert(!@table.empty?, "Table was empty.")
+
+ assert_equal(@rows.size, @table.size)
+ end
+end
diff --git a/test/csv/ts_all.rb b/test/csv/ts_all.rb
new file mode 100644
index 0000000000..c930523757
--- /dev/null
+++ b/test/csv/ts_all.rb
@@ -0,0 +1,19 @@
+#!/usr/local/bin/ruby -w
+
+# ts_all.rb
+#
+# Created by James Edward Gray II on 2005-10-31.
+# Copyright 2005 James Edward Gray II. You can redistribute or modify this code
+# under the terms of Ruby's license.
+
+require "test/unit"
+
+require "tc_csv_parsing"
+require "tc_features"
+require "tc_interface"
+require "tc_csv_writing"
+require "tc_data_converters"
+require "tc_row"
+require "tc_table"
+require "tc_headers"
+require "tc_serialization"