From 0644ff49d3d902ff5384d0c2e940004438c51538 Mon Sep 17 00:00:00 2001 From: Matt Fullerton Date: Tue, 12 Aug 2014 09:04:55 +0200 Subject: [PATCH 1/9] Update tutorial-views.markdown Given that exact paths to the dependencies are given, they may as well agree with the ones currently in the repository. In addition, extra dependencies are required to get SlickGrid to attempt to do anything. This may not yet be 100% correct (see Issue #432). --- docs/tutorial-views.markdown | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/docs/tutorial-views.markdown b/docs/tutorial-views.markdown index de0bcae4..b9f56186 100644 --- a/docs/tutorial-views.markdown +++ b/docs/tutorial-views.markdown @@ -28,10 +28,13 @@ Before writing any code with Recline, you need to do the following preparation s 3. Include the relevant Javascript files somewhere on the page (preferably before body close tag): {% highlight html %} - - - + + + + + + - - - + + + + + From b71229ba360031e6ca30f5305156a03cfa418d8f Mon Sep 17 00:00:00 2001 From: Rufus Pollock Date: Sat, 16 Aug 2014 17:02:51 +0100 Subject: [PATCH 6/9] [demos][s]: simplify multiview demo. --- demos/multiview/app.js | 125 ++++++++++++++++++++--------------------- 1 file changed, 61 insertions(+), 64 deletions(-) diff --git a/demos/multiview/app.js b/demos/multiview/app.js index 2a5a1453..806c9a96 100755 --- a/demos/multiview/app.js +++ b/demos/multiview/app.js @@ -1,73 +1,69 @@ jQuery(function($) { - window.dataExplorer = null; + window.multiView = null; window.explorerDiv = $('.data-explorer-here'); - // This is some fancy stuff to allow configuring the multiview from - // parameters in the query string - // - // For more on state see the view documentation. 
- var state = recline.View.parseQueryString(decodeURIComponent(window.location.search)); - if (state) { - _.each(state, function(value, key) { - try { - value = JSON.parse(value); - } catch(e) {} - state[key] = value; - }); - } else { - state.url = 'demo'; - } - var dataset = null; - if (state.dataset || state.url) { - var datasetInfo = _.extend({ - url: state.url, - backend: state.backend - }, - state.dataset - ); - dataset = new recline.Model.Dataset(datasetInfo); - } else { - dataset = new recline.Model.Dataset({ - records: [ - {id: 0, date: '2011-01-01', x: 1, y: 2, z: 3, country: 'DE', title: 'first', lat:52.56, lon:13.40}, - {id: 1, date: '2011-02-02', x: 2, y: 4, z: 24, country: 'UK', title: 'second', lat:54.97, lon:-1.60}, - {id: 2, date: '2011-03-03', x: 3, y: 6, z: 9, country: 'US', title: 'third', lat:40.00, lon:-75.5}, - {id: 3, date: '2011-04-04', x: 4, y: 8, z: 6, country: 'UK', title: 'fourth', lat:57.27, lon:-6.20}, - {id: 4, date: '2011-05-04', x: 5, y: 10, z: 15, country: 'UK', title: 'fifth', lat:51.58, lon:0}, - {id: 5, date: '2011-06-02', x: 6, y: 12, z: 18, country: 'DE', title: 'sixth', lat:51.04, lon:7.9} - ], - // let's be really explicit about fields - // Plus take opportunity to set date to be a date field and set some labels - fields: [ - {id: 'id'}, - {id: 'date', type: 'date'}, - {id: 'x', type: 'number'}, - {id: 'y', type: 'number'}, - {id: 'z', type: 'number'}, - {id: 'country', 'label': 'Country'}, - {id: 'title', 'label': 'Title'}, - {id: 'lat'}, - {id: 'lon'} - ] - }); - } - createExplorer(dataset, state); + // create the demo dataset + var dataset = createDemoDataset(); + // now create the multiview + // this is rather more elaborate than the minimum as we configure the + // MultiView in various ways (see function below) + window.multiview = createMultiView(dataset); + + // last, we'll demonstrate binding to changes in the dataset + // this will print out a summary of each change onto the page in the + // changelog section + 
dataset.records.bind('all', function(name, obj) { + var $info = $('
'); + $info.html(name + ': ' + JSON.stringify(obj.toJSON())); + $('.changelog').append($info); + $('.changelog').show(); + }); }); +// create standard demo dataset +function createDemoDataset() { + var dataset = new recline.Model.Dataset({ + records: [ + {id: 0, date: '2011-01-01', x: 1, y: 2, z: 3, country: 'DE', title: 'first', lat:52.56, lon:13.40}, + {id: 1, date: '2011-02-02', x: 2, y: 4, z: 24, country: 'UK', title: 'second', lat:54.97, lon:-1.60}, + {id: 2, date: '2011-03-03', x: 3, y: 6, z: 9, country: 'US', title: 'third', lat:40.00, lon:-75.5}, + {id: 3, date: '2011-04-04', x: 4, y: 8, z: 6, country: 'UK', title: 'fourth', lat:57.27, lon:-6.20}, + {id: 4, date: '2011-05-04', x: 5, y: 10, z: 15, country: 'UK', title: 'fifth', lat:51.58, lon:0}, + {id: 5, date: '2011-06-02', x: 6, y: 12, z: 18, country: 'DE', title: 'sixth', lat:51.04, lon:7.9} + ], + // let's be really explicit about fields + // Plus take opportunity to set date to be a date field and set some labels + fields: [ + {id: 'id'}, + {id: 'date', type: 'date'}, + {id: 'x', type: 'number'}, + {id: 'y', type: 'number'}, + {id: 'z', type: 'number'}, + {id: 'country', 'label': 'Country'}, + {id: 'title', 'label': 'Title'}, + {id: 'lat'}, + {id: 'lon'} + ] + }); + return dataset; +} -// make Explorer creation / initialization in a function so we can call it -// again and again -var createExplorer = function(dataset, state) { - // remove existing data explorer view +// make MultivView +// +// creation / initialization in a function so we can call it again and again +var createMultiView = function(dataset, state) { + // remove existing multiview if present var reload = false; - if (window.dataExplorer) { - window.dataExplorer.remove(); + if (window.multiView) { + window.multiView.remove(); + window.multiView = null; reload = true; } - window.dataExplorer = null; + var $el = $('
'); $el.appendTo(window.explorerDiv); + // customize the subviews for the MultiView var views = [ { id: 'grid', @@ -77,12 +73,12 @@ var createExplorer = function(dataset, state) { state: { gridOptions: { editable: true, - // Enable support for row add + // Enable support for row add enabledAddRow: true, - // Enable support for row delete - enabledDelRow: true, - // Enable support for row Reoder - enableReOrderRow:true, + // Enable support for row delete + enabledDelRow: true, + // Enable support for row Reoder + enableReOrderRow:true, autoEdit: false, enableCellNavigation: true }, @@ -110,11 +106,12 @@ var createExplorer = function(dataset, state) { } ]; - window.dataExplorer = new recline.View.MultiView({ + var multiView = new recline.View.MultiView({ model: dataset, el: $el, state: state, views: views }); + return multiView; } From 5930fbce43cfcff651b17f737e752f83e4a2e8fa Mon Sep 17 00:00:00 2001 From: Rufus Pollock Date: Sat, 16 Aug 2014 17:04:05 +0100 Subject: [PATCH 7/9] [demos][xs]: left out html for new changelog stuff in multiview. --- demos/multiview/index.html | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/demos/multiview/index.html b/demos/multiview/index.html index e33203a7..4028fa3a 100644 --- a/demos/multiview/index.html +++ b/demos/multiview/index.html @@ -7,14 +7,24 @@ root: ../../ +
+

Changes

+
+
From 12c07b45dc5a7f1c88064811e0692ae6fbd18d3b Mon Sep 17 00:00:00 2001 From: Rufus Pollock Date: Sat, 16 Aug 2014 17:23:05 +0100 Subject: [PATCH 8/9] [README][xs]: update changelog for v0.6 and v0.7 releases. --- README.md | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 12f3b9ac..002fd285 100755 --- a/README.md +++ b/README.md @@ -33,21 +33,31 @@ See CONTRIBUTING.md. ## Changelog -### v0.6 - Sprint 2013 (tbc) -[v0.6 milestone](https://github.com/okfn/recline/issues?milestone=5) +### v0.7 - Summer 2014 (tbc) + +[v0.7 milestone](https://github.com/okfn/recline/issues?milestone=7) Possible breaking changes + * Support for row/add/delete/Reorder for recline slickGrid check `_includes/recline-deps.html` for slcikGrid plugins required #396 +* Upgraded timelinejs lib - #316 + +### v0.6 - Summer 2013 + +[v0.6 milestone](https://github.com/okfn/recline/issues?milestone=5) (more than 40 issues) + +Possible breaking changes + * Many backends moved to their own repositories #314 +* Upgarde to Backbone v1.0 #351 * Updated Leaflet to latest version 0.4.4 #220 -* Added marker clustering in map view to handle a large number of markers +* Added marker clustering in map view to handle a large number of markers (and allowed it to disabled) * Dataset.restore method removed (not used internally except from Multiview.restore) * Views no longer call render in initialize but must be called client code * Backend.Memory.Store attribute for holding 'records' renamed to `records` from `data` -* Require new underscore.deferred vendor library for all use (jQuery no longer required if just using recline.dataset.js) +* Option to use underscore.deferred vendor library and not use jQuery (jQuery no longer required if just using recline.dataset.js) * View.el is now the raw DOM element. If you want a jQuery-wrapped version, use view.$el. 
#350 -* Upgraded timelinejs lib - #316 * Pager widget now takes Dataset object rather than QueryState object #386 ### v0.5 - July 5th 2012 (first public release) From 095f64e1f3cca2f036d82f614928415217198aaf Mon Sep 17 00:00:00 2001 From: Rufus Pollock Date: Sat, 16 Aug 2014 18:10:06 +0100 Subject: [PATCH 9/9] [fixes #444,csv][s]: remove csv backend as now in own repo. * Moved to https://github.com/okfn/csv.js in Feb and been improving since * Updated the docs to reflect the removal (plus did some other improvements to backend docs at same time) --- README.md | 1 + _includes/backend-list.html | 18 +- _includes/example-backends-online-csv.js | 2 +- _includes/recline-deps.html | 2 +- dist/recline.js | 317 +---------------------- docs/backends.markdown | 22 +- docs/index.html | 4 - docs/tutorial-backends.markdown | 54 ++-- src/backend.csv.js | 307 ---------------------- test/backend.csv.test.js | 112 -------- 10 files changed, 59 insertions(+), 780 deletions(-) delete mode 100644 src/backend.csv.js delete mode 100644 test/backend.csv.test.js diff --git a/README.md b/README.md index 002fd285..c295aedc 100755 --- a/README.md +++ b/README.md @@ -42,6 +42,7 @@ Possible breaking changes * Support for row/add/delete/Reorder for recline slickGrid check `_includes/recline-deps.html` for slcikGrid plugins required #396 * Upgraded timelinejs lib - #316 +* Removed csv backend (as now in separate repo) #444 ### v0.6 - Summer 2013 diff --git a/_includes/backend-list.html b/_includes/backend-list.html index 315f856f..af33cb01 100644 --- a/_includes/backend-list.html +++ b/_includes/backend-list.html @@ -1,10 +1,8 @@ - +* gdocs: Google Docs (Spreadsheet) +* csv: CSV files +* solr: SOLR (partial) +* elasticsearch: ElasticSearch +* dataproxy: DataProxy (CSV and XLS on the Web) +* ckan: CKAN – support for CKAN datastore +* couchdb: CouchDB +* memory: Memory (local data) diff --git a/_includes/example-backends-online-csv.js b/_includes/example-backends-online-csv.js index 
284998c9..9cf128b3 100644 --- a/_includes/example-backends-online-csv.js +++ b/_includes/example-backends-online-csv.js @@ -1,7 +1,7 @@ // Create the dataset in the usual way // Note the additional options you can specify for parsing the CSV file var dataset = new recline.Model.Dataset({ - url: '{{page.root}}/demos/data/sample.csv', + url: '{{page.root}}demos/data/sample.csv', backend: 'csv', // delimiter: ',', // quotechar: '"', diff --git a/_includes/recline-deps.html b/_includes/recline-deps.html index 7e26d98e..60b2005e 100644 --- a/_includes/recline-deps.html +++ b/_includes/recline-deps.html @@ -67,7 +67,7 @@ - + diff --git a/dist/recline.js b/dist/recline.js index c439eb96..6f2aab1d 100644 --- a/dist/recline.js +++ b/dist/recline.js @@ -1,312 +1,5 @@ this.recline = this.recline || {}; this.recline.Backend = this.recline.Backend || {}; -this.recline.Backend.CSV = this.recline.Backend.CSV || {}; - -// Note that provision of jQuery is optional (it is **only** needed if you use fetch on a remote file) -(function(my) { - "use strict"; - my.__type__ = 'csv'; - - // use either jQuery or Underscore Deferred depending on what is available - var Deferred = (typeof jQuery !== "undefined" && jQuery.Deferred) || _.Deferred; - - // ## fetch - // - // fetch supports 3 options depending on the attribute provided on the dataset argument - // - // 1. `dataset.file`: `file` is an HTML5 file object. This is opened and parsed with the CSV parser. - // 2. `dataset.data`: `data` is a string in CSV format. This is passed directly to the CSV parser - // 3. `dataset.url`: a url to an online CSV file that is ajax accessible (note this usually requires either local or on a server that is CORS enabled). The file is then loaded using jQuery.ajax and parsed using the CSV parser (NB: this requires jQuery) - // - // All options generates similar data and use the memory store outcome, that is they return something like: - // - //
-  // {
-  //   records: [ [...], [...], ... ],
-  //   metadata: { may be some metadata e.g. file name }
-  //   useMemoryStore: true
-  // }
-  // 
- my.fetch = function(dataset) { - var dfd = new Deferred(); - if (dataset.file) { - var reader = new FileReader(); - var encoding = dataset.encoding || 'UTF-8'; - reader.onload = function(e) { - var out = my.extractFields(my.parseCSV(e.target.result, dataset), dataset); - out.useMemoryStore = true; - out.metadata = { - filename: dataset.file.name - } - dfd.resolve(out); - }; - reader.onerror = function (e) { - alert('Failed to load file. Code: ' + e.target.error.code); - }; - reader.readAsText(dataset.file, encoding); - } else if (dataset.data) { - var out = my.extractFields(my.parseCSV(dataset.data, dataset), dataset); - out.useMemoryStore = true; - dfd.resolve(out); - } else if (dataset.url) { - jQuery.get(dataset.url).done(function(data) { - var out = my.extractFields(my.parseCSV(data, dataset), dataset); - out.useMemoryStore = true; - dfd.resolve(out); - }); - } - return dfd.promise(); - }; - - // Convert array of rows in { records: [ ...] , fields: [ ... ] } - // @param {Boolean} noHeaderRow If true assume that first row is not a header (i.e. list of fields but is data. - my.extractFields = function(rows, noFields) { - if (noFields.noHeaderRow !== true && rows.length > 0) { - return { - fields: rows[0], - records: rows.slice(1) - } - } else { - return { - records: rows - } - } - }; - - // ## parseCSV - // - // Converts a Comma Separated Values string into an array of arrays. - // Each line in the CSV becomes an array. - // - // Empty fields are converted to nulls and non-quoted numbers are converted to integers or floats. - // - // @return The CSV parsed as an array - // @type Array - // - // @param {String} s The string to convert - // @param {Object} options Options for loading CSV including - // @param {Boolean} [trim=false] If set to True leading and trailing - // whitespace is stripped off of each non-quoted field as it is imported - // @param {String} [delimiter=','] A one-character string used to separate - // fields. 
It defaults to ',' - // @param {String} [quotechar='"'] A one-character string used to quote - // fields containing special characters, such as the delimiter or - // quotechar, or which contain new-line characters. It defaults to '"' - // - // @param {Integer} skipInitialRows A integer number of rows to skip (default 0) - // - // Heavily based on uselesscode's JS CSV parser (MIT Licensed): - // http://www.uselesscode.org/javascript/csv/ - my.parseCSV= function(s, options) { - // Get rid of any trailing \n - s = chomp(s); - - var options = options || {}; - var trm = (options.trim === false) ? false : true; - var delimiter = options.delimiter || ','; - var quotechar = options.quotechar || '"'; - - var cur = '', // The character we are currently processing. - inQuote = false, - fieldQuoted = false, - field = '', // Buffer for building up the current field - row = [], - out = [], - i, - processField; - - processField = function (field) { - if (fieldQuoted !== true) { - // If field is empty set to null - if (field === '') { - field = null; - // If the field was not quoted and we are trimming fields, trim it - } else if (trm === true) { - field = trim(field); - } - - // Convert unquoted numbers to their appropriate types - if (rxIsInt.test(field)) { - field = parseInt(field, 10); - } else if (rxIsFloat.test(field)) { - field = parseFloat(field, 10); - } - } - return field; - }; - - for (i = 0; i < s.length; i += 1) { - cur = s.charAt(i); - - // If we are at a EOF or EOR - if (inQuote === false && (cur === delimiter || cur === "\n")) { - field = processField(field); - // Add the current field to the current row - row.push(field); - // If this is EOR append row to output and flush row - if (cur === "\n") { - out.push(row); - row = []; - } - // Flush the field buffer - field = ''; - fieldQuoted = false; - } else { - // If it's not a quotechar, add it to the field buffer - if (cur !== quotechar) { - field += cur; - } else { - if (!inQuote) { - // We are not in a quote, start 
a quote - inQuote = true; - fieldQuoted = true; - } else { - // Next char is quotechar, this is an escaped quotechar - if (s.charAt(i + 1) === quotechar) { - field += quotechar; - // Skip the next char - i += 1; - } else { - // It's not escaping, so end quote - inQuote = false; - } - } - } - } - } - - // Add the last field - field = processField(field); - row.push(field); - out.push(row); - - // Expose the ability to discard initial rows - if (options.skipInitialRows) out = out.slice(options.skipInitialRows); - - return out; - }; - - // ## serializeCSV - // - // Convert an Object or a simple array of arrays into a Comma - // Separated Values string. - // - // Nulls are converted to empty fields and integers or floats are converted to non-quoted numbers. - // - // @return The array serialized as a CSV - // @type String - // - // @param {Object or Array} dataToSerialize The Object or array of arrays to convert. Object structure must be as follows: - // - // { - // fields: [ {id: .., ...}, {id: ..., - // records: [ { record }, { record }, ... ] - // ... // more attributes we do not care about - // } - // - // @param {object} options Options for serializing the CSV file including - // delimiter and quotechar (see parseCSV options parameter above for - // details on these). - // - // Heavily based on uselesscode's JS CSV serializer (MIT Licensed): - // http://www.uselesscode.org/javascript/csv/ - my.serializeCSV= function(dataToSerialize, options) { - var a = null; - if (dataToSerialize instanceof Array) { - a = dataToSerialize; - } else { - a = []; - var fieldNames = _.pluck(dataToSerialize.fields, 'id'); - a.push(fieldNames); - _.each(dataToSerialize.records, function(record, index) { - var tmp = _.map(fieldNames, function(fn) { - return record[fn]; - }); - a.push(tmp); - }); - } - var options = options || {}; - var delimiter = options.delimiter || ','; - var quotechar = options.quotechar || '"'; - - var cur = '', // The character we are currently processing. 
- field = '', // Buffer for building up the current field - row = '', - out = '', - i, - j, - processField; - - processField = function (field) { - if (field === null) { - // If field is null set to empty string - field = ''; - } else if (typeof field === "string" && rxNeedsQuoting.test(field)) { - // Convert string to delimited string - field = quotechar + field + quotechar; - } else if (typeof field === "number") { - // Convert number to string - field = field.toString(10); - } - - return field; - }; - - for (i = 0; i < a.length; i += 1) { - cur = a[i]; - - for (j = 0; j < cur.length; j += 1) { - field = processField(cur[j]); - // If this is EOR append row to output and flush row - if (j === (cur.length - 1)) { - row += field; - out += row + "\n"; - row = ''; - } else { - // Add the current field to the current row - row += field + delimiter; - } - // Flush the field buffer - field = ''; - } - } - - return out; - }; - - var rxIsInt = /^\d+$/, - rxIsFloat = /^\d*\.\d+$|^\d+\.\d*$/, - // If a string has leading or trailing space, - // contains a comma double quote or a newline - // it needs to be quoted in CSV output - rxNeedsQuoting = /^\s|\s$|,|"|\n/, - trim = (function () { - // Fx 3.1 has a native trim function, it's about 10x faster, use it if it exists - if (String.prototype.trim) { - return function (s) { - return s.trim(); - }; - } else { - return function (s) { - return s.replace(/^\s*/, '').replace(/\s*$/, ''); - }; - } - }()); - - function chomp(s) { - if (s.charAt(s.length - 1) !== "\n") { - // Does not end with \n, just return string - return s; - } else { - // Remove the \n - return s.substring(0, s.length - 1); - } - } - - -}(this.recline.Backend.CSV)); -this.recline = this.recline || {}; -this.recline.Backend = this.recline.Backend || {}; this.recline.Backend.DataProxy = this.recline.Backend.DataProxy || {}; (function(my) { @@ -2813,7 +2506,7 @@ this.recline.View = this.recline.View || {}; // Manage multiple views together along with query editor 
etc. Usage: // //
-// var myExplorer = new model.recline.MultiView({
+// var myExplorer = new recline.View.MultiView({
 //   model: {{recline.Model.Dataset instance}}
 //   el: {{an existing dom element}}
 //   views: {{dataset views}}
@@ -2863,7 +2556,7 @@ this.recline.View = this.recline.View || {};
 //   {
 //     id: 'filterEditor', // used for routing
 //     label: 'Filters', // used for view switcher
-//     view: new recline.View.FielterEditor({
+//     view: new recline.View.FilterEditor({
 //       model: dataset
 //     })
 //   },
@@ -2881,10 +2574,10 @@ this.recline.View = this.recline.View || {};
 //  special as it includes config of many of the subviews.
 //
 // 
-// state = {
+// var state = {
 //     query: {dataset query state - see dataset.queryState object}
-//     view-{id1}: {view-state for this view}
-//     view-{id2}: {view-state for }
+//     'view-{id1}': {view-state for this view}
+//     'view-{id2}': {view-state for }
 //     ...
 //     // Explorer
 //     currentView: id of current view (defaults to first view if not specified)
diff --git a/docs/backends.markdown b/docs/backends.markdown
index 527f9014..09adb183 100644
--- a/docs/backends.markdown
+++ b/docs/backends.markdown
@@ -20,17 +20,25 @@ on the backend.
 
 Backends come in 2 flavours:
 
-1. Loader backends - only implement fetch method. The data is then cached in a Memory.Store on the Dataset and interacted with there. This is best for sources which just allow you to load data or where you want to load the data once and work with it locally.
-2. Store backends - these support fetch, query and, if write-enabled, save. These are suitable where the backend contains a lot of data (infeasible to load locally - for examples a million rows) or where the backend has capabilities you want to take advantage of.
+* Loader backends - only implement fetch method. The data is then cached in a
+  Memory.Store on the Dataset and interacted with there. This is best for
+  sources which just allow you to load data or where you want to load the data
+  once and work with it locally.
+* Store backends - these support fetch, query and, if write-enabled, save.
+  These are suitable where the backend contains a lot of data (infeasible to
+  load locally - for examples a million rows) or where the backend has
+  capabilities you want to take advantage of.
 
-# List of Backends Shipped with Recline
+Examples of the 2 types of backends are provided by the Google docs backend (a
+"Loader" backend) and the ElasticSearch backend (a Store backend).
 
-{% include backend-list.html %}
+# Available Backends
 
-NB: examples of the 2 types of backends are provided by the Google docs backend (a "Loader" backend) and the ElasticSearch backend (a Store backend).
-
-It's easy to write your own backend - you just need to implement the API as described below.
+You can find a list of the available Backends along with examples of how to use
+them in the [Backends Tutorial](tutorial-backends.html).
 
+Note that it's easy to write your own backend - you just need to implement the
+Recline Backend API described below.
 
 # Backend API
 
diff --git a/docs/index.html b/docs/index.html
index 94b500d5..c30d8c3f 100644
--- a/docs/index.html
+++ b/docs/index.html
@@ -52,10 +52,6 @@ root: ../
       
  • Models
  • -
    -

    Backends

    - {% include backend-list.html %} -

    Dataset Views and Widgets

      diff --git a/docs/tutorial-backends.markdown b/docs/tutorial-backends.markdown index 1b27fb0a..f64abd3d 100644 --- a/docs/tutorial-backends.markdown +++ b/docs/tutorial-backends.markdown @@ -14,26 +14,25 @@ sources such as Google Docs or the DataHub using Recline
    - -
    -

    Note: often you are loading data from a given source in -order to load it into a Recline Dataset and display it in a View. However, you -can also happily use a Backend to load data on its own without using any other -part of the Recline library as all the Backends are designed to have no -dependency on other parts of Recline.

    -
    - ## Overview -Backends connect Dataset and Documents to data from a specific 'Backend' data -source. They provide methods for loading and saving Datasets and individuals +Backends connect Recline Datasets to data from a specific 'Backend' data +source. + +They provide methods for loading and saving Datasets and individuals Documents as well as for bulk loading via a query API and doing bulk transforms on the backend. Backends come in 2 flavours: -1. Loader backends - only implement fetch method. The data is then cached in a Memory.Store on the Dataset and interacted with there. This is best for sources which just allow you to load data or where you want to load the data once and work with it locally. -2. Store backends - these support fetch, query and, if write-enabled, save. These are suitable where the backend contains a lot of data (infeasible to load locally - for examples a million rows) or where the backend has capabilities you want to take advantage of. +* Loader backends - only implement fetch method. The data is then cached in a + Memory.Store on the Dataset and interacted with there. This is best for + sources which just allow you to load data or where you want to load the data + once and work with it locally. +* Store backends - these support fetch, query and, if write-enabled, save. + These are suited to cases where the source datastore contains a lot of data + (infeasible to load locally - for examples a million rows) or where the + backend has, for example, query capabilities you want to take advantage of. ### Instantiation and Use @@ -58,13 +57,15 @@ How do you know the backend identifier for a given Backend? It's just the name of the 'class' in recline.Backend module (but case-insensitive). E.g. recline.Backend.ElasticSearch can be identified as 'ElasticSearch' or 'elasticsearch'.

    -

    What Backends are available from Recline? -{% include backend-list.html %} -

    -

    Backend you'd like to see not available? It's easy to write your own – see the Backend reference docs for details of the required API. -

    +## What Backends are available from Recline? + +{% include backend-list.html %} + +**Backend you'd like to see not available?** It's easy to write your own +– see the Backend reference docs for details +of the required API. ## Preparing your app @@ -76,8 +77,9 @@ much more limited if you are just using a Backend. Specifically: + - + @@ -91,13 +93,6 @@ Doc](https://docs.google.com/spreadsheet/ccc?key=0Aon3JiuouxLUdGZPaUZsMjBxeGhfOW For Recline to be able to access a Google Spreadsheet it **must** have been 'Published to the Web' (enabled via File -> Publish to the Web menu). -
    -Want a real world example? This Open Data Census micro-app loads -data from Google Docs and then displays it on a specialist interface combining -a bespoke chooser and a Kartograph (svg-only) map. -
    - {% highlight javascript %} // include the Recline backend for Google Docs @@ -131,6 +126,13 @@ For loading data from CSV files there are 3 cases: 2. CSV is on local disk -- if your browser supports HTML5 File API we can load the CSV file off disk 3. CSV is online but not on same domain -- use DataProxy (see below) +In all cases we'll need to have loaded the Recline CSV backend (for your own +app you'll probably want this locally): + +{% highlight html %} + +{% endhighlight %} + ### Local online CSV file Let's start with first case: loading a "local" online CSV file. We'll be using this [example file]({{page.root}}/demos/data/sample.csv). diff --git a/src/backend.csv.js b/src/backend.csv.js deleted file mode 100644 index 6d805827..00000000 --- a/src/backend.csv.js +++ /dev/null @@ -1,307 +0,0 @@ -this.recline = this.recline || {}; -this.recline.Backend = this.recline.Backend || {}; -this.recline.Backend.CSV = this.recline.Backend.CSV || {}; - -// Note that provision of jQuery is optional (it is **only** needed if you use fetch on a remote file) -(function(my) { - "use strict"; - my.__type__ = 'csv'; - - // use either jQuery or Underscore Deferred depending on what is available - var Deferred = (typeof jQuery !== "undefined" && jQuery.Deferred) || _.Deferred; - - // ## fetch - // - // fetch supports 3 options depending on the attribute provided on the dataset argument - // - // 1. `dataset.file`: `file` is an HTML5 file object. This is opened and parsed with the CSV parser. - // 2. `dataset.data`: `data` is a string in CSV format. This is passed directly to the CSV parser - // 3. `dataset.url`: a url to an online CSV file that is ajax accessible (note this usually requires either local or on a server that is CORS enabled). The file is then loaded using jQuery.ajax and parsed using the CSV parser (NB: this requires jQuery) - // - // All options generates similar data and use the memory store outcome, that is they return something like: - // - //
    -  // {
    -  //   records: [ [...], [...], ... ],
    -  //   metadata: { may be some metadata e.g. file name }
    -  //   useMemoryStore: true
    -  // }
    -  // 
    - my.fetch = function(dataset) { - var dfd = new Deferred(); - if (dataset.file) { - var reader = new FileReader(); - var encoding = dataset.encoding || 'UTF-8'; - reader.onload = function(e) { - var out = my.extractFields(my.parseCSV(e.target.result, dataset), dataset); - out.useMemoryStore = true; - out.metadata = { - filename: dataset.file.name - } - dfd.resolve(out); - }; - reader.onerror = function (e) { - alert('Failed to load file. Code: ' + e.target.error.code); - }; - reader.readAsText(dataset.file, encoding); - } else if (dataset.data) { - var out = my.extractFields(my.parseCSV(dataset.data, dataset), dataset); - out.useMemoryStore = true; - dfd.resolve(out); - } else if (dataset.url) { - jQuery.get(dataset.url).done(function(data) { - var out = my.extractFields(my.parseCSV(data, dataset), dataset); - out.useMemoryStore = true; - dfd.resolve(out); - }); - } - return dfd.promise(); - }; - - // Convert array of rows in { records: [ ...] , fields: [ ... ] } - // @param {Boolean} noHeaderRow If true assume that first row is not a header (i.e. list of fields but is data. - my.extractFields = function(rows, noFields) { - if (noFields.noHeaderRow !== true && rows.length > 0) { - return { - fields: rows[0], - records: rows.slice(1) - } - } else { - return { - records: rows - } - } - }; - - // ## parseCSV - // - // Converts a Comma Separated Values string into an array of arrays. - // Each line in the CSV becomes an array. - // - // Empty fields are converted to nulls and non-quoted numbers are converted to integers or floats. - // - // @return The CSV parsed as an array - // @type Array - // - // @param {String} s The string to convert - // @param {Object} options Options for loading CSV including - // @param {Boolean} [trim=false] If set to True leading and trailing - // whitespace is stripped off of each non-quoted field as it is imported - // @param {String} [delimiter=','] A one-character string used to separate - // fields. 
// `my` is the namespace object supplied by the enclosing IIFE as
// this.recline.Backend.CSV. The guard below is a no-op inside the IIFE
// (the parameter is already an object) but lets this section also load
// standalone, e.g. under test, where no namespace exists.
var my = typeof my === "undefined" ? {} : my;

// Unquoted fields matching these patterns are coerced to Number when
// parsing. NOTE: this means an unquoted "007" parses as the number 7.
var rxIsInt = /^\d+$/;
var rxIsFloat = /^\d*\.\d+$|^\d+\.\d*$/;
// A serialized field needs quoting when it has leading or trailing
// whitespace, or contains a comma, a double quote or a newline.
// NOTE(review): the ',' and '"' here are fixed and do not follow a
// custom delimiter/quotechar option — preserved from the original.
var rxNeedsQuoting = /^\s|\s$|,|"|\n/;

// Use the native String#trim where available (about 10x faster); fall
// back to a regex pair on very old engines.
var trim = String.prototype.trim
  ? function (s) { return s.trim(); }
  : function (s) { return s.replace(/^\s*/, '').replace(/\s*$/, ''); };

// Remove a single trailing "\n" (if present) so the parser does not
// emit a spurious empty final row.
function chomp(s) {
  if (s.charAt(s.length - 1) !== "\n") {
    return s;
  }
  return s.substring(0, s.length - 1);
}

// ## parseCSV
//
// Converts a Comma Separated Values string into an array of arrays.
// Each line becomes one array; empty unquoted fields become null and
// unquoted numeric fields are coerced to Number.
//
// @return The CSV parsed as an array of row arrays.
// @type Array
//
// @param {String} s The string to convert.
// @param {Object} [options]
//   @param {Boolean} [options.trim=true] Trim whitespace around
//     unquoted fields (quoted fields are always kept verbatim).
//   @param {String} [options.delimiter=','] One-character field
//     separator.
//   @param {String} [options.quotechar='"'] One-character quote; a
//     doubled quotechar inside a quoted field is an escaped literal.
//   @param {Integer} [options.skipInitialRows=0] Number of leading
//     rows to discard from the result.
//
// Heavily based on uselesscode's JS CSV parser (MIT Licensed):
// http://www.uselesscode.org/javascript/csv/
my.parseCSV = function (s, options) {
  options = options || {};
  var trimFields = options.trim !== false; // trim by default
  var delimiter = options.delimiter || ',';
  var quotechar = options.quotechar || '"';

  // Drop a single trailing newline so it does not yield an empty row.
  s = chomp(s);

  var inQuote = false;     // currently inside a quoted region?
  var fieldQuoted = false; // was the current field ever quoted?
  var field = '';          // buffer for the field being built
  var row = [];
  var out = [];

  // Finalize a raw field buffer: quoted fields are kept verbatim;
  // unquoted fields become null when empty, are trimmed on request,
  // and are coerced to Number when they look numeric.
  var processField = function (f) {
    if (fieldQuoted) {
      return f;
    }
    if (f === '') {
      return null;
    }
    if (trimFields) {
      f = trim(f);
    }
    if (rxIsInt.test(f)) {
      return parseInt(f, 10);
    }
    if (rxIsFloat.test(f)) {
      return parseFloat(f); // parseFloat takes no radix argument
    }
    return f;
  };

  for (var i = 0; i < s.length; i += 1) {
    var cur = s.charAt(i);

    if (!inQuote && (cur === delimiter || cur === "\n")) {
      // End of field — and, on "\n", end of row.
      row.push(processField(field));
      if (cur === "\n") {
        out.push(row);
        row = [];
      }
      field = '';
      fieldQuoted = false;
    } else if (cur !== quotechar) {
      field += cur;
    } else if (!inQuote) {
      // Opening quote.
      inQuote = true;
      fieldQuoted = true;
    } else if (s.charAt(i + 1) === quotechar) {
      // Doubled quotechar inside quotes is an escaped literal.
      field += quotechar;
      i += 1; // consume the second half of the pair
    } else {
      // Closing quote.
      inQuote = false;
    }
  }

  // Flush the final field and row (input has no trailing newline here).
  row.push(processField(field));
  out.push(row);

  // Optionally discard header/preamble rows.
  if (options.skipInitialRows) {
    out = out.slice(options.skipInitialRows);
  }

  return out;
};

// ## serializeCSV
//
// Convert an Object (fields + records) or an array of arrays into a
// Comma Separated Values string.
//
// Nulls become empty fields and numbers are written unquoted. Fields
// needing quoting (see rxNeedsQuoting) are wrapped in quotechars and —
// fix — any embedded quotechar is doubled per RFC 4180, so that
// parseCSV(serializeCSV(x)) round-trips; the previous implementation
// left embedded quotes unescaped.
//
// @return The data serialized as a CSV string.
// @type String
//
// @param {Object|Array} dataToSerialize Either an array of row arrays,
//   or an object of the form:
//
//   {
//     fields:  [ {id: ...}, {id: ...}, ... ],
//     records: [ { record }, { record }, ... ]
//     ... // other attributes are ignored
//   }
//
// @param {Object} [options] delimiter and quotechar (see parseCSV).
//
// Heavily based on uselesscode's JS CSV serializer (MIT Licensed):
// http://www.uselesscode.org/javascript/csv/
my.serializeCSV = function (dataToSerialize, options) {
  options = options || {};
  var delimiter = options.delimiter || ',';
  var quotechar = options.quotechar || '"';

  var rows;
  if (dataToSerialize instanceof Array) {
    rows = dataToSerialize;
  } else {
    // Object form: first row is the field ids, then one row per record
    // with values picked in field order.
    var fieldNames = _.pluck(dataToSerialize.fields, 'id');
    rows = [fieldNames];
    _.each(dataToSerialize.records, function (record) {
      rows.push(_.map(fieldNames, function (fn) {
        return record[fn];
      }));
    });
  }

  // Render one cell: null -> empty, number -> decimal string, string
  // needing quoting -> quoted with embedded quotechars doubled.
  var processField = function (field) {
    if (field === null) {
      return '';
    }
    if (typeof field === 'number') {
      return field.toString(10);
    }
    if (typeof field === 'string' && rxNeedsQuoting.test(field)) {
      // RFC 4180: escape embedded quotechars by doubling them, then
      // wrap the field. This mirrors parseCSV's escape handling.
      return quotechar +
        field.split(quotechar).join(quotechar + quotechar) +
        quotechar;
    }
    return field;
  };

  var out = '';
  for (var i = 0; i < rows.length; i += 1) {
    var line = '';
    for (var j = 0; j < rows[i].length; j += 1) {
      line += processField(rows[i][j]);
      if (j < rows[i].length - 1) {
        line += delimiter;
      }
    }
    // Match the original: a zero-length row contributes nothing at
    // all (not even a newline).
    if (rows[i].length > 0) {
      out += line + "\n";
    }
  }

  return out;
};
""ABC"" O\'Brien",11:35\n' + - '"Other, AN",12:35\n'; - - var array = recline.Backend.CSV.parseCSV(csv); - var exp = [ - ['Jones, Jay', 10], - ['Xyz "ABC" O\'Brien', '11:35' ], - ['Other, AN', '12:35' ] - ]; - deepEqual(exp, array); - - var csv = '"Jones, Jay", 10\n' + - '"Xyz ""ABC"" O\'Brien", 11:35\n' + - '"Other, AN", 12:35\n'; - var array = recline.Backend.CSV.parseCSV(csv, {trim : true}); - deepEqual(exp, array); - - var csv = 'Name, Value\n' + - '"Jones, Jay", 10\n' + - '"Xyz ""ABC"" O\'Brien", 11:35\n' + - '"Other, AN", 12:35\n'; - var dataset = new recline.Model.Dataset({ - data: csv, - backend: 'csv' - }); - dataset.fetch(); - equal(dataset.records.length, 3); - var row = dataset.records.models[0].toJSON(); - deepEqual(row, {Name: 'Jones, Jay', Value: 10}); -}); - -test("parseCSV - semicolon", function() { - var csv = '"Jones; Jay";10\n' + - '"Xyz ""ABC"" O\'Brien";11:35\n' + - '"Other; AN";12:35\n'; - - var array = recline.Backend.CSV.parseCSV(csv, {delimiter : ';'}); - var exp = [ - ['Jones; Jay', 10], - ['Xyz "ABC" O\'Brien', '11:35' ], - ['Other; AN', '12:35' ] - ]; - deepEqual(exp, array); - -}); - -test("parseCSV - quotechar", function() { - var csv = "'Jones, Jay',10\n" + - "'Xyz \"ABC\" O''Brien',11:35\n" + - "'Other; AN',12:35\n"; - - var array = recline.Backend.CSV.parseCSV(csv, {quotechar:"'"}); - var exp = [ - ["Jones, Jay", 10], - ["Xyz \"ABC\" O'Brien", "11:35" ], - ["Other; AN", "12:35" ] - ]; - deepEqual(exp, array); - -}); - -test("parseCSV skipInitialRows", function() { - var csv = '"Jones, Jay",10\n' + - '"Xyz ""ABC"" O\'Brien",11:35\n' + - '"Other, AN",12:35\n'; - - var array = recline.Backend.CSV.parseCSV(csv, {skipInitialRows: 1}); - var exp = [ - ['Xyz "ABC" O\'Brien', '11:35' ], - ['Other, AN', '12:35' ] - ]; - deepEqual(exp, array); -}); - -test("serializeCSV - Array", function() { - var csv = [ - ['Jones, Jay', 10], - ['Xyz "ABC" O\'Brien', '11:35' ], - ['Other, AN', '12:35' ] - ]; - - var array = 
recline.Backend.CSV.serializeCSV(csv); - var exp = '"Jones, Jay",10\n' + - '"Xyz \"ABC\" O\'Brien",11:35\n' + - '"Other, AN",12:35\n'; - deepEqual(array, exp); -}); - -test("serializeCSV - Object", function() { - var indata = { - fields: [ {id: 'name'}, {id: 'number'}], - records: [ - {name: 'Jones, Jay', number: 10}, - {name: 'Xyz "ABC" O\'Brien', number: '11:35' }, - {name: 'Other, AN', number: '12:35' } - ] - }; - - var array = recline.Backend.CSV.serializeCSV(indata); - var exp = 'name,number\n' + - '"Jones, Jay",10\n' + - '"Xyz \"ABC\" O\'Brien",11:35\n' + - '"Other, AN",12:35\n'; - deepEqual(array, exp); -}); - -})(this.jQuery);