Merge pull request #44 from CodeforLeipzig/fix-rubocop-offenses

Fix rubocop offenses
Joerg Reichert 2020-03-11 20:10:05 +01:00 committed by GitHub
commit 155b57d58a
63 changed files with 604 additions and 613 deletions

.rubocop.yml (new file, 25 lines)
View file

@ -0,0 +1,25 @@
inherit_from: .rubocop_todo.yml
AllCops:
TargetRubyVersion: 2.5
Exclude:
- 'bin/**/*'
- 'db/schema.rb'
- 'vendor/**/*'
Style/Documentation:
Enabled: false
Style/FrozenStringLiteralComment:
Enabled: true
Exclude:
- 'config.ru'
- 'Gemfile'
- 'Rakefile'
- 'app/views/**/*'
- 'config/**/*'
- 'db/**/*'
- 'lib/tasks/**/*'
Style/IfUnlessModifier:
Enabled: false
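
For reference: the "# frozen_string_literal: true" magic comment enforced by Style/FrozenStringLiteralComment (and added at the top of most Ruby files in this commit) makes every string literal in that file immutable. A minimal sketch of the effect:

    # frozen_string_literal: true

    greeting = 'hello'
    greeting.frozen?            # => true
    greeting << '!'             # raises FrozenError (can't modify frozen String)
    String.new('hello') << '!'  # an explicitly built string stays mutable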

.rubocop_todo.yml (new file, 73 lines)
View file

@ -0,0 +1,73 @@
# This configuration was generated by
# `rubocop --auto-gen-config`
# on 2020-03-09 14:56:53 +0100 using RuboCop version 0.80.1.
# The point is for the user to remove these configuration records
# one by one as the offenses are removed from the code base.
# Note that changes in the inspected code, or installation of new
# versions of RuboCop, may require this file to be generated again.
# Offense count: 1
Lint/InterpolationCheck:
Exclude:
- 'app/controllers/search_controller.rb'
# Offense count: 1
# Cop supports --auto-correct.
Lint/SendWithMixinArgument:
Exclude:
- 'config/initializers/kaminari_config.rb'
# Offense count: 4
Metrics/AbcSize:
Max: 58
# Offense count: 8
# Configuration parameters: CountComments, ExcludedMethods.
# ExcludedMethods: refine
Metrics/BlockLength:
Max: 203
# Offense count: 1
Metrics/CyclomaticComplexity:
Max: 9
# Offense count: 4
# Configuration parameters: CountComments, ExcludedMethods.
Metrics/MethodLength:
Max: 57
# Offense count: 1
Metrics/PerceivedComplexity:
Max: 10
# Offense count: 1
# Configuration parameters: MinNameLength, AllowNamesEndingInNumbers, AllowedNames, ForbiddenNames.
# AllowedNames: io, id, to, by, on, in, at, ip, db, os, pp
Naming/MethodParameterName:
Exclude:
- 'app/models/paper_search.rb'
# Offense count: 2
# Configuration parameters: EnforcedStyle.
# SupportedStyles: snake_case, normalcase, non_integer
Naming/VariableNumber:
Exclude:
- 'spec/features/search_filters_spec.rb'
# Offense count: 1
Style/DoubleNegation:
Exclude:
- 'app/controllers/application_controller.rb'
# Offense count: 2
Style/MultilineTernaryOperator:
Exclude:
- 'app/controllers/search_controller.rb'
- 'spec/features/basic_search_spec.rb'
# Offense count: 57
# Cop supports --auto-correct.
# Configuration parameters: AutoCorrect, AllowHeredoc, AllowURI, URISchemes, IgnoreCopDirectives, IgnoredPatterns.
# URISchemes: http, https
Layout/LineLength:
Max: 148
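
The Lint/InterpolationCheck exclusion above covers search_controller.rb, whose gsub call (visible in the controller diff further down) places #{...} inside a single-quoted string; single quotes never interpolate, which is exactly what this cop flags. A small illustration:

    name = 'AND'
    'prefix #{name} suffix'   # single quotes: the #{name} stays literal text
    "prefix #{name} suffix"   # double quotes: => "prefix AND suffix"

As the header comments say, the whole todo file is meant to shrink entry by entry as the underlying offenses are fixed and the config is regenerated.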

View file

@ -59,6 +59,7 @@ group :development, :test do
gem 'capybara'
gem 'puma'
gem 'launchy'
gem 'rubocop'
end
group :test do

View file

@ -329,6 +329,7 @@ DEPENDENCIES
rails-controller-testing
rails-i18n
rspec-rails (~> 3.0)
rubocop
rubocop-faker
sass-rails
sdoc (~> 0.4.0)

View file

@ -1,3 +1,5 @@
# frozen_string_literal: true
# A sample Guardfile
# More info at https://github.com/guard/guard#readme
@ -24,8 +26,8 @@
# * zeus: 'zeus rspec' (requires the server to be started separately)
# * 'just' rspec: 'rspec'
guard :rspec, cmd: "bin/rspec" do
require "guard/rspec/dsl"
guard :rspec, cmd: 'bin/rspec' do
require 'guard/rspec/dsl'
dsl = Guard::RSpec::Dsl.new(self)
# Feel free to open issues for suggestions and improvements
@ -41,15 +43,15 @@ guard :rspec, cmd: "bin/rspec" do
dsl.watch_spec_files_for(ruby.lib_files)
# Rails files
rails = dsl.rails(view_extensions: %w(erb haml slim))
rails = dsl.rails(view_extensions: %w[erb haml slim])
dsl.watch_spec_files_for(rails.app_files)
dsl.watch_spec_files_for(rails.views)
watch(rails.controllers) do |m|
[
rspec.spec.("routing/#{m[1]}_routing"),
rspec.spec.("controllers/#{m[1]}_controller"),
rspec.spec.("acceptance/#{m[1]}")
rspec.spec.call("routing/#{m[1]}_routing"),
rspec.spec.call("controllers/#{m[1]}_controller"),
rspec.spec.call("acceptance/#{m[1]}")
]
end
@ -59,12 +61,12 @@ guard :rspec, cmd: "bin/rspec" do
watch(rails.app_controller) { "#{rspec.spec_dir}/controllers" }
# Capybara features specs
watch(rails.view_dirs) { |m| rspec.spec.("features/#{m[1]}") }
watch(rails.layouts) { |m| rspec.spec.("features/#{m[1]}") }
watch(rails.view_dirs) { |m| rspec.spec.call("features/#{m[1]}") }
watch(rails.layouts) { |m| rspec.spec.call("features/#{m[1]}") }
# Turnip features and steps
watch(%r{^spec/acceptance/(.+)\.feature$})
watch(%r{^spec/acceptance/steps/(.+)_steps\.rb$}) do |m|
Dir[File.join("**/#{m[1]}.feature")][0] || "spec/acceptance"
Dir[File.join("**/#{m[1]}.feature")][0] || 'spec/acceptance'
end
end
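
The Guardfile change above swaps the implicit call sugar rspec.spec.(...) for the explicit rspec.spec.call(...); both invoke the same proc, the explicit form is simply what RuboCop's default style (Style/LambdaCall) prefers. For illustration:

    spec = ->(path) { "spec/#{path}_spec.rb" }

    spec.("models/paper")      # implicit call syntax
    spec.call("models/paper")  # same result: "spec/models/paper_spec.rb"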

View file

@ -1,6 +1,6 @@
# Add your own tasks in files placed in lib/tasks ending in .rake,
# for example lib/tasks/capistrano.rake, and they will automatically be available to Rake.
require File.expand_path('../config/application', __FILE__)
require File.expand_path('config/application', __dir__)
Rails.application.load_tasks
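
The File.expand_path rewrite above, repeated in several config and spec files further down, trades the '../x', __FILE__ idiom for 'x', __dir__; both resolve to the same absolute path, the second just drops the extra ../ hop. A sketch, assuming the file shown is the project Rakefile living at /app/Rakefile:

    File.expand_path('../config/application', __FILE__)  # /app/Rakefile/../config/application => /app/config/application
    File.expand_path('config/application', __dir__)      # /app + config/application           => /app/config/application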

View file

@ -1,3 +1,5 @@
# frozen_string_literal: true
class ApplicationController < ActionController::Base
# Prevent CSRF attacks by raising an exception.
# For APIs, you may want to use :null_session instead.
@ -12,11 +14,11 @@ class ApplicationController < ActionController::Base
end
def glossary
render action: "glossary"
render action: 'glossary'
end
def impressum
render action: "impressum"
render action: 'impressum'
end
helper_method :current_user, :signed_in?

View file

@ -1,6 +1,7 @@
class GeoController < ApplicationController
# frozen_string_literal: true
class GeoController < ApplicationController
def index
render action: "index"
render action: 'index'
end
end

View file

@ -1,11 +1,13 @@
# frozen_string_literal: true
class ImportController < ApplicationController
skip_before_action :verify_authenticity_token, :only => [:new_papers_callback]
skip_before_action :verify_authenticity_token, only: [:new_papers_callback]
def new_papers_callback
require 'open-uri'
api_key = Rails.application.config_for(:morph)["key"]
api_key = Rails.application.config_for(:morph)['key']
uri = URI.parse("https://api.morph.io/jrlover/city_council_leipzig_recent_papers/data.json?key=#{api_key}&query=select%20*%20from%20%27data%27")
Paper.import_from_json(uri.read)
render :nothing => true
render nothing: true
end
end
end

View file

@ -1,14 +1,15 @@
SearchFacet = Struct.new("SearchFacet", :term, :count) do
# frozen_string_literal: true
SearchFacet = Struct.new('SearchFacet', :term, :count) do
def term_with_count
"#{term} (#{count})"
end
end
class SearchController < ApplicationController
def index
@search_definition = PaperSearch.new(search_params)
@search_definition.sort_by ||= "date"
@search_definition.sort_by ||= 'date'
execute_search
end
@ -16,7 +17,7 @@ class SearchController < ApplicationController
def show
@search_definition = PaperSearch.find params[:id]
execute_search
render action: "index"
render action: 'index'
end
private
@ -24,35 +25,35 @@ class SearchController < ApplicationController
def execute_search
@response = Paper.search(@search_definition.to_definition)
@papers = @response.page(params[:page]).results
@sub = Hash.new
@sub = {}
@papers.each do |paper|
unless paper.reference.nil? && paper.reference.contains("-")
segments = paper.reference.split("-")
id = ((paper.reference.start_with?("VI-") || paper.reference.start_with?("VII-")) && segments.count > 2 ?
segments[2] : segments[1])
escaped_chars = Regexp.escape('\\+-*:()[]{}&!?^|\/')
sanitized_id = id.gsub(/([#{escaped_chars}])/, '\\\\\1')
['AND', 'OR', 'NOT'].each do |reserved|
escaped_reserved = reserved.split('').map { |c| "\\#{c}" }.join('')
sanitized_id = sanitized_id.gsub('/\s*\b(#{reserved.upcase})\b\s*/',
" #{escaped_reserved} ")
end
@sub_search_definition = Elasticsearch::DSL::Search.search do
query do
query_string do
query "*" + sanitized_id + "*"
fields ["reference"]
end
end
next if paper.reference.nil? && paper.reference.contains('-')
sort do
by :published_at, order: 'desc'
by :reference, order: 'desc'
segments = paper.reference.split('-')
id = ((paper.reference.start_with?('VI-') || paper.reference.start_with?('VII-')) && segments.count > 2 ?
segments[2] : segments[1])
escaped_chars = Regexp.escape('\\+-*:()[]{}&!?^|\/')
sanitized_id = id.gsub(/([#{escaped_chars}])/, '\\\\\1')
%w[AND OR NOT].each do |reserved|
escaped_reserved = reserved.split('').map { |c| "\\#{c}" }.join('')
sanitized_id = sanitized_id.gsub('/\s*\b(#{reserved.upcase})\b\s*/',
" #{escaped_reserved} ")
end
@sub_search_definition = Elasticsearch::DSL::Search.search do
query do
query_string do
query '*' + sanitized_id + '*'
fields ['reference']
end
end
@sub_papers = Paper.search(@sub_search_definition)
@sub[paper.reference] = @sub_papers
sort do
by :published_at, order: 'desc'
by :reference, order: 'desc'
end
end
@sub_papers = Paper.search(@sub_search_definition)
@sub[paper.reference] = @sub_papers
end
@paper_type_facets = extract_facets('paper_types')
@originator_facets = extract_facets('originators')
@ -63,9 +64,8 @@ class SearchController < ApplicationController
end
def extract_facets(name)
@response.
response['aggregations'][name.to_s][name.to_s]['buckets'].
map {|m| SearchFacet.new(m['key'], m['doc_count'])}
@response
.response['aggregations'][name.to_s][name.to_s]['buckets']
.map { |m| SearchFacet.new(m['key'], m['doc_count']) }
end
end

View file

@ -1,26 +1,27 @@
# frozen_string_literal: true
module SearchHelper
def facet_list(facets)
return unless facets.present?
content_tag(:ul) do
facets.each do |facet|
concat content_tag(:li,
"#{facet.term} (#{facet.count})",
class: "facet"
)
class: 'facet')
end
end
end
def filter_select(builder, name, desc, facets, selected)
capture do
concat(builder.label name, desc)
concat(builder.label(name, desc))
concat(
builder.select name,
options_from_collection_for_select(facets, :term, :term_with_count, selected),
{ include_blank: true },
{ onchange: "this.form.submit();" }
builder.select(name,
options_from_collection_for_select(facets, :term, :term_with_count, selected),
{ include_blank: true },
{ onchange: 'this.form.submit();' })
)
end
end
end

View file

View file

@ -1,3 +1,5 @@
# frozen_string_literal: true
class Importer < ActiveRecord::Base
validates :url, presence: true, uniqueness: true
end

View file

@ -1,3 +1,5 @@
# frozen_string_literal: true
require 'elasticsearch/model'
require 'json'
require 'parseable_date_validator'
@ -9,7 +11,7 @@ class Paper < ActiveRecord::Base
validates :name, presence: true, length: { maximum: 1000 }
validates :url, presence: true,
length: { maximum: 1000 },
uniqueness: true, # TODO use unique index instead
uniqueness: true, # TODO: use unique index instead
url: true
validates :reference, presence: true, length: { maximum: 100 }
validates :body, presence: true, length: { maximum: 100 }
@ -19,45 +21,46 @@ class Paper < ActiveRecord::Base
validates :published_at, presence: true, parseable_date: true
validates :resolution, length: { maximum: 30_000 }
index_name ['srm', Rails.env, self.base_class.to_s.pluralize.underscore].join('_')
index_name ['srm', Rails.env, base_class.to_s.pluralize.underscore].join('_')
settings index: {
number_of_shards: 1,
analysis: {
filter: {
german_stop: {
type: "stop",
stopwords: "_german_"
type: 'stop',
stopwords: '_german_'
},
german_stemmer: {
type: "stemmer",
language: "light_german"
type: 'stemmer',
language: 'light_german'
},
german_decompounder: {
type: "hyphenation_decompounder",
word_list_path: "analysis/dictionary-de.txt",
hyphenation_patterns_path: "analysis/de_DR.xml",
type: 'hyphenation_decompounder',
word_list_path: 'analysis/dictionary-de.txt',
hyphenation_patterns_path: 'analysis/de_DR.xml',
only_longest_match: true,
min_subword_size: 4
},
}
},
analyzer: {
german: {
tokenizer: "standard",
filter: [
"lowercase",
"german_stop",
"german_decompounder",
"german_normalization",
"german_stemmer"
tokenizer: 'standard',
filter: %w[
lowercase
german_stop
german_decompounder
german_normalization
german_stemmer
]
}
}
}
} do mappings dynamic: false do
indexes :name, type: :text, analyzer: "german"
indexes :content, type: :text, analyzer: "german"
indexes :resolution, type: :text, analyzer: "german"
} do
mappings dynamic: false do
indexes :name, type: :text, analyzer: 'german'
indexes :content, type: :text, analyzer: 'german'
indexes :resolution, type: :text, analyzer: 'german'
indexes :reference, type: :keyword, index: true
indexes :paper_type, type: :keyword, index: true
indexes :published_at, type: :date, index: true
@ -66,10 +69,10 @@ class Paper < ActiveRecord::Base
end
def split_originator
originator.split(/\d\.\s/).reject {|s| s.blank?} || originator
originator.split(/\d\.\s/).reject(&:blank?) || originator
end
def as_indexed_json(options={})
def as_indexed_json(_options = {})
as_json.merge(originator: split_originator)
end
@ -86,7 +89,7 @@ class Paper < ActiveRecord::Base
paper_type: record['paper_type'],
published_at: record['published_at'],
reference: record['reference'],
url: record['url'],
url: record['url']
}
record = find_or_initialize_by(url: attributes[:url])
record.update_attributes(attributes)
@ -104,8 +107,7 @@ class Paper < ActiveRecord::Base
def reset_index!
__elasticsearch__.create_index! force: true
all.each {|p| p.__elasticsearch__.index_document }
all.each { |p| p.__elasticsearch__.index_document }
end
end
end
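
The split_originator change above leans on Ruby's symbol-to-proc shorthand: reject(&:blank?) builds the same block as reject { |s| s.blank? }, which is what Style/SymbolProc suggests. For illustration (String#blank? comes from ActiveSupport):

    values = ['CDU-Fraktion', '', '  ', 'Oberbürgermeister']

    values.reject { |s| s.blank? }  # => ["CDU-Fraktion", "Oberbürgermeister"]
    values.reject(&:blank?)         # => same result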

View file

@ -1,39 +1,43 @@
class PaperSearch < ActiveRecord::Base
# frozen_string_literal: true
class PaperSearch < ActiveRecord::Base
def to_definition
options = {paper_type: paper_type, originator: originator, sort_by: sort_by}
options = { paper_type: paper_type, originator: originator, sort_by: sort_by }
PaperSearch.definition(query, options)
end
def self.definition(q, options={})
def self.definition(q, options = {})
Elasticsearch::DSL::Search.search do
sort do
if options[:sort_by] == 'score'
by '_score'
end
by '_score' if options[:sort_by] == 'score'
by :published_at, order: 'desc'
end
query do
# search query
unless q.blank?
if q.blank?
match_all
else
multi_match do
query q
fields ["name", "content"]
fields %w[name content]
end
else
match_all
end
end
# apply filter after aggregations
post_filter do
bool do
must { term paper_type: options[:paper_type] } if options[:paper_type].present?
must { term originator: options[:originator] } if options[:originator].present?
if options[:paper_type].present?
must { term paper_type: options[:paper_type] }
end
if options[:originator].present?
must { term originator: options[:originator] }
end
# catchall when no filters set
must { match_all } unless (options[:paper_type].present? || options[:originator].present?)
unless options[:paper_type].present? || options[:originator].present?
must { match_all }
end
end
end
@ -41,7 +45,9 @@ class PaperSearch < ActiveRecord::Base
# filter by originator
f = Elasticsearch::DSL::Search::Filters::Bool.new
f.must { match_all }
f.must { term originator: options[:originator] } if options[:originator].present?
if options[:originator].present?
f.must { term originator: options[:originator] }
end
filter f.to_hash do
aggregation :paper_types do
terms do
@ -55,7 +61,9 @@ class PaperSearch < ActiveRecord::Base
# filter by paper_type
f = Elasticsearch::DSL::Search::Filters::Bool.new
f.must { match_all }
f.must { term paper_type: options[:paper_type] } if options[:paper_type].present?
if options[:paper_type].present?
f.must { term paper_type: options[:paper_type] }
end
filter f.to_hash do
aggregation :originators do
terms do

View file

@ -1,3 +1,5 @@
# frozen_string_literal: true
class User < ActiveRecord::Base
def self.find_or_create_from_auth_hash(hash)
User.find_or_create_by(email: hash['info']['email'])

View file

@ -1,35 +1,37 @@
# frozen_string_literal: true
require 'date'
xml.instruct! :xml, :version => "1.0"
xml.rss :version => "2.0", "xmlns:dc" => "http://purl.org/dc/elements/1.1/" do
xml.instruct! :xml, version: '1.0'
xml.rss :version => '2.0', 'xmlns:dc' => 'http://purl.org/dc/elements/1.1/' do
xml.channel do
xml.title "Search results"
xml.description "Papers matching search criteria"
xml.title 'Search results'
xml.description 'Papers matching search criteria'
@papers.each do |doc|
xml.item do
xml.title doc.name
if !doc.content.blank?
xml.description do
xml.cdata! truncate(doc.content.sub("------------------------------- ", ""), length: 768)
end
unless doc.content.blank?
xml.description do
xml.cdata! truncate(doc.content.sub('------------------------------- ', ''), length: 768)
end
end
if !doc.published_at.blank?
xml.pubDate DateTime.parse(doc.published_at).utc.strftime("%a, %d %b %Y %H:%M:%S %z")
unless doc.published_at.blank?
xml.pubDate DateTime.parse(doc.published_at).utc.strftime('%a, %d %b %Y %H:%M:%S %z')
end
doc.originator.each do |originator|
xml.dc :creator do
xml.cdata! originator
end
doc.originator.each do |originator|
xml.dc :creator do
xml.cdata! originator
end
end
if !doc.paper_type.blank?
xml.category do
xml.cdata! doc.paper_type
end
unless doc.paper_type.blank?
xml.category do
xml.cdata! doc.paper_type
end
end
xml.link doc.url
xml.guid doc.url
end
end
end
end
end

View file

@ -1,4 +1,4 @@
# This file is used by Rack-based servers to start the application.
require ::File.expand_path('../config/environment', __FILE__)
require ::File.expand_path('../config/environment', __FILE__)
run Rails.application

View file

@ -1,4 +1,4 @@
require File.expand_path('../boot', __FILE__)
require File.expand_path('boot', __dir__)
require 'rails/all'
@ -20,9 +20,7 @@ module LorisWeb
# config.i18n.load_path += Dir[Rails.root.join('my', 'locales', '*.{rb,yml}').to_s]
config.i18n.default_locale = :de
# https://stackoverflow.com/a/28008145
#config.active_record.raise_in_transactional_callbacks = true
# config.active_record.raise_in_transactional_callbacks = true
end
end

View file

@ -1,4 +1,4 @@
# Set up gems listed in the Gemfile.
ENV['BUNDLE_GEMFILE'] ||= File.expand_path('../../Gemfile', __FILE__)
ENV['BUNDLE_GEMFILE'] ||= File.expand_path('../Gemfile', __dir__)
require 'bundler/setup' if File.exist?(ENV['BUNDLE_GEMFILE'])

View file

@ -1,5 +1,5 @@
# Load the Rails application.
require File.expand_path('../application', __FILE__)
require File.expand_path('application', __dir__)
# Initialize the Rails application.
Rails.application.initialize!

View file

@ -37,5 +37,4 @@ Rails.application.configure do
# flush stdout in order to get logs in realtime
$stdout.sync = true
end

View file

@ -6,4 +6,4 @@ Rails.application.config.assets.version = '1.0'
# Precompile additional assets.
# application.js, application.css, and all non-JS/CSS in app/assets folder are already added.
# Rails.application.config.assets.precompile += %w( search.js )
# Rails.application.config.assets.precompile += %w( geo.js )
# Rails.application.config.assets.precompile += %w( geo.js )

View file

@ -1,3 +1,3 @@
# Be sure to restart your server when you modify this file.
Rails.application.config.action_dispatch.cookies_serializer = :json
Rails.application.config.action_dispatch.cookies_serializer = :json

View file

@ -1,5 +1,5 @@
Rails.application.routes.draw do
root :to => 'search#index', as: :search
root to: 'search#index', as: :search
post '/import' => 'import#new_papers_callback'
get '/map' => 'geo#index', as: :geo
get '/glossary' => 'application#glossary', as: :glossary

View file

@ -1,4 +1,4 @@
class CreateUsers < ActiveRecord::Migration
class CreateUsers < ActiveRecord::Migration[4.2]
def change
create_table :users do |t|
t.string :email
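
The bracketed superclass added to this and the following migrations pins each one to the migration API it targeted: from Rails 5 onward, subclassing ActiveRecord::Migration without a version raises a StandardError at load time asking for an explicit release, so [4.2] keeps these older migrations loadable with their original behaviour. A minimal sketch (the timestamps line is illustrative, not part of the hunk above):

    class CreateUsers < ActiveRecord::Migration[4.2]   # was: < ActiveRecord::Migration
      def change
        create_table :users do |t|
          t.string :email
          t.timestamps null: false   # illustrative only
        end
      end
    end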

View file

@ -1,4 +1,4 @@
class CreatePapers < ActiveRecord::Migration
class CreatePapers < ActiveRecord::Migration[4.2]
def change
create_table :papers do |t|
t.string :name
@ -19,6 +19,6 @@ class CreatePapers < ActiveRecord::Migration
add_index(:papers, :reference)
add_index(:papers, :originator)
add_index(:papers, :body)
add_index(:papers, [:reference, :body], unique: true)
add_index(:papers, %i[reference body], unique: true)
end
end

View file

@ -1,4 +1,4 @@
class CreateImporters < ActiveRecord::Migration
class CreateImporters < ActiveRecord::Migration[4.2]
def change
create_table :importers do |t|
t.string :url

View file

@ -1,4 +1,4 @@
class CreatePaperSearches < ActiveRecord::Migration
class CreatePaperSearches < ActiveRecord::Migration[4.2]
def change
create_table :paper_searches do |t|
t.string :query

View file

@ -1,4 +1,3 @@
# encoding: UTF-8
# This file is auto-generated from the current state of the database. Instead
# of editing this file, please use the migrations feature of Active Record to
# incrementally modify your database, and then regenerate this schema definition.
@ -14,7 +13,7 @@
ActiveRecord::Schema.define(version: 20151010070158) do
create_table "importers", force: :cascade do |t|
t.string "url"
t.string "url"
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
end
@ -27,31 +26,29 @@ ActiveRecord::Schema.define(version: 20151010070158) do
end
create_table "papers", force: :cascade do |t|
t.string "name"
t.string "url"
t.string "reference"
t.string "body"
t.string "name"
t.string "url"
t.string "reference"
t.string "body"
t.datetime "published_at"
t.datetime "scraped_at"
t.string "paper_type"
t.string "originator"
t.text "resolution"
t.text "content"
t.string "paper_type"
t.string "originator"
t.text "resolution"
t.text "content"
t.datetime "created_at"
t.datetime "updated_at"
t.index ["body"], name: "index_papers_on_body"
t.index ["originator"], name: "index_papers_on_originator"
t.index ["reference", "body"], name: "index_papers_on_reference_and_body", unique: true
t.index ["reference"], name: "index_papers_on_reference"
end
add_index "papers", ["body"], name: "index_papers_on_body"
add_index "papers", ["originator"], name: "index_papers_on_originator"
add_index "papers", ["reference", "body"], name: "index_papers_on_reference_and_body", unique: true
add_index "papers", ["reference"], name: "index_papers_on_reference"
create_table "users", force: :cascade do |t|
t.string "email"
t.string "email"
t.datetime "created_at"
t.datetime "updated_at"
t.index ["email"], name: "index_users_on_email", unique: true
end
add_index "users", ["email"], name: "index_users_on_email", unique: true
end
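
The schema.rb hunk above moves the index declarations from standalone add_index calls into t.index entries inside their create_table blocks, which is how newer Rails versions dump the schema; both forms describe the same indexes. The users table in both styles, taken from the lines above:

    # newer dump style: index declared inside the table block
    create_table "users", force: :cascade do |t|
      t.string   "email"
      t.datetime "created_at"
      t.datetime "updated_at"
      t.index ["email"], name: "index_users_on_email", unique: true
    end

    # older dump style: separate add_index call
    add_index "users", ["email"], name: "index_users_on_email", unique: true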

View file

@ -1,12 +1,13 @@
class ParseableDateValidator < ActiveModel::EachValidator
# frozen_string_literal: true
def validate_each(record, attribute, value)
class ParseableDateValidator < ActiveModel::EachValidator
def validate_each(record, attribute, _value)
raw_value = record.read_attribute_before_type_cast(attribute)
return nil if raw_value.nil?
Date.parse(raw_value.to_json)
nil
rescue ArgumentError => e
record.errors[attribute] << (options[:message] || I18n.t("errors.messages.unparseable_date"))
rescue ArgumentError
record.errors[attribute] << (options[:message] || I18n.t('errors.messages.unparseable_date'))
end
end

View file

View file

@ -2,7 +2,7 @@ desc 'Import Paper records from setup importers'
task import_papers: :environment do
require 'open-uri'
OpenSSL::SSL::VERIFY_PEER = OpenSSL::SSL::VERIFY_NONE
api_key = Rails.application.config_for(:morph)["key"]
api_key = Rails.application.config_for(:morph)['key']
uri = URI.parse("https://api.morph.io/jrlover/city_council_leipzig_recent_papers/data.json?key=#{api_key}&query=select%20*%20from%20%27data%27")
Paper.import_from_json(uri.read)
end

View file

@ -1,48 +1,37 @@
# frozen_string_literal: true
require 'rails_helper'
RSpec.describe SearchController, type: :controller, elasticsearch: true do
describe "GET #index" do
it "returns http success" do
describe 'GET #index' do
it 'returns http success' do
get :index, body: 'leipzig'
expect(response).to have_http_status(:success)
end
it "assigns @search_definition with default sort order" do
it 'assigns @search_definition with default sort order' do
search = PaperSearch.new(sort_by: 'date')
get :index, body: 'leipzig'
expect(assigns(:search_definition).attributes).to eq(search.attributes)
end
it "renders the index template" do
it 'renders the index template' do
get :index, body: 'leipzig'
expect(response).to render_template(:index)
end
it "executes the search with PaperSearch parameters" do
pending("simplify search implementation")
result_page = double("page", results: []) # MEH
response = double("es_response", page: result_page)
expect(Paper).to receive(:search).and_return(response)
get :index, body: 'leipzig'
end
it "returns rss" do
get :index, :format => "rss", body: 'leipzig'
it 'returns rss' do
get :index, format: 'rss', body: 'leipzig'
expect(response).to be_success
expect(response).to render_template(:index)
expect(response.content_type).to eq("application/rss+xml")
#expect(response.body).to have_tag "rss" do
expect(response.content_type).to eq('application/rss+xml')
# expect(response.body).to have_tag "rss" do
# with_tag "channel" do
# with_tag "title"
# with_tag "description"
# with_tag "link"
# end
#end
# end
end
end
end

View file

@ -1,13 +1,15 @@
# frozen_string_literal: true
require_relative '../factory_helper'
FactoryBot.define do
factory :paper do
name { Faker::Lorem.sentence }
sequence(:url) { |n| Faker::Internet.url(host: "ris.example.org", path: "/paper-#{n}.html") }
sequence(:url) { |n| Faker::Internet.url(host: 'ris.example.org', path: "/paper-#{n}.html") }
sequence(:reference) { |n| FactoryHelper.reference(n) }
body { "leipzig" }
published_at { "2015-07-20 21:16:53" }
scraped_at { "2015-07-20 21:16:53" }
body { 'leipzig' }
published_at { '2015-07-20 21:16:53' }
scraped_at { '2015-07-20 21:16:53' }
paper_type { FactoryHelper.paper_type }
originator { Faker::Name.name }
resolution { Faker::Lorem.paragraph(sentence_count: 3) }

View file

@ -1,29 +1,29 @@
module FactoryHelper
# frozen_string_literal: true
module FactoryHelper
PAPER_TYPES = [
"Verwaltungsstandpunkt",
"Anfrage",
"Beschlussvorlage",
"Änderungsantrag",
"Antrag",
"Neufassung",
"Informationsvorlage",
"Einwohneranfrage",
"Petition",
"schriftliche Antwort zur Anfrage",
"Wichtige Angelegenheit",
"Eilentscheidung",
"Dringliche Anfrage"
]
'Verwaltungsstandpunkt',
'Anfrage',
'Beschlussvorlage',
'Änderungsantrag',
'Antrag',
'Neufassung',
'Informationsvorlage',
'Einwohneranfrage',
'Petition',
'schriftliche Antwort zur Anfrage',
'Wichtige Angelegenheit',
'Eilentscheidung',
'Dringliche Anfrage'
].freeze
def self.paper_type
PAPER_TYPES.sample
end
REFERENCE = "A-%05i/16"
REFERENCE = 'A-%05i/16'
def self.reference(seq)
REFERENCE % seq
end
end

View file

@ -1,239 +1,238 @@
# frozen_string_literal: true
require 'rails_helper'
require 'pp'
RSpec.feature "Basic search", type: :feature, elasticsearch: true do
RSpec.feature 'Basic search', type: :feature, elasticsearch: true do
before(:each) do
@papers = FactoryBot.create_list(:paper, 11)
Paper.__elasticsearch__.refresh_index!
end
scenario "It displays the search form" do
visit search_path body: "leipzig"
expect(page).to have_content("Stadtratmonitor")
expect(page).to have_field("paper_search_query")
expect(page).to have_select("Typ")
expect(page).to have_select("Einreicher")
expect(page).to have_selector("label", text: "Sortierung")
expect(page).to have_field("paper_search_sort_by_date", type: "radio")
expect(page).to have_field("paper_search_sort_by_score", type: "radio")
scenario 'It displays the search form' do
visit search_path body: 'leipzig'
expect(page).to have_content('Stadtratmonitor')
expect(page).to have_field('paper_search_query')
expect(page).to have_select('Typ')
expect(page).to have_select('Einreicher')
expect(page).to have_selector('label', text: 'Sortierung')
expect(page).to have_field('paper_search_sort_by_date', type: 'radio')
expect(page).to have_field('paper_search_sort_by_score', type: 'radio')
end
scenario "With empty query displays all documents" do
visit search_path body: "leipzig"
expect(page).to have_selector("ul#search_results")
scenario 'With empty query displays all documents' do
visit search_path body: 'leipzig'
expect(page).to have_selector('ul#search_results')
expect(page).to have_content("#{@papers.size} Dokumente in der Datenbank")
end
scenario "Search results are paginated" do
visit search_path body: "leipzig"
expect(page).to have_css("li.search-result", count: 10)
expect(page).to have_css("div#pagination")
within("div#pagination") do
expect(page).to have_css("li", count: 4) # two pages + next + last
expect(page).to have_css("li.current", text: "1")
expect(page).to have_link("2")
expect(page).to have_link("Weiter")
expect(page).to have_link("Ende")
scenario 'Search results are paginated' do
visit search_path body: 'leipzig'
expect(page).to have_css('li.search-result', count: 10)
expect(page).to have_css('div#pagination')
within('div#pagination') do
expect(page).to have_css('li', count: 4) # two pages + next + last
expect(page).to have_css('li.current', text: '1')
expect(page).to have_link('2')
expect(page).to have_link('Weiter')
expect(page).to have_link('Ende')
end
page.find("div#pagination").click_link("2")
expect(page).to have_css("li.search-result", count: 1)
within("div#pagination") do
expect(page).to have_css("li.current", text: "2")
page.find('div#pagination').click_link('2')
expect(page).to have_css('li.search-result', count: 1)
within('div#pagination') do
expect(page).to have_css('li.current', text: '2')
end
end
scenario "Search results have basic information" do
visit search_path body: "leipzig"
scenario 'Search results have basic information' do
visit search_path body: 'leipzig'
paper = @papers.first
resultEntry = page.find("li.search-result", match: :first)
expect(resultEntry).to have_content(paper.name)
result_entry = page.find('li.search-result', match: :first)
expect(result_entry).to have_content(paper.name)
resultSubEntry = resultEntry.find("li.current", match: :first)
linkName = getLinkName(paper)
expect(resultSubEntry).to have_link(linkName, href: paper.url)
result_subentry = result_entry.find('li.current', match: :first)
linkname = get_linkname(paper)
expect(result_subentry).to have_link(linkname, href: paper.url)
end
def getLinkName(paper)
dateStr = I18n.l(paper.published_at.to_date)
originatorStr = (paper.originator.kind_of?(Array) ?
paper.originator.join(", ") : paper.originator)
return "#{dateStr}: #{paper.paper_type} von #{originatorStr}"
def get_linkname(paper)
date = I18n.l(paper.published_at.to_date)
originator = (paper.originator.is_a?(Array) ?
paper.originator.join(', ') : paper.originator)
"#{date}: #{paper.paper_type} von #{originator}"
end
scenario "Finds papers by name" do
paper = FactoryBot.create(:paper, name: "Opendata als default")
scenario 'Finds papers by name' do
paper = FactoryBot.create(:paper, name: 'Opendata als default')
Paper.__elasticsearch__.refresh_index!
visit search_path body: "leipzig", paper_search: {query: "Opendata"}
expect(page).to have_content("1 Dokument in der Datenbank")
resultEntry = page.find("li.search-result", match: :first)
expect(resultEntry).to have_content(paper.name)
visit search_path body: 'leipzig', paper_search: { query: 'Opendata' }
expect(page).to have_content('1 Dokument in der Datenbank')
result_entry = page.find('li.search-result', match: :first)
expect(result_entry).to have_content(paper.name)
resultSubEntry = resultEntry.find("li.current", match: :first)
linkName = getLinkName(paper)
expect(resultSubEntry).to have_link(linkName, href: paper.url)
result_subentry = result_entry.find('li.current', match: :first)
linkname = get_linkname(paper)
expect(result_subentry).to have_link(linkname, href: paper.url)
end
scenario "Finds papers by content" do
scenario 'Finds papers by content' do
paper = FactoryBot.create(:paper,
name: "Opendata als default",
content: "Alle Verwaltungsdokumente werden als Opendata veröffentlicht"
)
name: 'Opendata als default',
content: 'Alle Verwaltungsdokumente werden als Opendata veröffentlicht')
Paper.__elasticsearch__.refresh_index!
visit search_path body: "leipzig", paper_search: {query: "Verwaltungsdokumente"}
expect(page).to have_content("1 Dokument in der Datenbank")
resultEntry = page.find("li.search-result", match: :first)
expect(resultEntry).to have_content(paper.name)
visit search_path body: 'leipzig', paper_search: { query: 'Verwaltungsdokumente' }
expect(page).to have_content('1 Dokument in der Datenbank')
result_entry = page.find('li.search-result', match: :first)
expect(result_entry).to have_content(paper.name)
resultSubEntry = resultEntry.find("li.current", match: :first)
linkName = getLinkName(paper)
expect(resultSubEntry).to have_link(linkName, href: paper.url)
result_subentry = result_entry.find('li.current', match: :first)
linkname = get_linkname(paper)
expect(result_subentry).to have_link(linkname, href: paper.url)
end
scenario "Papers with common reference id in search result ordered by date" do
mainPaper = FactoryBot.create(:paper, published_at: '2016-12-19T19:00:00',
name: "Opendata als default", reference: "VI-0815")
newPaper = FactoryBot.create(:paper, published_at: '2016-12-23T12:00:00',
name: "Opendata als optional", reference: "VI-0815-ÄA-01")
scenario 'Papers with common reference id in search result ordered by date' do
main_paper = FactoryBot.create(:paper, published_at: '2016-12-19T19:00:00',
name: 'Opendata als default', reference: 'VI-0815')
new_paper = FactoryBot.create(:paper, published_at: '2016-12-23T12:00:00',
name: 'Opendata als optional', reference: 'VI-0815-ÄA-01')
Paper.__elasticsearch__.refresh_index!
visit search_path body: "leipzig", paper_search: {query: "default"}
expect(page).to have_content("1 Dokument in der Datenbank")
resultEntry = page.find("li.search-result", match: :first)
expect(resultEntry).to have_content(mainPaper.name)
visit search_path body: 'leipzig', paper_search: { query: 'default' }
expect(page).to have_content('1 Dokument in der Datenbank')
result_entry = page.find('li.search-result', match: :first)
expect(result_entry).to have_content(main_paper.name)
resultSubEntry1 = resultEntry.find("li.current", match: :first)
linkName1 = getLinkName(mainPaper)
expect(resultSubEntry1).to have_link(linkName1, href: mainPaper.url)
result_subentry1 = result_entry.find('li.current', match: :first)
linkname1 = get_linkname(main_paper)
expect(result_subentry1).to have_link(linkname1, href: main_paper.url)
resultSubEntries = resultEntry.find("ul").all("li")
linkName2 = getLinkName(newPaper)
expect(resultSubEntries[0]).to have_link(linkName2, href: newPaper.url)
expect(resultSubEntries[1]).to have_link(linkName1, href: mainPaper.url)
result_subentries = result_entry.find('ul').all('li')
linkname2 = get_linkname(new_paper)
expect(result_subentries[0]).to have_link(linkname2, href: new_paper.url)
expect(result_subentries[1]).to have_link(linkname1, href: main_paper.url)
end
scenario "Papers with common reference id in search result ordered by ref" do
mainPaper = FactoryBot.create(:paper, published_at: '2016-12-19T19:00:00',
name: "Opendata als default", reference: "VI-0815")
newPaper1 = FactoryBot.create(:paper, published_at: '2016-12-23T12:00:00',
name: "Opendata als optional", reference: "VI-0815-ÄA-02")
newPaper2 = FactoryBot.create(:paper, published_at: '2016-12-23T12:00:00',
name: "Opendata als optional", reference: "VI-0815-ÄA-01")
scenario 'Papers with common reference id in search result ordered by ref' do
main_paper = FactoryBot.create(:paper, published_at: '2016-12-19T19:00:00',
name: 'Opendata als default', reference: 'VI-0815')
new_paper1 = FactoryBot.create(:paper, published_at: '2016-12-23T12:00:00',
name: 'Opendata als optional', reference: 'VI-0815-ÄA-02')
new_paper2 = FactoryBot.create(:paper, published_at: '2016-12-23T12:00:00',
name: 'Opendata als optional', reference: 'VI-0815-ÄA-01')
Paper.__elasticsearch__.refresh_index!
visit search_path body: "leipzig", paper_search: {query: "default"}
expect(page).to have_content("1 Dokument in der Datenbank")
resultEntry = page.find("li.search-result", match: :first)
visit search_path body: 'leipzig', paper_search: { query: 'default' }
expect(page).to have_content('1 Dokument in der Datenbank')
result_entry = page.find('li.search-result', match: :first)
resultSubEntries = resultEntry.find("ul").all("li")
linkName1 = getLinkName(newPaper1)
expect(resultSubEntries[0]).to have_link(linkName1, href: newPaper1.url)
linkName2 = getLinkName(newPaper2)
expect(resultSubEntries[1]).to have_link(linkName2, href: newPaper2.url)
linkName3 = getLinkName(mainPaper)
expect(resultSubEntries[2]).to have_link(linkName3, href: mainPaper.url)
result_subentries = result_entry.find('ul').all('li')
linkname1 = get_linkname(new_paper1)
expect(result_subentries[0]).to have_link(linkname1, href: new_paper1.url)
linkname2 = get_linkname(new_paper2)
expect(result_subentries[1]).to have_link(linkname2, href: new_paper2.url)
linkname3 = get_linkname(main_paper)
expect(result_subentries[2]).to have_link(linkname3, href: main_paper.url)
end
scenario "Papers with common reference id handled also for missing prefix" do
mainPaper = FactoryBot.create(:paper, published_at: '2016-12-19T19:00:00',
name: "Opendata als default", reference: "VI-0815")
newPaper1 = FactoryBot.create(:paper, published_at: '2016-12-23T12:00:00',
name: "Opendata als optional", reference: "VI-0815-NF-01")
newPaper1Change = FactoryBot.create(:paper, published_at: '2016-12-23T12:00:00',
name: "Opendata als nicht optional", reference: "-0815-NF-01-ÄA-01")
scenario 'Papers with common reference id handled also for missing prefix' do
main_paper = FactoryBot.create(:paper, published_at: '2016-12-19T19:00:00',
name: 'Opendata als default', reference: 'VI-0815')
new_paper1 = FactoryBot.create(:paper, published_at: '2016-12-23T12:00:00',
name: 'Opendata als optional', reference: 'VI-0815-NF-01')
new_paper2 = FactoryBot.create(:paper, published_at: '2016-12-23T12:00:00',
name: 'Opendata als nicht optional', reference: '-0815-NF-01-ÄA-01')
Paper.__elasticsearch__.refresh_index!
visit search_path body: "leipzig", paper_search: {query: "default"}
expect(page).to have_content("1 Dokument in der Datenbank")
resultEntry = page.find("li.search-result", match: :first)
visit search_path body: 'leipzig', paper_search: { query: 'default' }
expect(page).to have_content('1 Dokument in der Datenbank')
result_entry = page.find('li.search-result', match: :first)
resultSubEntries = resultEntry.find("ul").all("li")
linkName1 = getLinkName(newPaper1)
expect(resultSubEntries[0]).to have_link(linkName1, href: newPaper1.url)
linkName2 = getLinkName(newPaper1Change)
expect(resultSubEntries[1]).to have_link(linkName2, href: newPaper1Change.url)
linkName3 = getLinkName(mainPaper)
expect(resultSubEntries[2]).to have_link(linkName3, href: mainPaper.url)
result_subentries = result_entry.find('ul').all('li')
linkname1 = get_linkname(new_paper1)
expect(result_subentries[0]).to have_link(linkname1, href: new_paper1.url)
linkname2 = get_linkname(new_paper2)
expect(result_subentries[1]).to have_link(linkname2, href: new_paper2.url)
linkname3 = get_linkname(main_paper)
expect(result_subentries[2]).to have_link(linkname3, href: main_paper.url)
end
scenario "Finds 'Testen' with search 'Test'" do
paper = FactoryBot.create(:paper, name: "Testen")
paper = FactoryBot.create(:paper, name: 'Testen')
Paper.__elasticsearch__.refresh_index!
visit search_path body: "leipzig", paper_search: {query: "Test"}
expect(page).to have_content("1 Dokument in der Datenbank")
resultEntry = page.find("li.search-result", match: :first)
expect(resultEntry).to have_content(paper.name)
visit search_path body: 'leipzig', paper_search: { query: 'Test' }
expect(page).to have_content('1 Dokument in der Datenbank')
result_entry = page.find('li.search-result', match: :first)
expect(result_entry).to have_content(paper.name)
end
scenario "Finds 'Test' with search 'Testen'" do
paper = FactoryBot.create(:paper, name: "Test")
paper = FactoryBot.create(:paper, name: 'Test')
Paper.__elasticsearch__.refresh_index!
visit search_path body: "leipzig", paper_search: {query: "Testen"}
expect(page).to have_content("1 Dokument in der Datenbank")
resultEntry = page.find("li.search-result", match: :first)
expect(resultEntry).to have_content(paper.name)
visit search_path body: 'leipzig', paper_search: { query: 'Testen' }
expect(page).to have_content('1 Dokument in der Datenbank')
result_entry = page.find('li.search-result', match: :first)
expect(result_entry).to have_content(paper.name)
end
scenario "Finds 'Fahrräderverleih' with search 'Fahrrad'" do
paper = FactoryBot.create(:paper, name: "Fahrräderverleih")
paper = FactoryBot.create(:paper, name: 'Fahrräderverleih')
Paper.__elasticsearch__.refresh_index!
visit search_path body: "leipzig", paper_search: {query: "Fahrrad"}
expect(page).to have_content("1 Dokument in der Datenbank")
resultEntry = page.find("li.search-result", match: :first)
expect(resultEntry).to have_content(paper.name)
visit search_path body: 'leipzig', paper_search: { query: 'Fahrrad' }
expect(page).to have_content('1 Dokument in der Datenbank')
result_entry = page.find('li.search-result', match: :first)
expect(result_entry).to have_content(paper.name)
end
scenario "Finds 'Fahrräderverleih' with search 'Fahrräder'" do
paper = FactoryBot.create(:paper, name: "Fahrräderverleih")
paper = FactoryBot.create(:paper, name: 'Fahrräderverleih')
Paper.__elasticsearch__.refresh_index!
visit search_path body: "leipzig", paper_search: {query: "Fahrräder"}
expect(page).to have_content("1 Dokument in der Datenbank")
resultEntry = page.find("li.search-result", match: :first)
expect(resultEntry).to have_content(paper.name)
visit search_path body: 'leipzig', paper_search: { query: 'Fahrräder' }
expect(page).to have_content('1 Dokument in der Datenbank')
result_entry = page.find('li.search-result', match: :first)
expect(result_entry).to have_content(paper.name)
end
scenario "Finds 'Fahrräderverleih' with search 'Verleih'" do
paper = FactoryBot.create(:paper, name: "Fahrräderverleih")
paper = FactoryBot.create(:paper, name: 'Fahrräderverleih')
Paper.__elasticsearch__.refresh_index!
visit search_path body: "leipzig", paper_search: {query: "Verleih"}
expect(page).to have_content("1 Dokument in der Datenbank")
resultEntry = page.find("li.search-result", match: :first)
expect(resultEntry).to have_content(paper.name)
visit search_path body: 'leipzig', paper_search: { query: 'Verleih' }
expect(page).to have_content('1 Dokument in der Datenbank')
result_entry = page.find('li.search-result', match: :first)
expect(result_entry).to have_content(paper.name)
end
scenario "Finds 'Fahrräderverleih' with search 'Autoverleih'" do
paper = FactoryBot.create(:paper, name: "Fahrräderverleih")
paper = FactoryBot.create(:paper, name: 'Fahrräderverleih')
Paper.__elasticsearch__.refresh_index!
visit search_path body: "leipzig", paper_search: {query: "Autoverleih"}
expect(page).to have_content("1 Dokument in der Datenbank")
resultEntry = page.find("li.search-result", match: :first)
expect(resultEntry).to have_content(paper.name)
visit search_path body: 'leipzig', paper_search: { query: 'Autoverleih' }
expect(page).to have_content('1 Dokument in der Datenbank')
result_entry = page.find('li.search-result', match: :first)
expect(result_entry).to have_content(paper.name)
end
scenario "Finds no 'Fahrrad' with search 'Rad'" do
paper = FactoryBot.create(:paper, name: "Fahrrad")
FactoryBot.create(:paper, name: 'Fahrrad')
Paper.__elasticsearch__.refresh_index!
visit search_path body: "leipzig", paper_search: {query: "Rad"}
expect(page).to have_content("0 Dokumente in der Datenbank")
visit search_path body: 'leipzig', paper_search: { query: 'Rad' }
expect(page).to have_content('0 Dokumente in der Datenbank')
end
scenario "Papers with reference id having slash is escaped" do
mainPaper = FactoryBot.create(:paper, published_at: '2016-12-19T19:00:00',
name: "Opendata als default", reference: "VI-00768/14")
newPaper = FactoryBot.create(:paper, published_at: '2016-12-23T12:00:00',
name: "Opendata als optional", reference: "VI-00768/14-ÄA-01")
scenario 'Papers with reference id having slash is escaped' do
main_paper = FactoryBot.create(:paper, published_at: '2016-12-19T19:00:00',
name: 'Opendata als default', reference: 'VI-00768/14')
new_paper = FactoryBot.create(:paper, published_at: '2016-12-23T12:00:00',
name: 'Opendata als optional', reference: 'VI-00768/14-ÄA-01')
Paper.__elasticsearch__.refresh_index!
visit search_path body: "leipzig", paper_search: {query: "default"}
expect(page).to have_content("1 Dokument in der Datenbank")
resultEntry = page.find("li.search-result", match: :first)
expect(resultEntry).to have_content(mainPaper.name)
visit search_path body: 'leipzig', paper_search: { query: 'default' }
expect(page).to have_content('1 Dokument in der Datenbank')
result_entry = page.find('li.search-result', match: :first)
expect(result_entry).to have_content(main_paper.name)
resultSubEntry1 = resultEntry.find("li.current", match: :first)
linkName1 = getLinkName(mainPaper)
expect(resultSubEntry1).to have_link(linkName1, href: mainPaper.url)
result_subentry1 = result_entry.find('li.current', match: :first)
linkname1 = get_linkname(main_paper)
expect(result_subentry1).to have_link(linkname1, href: main_paper.url)
resultSubEntries = resultEntry.find("ul").all("li")
linkName2 = getLinkName(newPaper)
expect(resultSubEntries[0]).to have_link(linkName2, href: newPaper.url)
expect(resultSubEntries[1]).to have_link(linkName1, href: mainPaper.url)
result_subentries = result_entry.find('ul').all('li')
linkname2 = get_linkname(new_paper)
expect(result_subentries[0]).to have_link(linkname2, href: new_paper.url)
expect(result_subentries[1]).to have_link(linkname1, href: main_paper.url)
end
end

View file

@ -1,69 +1,65 @@
# frozen_string_literal: true
require 'rails_helper'
RSpec.feature "Search filters", type: :feature, elasticsearch: true do
RSpec.feature 'Search filters', type: :feature, elasticsearch: true do
before(:each) do
@antrag = FactoryBot.create(:paper,
paper_type: "Antrag",
name: "Mehr Spielplätze in Leipzig",
originator: "Dezernat Jugend, Soziales, Gesundheit und Schule"
)
paper_type: 'Antrag',
name: 'Mehr Spielplätze in Leipzig',
originator: 'Dezernat Jugend, Soziales, Gesundheit und Schule')
@anfrage = FactoryBot.create(:paper,
paper_type: "Anfrage",
originator: "CDU-Fraktion"
)
paper_type: 'Anfrage',
originator: 'CDU-Fraktion')
@vorlage_1 = FactoryBot.create(:paper,
paper_type: "Vorlage",
name: "Zustand der Spielplätze",
originator: "Dezernat Jugend, Soziales, Gesundheit und Schule"
)
paper_type: 'Vorlage',
name: 'Zustand der Spielplätze',
originator: 'Dezernat Jugend, Soziales, Gesundheit und Schule')
@vorlage_2 = FactoryBot.create(:paper,
paper_type: "Vorlage",
name: "Mehr Ampeln in der Innenstadt",
originator: "Oberbürgermeister"
)
paper_type: 'Vorlage',
name: 'Mehr Ampeln in der Innenstadt',
originator: 'Oberbürgermeister')
Paper.__elasticsearch__.refresh_index!
end
scenario "Displays paper types and their respective count witin the search results" do
visit search_path body: "leipzig"
paper_type_filter = page.find("select#paper_search_paper_type")
expect(paper_type_filter).to have_css("option", text: "Antrag (1)")
expect(paper_type_filter).to have_css("option", text: "Anfrage (1)")
expect(paper_type_filter).to have_css("option", text: "Vorlage (2)")
scenario 'Displays paper types and their respective count witin the search results' do
visit search_path body: 'leipzig'
paper_type_filter = page.find('select#paper_search_paper_type')
expect(paper_type_filter).to have_css('option', text: 'Antrag (1)')
expect(paper_type_filter).to have_css('option', text: 'Anfrage (1)')
expect(paper_type_filter).to have_css('option', text: 'Vorlage (2)')
visit search_path body: "leipzig", paper_search: {query: "Spielplätze"}
paper_type_filter = page.find("select#paper_search_paper_type")
expect(paper_type_filter).to have_css("option", text: "Antrag (1)")
expect(paper_type_filter).not_to have_css("option", text: "Anfrage")
expect(paper_type_filter).to have_css("option", text: "Vorlage (1)")
visit search_path body: 'leipzig', paper_search: { query: 'Spielplätze' }
paper_type_filter = page.find('select#paper_search_paper_type')
expect(paper_type_filter).to have_css('option', text: 'Antrag (1)')
expect(paper_type_filter).not_to have_css('option', text: 'Anfrage')
expect(paper_type_filter).to have_css('option', text: 'Vorlage (1)')
end
scenario "Filtering by paper type", js: true do
visit search_path body: "leipzig"
expect(page).to have_css("li.search-result", count: 4)
select "Antrag (1)", from: "Typ"
expect(page).to have_css("li.search-result", count: 1, text: "Spielplätze")
scenario 'Filtering by paper type', js: true do
visit search_path body: 'leipzig'
expect(page).to have_css('li.search-result', count: 4)
select 'Antrag (1)', from: 'Typ'
expect(page).to have_css('li.search-result', count: 1, text: 'Spielplätze')
end
scenario "Displays originators and their respective count within the search results" do
visit search_path body: "leipzig"
originator_filter = page.find("select#paper_search_originator")
expect(originator_filter).to have_css("option", text: "CDU-Fraktion (1)")
expect(originator_filter).to have_css("option", text: "Dezernat Jugend, Soziales, Gesundheit und Schule (2)")
expect(originator_filter).to have_css("option", text: "Oberbürgermeister (1)")
scenario 'Displays originators and their respective count within the search results' do
visit search_path body: 'leipzig'
originator_filter = page.find('select#paper_search_originator')
expect(originator_filter).to have_css('option', text: 'CDU-Fraktion (1)')
expect(originator_filter).to have_css('option', text: 'Dezernat Jugend, Soziales, Gesundheit und Schule (2)')
expect(originator_filter).to have_css('option', text: 'Oberbürgermeister (1)')
visit search_path body: "leipzig", paper_search: {query: "Spielplätze"}
originator_filter = page.find("select#paper_search_originator")
expect(originator_filter).to have_css("option", text: "Dezernat Jugend, Soziales, Gesundheit und Schule (2)")
expect(originator_filter).not_to have_css("option", text: "Oberbürgermeister")
visit search_path body: 'leipzig', paper_search: { query: 'Spielplätze' }
originator_filter = page.find('select#paper_search_originator')
expect(originator_filter).to have_css('option', text: 'Dezernat Jugend, Soziales, Gesundheit und Schule (2)')
expect(originator_filter).not_to have_css('option', text: 'Oberbürgermeister')
end
scenario "Filtering by originator", js: true do
visit search_path body: "leipzig"
expect(page).to have_css("li.search-result", count: 4)
select "Oberbürgermeister (1)", from: "Einreicher"
expect(page).to have_css("li.search-result", count: 1, text: "Ampeln")
scenario 'Filtering by originator', js: true do
visit search_path body: 'leipzig'
expect(page).to have_css('li.search-result', count: 4)
select 'Oberbürgermeister (1)', from: 'Einreicher'
expect(page).to have_css('li.search-result', count: 1, text: 'Ampeln')
end
end

View file

View file

@ -1,22 +1,24 @@
# frozen_string_literal: true
require 'rails_helper'
RSpec.describe Paper do
context "Validations" do
context 'Validations' do
it { should validate_presence_of(:name) }
it { should validate_length_of(:name).is_at_most(1000) }
it { should validate_length_of(:name).is_at_most(1000) }
it { should validate_presence_of(:url) }
it { should validate_length_of(:url).is_at_most(1000) }
context "URL uniqueness" do
context 'URL uniqueness' do
subject { FactoryBot.build(:paper) }
it { should validate_uniqueness_of(:url) }
end
it "validate url format sane" do
expected_error = "ist keine gültige URL"
paper = FactoryBot.build(:paper, url: "wtf")
expect(paper).not_to be_valid, "Expected paper to not be valid with invalid URL"
it 'validate url format sane' do
expected_error = 'ist keine gültige URL'
paper = FactoryBot.build(:paper, url: 'wtf')
expect(paper).not_to be_valid, 'Expected paper to not be valid with invalid URL'
expect(paper.errors[:url]).not_to be_empty
expect(paper.errors[:url]).to include(expected_error), "Expected #{paper.errors[:url]} to include \"#{expected_error}\""
end
@ -36,11 +38,11 @@ RSpec.describe Paper do
it { should validate_presence_of(:paper_type) }
it { should validate_length_of(:paper_type).is_at_most(50) }
context "published_at" do
context 'published_at' do
it { should validate_presence_of(:published_at) }
it "validate date is parseable" do
expected_error = "ist kein gültiges Datum"
paper = FactoryBot.build(:paper, published_at: "fubar")
it 'validate date is parseable' do
expected_error = 'ist kein gültiges Datum'
paper = FactoryBot.build(:paper, published_at: 'fubar')
expect(paper).not_to be_valid
expect(paper.errors[:published_at]).not_to be_empty
expect(paper.errors[:published_at]).to include(expected_error), "Expected #{paper.errors[:published_at]} to include \"#{expected_error}\""

View file

@ -1,8 +1,12 @@
# frozen_string_literal: true
# This file is copied to spec/ when you run 'rails generate rspec:install'
ENV['RAILS_ENV'] ||= 'test'
require File.expand_path('../../config/environment', __FILE__)
require File.expand_path('../config/environment', __dir__)
# Prevent database truncation if the environment is production
abort("The Rails environment is running in production mode!") if Rails.env.production?
if Rails.env.production?
abort('The Rails environment is running in production mode!')
end
require 'spec_helper'
require 'rspec/rails'
# Add additional requires below this line. Rails is not loaded until this point!
@ -20,7 +24,7 @@ require 'rspec/rails'
# directory. Alternatively, in the individual `*_spec.rb` files, manually
# require only the support files necessary.
#
Dir[Rails.root.join('spec/support/**/*.rb')].each { |f| require f }
Dir[Rails.root.join('spec/support/**/*.rb')].sort.each { |f| require f }
# Checks for pending migrations before tests are run.
# If you are not using ActiveRecord, you can remove this line.

View file

@ -1,3 +1,5 @@
# frozen_string_literal: true
require 'coveralls'
Coveralls.wear!('rails')
@ -43,53 +45,51 @@ RSpec.configure do |config|
mocks.verify_partial_doubles = true
end
# The settings below are suggested to provide a good initial experience
# with RSpec, but feel free to customize to your heart's content.
=begin
# These two settings work together to allow you to limit a spec run
# to individual examples or groups you care about by tagging them with
# `:focus` metadata. When nothing is tagged with `:focus`, all examples
# get run.
config.filter_run :focus
config.run_all_when_everything_filtered = true
# Allows RSpec to persist some state between runs in order to support
# the `--only-failures` and `--next-failure` CLI options. We recommend
# you configure your source control system to ignore this file.
config.example_status_persistence_file_path = "spec/examples.txt"
# Limits the available syntax to the non-monkey patched syntax that is
# recommended. For more details, see:
# - http://myronmars.to/n/dev-blog/2012/06/rspecs-new-expectation-syntax
# - http://www.teaisaweso.me/blog/2013/05/27/rspecs-new-message-expectation-syntax/
# - http://myronmars.to/n/dev-blog/2014/05/notable-changes-in-rspec-3#new__config_option_to_disable_rspeccore_monkey_patching
config.disable_monkey_patching!
# Many RSpec users commonly either run the entire suite or an individual
# file, and it's useful to allow more verbose output when running an
# individual spec file.
if config.files_to_run.one?
# Use the documentation formatter for detailed output,
# unless a formatter has already been configured
# (e.g. via a command-line flag).
config.default_formatter = 'doc'
end
# Print the 10 slowest examples and example groups at the
# end of the spec run, to help surface which specs are running
# particularly slow.
config.profile_examples = 10
# Run specs in random order to surface order dependencies. If you find an
# order dependency and want to debug it, you can fix the order by providing
# the seed, which is printed after each run.
# --seed 1234
config.order = :random
# Seed global randomization in this process using the `--seed` CLI option.
# Setting this allows you to use `--seed` to deterministically reproduce
# test failures related to randomization by passing the same `--seed` value
# as the one that triggered the failure.
Kernel.srand config.seed
=end
# The settings below are suggested to provide a good initial experience
# with RSpec, but feel free to customize to your heart's content.
# # These two settings work together to allow you to limit a spec run
# # to individual examples or groups you care about by tagging them with
# # `:focus` metadata. When nothing is tagged with `:focus`, all examples
# # get run.
# config.filter_run :focus
# config.run_all_when_everything_filtered = true
#
# # Allows RSpec to persist some state between runs in order to support
# # the `--only-failures` and `--next-failure` CLI options. We recommend
# # you configure your source control system to ignore this file.
# config.example_status_persistence_file_path = "spec/examples.txt"
#
# # Limits the available syntax to the non-monkey patched syntax that is
# # recommended. For more details, see:
# # - http://myronmars.to/n/dev-blog/2012/06/rspecs-new-expectation-syntax
# # - http://www.teaisaweso.me/blog/2013/05/27/rspecs-new-message-expectation-syntax/
# # - http://myronmars.to/n/dev-blog/2014/05/notable-changes-in-rspec-3#new__config_option_to_disable_rspeccore_monkey_patching
# config.disable_monkey_patching!
#
# # Many RSpec users commonly either run the entire suite or an individual
# # file, and it's useful to allow more verbose output when running an
# # individual spec file.
# if config.files_to_run.one?
# # Use the documentation formatter for detailed output,
# # unless a formatter has already been configured
# # (e.g. via a command-line flag).
# config.default_formatter = 'doc'
# end
#
# # Print the 10 slowest examples and example groups at the
# # end of the spec run, to help surface which specs are running
# # particularly slow.
# config.profile_examples = 10
#
# # Run specs in random order to surface order dependencies. If you find an
# # order dependency and want to debug it, you can fix the order by providing
# # the seed, which is printed after each run.
# # --seed 1234
# config.order = :random
#
# # Seed global randomization in this process using the `--seed` CLI option.
# # Setting this allows you to use `--seed` to deterministically reproduce
# # test failures related to randomization by passing the same `--seed` value
# # as the one that triggered the failure.
# Kernel.srand config.seed
end

View file

@ -1,10 +1,12 @@
# frozen_string_literal: true
require 'capybara/apparition'
Capybara.register_driver :apparition do |app|
Capybara::Apparition::Driver.new(
app,
headless: true,
browser_options: [ :no_sandbox, disable_features: 'VizDisplayCompositor']
)
Capybara::Apparition::Driver.new(
app,
headless: true,
browser_options: [:no_sandbox, disable_features: 'VizDisplayCompositor']
)
end
Capybara.javascript_driver = :apparition

View file

@ -1,3 +1,5 @@
# frozen_string_literal: true
RSpec.configure do |config|
config.before(:suite) do
DatabaseCleaner.clean_with(:truncation)
@ -18,5 +20,4 @@ RSpec.configure do |config|
config.append_after(:each) do
DatabaseCleaner.clean
end
end

View file

@ -1,6 +1,8 @@
# frozen_string_literal: true
RSpec.configure do |config|
config.before :each, elasticsearch: true do
Paper.__elasticsearch__.create_index!(force: true)
Elasticsearch::Model.client.cluster.health wait_for_status: "yellow"
Elasticsearch::Model.client.cluster.health wait_for_status: 'yellow'
end
end

View file

@ -1,3 +1,5 @@
# frozen_string_literal: true
RSpec.configure do |config|
config.include FactoryBot::Syntax::Methods
end

View file

@ -1,3 +1,5 @@
# frozen_string_literal: true
Shoulda::Matchers.configure do |config|
config.integrate do |with|
with.test_framework :rspec

View file

View file

@ -1,7 +0,0 @@
require 'test_helper'
class SearchControllerTest < ActionController::TestCase
# test "the truth" do
# assert true
# end
end

View file

@ -1,7 +0,0 @@
require 'test_helper'
class SessionsControllerTest < ActionController::TestCase
# test "the truth" do
# assert true
# end
end

test/fixtures/.keep (vendored)
View file

View file

@ -1,11 +0,0 @@
# Read about fixtures at http://api.rubyonrails.org/classes/ActiveRecord/FixtureSet.html
# This model initially had no columns defined. If you add columns to the
# model remove the '{}' from the fixture names and add the columns immediately
# below each fixture, per the syntax in the comments below
#
one: {}
# column: value
#
two: {}
# column: value

View file

@ -1,11 +0,0 @@
# Read about fixtures at http://api.rubyonrails.org/classes/ActiveRecord/FixtureSet.html
# This model initially had no columns defined. If you add columns to the
# model remove the '{}' from the fixture names and add the columns immediately
# below each fixture, per the syntax in the comments below
#
one: {}
# column: value
#
two: {}
# column: value

View file

View file

View file

@ -1,8 +0,0 @@
require 'test_helper'
class RoutesTest < ActionDispatch::IntegrationTest
test "route test" do
assert_generates "/import", { :controller => "import", :action => "new_papers_callback" }
assert_generates "/", :controller => "search", :action => "index"
end
end

View file

View file

View file

@ -1,7 +0,0 @@
require 'test_helper'
class ImporterTest < ActiveSupport::TestCase
# test "the truth" do
# assert true
# end
end

View file

@ -1,52 +0,0 @@
require 'test_helper'
class PaperTest < ActiveSupport::TestCase
context "Validations" do
should validate_presence_of(:name)
should validate_length_of(:name).is_at_most(1000)
should validate_presence_of(:url)
should validate_length_of(:url).is_at_most(1000)
context "URL uniqueness" do
subject { FactoryBot.build(:paper) }
should validate_uniqueness_of(:url)
end
should "validate url format sane" do
expected_error = "ist keine gültige URL"
paper = FactoryBot.build(:paper, url: "wtf")
assert_not paper.valid?, "Expected paper to not be valid with invalid URL"
assert_not paper.errors[:url].empty?
assert paper.errors[:url].include?(expected_error), "Expected #{paper.errors[:url]} to include \"#{expected_error}\""
end
should validate_presence_of(:reference)
should validate_length_of(:reference).is_at_most(100)
should validate_presence_of(:body)
should validate_length_of(:body).is_at_most(100)
should validate_presence_of(:content)
should validate_length_of(:content).is_at_most(100_000)
should validate_presence_of(:originator)
should validate_length_of(:originator).is_at_most(300)
should validate_presence_of(:paper_type)
should validate_length_of(:paper_type).is_at_most(50)
context "published_at" do
should validate_presence_of(:published_at)
should "validate date is parseable" do
expected_error = "ist kein gültiges Datum"
paper = FactoryBot.build(:paper, published_at: "fubar")
assert_not paper.valid?
assert_not paper.errors[:published_at].empty?
assert paper.errors[:published_at].include?(expected_error), "Expected #{paper.errors[:published_at]} to include \"#{expected_error}\""
end
end
should validate_length_of(:resolution).is_at_most(30_000)
end
end

View file

@ -1,7 +0,0 @@
require 'test_helper'
class UserTest < ActiveSupport::TestCase
# test "the truth" do
# assert true
# end
end

View file

@ -1,19 +0,0 @@
require 'simplecov'
SimpleCov.start 'rails'
ENV['RAILS_ENV'] ||= 'test'
require File.expand_path('../../config/environment', __FILE__)
require 'rails/test_help'
require 'capybara/rails'
class ActiveSupport::TestCase
# Setup all fixtures in test/fixtures/*.yml for all tests in alphabetical order.
# fixtures :all
# Add more helper methods to be used by all tests here...
end
class ActionDispatch::IntegrationTest
# Make the Capybara DSL available in all integration tests
include Capybara::DSL
end