Commit 596e886

Check clippy in CI
1 parent 985be8c commit 596e886

10 files changed: +885 -1854 lines

.github/workflows/rust.yml

Lines changed: 8 additions & 5 deletions
@@ -7,27 +7,30 @@ on:
     branches: [ master ]
 
 env:
+  CARGO_INCREMENTAL: 0
   CARGO_TERM_COLOR: always
+  RUST_BACKTRACE: 1
+  RUSTFLAGS: -D warnings
+  RUSTDOCFLAGS: -D warnings
 
 jobs:
   build:
-
     runs-on: ubuntu-latest
-
     steps:
     - uses: actions/checkout@v2
     - name: Build
      run: cargo build --verbose
     - name: Run tests
       run: cargo test --verbose
-  fmt:
+    - name: Run clippy
+      run: cargo clippy --all-targets --all --verbose
 
+  fmt:
     runs-on: ubuntu-latest
-
     steps:
     - uses: actions/checkout@v2
     - name: Rustfmt
-      run: cargo fmt --check
+      run: cargo fmt --all --check
     - name: Verify regenerated files
       run: ./scripts/unicode.py && diff tables.rs src/tables.rs
     - name: Verify regenerated tests
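Note on the workflow change: with RUSTFLAGS and RUSTDOCFLAGS set to -D warnings for the whole job, the new clippy step (and rustdoc) now fails CI on any warning instead of merely printing it. The same check should be reproducible locally with roughly `RUSTFLAGS="-D warnings" cargo clippy --all-targets --all --verbose`, assuming the clippy component is installed via rustup.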

benches/chars.rs

Lines changed: 2 additions & 3 deletions
@@ -6,7 +6,6 @@
 //! is how much slower full unicode handling is.
 
 use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
-use unicode_segmentation;
 
 use std::fs;
 use unicode_segmentation::UnicodeSegmentation;
@@ -24,14 +23,14 @@ const FILES: &[&str] = &[
 
 #[inline(always)]
 fn grapheme(text: &str) {
-    for c in UnicodeSegmentation::graphemes(black_box(&*text), true) {
+    for c in UnicodeSegmentation::graphemes(black_box(text), true) {
         black_box(c);
     }
 }
 
 #[inline(always)]
 fn scalar(text: &str) {
-    for c in black_box(&*text).chars() {
+    for c in black_box(text).chars() {
         black_box(c);
     }
 }
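Both bench changes are clippy-driven: the bare `use unicode_segmentation;` import was unused, and `black_box(&*text)` reborrowed a `&str` that can be passed directly. A minimal sketch of the second fix, using std's `black_box` rather than criterion's re-export (illustrative only, not the bench file itself):

    use std::hint::black_box;

    // Passing `text` directly is equivalent to `&*text` here: both are `&str`,
    // and clippy flags the explicit reborrow as redundant.
    fn count_chars(text: &str) -> usize {
        black_box(text).chars().count()
    }

    fn main() {
        println!("{}", count_chars("a̐éö̲"));
    }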

scripts/unicode_gen_breaktests.py

Lines changed: 4 additions & 4 deletions
@@ -169,8 +169,8 @@ def create_grapheme_data(f):
         else:
             test_diff.append((allchars, extgraphs, c))
 
-    stype = "&'static [(&'static str, &'static [&'static str])]"
-    dtype = "&'static [(&'static str, &'static [&'static str], &'static [&'static str])]"
+    stype = "&[(&str, &[&str])]"
+    dtype = "&[(&str, &[&str], &[&str])]"
     f.write(" // official Unicode test data\n")
     f.write(" // http://www.unicode.org/Public/%s/ucd/auxiliary/GraphemeBreakTest.txt\n" % unicode.UNICODE_VERSION_NUMBER)
     unicode.emit_table(f, "TEST_SAME", test_same, stype, True, showfun, True)
@@ -185,7 +185,7 @@ def create_words_data(f):
         allchars = [cn for s in c for cn in s]
         test.append((allchars, c))
 
-    wtype = "&'static [(&'static str, &'static [&'static str])]"
+    wtype = "&[(&str, &[&str])]"
     f.write(" // official Unicode test data\n")
     f.write(" // http://www.unicode.org/Public/%s/ucd/auxiliary/WordBreakTest.txt\n" % unicode.UNICODE_VERSION_NUMBER)
     unicode.emit_table(f, "TEST_WORD", test, wtype, True, showfun, True)
@@ -199,7 +199,7 @@ def create_sentence_data(f):
         allchars = [cn for s in c for cn in s]
         test.append((allchars, c))
 
-    wtype = "&'static [(&'static str, &'static [&'static str])]"
+    wtype = "&[(&str, &[&str])]"
     f.write(" // official Unicode test data\n")
     f.write(" // http://www.unicode.org/Public/%s/ucd/auxiliary/SentenceBreakTest.txt\n" % unicode.UNICODE_VERSION_NUMBER)
     unicode.emit_table(f, "TEST_SENTENCE", test, wtype, True, showfun, True)

src/grapheme.rs

Lines changed: 16 additions & 16 deletions
@@ -140,7 +140,7 @@ impl<'a> DoubleEndedIterator for Graphemes<'a> {
 }
 
 #[inline]
-pub fn new_graphemes<'b>(s: &'b str, is_extended: bool) -> Graphemes<'b> {
+pub fn new_graphemes(s: &str, is_extended: bool) -> Graphemes<'_> {
     let len = s.len();
     Graphemes {
         string: s,
@@ -150,7 +150,7 @@ pub fn new_graphemes<'b>(s: &'b str, is_extended: bool) -> Graphemes<'b> {
 }
 
 #[inline]
-pub fn new_grapheme_indices<'b>(s: &'b str, is_extended: bool) -> GraphemeIndices<'b> {
+pub fn new_grapheme_indices(s: &str, is_extended: bool) -> GraphemeIndices<'_> {
     GraphemeIndices {
         start_offset: s.as_ptr() as usize,
         iter: new_graphemes(s, is_extended),
@@ -296,10 +296,10 @@ impl GraphemeCursor {
             GraphemeState::Unknown
         };
         GraphemeCursor {
-            offset: offset,
-            len: len,
-            state: state,
-            is_extended: is_extended,
+            offset,
+            len,
+            state,
+            is_extended,
             cat_before: None,
             cat_after: None,
             pre_context_offset: None,
@@ -406,7 +406,7 @@ impl GraphemeCursor {
         assert!(chunk_start + chunk.len() == self.pre_context_offset.unwrap());
         self.pre_context_offset = None;
         if self.is_extended && chunk_start + chunk.len() == self.offset {
-            let ch = chunk.chars().rev().next().unwrap();
+            let ch = chunk.chars().next_back().unwrap();
             if self.grapheme_category(ch) == gr::GC_Prepend {
                 self.decide(false); // GB9b
                 return;
@@ -417,7 +417,7 @@ impl GraphemeCursor {
             GraphemeState::Emoji => self.handle_emoji(chunk, chunk_start),
             _ => {
                 if self.cat_before.is_none() && self.offset == chunk.len() + chunk_start {
-                    let ch = chunk.chars().rev().next().unwrap();
+                    let ch = chunk.chars().next_back().unwrap();
                     self.cat_before = Some(self.grapheme_category(ch));
                 }
             }
@@ -540,10 +540,10 @@ impl GraphemeCursor {
         if self.state == GraphemeState::NotBreak {
             return Ok(false);
         }
-        if self.offset < chunk_start || self.offset >= chunk_start + chunk.len() {
-            if self.offset > chunk_start + chunk.len() || self.cat_after.is_none() {
-                return Err(GraphemeIncomplete::InvalidOffset);
-            }
+        if (self.offset < chunk_start || self.offset >= chunk_start + chunk.len())
+            && (self.offset > chunk_start + chunk.len() || self.cat_after.is_none())
+        {
+            return Err(GraphemeIncomplete::InvalidOffset);
         }
         if let Some(pre_context_offset) = self.pre_context_offset {
             return Err(GraphemeIncomplete::PreContext(pre_context_offset));
@@ -566,15 +566,15 @@ impl GraphemeCursor {
             }
         }
         if self.cat_before.is_none() {
-            let ch = chunk[..offset_in_chunk].chars().rev().next().unwrap();
+            let ch = chunk[..offset_in_chunk].chars().next_back().unwrap();
             self.cat_before = Some(self.grapheme_category(ch));
         }
         match check_pair(self.cat_before.unwrap(), self.cat_after.unwrap()) {
-            PairResult::NotBreak => return self.decision(false),
-            PairResult::Break => return self.decision(true),
+            PairResult::NotBreak => self.decision(false),
+            PairResult::Break => self.decision(true),
             PairResult::Extended => {
                 let is_extended = self.is_extended;
-                return self.decision(!is_extended);
+                self.decision(!is_extended)
             }
             PairResult::Regional => {
                 if let Some(ris_count) = self.ris_count {
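The grapheme.rs hunks are mechanical clippy fixes: elided lifetimes on the constructors, struct field init shorthand, chars().next_back() instead of chars().rev().next(), a collapsed nested if, and match arms without a trailing return. A small self-contained sketch of the first three idioms (the type and values are illustrative, not the crate's real cursor):

    struct Cursor<'a> {
        text: &'a str,
        offset: usize,
    }

    // Elided lifetime: '_ in the return type replaces an explicit <'b> parameter.
    fn new_cursor(text: &str, offset: usize) -> Cursor<'_> {
        // Field init shorthand: `text` and `offset` instead of `text: text`, `offset: offset`.
        Cursor { text, offset }
    }

    fn main() {
        let c = new_cursor("a̐éö̲", 0);
        // next_back() fetches the last char without the rev().next() detour.
        let last = c.text.chars().next_back();
        println!("offset {}, last char {:?}", c.offset, last);
    }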

src/lib.rs

Lines changed: 9 additions & 9 deletions
@@ -96,7 +96,7 @@ pub trait UnicodeSegmentation {
     ///
     /// assert_eq!(&gr2[..], b);
     /// ```
-    fn graphemes<'a>(&'a self, is_extended: bool) -> Graphemes<'a>;
+    fn graphemes(&self, is_extended: bool) -> Graphemes<'_>;
 
     /// Returns an iterator over the grapheme clusters of `self` and their
     /// byte offsets. See `graphemes()` for more information.
@@ -111,7 +111,7 @@ pub trait UnicodeSegmentation {
     ///
     /// assert_eq!(&gr_inds[..], b);
     /// ```
-    fn grapheme_indices<'a>(&'a self, is_extended: bool) -> GraphemeIndices<'a>;
+    fn grapheme_indices(&self, is_extended: bool) -> GraphemeIndices<'_>;
 
     /// Returns an iterator over the words of `self`, separated on
     /// [UAX#29 word boundaries](http://www.unicode.org/reports/tr29/#Word_Boundaries).
@@ -133,7 +133,7 @@ pub trait UnicodeSegmentation {
     ///
     /// assert_eq!(&uw1[..], b);
     /// ```
-    fn unicode_words<'a>(&'a self) -> UnicodeWords<'a>;
+    fn unicode_words(&self) -> UnicodeWords<'_>;
 
     /// Returns an iterator over the words of `self`, separated on
     /// [UAX#29 word boundaries](http://www.unicode.org/reports/tr29/#Word_Boundaries), and their
@@ -157,7 +157,7 @@ pub trait UnicodeSegmentation {
     ///
     /// assert_eq!(&uwi1[..], b);
     /// ```
-    fn unicode_word_indices<'a>(&'a self) -> UnicodeWordIndices<'a>;
+    fn unicode_word_indices(&self) -> UnicodeWordIndices<'_>;
 
     /// Returns an iterator over substrings of `self` separated on
     /// [UAX#29 word boundaries](http://www.unicode.org/reports/tr29/#Word_Boundaries).
@@ -173,7 +173,7 @@ pub trait UnicodeSegmentation {
     ///
     /// assert_eq!(&swu1[..], b);
     /// ```
-    fn split_word_bounds<'a>(&'a self) -> UWordBounds<'a>;
+    fn split_word_bounds(&self) -> UWordBounds<'_>;
 
     /// Returns an iterator over substrings of `self`, split on UAX#29 word boundaries,
     /// and their offsets. See `split_word_bounds()` for more information.
@@ -188,7 +188,7 @@ pub trait UnicodeSegmentation {
     ///
     /// assert_eq!(&swi1[..], b);
     /// ```
-    fn split_word_bound_indices<'a>(&'a self) -> UWordBoundIndices<'a>;
+    fn split_word_bound_indices(&self) -> UWordBoundIndices<'_>;
 
     /// Returns an iterator over substrings of `self` separated on
     /// [UAX#29 sentence boundaries](http://www.unicode.org/reports/tr29/#Sentence_Boundaries).
@@ -210,7 +210,7 @@ pub trait UnicodeSegmentation {
     ///
     /// assert_eq!(&us1[..], b);
     /// ```
-    fn unicode_sentences<'a>(&'a self) -> UnicodeSentences<'a>;
+    fn unicode_sentences(&self) -> UnicodeSentences<'_>;
 
     /// Returns an iterator over substrings of `self` separated on
     /// [UAX#29 sentence boundaries](http://www.unicode.org/reports/tr29/#Sentence_Boundaries).
@@ -227,7 +227,7 @@ pub trait UnicodeSegmentation {
     ///
     /// assert_eq!(&ssb1[..], b);
     /// ```
-    fn split_sentence_bounds<'a>(&'a self) -> USentenceBounds<'a>;
+    fn split_sentence_bounds(&self) -> USentenceBounds<'_>;
 
     /// Returns an iterator over substrings of `self`, split on UAX#29 sentence boundaries,
     /// and their offsets. See `split_sentence_bounds()` for more information.
@@ -243,7 +243,7 @@ pub trait UnicodeSegmentation {
     ///
     /// assert_eq!(&ssi1[..], b);
     /// ```
-    fn split_sentence_bound_indices<'a>(&'a self) -> USentenceBoundIndices<'a>;
+    fn split_sentence_bound_indices(&self) -> USentenceBoundIndices<'_>;
 }
 
 impl UnicodeSegmentation for str {
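All nine signature changes are the same fix: the explicit <'a> on &'a self is unnecessary because lifetime elision already ties each returned iterator to self, and '_ simply makes the elided borrow visible. A sketch of the equivalence on a toy trait (the names are illustrative, not the crate's API):

    struct Pieces<'a> {
        rest: &'a str,
    }

    trait Segment {
        // Before: fn pieces<'a>(&'a self) -> Pieces<'a>;
        // After: the lifetime is elided; '_ marks that the result borrows self.
        fn pieces(&self) -> Pieces<'_>;
    }

    impl Segment for str {
        fn pieces(&self) -> Pieces<'_> {
            Pieces { rest: self }
        }
    }

    fn main() {
        let p = "Mr. Fox jumped.".pieces();
        println!("{}", p.rest);
    }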

src/sentence.rs

Lines changed: 7 additions & 9 deletions
@@ -264,9 +264,7 @@ mod fwd {
             }
 
             // SB2 https://unicode.org/reports/tr29/#SB2
-            if self.state.match1(StatePart::Sot) {
-                None
-            } else if self.state.match1(StatePart::Eot) {
+            if self.state.match1(StatePart::Sot) || self.state.match1(StatePart::Eot) {
                 None
             } else {
                 self.state = self.state.end();
@@ -275,7 +273,7 @@ mod fwd {
         }
     }
 
-    pub fn new_sentence_breaks<'a>(source: &'a str) -> SentenceBreaks<'a> {
+    pub fn new_sentence_breaks(source: &str) -> SentenceBreaks<'_> {
         SentenceBreaks {
             string: source,
             pos: 0,
@@ -329,28 +327,28 @@ pub struct USentenceBoundIndices<'a> {
 }
 
 #[inline]
-pub fn new_sentence_bounds<'a>(source: &'a str) -> USentenceBounds<'a> {
+pub fn new_sentence_bounds(source: &str) -> USentenceBounds<'_> {
     USentenceBounds {
         iter: fwd::new_sentence_breaks(source),
         sentence_start: None,
     }
 }
 
 #[inline]
-pub fn new_sentence_bound_indices<'a>(source: &'a str) -> USentenceBoundIndices<'a> {
+pub fn new_sentence_bound_indices(source: &str) -> USentenceBoundIndices<'_> {
     USentenceBoundIndices {
         start_offset: source.as_ptr() as usize,
         iter: new_sentence_bounds(source),
     }
 }
 
 #[inline]
-pub fn new_unicode_sentences<'b>(s: &'b str) -> UnicodeSentences<'b> {
+pub fn new_unicode_sentences(s: &str) -> UnicodeSentences<'_> {
     use super::UnicodeSegmentation;
     use crate::tables::util::is_alphanumeric;
 
     fn has_alphanumeric(s: &&str) -> bool {
-        s.chars().any(|c| is_alphanumeric(c))
+        s.chars().any(is_alphanumeric)
     }
     let has_alphanumeric: fn(&&str) -> bool = has_alphanumeric; // coerce to fn pointer
 
@@ -384,7 +382,7 @@ impl<'a> Iterator for USentenceBounds<'a> {
 
     #[inline]
     fn next(&mut self) -> Option<&'a str> {
-        if self.sentence_start == None {
+        if self.sentence_start.is_none() {
             if let Some(start_pos) = self.iter.next() {
                 self.sentence_start = Some(start_pos)
             } else {
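The sentence.rs fixes follow the same pattern: merging two branches that both return None, eliding lifetimes, passing is_alphanumeric as a function instead of wrapping it in a closure, and testing the Option with .is_none() rather than == None. A short sketch of the last two, using std's char::is_alphanumeric in place of the crate's table-driven helper:

    fn main() {
        let sentence_start: Option<usize> = None;
        // `.is_none()` instead of comparing with `== None`.
        assert!(sentence_start.is_none());

        // Pass the function itself: `any(char::is_alphanumeric)` rather than
        // `any(|c| c.is_alphanumeric())`.
        let has_alphanumeric = "Mr. Fox!".chars().any(char::is_alphanumeric);
        assert!(has_alphanumeric);
    }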
