#!/usr/bin/perl -w

# reblog-dupe.pl - mark as read those unread posts that duplicate posts
# which have already been read
#
# currently works without any care about users, so use is limited
# to single-user installations
#
# 07/08/2006 06:26:47 PM CEST Dobrica Pavlinusic <dpavlin@rot13.org>
10 |
use strict;
use DBI;
use Text::DeDuper;
use Data::Dump qw/dump/;

$| = 1;    # unbuffer STDOUT so the progress dots appear immediately

# Single MySQL database; credentials come from the DBI/driver defaults.
my $connect = "DBI:mysql:database=reblog";
my $dbh = DBI->connect($connect,"","") or die $DBI::errstr;

# Fetch items together with their per-user 'read' flag; the placeholder
# selects either read (1) or unread (0) items, so the same handle serves
# both passes of the script.
my $sql = qq{
	select
		id, content, feed_id
	from items
	join items_userdata on id=item_id
	where label = 'read' and value_numeric = ?
};

my $sth = $dbh->prepare($sql) or die $dbh->errstr();

# First pass: items already marked as read.
$sth->execute( 1 ) or die $sth->errstr();

# NOTE(review): DBI documents rows() as unreliable for SELECT statements;
# it happens to work under DBD::mysql's default store_result mode - confirm.
print "found ",$sth->rows," items to process...";
33 |
|
34 |
my $deduper = new Text::DeDuper(); |
35 |
|
36 |
# Normalize HTML content for duplicate detection: remove markup, collapse
# whitespace runs, and trim the edges. Returns the cleaned text, or nothing
# (undef in scalar context) when no visible text remains, so callers can
# skip the item with `|| next`.
sub strip {
	my $t = shift;
	# was: shift || return - that wrongly skipped the legitimate content "0"
	return unless defined $t && length $t;
	$t =~ s/<[^>]*>//gs;    # strip tags (naive: no comment/CDATA handling)
	$t =~ s/\s+/ /gs;       # collapse all whitespace runs to single spaces
	$t =~ s/^ //;           # trim the one possible leading space ...
	$t =~ s/ $//;           # ... and the one possible trailing space
	# was: return $t if ($t ne ' ') - only rejected the exactly-one-space case
	return unless length $t;
	return $t;
}
42 |
|
43 |
while (my $row = $sth->fetchrow_hashref() ) { |
44 |
|
45 |
my $t = strip( $row->{content} ) || next; |
46 |
|
47 |
$deduper->add_doc( $row->{id}, $t ); |
48 |
|
49 |
print "."; |
50 |
|
51 |
} |
52 |
|
53 |
print STDERR "\n"; |
54 |
|
55 |
# Pass 2: run the unread items against the index of read items; any unread
# item similar enough to an already-read one is recorded as a duplicate.
$sth->execute( 0 ) or die $sth->errstr();

print "comparing with ", $sth->rows," unread items...\n";

my @duplicates;    # item ids to mark as read below
my $feeds;         # feed_id => number of duplicates found in that feed

while (my $row = $sth->fetchrow_hashref() ) {

	# was: die "no id in now" - corrected the typo in the error message
	my $id = $row->{id} || die "no id in row";

	my $t = strip( $row->{content} ) || next;

	my @s = $deduper->find_similar($t);
	next if (! @s);

	# scalar(@s) is equivalent to the original $#s + 1, just clearer
	print $id, " has ", scalar(@s), " copies: ", join(",", @s), "\n";
	push @duplicates, $id;

	$feeds->{ $row->{feed_id} }++;
}
77 |
|
78 |
# Mark the collected duplicates as read so they disappear from unread view.

my $ids = join(",", @duplicates);
if (! $ids) {
	print "no duplicates found\n";
	exit;
}

print "found ", scalar(@duplicates), " duplicate items: $ids\n";

# Bind one placeholder per id instead of interpolating $ids into the SQL,
# and check the result of do() - the original ignored update failures.
my $placeholders = join(",", ("?") x @duplicates);
$sql = qq{
	update items_userdata
	set value_numeric = 1
	where label = 'read' and item_id in ($placeholders)
};

$dbh->do( $sql, undef, @duplicates ) or die $dbh->errstr;
95 |
|
96 |
# Decrement the cached unread counter ('usage_unread') of every feed we
# touched, by the number of items just marked as read in that feed.

$sql = qq{
	update feeds_userdata
	set value_numeric = value_numeric - ?
	where label = 'usage_unread' and feed_id = ?
};

$sth = $dbh->prepare($sql) or die $dbh->errstr();
while ( my ($feed_id, $nr_read) = each %$feeds ) {
	$nr_read or die "no messages marked as read";
	$sth->execute( $nr_read, $feed_id ) or die $sth->errstr();
	print "removed $nr_read messages from feed $feed_id\n";
}
110 |
|